author    David Woodhouse <David.Woodhouse@intel.com>    2010-02-26 19:04:15 +0000
committer David Woodhouse <David.Woodhouse@intel.com>    2010-02-26 19:06:24 +0000
commit    a7790532f5b7358c33a6b1834dc2b318de209f31 (patch)
tree      0ceb9e24b3f54cb5c8453fb5a218e2a94a0f1cce /drivers
parent    2764fb4244cc1bc08df3667924ca4a972e90ac70 (diff)
parent    60b341b778cc2929df16c0a504c91621b3c6a4ad (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
The SmartMedia FTL code depends on new kfifo bits from 2.6.33
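
For context on that dependency: the 2.6.33 kfifo rework replaces the old interface (where kfifo_alloc() returned a freshly allocated struct kfifo * and embedded a spinlock) with a caller-owned struct kfifo that is initialised in place and accessed through kfifo_in()/kfifo_out(), with serialisation left to the caller. The sketch below only illustrates that usage pattern, assuming the 2.6.33 <linux/kfifo.h> interface; the fifo name, element type and buffer size are illustrative and not taken from the SmartMedia FTL code itself.

#include <linux/kfifo.h>
#include <linux/types.h>
#include <linux/gfp.h>

static struct kfifo free_blocks;	/* caller owns the struct; kfifo_alloc() no longer returns a pointer */

static int kfifo_sketch(void)
{
	u16 block = 42, out;
	int ret;

	/* 2.6.33: int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask) */
	ret = kfifo_alloc(&free_blocks, 4096, GFP_KERNEL);
	if (ret)
		return ret;

	/* copy a record in ... */
	kfifo_in(&free_blocks, &block, sizeof(block));

	/* ... and back out; both calls return the number of bytes actually copied */
	if (kfifo_out(&free_blocks, &out, sizeof(out)) != sizeof(out))
		ret = -EIO;

	kfifo_free(&free_blocks);
	return ret;
}
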
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/accessibility/braille/braille_console.c1
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_pad.c40
-rw-r--r--drivers/acpi/acpica/acnamesp.h9
-rw-r--r--drivers/acpi/acpica/acobject.h6
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dswload.c64
-rw-r--r--drivers/acpi/acpica/evregion.c4
-rw-r--r--drivers/acpi/acpica/evrgnini.c15
-rw-r--r--drivers/acpi/acpica/evxface.c4
-rw-r--r--drivers/acpi/acpica/evxfevnt.c4
-rw-r--r--drivers/acpi/acpica/evxfregn.c4
-rw-r--r--drivers/acpi/acpica/exmutex.c18
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nseval.c18
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c93
-rw-r--r--drivers/acpi/acpica/nsrepair.c447
-rw-r--r--drivers/acpi/acpica/nsrepair2.c195
-rw-r--r--drivers/acpi/acpica/nsutils.c57
-rw-r--r--drivers/acpi/acpica/nsxfeval.c10
-rw-r--r--drivers/acpi/acpica/nsxfname.c10
-rw-r--r--drivers/acpi/acpica/nsxfobj.c14
-rw-r--r--drivers/acpi/acpica/psxface.c3
-rw-r--r--drivers/acpi/acpica/rsxface.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c27
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/blacklist.c14
-rw-r--r--drivers/acpi/bus.c165
-rw-r--r--drivers/acpi/button.c7
-rw-r--r--drivers/acpi/debug.c84
-rw-r--r--drivers/acpi/dock.c262
-rw-r--r--drivers/acpi/ec.c136
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/internal.h1
-rw-r--r--drivers/acpi/numa.c21
-rw-r--r--drivers/acpi/osl.c2
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pci_root.c78
-rw-r--r--drivers/acpi/power.c2
-rw-r--r--drivers/acpi/power_meter.c4
-rw-r--r--drivers/acpi/processor_core.c81
-rw-r--r--drivers/acpi/processor_idle.c111
-rw-r--r--drivers/acpi/processor_pdc.c209
-rw-r--r--drivers/acpi/processor_perflib.c56
-rw-r--r--drivers/acpi/processor_thermal.c3
-rw-r--r--drivers/acpi/sbs.c3
-rw-r--r--drivers/acpi/sbshc.c2
-rw-r--r--drivers/acpi/scan.c27
-rw-r--r--drivers/acpi/sleep.c29
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/thermal.c7
-rw-r--r--drivers/acpi/video.c51
-rw-r--r--drivers/ata/Kconfig1
-rw-r--r--drivers/ata/ahci.c15
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libata-core.c38
-rw-r--r--drivers/ata/libata-eh.c5
-rw-r--r--drivers/ata/libata-scsi.c4
-rw-r--r--drivers/ata/libata-sff.c5
-rw-r--r--drivers/ata/pata_bf54x.c19
-rw-r--r--drivers/ata/pata_cmd64x.c118
-rw-r--r--drivers/ata/pata_hpt3x2n.c64
-rw-r--r--drivers/ata/pata_octeon_cf.c2
-rw-r--r--drivers/ata/sata_mv.c144
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/base/bus.c2
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/base/core.c16
-rw-r--r--drivers/base/devtmpfs.c22
-rw-r--r--drivers/base/driver.c4
-rw-r--r--drivers/base/memory.c82
-rw-r--r--drivers/base/platform.c1
-rw-r--r--drivers/base/power/main.c145
-rw-r--r--drivers/base/power/runtime.c55
-rw-r--r--drivers/block/DAC960.c2
-rw-r--r--drivers/block/aoe/aoecmd.c17
-rw-r--r--drivers/block/cciss.c3
-rw-r--r--drivers/block/drbd/Kconfig2
-rw-r--r--drivers/block/drbd/drbd_int.h9
-rw-r--r--drivers/block/drbd/drbd_main.c8
-rw-r--r--drivers/block/drbd/drbd_nl.c19
-rw-r--r--drivers/block/drbd/drbd_proc.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c49
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/mg_disk.c2
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/bluetooth/Kconfig13
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/ath3k.c187
-rw-r--r--drivers/bluetooth/bluecard_cs.c4
-rw-r--r--drivers/bluetooth/bt3c_cs.c4
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c1
-rw-r--r--drivers/bluetooth/btuart_cs.c4
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/bluetooth/dtl1_cs.c4
-rw-r--r--drivers/char/Kconfig4
-rw-r--r--drivers/char/agp/amd64-agp.c20
-rw-r--r--drivers/char/agp/backend.c13
-rw-r--r--drivers/char/agp/hp-agp.c6
-rw-r--r--drivers/char/agp/intel-agp.c6
-rw-r--r--drivers/char/hw_random/core.c5
-rw-r--r--drivers/char/hw_random/virtio-rng.c6
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c118
-rw-r--r--drivers/char/keyboard.c10
-rw-r--r--drivers/char/mem.c30
-rw-r--r--drivers/char/nozomi.c50
-rw-r--r--drivers/char/nwflash.c1
-rw-r--r--drivers/char/random.c9
-rw-r--r--drivers/char/sonypi.c60
-rw-r--r--drivers/char/toshiba.c12
-rw-r--r--drivers/char/tpm/tpm_infineon.c79
-rw-r--r--drivers/char/tty_io.c2
-rw-r--r--drivers/char/uv_mmtimer.c18
-rw-r--r--drivers/clocksource/cs5535-clockevt.c2
-rw-r--r--drivers/connector/connector.c175
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c3
-rw-r--r--drivers/cpuidle/governors/menu.c12
-rw-r--r--drivers/crypto/padlock-sha.c23
-rw-r--r--drivers/dma/Kconfig18
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/coh901318.c1323
-rw-r--r--drivers/dma/coh901318_lli.c318
-rw-r--r--drivers/dma/coh901318_lli.h124
-rw-r--r--drivers/dma/dmaengine.c1
-rw-r--r--drivers/dma/dmatest.c18
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/dma/ioat/dma.c2
-rw-r--r--drivers/dma/ioat/dma.h18
-rw-r--r--drivers/dma/ioat/dma_v2.c69
-rw-r--r--drivers/dma/ioat/dma_v2.h2
-rw-r--r--drivers/dma/ioat/dma_v3.c60
-rw-r--r--drivers/dma/ioat/registers.h1
-rw-r--r--drivers/dma/iop-adma.c4
-rw-r--r--drivers/dma/ipu/ipu_idmac.c25
-rw-r--r--drivers/dma/ppc4xx/Makefile1
-rw-r--r--drivers/dma/ppc4xx/adma.c5027
-rw-r--r--drivers/dma/ppc4xx/adma.h195
-rw-r--r--drivers/dma/ppc4xx/dma.h223
-rw-r--r--drivers/dma/ppc4xx/xor.h110
-rw-r--r--drivers/dma/shdma.c354
-rw-r--r--drivers/dma/shdma.h23
-rw-r--r--drivers/edac/amd64_edac.c62
-rw-r--r--drivers/edac/edac_pci_sysfs.c2
-rw-r--r--drivers/edac/i5000_edac.c8
-rw-r--r--drivers/edac/mpc85xx_edac.c8
-rw-r--r--drivers/firewire/Kconfig44
-rw-r--r--drivers/firewire/core-card.c41
-rw-r--r--drivers/firewire/core-cdev.c39
-rw-r--r--drivers/firewire/core-transaction.c118
-rw-r--r--drivers/firewire/net.c53
-rw-r--r--drivers/firewire/ohci.c21
-rw-r--r--drivers/gpio/Kconfig9
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/adp5588-gpio.c266
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/ati_pcigart.c10
-rw-r--r--drivers/gpu/drm/drm_bufs.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c29
-rw-r--r--drivers/gpu/drm/drm_drv.c13
-rw-r--r--drivers/gpu/drm/drm_edid.c66
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c13
-rw-r--r--drivers/gpu/drm/drm_gem.c13
-rw-r--r--drivers/gpu/drm/drm_ioc32.c89
-rw-r--r--drivers/gpu/drm/drm_irq.c5
-rw-r--r--drivers/gpu/drm/drm_mm.c7
-rw-r--r--drivers/gpu/drm/drm_modes.c90
-rw-r--r--drivers/gpu/drm/drm_pci.c8
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c5
-rw-r--r--drivers/gpu/drm/i2c/ch7006_mode.c5
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c2
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c33
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c36
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c272
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h139
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c390
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c46
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c23
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c132
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h14
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c12
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c35
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h40
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c732
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c81
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c55
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c116
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c94
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c2
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c13
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/Makefile5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c826
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c264
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c108
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h105
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c79
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c62
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c161
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.h133
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c161
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c219
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c216
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c30
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c49
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c34
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c161
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c225
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c121
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c61
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c53
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c306
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c678
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c53
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c37
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c58
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c30
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c2
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c16
-rw-r--r--drivers/gpu/drm/radeon/Kconfig12
-rw-r--r--drivers/gpu/drm/radeon/Makefile7
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h801
-rw-r--r--drivers/gpu/drm/radeon/atom.c121
-rw-r--r--drivers/gpu/drm/radeon/atom.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h199
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c259
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c39
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c100
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h9
-rw-r--r--drivers/gpu/drm/radeon/r200.c17
-rw-r--r--drivers/gpu/drm/radeon/r300.c87
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c6
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r420.c48
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c214
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c266
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c28
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c9
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c87
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c506
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h74
-rw-r--r--drivers/gpu/drm/radeon/r600d.h25
-rw-r--r--drivers/gpu/drm/radeon/radeon.h65
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h30
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c166
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c79
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c76
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c87
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c70
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c213
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c82
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h65
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h20
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c107
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c23
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r2002
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r420795
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs60068
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5156
-rw-r--r--drivers/gpu/drm/radeon/rs400.c32
-rw-r--r--drivers/gpu/drm/radeon/rs600.c14
-rw-r--r--drivers/gpu/drm/radeon/rs690.c4
-rw-r--r--drivers/gpu/drm/radeon/rv515.c5
-rw-r--r--drivers/gpu/drm/radeon/rv770.c90
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c152
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c35
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig13
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile9
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h1793
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_escape.h89
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_overlay.h201
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h1346
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_types.h45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c252
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c783
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h521
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c716
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c737
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c538
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c213
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c87
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c286
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c880
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h102
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c516
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c625
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_reg.h57
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c1187
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c99
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/hid-apple.c7
-rw-r--r--drivers/hid/hid-core.c4
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-lg.h2
-rw-r--r--drivers/hid/hid-samsung.c25
-rw-r--r--drivers/hid/hid-wacom.c4
-rw-r--r--drivers/hwmon/Kconfig49
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/adt7462.c4
-rw-r--r--drivers/hwmon/amc6821.c1115
-rw-r--r--drivers/hwmon/asus_atk0110.c304
-rw-r--r--drivers/hwmon/coretemp.c16
-rw-r--r--drivers/hwmon/fschmd.c7
-rw-r--r--drivers/hwmon/k10temp.c223
-rw-r--r--drivers/hwmon/k8temp.c2
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c183
-rw-r--r--drivers/hwmon/lm78.c25
-rw-r--r--drivers/hwmon/sht15.c6
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/smsc47m1.c153
-rw-r--r--drivers/hwmon/via-cputemp.c356
-rw-r--r--drivers/hwmon/via686a.c2
-rw-r--r--drivers/hwmon/vt8231.c2
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c26
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c8
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c6
-rw-r--r--drivers/i2c/busses/i2c-imx.c26
-rw-r--r--drivers/i2c/busses/i2c-omap.c13
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c4
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c4
-rw-r--r--drivers/i2c/busses/i2c-piix4.c4
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c12
-rw-r--r--drivers/i2c/busses/i2c-viapro.c4
-rw-r--r--drivers/i2c/i2c-core.c7
-rw-r--r--drivers/idle/i7300_idle.c15
-rw-r--r--drivers/ieee1394/Kconfig59
-rw-r--r--drivers/infiniband/core/addr.c275
-rw-r--r--drivers/infiniband/core/cma.c129
-rw-r--r--drivers/infiniband/core/sa_query.c6
-rw-r--r--drivers/infiniband/core/ucma.c57
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c9
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c14
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h9
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c75
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c32
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c67
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c10
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c27
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c6
-rw-r--r--drivers/infiniband/hw/nes/Kconfig9
-rw-r--r--drivers/infiniband/hw/nes/nes.c5
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c201
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h7
-rw-r--r--drivers/infiniband/hw/nes/nes_context.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c40
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h29
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_user.h3
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c817
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c1
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c122
-rw-r--r--drivers/input/evdev.c3
-rw-r--r--drivers/input/ff-memless.c48
-rw-r--r--drivers/input/input-polldev.c6
-rw-r--r--drivers/input/input.c97
-rw-r--r--drivers/input/joystick/gf2k.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c29
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c29
-rw-r--r--drivers/input/joystick/iforce/iforce.h2
-rw-r--r--drivers/input/joystick/xpad.c4
-rw-r--r--drivers/input/keyboard/atkbd.c74
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c8
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c150
-rw-r--r--drivers/input/keyboard/matrix_keypad.c29
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c11
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c14
-rw-r--r--drivers/input/misc/winbond-cir.c2
-rw-r--r--drivers/input/misc/wistron_btns.c2
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/input/mouse/alps.c265
-rw-r--r--drivers/input/mouse/alps.h1
-rw-r--r--drivers/input/mouse/bcm5974.c44
-rw-r--r--drivers/input/mouse/hgpk.c1
-rw-r--r--drivers/input/mouse/lifebook.c8
-rw-r--r--drivers/input/mouse/psmouse-base.c83
-rw-r--r--drivers/input/mouse/sentelic.c6
-rw-r--r--drivers/input/mouse/synaptics.c10
-rw-r--r--drivers/input/mouse/synaptics.h1
-rw-r--r--drivers/input/serio/altera_ps2.c15
-rw-r--r--drivers/input/serio/ambakmi.c9
-rw-r--r--drivers/input/serio/at32psif.c3
-rw-r--r--drivers/input/serio/gscps2.c6
-rw-r--r--drivers/input/serio/hil_mlc.c8
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h15
-rw-r--r--drivers/input/serio/i8042.c96
-rw-r--r--drivers/input/serio/sa1111ps2.c10
-rw-r--r--drivers/input/serio/serio.c11
-rw-r--r--drivers/input/tablet/wacom.h11
-rw-r--r--drivers/input/tablet/wacom_sys.c231
-rw-r--r--drivers/input/tablet/wacom_wac.c368
-rw-r--r--drivers/input/tablet/wacom_wac.h29
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/ad7879.c197
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c258
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c8
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/leds/Kconfig33
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/leds-adp5520.c230
-rw-r--r--drivers/leds/leds-alix2.c115
-rw-r--r--drivers/leds/leds-cobalt-qube.c4
-rw-r--r--drivers/leds/leds-cobalt-raq.c2
-rw-r--r--drivers/leds/leds-lt3593.c217
-rw-r--r--drivers/leds/leds-pwm.c5
-rw-r--r--drivers/leds/leds-regulator.c242
-rw-r--r--drivers/leds/leds-ss4200.c556
-rw-r--r--drivers/lguest/segments.c4
-rw-r--r--drivers/macintosh/smu.c2
-rw-r--r--drivers/macintosh/therm_pm72.c2
-rw-r--r--drivers/macintosh/therm_windtunnel.c2
-rw-r--r--drivers/md/dm-log-userspace-transfer.c10
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-region-hash.c5
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-sysfs.c8
-rw-r--r--drivers/md/dm-table.c20
-rw-r--r--drivers/md/dm.c21
-rw-r--r--drivers/md/md.c56
-rw-r--r--drivers/md/raid5.c14
-rw-r--r--drivers/media/IR/ir-keytable.c2
-rw-r--r--drivers/media/common/saa7146_video.c4
-rw-r--r--drivers/media/common/tuners/tda8290.c12
-rw-r--r--drivers/media/dvb/Kconfig4
-rw-r--r--drivers/media/dvb/Makefile14
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c20
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig4
-rw-r--r--drivers/media/dvb/firewire/firedtv-fw.c12
-rw-r--r--drivers/media/dvb/frontends/Kconfig19
-rw-r--r--drivers/media/dvb/frontends/Makefile2
-rw-r--r--drivers/media/dvb/frontends/dib8000.h2
-rw-r--r--drivers/media/dvb/frontends/l64781.c4
-rw-r--r--drivers/media/dvb/frontends/lgdt3305.h6
-rw-r--r--drivers/media/dvb/frontends/mb86a16.c1878
-rw-r--r--drivers/media/dvb/frontends/mb86a16.h52
-rw-r--r--drivers/media/dvb/frontends/mb86a16_priv.h151
-rw-r--r--drivers/media/dvb/frontends/tda10021.c4
-rw-r--r--drivers/media/dvb/frontends/tda665x.c257
-rw-r--r--drivers/media/dvb/frontends/tda665x.h52
-rw-r--r--drivers/media/dvb/mantis/Kconfig32
-rw-r--r--drivers/media/dvb/mantis/Makefile28
-rw-r--r--drivers/media/dvb/mantis/hopper_cards.c275
-rw-r--r--drivers/media/dvb/mantis/hopper_vp3028.c88
-rw-r--r--drivers/media/dvb/mantis/hopper_vp3028.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_ca.c207
-rw-r--r--drivers/media/dvb/mantis/mantis_ca.h27
-rw-r--r--drivers/media/dvb/mantis/mantis_cards.c305
-rw-r--r--drivers/media/dvb/mantis/mantis_common.h179
-rw-r--r--drivers/media/dvb/mantis/mantis_core.c238
-rw-r--r--drivers/media/dvb/mantis/mantis_core.h57
-rw-r--r--drivers/media/dvb/mantis/mantis_dma.c256
-rw-r--r--drivers/media/dvb/mantis/mantis_dma.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_dvb.c296
-rw-r--r--drivers/media/dvb/mantis/mantis_dvb.h35
-rw-r--r--drivers/media/dvb/mantis/mantis_evm.c117
-rw-r--r--drivers/media/dvb/mantis/mantis_hif.c240
-rw-r--r--drivers/media/dvb/mantis/mantis_hif.h29
-rw-r--r--drivers/media/dvb/mantis/mantis_i2c.c267
-rw-r--r--drivers/media/dvb/mantis/mantis_i2c.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_input.c148
-rw-r--r--drivers/media/dvb/mantis/mantis_ioc.c130
-rw-r--r--drivers/media/dvb/mantis/mantis_ioc.h51
-rw-r--r--drivers/media/dvb/mantis/mantis_link.h83
-rw-r--r--drivers/media/dvb/mantis/mantis_pci.c177
-rw-r--r--drivers/media/dvb/mantis/mantis_pci.h27
-rw-r--r--drivers/media/dvb/mantis/mantis_pcmcia.c120
-rw-r--r--drivers/media/dvb/mantis/mantis_reg.h197
-rw-r--r--drivers/media/dvb/mantis/mantis_uart.c186
-rw-r--r--drivers/media/dvb/mantis/mantis_uart.h58
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1033.c212
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1033.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1034.c119
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1034.h33
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.c358
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.h33
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2033.c187
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2033.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2040.c186
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2040.h32
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3028.c38
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3028.h33
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3030.c105
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3030.h30
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c1
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c8
-rw-r--r--drivers/media/video/bt8xx/bttvp.h1
-rw-r--r--drivers/media/video/cx23885/cx23888-ir.c44
-rw-r--r--drivers/media/video/gspca/gspca.c2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c6
-rw-r--r--drivers/media/video/gspca/ov534.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h1
-rw-r--r--drivers/media/video/gspca/sunplus.c26
-rw-r--r--drivers/media/video/gspca/vc032x.c4
-rw-r--r--drivers/media/video/meye.c60
-rw-r--r--drivers/media/video/meye.h4
-rw-r--r--drivers/media/video/mt9t112.c2
-rw-r--r--drivers/media/video/mx1_camera.c2
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c2
-rw-r--r--drivers/media/video/rj54n1cb0c.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c13
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c8
-rw-r--r--drivers/media/video/saa7134/saa7134-ts.c13
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c2
-rw-r--r--drivers/media/video/uvc/uvc_queue.c13
-rw-r--r--drivers/media/video/uvc/uvc_video.c45
-rw-r--r--drivers/media/video/uvc/uvcvideo.h5
-rw-r--r--drivers/message/fusion/mptbase.c8
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/i2o/i2o_config.c13
-rw-r--r--drivers/mfd/Makefile8
-rw-r--r--drivers/mfd/asic3.c40
-rw-r--r--drivers/mfd/mc13783-core.c4
-rw-r--r--drivers/mfd/t7l66xb.c55
-rw-r--r--drivers/mfd/tc6387xb.c119
-rw-r--r--drivers/mfd/tc6393xb.c56
-rw-r--r--drivers/mfd/tmio_core.c52
-rw-r--r--drivers/mfd/twl4030-irq.c4
-rw-r--r--drivers/mfd/wm8350-core.c3
-rw-r--r--drivers/mfd/wm8350-irq.c4
-rw-r--r--drivers/misc/Kconfig14
-rw-r--r--drivers/misc/enclosure.c1
-rw-r--r--drivers/mmc/card/block.c8
-rw-r--r--drivers/mmc/card/mmc_test.c9
-rw-r--r--drivers/mmc/card/queue.c18
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/core/sdio_bus.c7
-rw-r--r--drivers/mmc/host/Kconfig37
-rw-r--r--drivers/mmc/host/Makefile6
-rw-r--r--drivers/mmc/host/sdhci-of-core.c (renamed from drivers/mmc/host/sdhci-of.c)143
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c143
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c65
-rw-r--r--drivers/mmc/host/sdhci-of.h42
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/tmio_mmc.c59
-rw-r--r--drivers/mmc/host/tmio_mmc.h46
-rw-r--r--drivers/mtd/maps/Kconfig17
-rw-r--r--drivers/mtd/maps/pismo.c320
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c13
-rw-r--r--drivers/mtd/nand/Kconfig8
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/excite_nandflash.c248
-rw-r--r--drivers/mtd/tests/mtd_readtest.c6
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c7
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c6
-rw-r--r--drivers/mtd/ubi/cdev.c1
-rw-r--r--drivers/mtd/ubi/kapi.c15
-rw-r--r--drivers/mtd/ubi/upd.c1
-rw-r--r--drivers/mtd/ubi/vtbl.c1
-rw-r--r--drivers/net/3c507.c4
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/arm/Kconfig1
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atlx/atl2.c7
-rw-r--r--drivers/net/ax88796.c2
-rw-r--r--drivers/net/bcm63xx_enet.c12
-rw-r--r--drivers/net/benet/be.h6
-rw-r--r--drivers/net/benet/be_cmds.c39
-rw-r--r--drivers/net/benet/be_cmds.h19
-rw-r--r--drivers/net/benet/be_ethtool.c77
-rw-r--r--drivers/net/benet/be_main.c35
-rw-r--r--drivers/net/bfin_mac.c5
-rw-r--r--drivers/net/bnx2.c12
-rw-r--r--drivers/net/bnx2x_main.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c171
-rw-r--r--drivers/net/bonding/bond_alb.c38
-rw-r--r--drivers/net/bonding/bond_ipv6.c12
-rw-r--r--drivers/net/bonding/bond_main.c609
-rw-r--r--drivers/net/bonding/bond_sysfs.c327
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/mcp251x.c15
-rw-r--r--drivers/net/can/mscan/mscan.c3
-rw-r--r--drivers/net/can/sja1000/sja1000.c18
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/cs89x0.c3
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/davinci_emac.c6
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000/e1000.h2
-rw-r--r--drivers/net/e1000/e1000_main.c62
-rw-r--r--drivers/net/e1000e/82571.c8
-rw-r--r--drivers/net/e1000e/e1000.h2
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c78
-rw-r--r--drivers/net/e1000e/lib.c54
-rw-r--r--drivers/net/e1000e/netdev.c87
-rw-r--r--drivers/net/e1000e/phy.c85
-rw-r--r--drivers/net/fsl_pq_mdio.c30
-rw-r--r--drivers/net/gianfar.c63
-rw-r--r--drivers/net/gianfar.h18
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/ibmlana.c3
-rw-r--r--drivers/net/igb/e1000_82575.c4
-rw-r--r--drivers/net/igb/e1000_phy.c9
-rw-r--r--drivers/net/igb/igb_ethtool.c2
-rw-r--r--drivers/net/igb/igb_main.c33
-rw-r--r--drivers/net/igbvf/netdev.c18
-rw-r--r--drivers/net/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ixgbe/Makefile2
-rw-r--r--drivers/net/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c62
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c18
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c65
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h4
-rw-r--r--drivers/net/ks8851_mll.c4
-rw-r--r--drivers/net/ll_temac_main.c2
-rw-r--r--drivers/net/mlx4/fw.c3
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mlx4/sense.c2
-rw-r--r--drivers/net/mv643xx_eth.c6
-rw-r--r--drivers/net/netxen/netxen_nic.h4
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c193
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c6
-rw-r--r--drivers/net/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/netxen/netxen_nic_main.c71
-rw-r--r--drivers/net/niu.c2
-rw-r--r--drivers/net/octeon/Kconfig10
-rw-r--r--drivers/net/octeon/Makefile2
-rw-r--r--drivers/net/octeon/octeon_mgmt.c1176
-rw-r--r--drivers/net/pcmcia/3c574_cs.c4
-rw-r--r--drivers/net/pcmcia/3c589_cs.c4
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c1
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c4
-rw-r--r--drivers/net/pcnet32.c3
-rw-r--r--drivers/net/phy/Kconfig11
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/broadcom.c7
-rw-r--r--drivers/net/phy/mdio-octeon.c180
-rw-r--r--drivers/net/phy/mdio_bus.c72
-rw-r--r--drivers/net/phy/phy.c4
-rw-r--r--drivers/net/phy/phy_device.c31
-rw-r--r--drivers/net/qlge/qlge_main.c15
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sfc/efx.c7
-rw-r--r--drivers/net/sfc/falcon.c1
-rw-r--r--drivers/net/sfc/falcon_boards.c45
-rw-r--r--drivers/net/sfc/falcon_xmac.c38
-rw-r--r--drivers/net/sfc/mcdi.c14
-rw-r--r--drivers/net/sfc/mcdi.h1
-rw-r--r--drivers/net/sfc/mcdi_pcol.h4
-rw-r--r--drivers/net/sfc/mcdi_phy.c93
-rw-r--r--drivers/net/sfc/mtd.c5
-rw-r--r--drivers/net/sfc/net_driver.h1
-rw-r--r--drivers/net/sfc/nic.c2
-rw-r--r--drivers/net/sfc/qt202x_phy.c246
-rw-r--r--drivers/net/sfc/selftest.c10
-rw-r--r--drivers/net/sfc/siena.c1
-rw-r--r--drivers/net/sfc/tenxpress.c138
-rw-r--r--drivers/net/sfc/tx.c4
-rw-r--r--drivers/net/sh_eth.c9
-rw-r--r--drivers/net/sky2.c43
-rw-r--r--drivers/net/starfire.c5
-rw-r--r--drivers/net/tc35815.c1
-rw-r--r--drivers/net/tg3.c27
-rw-r--r--drivers/net/tg3.h3
-rw-r--r--drivers/net/tulip/Kconfig4
-rw-r--r--drivers/net/tulip/dmfe.c21
-rw-r--r--drivers/net/tulip/tulip_core.c33
-rw-r--r--drivers/net/tun.c6
-rw-r--r--drivers/net/ucc_geth.c48
-rw-r--r--drivers/net/ucc_geth.h13
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/hso.c105
-rw-r--r--drivers/net/usb/rtl8150.c6
-rw-r--r--drivers/net/via-rhine.c41
-rw-r--r--drivers/net/via-velocity.c49
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/vxge/vxge-main.c8
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h2
-rw-r--r--drivers/net/wimax/i2400m/usb.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c18
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c46
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/dma.c197
-rw-r--r--drivers/net/wireless/b43/dma.h7
-rw-r--r--drivers/net/wireless/b43/main.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c61
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c68
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c4
-rw-r--r--drivers/net/wireless/libertas/cmd.c4
-rw-r--r--drivers/net/wireless/libertas/dev.h4
-rw-r--r--drivers/net/wireless/libertas/main.c21
-rw-r--r--drivers/net/wireless/libertas/mesh.c4
-rw-r--r--drivers/net/wireless/libertas/scan.c22
-rw-r--r--drivers/net/wireless/libertas/wext.c2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c1
-rw-r--r--drivers/net/wireless/mwl8k.c4
-rw-r--r--drivers/net/wireless/orinoco/wext.c6
-rw-r--r--drivers/net/wireless/p54/p54pci.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c19
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c140
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c16
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c6
-rw-r--r--drivers/pci/hotplug/shpchp.h2
-rw-r--r--drivers/pci/intel-iommu.c6
-rw-r--r--drivers/pci/intr_remapping.c2
-rw-r--r--drivers/pci/pci-acpi.c10
-rw-r--r--drivers/pci/pci-sysfs.c6
-rw-r--r--drivers/pci/pci.c34
-rw-r--r--drivers/pci/pci.h8
-rw-r--r--drivers/pci/pcie/aer/Kconfig.debug4
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c34
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c6
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c4
-rw-r--r--drivers/pci/pcie/aspm.c4
-rw-r--r--drivers/pci/pcie/portdrv_core.c16
-rw-r--r--drivers/pci/pcie/portdrv_pci.c19
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/quirks.c91
-rw-r--r--drivers/pci/search.c6
-rw-r--r--drivers/pcmcia/cardbus.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c6
-rw-r--r--drivers/platform/x86/Kconfig70
-rw-r--r--drivers/platform/x86/Makefile3
-rw-r--r--drivers/platform/x86/acer-wmi.c5
-rw-r--r--drivers/platform/x86/acerhdf.c117
-rw-r--r--drivers/platform/x86/asus-laptop.c25
-rw-r--r--drivers/platform/x86/asus_acpi.c340
-rw-r--r--drivers/platform/x86/classmate-laptop.c609
-rw-r--r--drivers/platform/x86/compal-laptop.c1
-rw-r--r--drivers/platform/x86/dell-laptop.c86
-rw-r--r--drivers/platform/x86/dell-wmi.c171
-rw-r--r--drivers/platform/x86/eeepc-laptop.c1491
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c38
-rw-r--r--drivers/platform/x86/hp-wmi.c157
-rw-r--r--drivers/platform/x86/msi-wmi.c288
-rw-r--r--drivers/platform/x86/sony-laptop.c153
-rw-r--r--drivers/platform/x86/tc1100-wmi.c115
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c1211
-rw-r--r--drivers/platform/x86/toshiba_acpi.c259
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c144
-rw-r--r--drivers/platform/x86/wmi.c215
-rw-r--r--drivers/pnp/pnpacpi/core.c20
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c9
-rw-r--r--drivers/power/pmu_battery.c2
-rw-r--r--drivers/power/wm97xx_battery.c10
-rw-r--r--drivers/regulator/88pm8607.c685
-rw-r--r--drivers/regulator/Kconfig13
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/ab3100.c33
-rw-r--r--drivers/regulator/core.c250
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/regulator/lp3971.c8
-rw-r--r--drivers/regulator/max8660.c510
-rw-r--r--drivers/regulator/mc13783-regulator.c245
-rw-r--r--drivers/regulator/mc13783.c410
-rw-r--r--drivers/regulator/twl-regulator.c147
-rw-r--r--drivers/regulator/wm831x-dcdc.c207
-rw-r--r--drivers/regulator/wm831x-ldo.c2
-rw-r--r--drivers/regulator/wm8350-regulator.c4
-rw-r--r--drivers/rtc/rtc-cmos.c12
-rw-r--r--drivers/rtc/rtc-ds1305.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c2
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/rtc/rtc-fm3130.c6
-rw-r--r--drivers/s390/block/dasd.c8
-rw-r--r--drivers/s390/block/dasd_alias.c2
-rw-r--r--drivers/s390/block/dasd_diag.c42
-rw-r--r--drivers/s390/block/dasd_eckd.c16
-rw-r--r--drivers/s390/block/dasd_ioctl.c21
-rw-r--r--drivers/s390/block/dasd_proc.c7
-rw-r--r--drivers/s390/char/con3215.c17
-rw-r--r--drivers/s390/char/fs3270.c19
-rw-r--r--drivers/s390/char/sclp_vt220.c4
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c3
-rw-r--r--drivers/s390/char/tape_block.c40
-rw-r--r--drivers/s390/char/tape_char.c21
-rw-r--r--drivers/s390/char/tape_class.c4
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/tape_proc.c3
-rw-r--r--drivers/s390/char/tape_std.c3
-rw-r--r--drivers/s390/char/vmcp.c12
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/ccwreq.c3
-rw-r--r--drivers/s390/cio/chsc_sch.c23
-rw-r--r--drivers/s390/cio/device.c1
-rw-r--r--drivers/s390/cio/device_pgid.c29
-rw-r--r--drivers/s390/cio/fcx.c4
-rw-r--r--drivers/s390/cio/io_sch.h1
-rw-r--r--drivers/s390/cio/qdio.h36
-rw-r--r--drivers/s390/cio/qdio_debug.c114
-rw-r--r--drivers/s390/cio/qdio_main.c76
-rw-r--r--drivers/s390/cio/qdio_perf.c147
-rw-r--r--drivers/s390/cio/qdio_perf.h61
-rw-r--r--drivers/s390/cio/qdio_setup.c10
-rw-r--r--drivers/s390/cio/qdio_thinint.c8
-rw-r--r--drivers/s390/crypto/zcrypt_api.c4
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c2
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c9
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h5
-rw-r--r--drivers/s390/scsi/zfcp_fc.c93
-rw-r--r--drivers/s390/scsi/zfcp_fc.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c19
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c1
-rw-r--r--drivers/sbus/char/bbc_envctrl.c64
-rw-r--r--drivers/scsi/3w-9xxx.c11
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/aachba.c52
-rw-r--r--drivers/scsi/aacraid/aacraid.h5
-rw-r--r--drivers/scsi/aacraid/commctrl.c28
-rw-r--r--drivers/scsi/aacraid/comminit.c6
-rw-r--r--drivers/scsi/aacraid/commsup.c72
-rw-r--r--drivers/scsi/aacraid/dpcsup.c36
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c53
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c12
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c58
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c155
-rw-r--r--drivers/scsi/fcoe/libfcoe.c2
-rw-r--r--drivers/scsi/hpsa.c3531
-rw-r--r--drivers/scsi/hpsa.h273
-rw-r--r--drivers/scsi/hpsa_cmd.h326
-rw-r--r--drivers/scsi/ipr.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c68
-rw-r--r--drivers/scsi/libfc/fc_lport.c10
-rw-r--r--drivers/scsi/libfc/fc_rport.c3
-rw-r--r--drivers/scsi/libiscsi.c22
-rw-r--r--drivers/scsi/libiscsi_tcp.c36
-rw-r--r--drivers/scsi/libsrp.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c4
-rw-r--r--[-rwxr-xr-x]drivers/scsi/lpfc/lpfc_hbadisc.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c48
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c34
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c5
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/osd/osd_initiator.c88
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.h10
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c149
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h3
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c19
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c57
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h32
-rw-r--r--drivers/scsi/pmcraid.c42
-rw-r--r--drivers/scsi/pmcraid.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c32
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c46
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c152
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c64
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c177
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_transport_fc.c22
-rw-r--r--drivers/scsi/sd.c107
-rw-r--r--drivers/scsi/sd.h2
-rw-r--r--drivers/scsi/st.c23
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/stex.c5
-rw-r--r--drivers/serial/21285.c4
-rw-r--r--drivers/serial/8250.c7
-rw-r--r--drivers/serial/8250_pnp.c12
-rw-r--r--drivers/serial/imx.c2
-rw-r--r--drivers/serial/pmac_zilog.c11
-rw-r--r--drivers/serial/serial_core.c105
-rw-r--r--drivers/serial/serial_cs.c19
-rw-r--r--drivers/serial/sh-sci.c87
-rw-r--r--drivers/serial/sh-sci.h118
-rw-r--r--drivers/serial/uartlite.c2
-rw-r--r--drivers/sh/intc.c2
-rw-r--r--drivers/sh/pfc.c2
-rw-r--r--drivers/spi/Kconfig28
-rw-r--r--drivers/spi/Makefile10
-rw-r--r--drivers/spi/atmel_spi.c6
-rw-r--r--drivers/spi/dw_spi.c944
-rw-r--r--drivers/spi/dw_spi_pci.c169
-rw-r--r--drivers/spi/spi_bfin5xx.c2
-rw-r--r--drivers/spi/spi_mpc8xxx.c2
-rw-r--r--drivers/spi/spi_s3c24xx.c244
-rw-r--r--drivers/spi/spi_s3c24xx_fiq.S116
-rw-r--r--drivers/spi/spi_s3c24xx_fiq.h26
-rw-r--r--drivers/spi/spi_s3c64xx.c1196
-rw-r--r--drivers/spi/spi_sh_msiof.c15
-rw-r--r--drivers/spi/spi_sh_sci.c2
-rw-r--r--drivers/spi/spi_txx9.c6
-rw-r--r--drivers/spi/spidev.c18
-rw-r--r--drivers/ssb/main.c3
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/asus_oled/asus_oled.c12
-rw-r--r--drivers/staging/batman-adv/Kconfig1
-rw-r--r--drivers/staging/batman-adv/send.c4
-rw-r--r--drivers/staging/comedi/comedi.h2
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c7
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c5
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-video.c4
-rw-r--r--drivers/staging/dst/Kconfig67
-rw-r--r--drivers/staging/dst/Makefile3
-rw-r--r--drivers/staging/dst/crypto.c733
-rw-r--r--drivers/staging/dst/dcore.c1004
-rw-r--r--drivers/staging/dst/export.c660
-rw-r--r--drivers/staging/dst/state.c844
-rw-r--r--drivers/staging/dst/thread_pool.c348
-rw-r--r--drivers/staging/dst/trans.c337
-rw-r--r--drivers/staging/et131x/et1310_address_map.h18
-rw-r--r--drivers/staging/et131x/et1310_rx.c6
-rw-r--r--drivers/staging/hv/Hv.c50
-rw-r--r--drivers/staging/hv/Hv.h6
-rw-r--r--drivers/staging/hv/Vmbus.c12
-rw-r--r--drivers/staging/iio/ring_sw.h1
-rw-r--r--drivers/staging/octeon/Kconfig3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c204
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h2
-rw-r--r--drivers/staging/octeon/ethernet-proc.c112
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c52
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c2
-rw-r--r--drivers/staging/octeon/ethernet-xaui.c2
-rw-r--r--drivers/staging/octeon/ethernet.c23
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h6
-rw-r--r--drivers/staging/panel/Kconfig2
-rw-r--r--drivers/staging/panel/panel.c4
-rw-r--r--drivers/staging/pohmelfs/dir.c2
-rw-r--r--drivers/staging/ramzswap/TODO1
-rw-r--r--drivers/staging/ramzswap/ramzswap_drv.c28
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h10
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c14
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c10
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211.h12
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211.h12
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c10
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c24
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c6
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c14
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211.h8
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c6
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c16
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c10
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c6
-rw-r--r--drivers/staging/sm7xx/Kconfig15
-rw-r--r--drivers/staging/sm7xx/Makefile3
-rw-r--r--drivers/staging/sm7xx/TODO10
-rw-r--r--drivers/staging/sm7xx/smtc2d.c979
-rw-r--r--drivers/staging/sm7xx/smtc2d.h530
-rw-r--r--drivers/staging/sm7xx/smtcfb.c1253
-rw-r--r--drivers/staging/sm7xx/smtcfb.h793
-rw-r--r--drivers/staging/vt6655/Kconfig2
-rw-r--r--drivers/staging/vt6656/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c2
-rw-r--r--drivers/thermal/thermal_sys.c19
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/devio.c48
-rw-r--r--drivers/usb/core/hcd.c22
-rw-r--r--drivers/usb/core/hub.c76
-rw-r--r--drivers/usb/core/message.c8
-rw-r--r--drivers/usb/core/sysfs.c12
-rw-r--r--drivers/usb/core/usb.c6
-rw-r--r--drivers/usb/early/ehci-dbgp.c2
-rw-r--r--drivers/usb/gadget/audio.c1
-rw-r--r--drivers/usb/gadget/f_audio.c15
-rw-r--r--drivers/usb/gadget/f_eem.c3
-rw-r--r--drivers/usb/gadget/multi.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c1
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c1
-rw-r--r--drivers/usb/gadget/u_audio.c12
-rw-r--r--drivers/usb/gadget/u_audio.h2
-rw-r--r--drivers/usb/host/ehci-hcd.c5
-rw-r--r--drivers/usb/host/ehci-hub.c33
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/ehci-q.c11
-rw-r--r--drivers/usb/host/fhci-hcd.c3
-rw-r--r--drivers/usb/host/fhci-sched.c10
-rw-r--r--drivers/usb/host/fhci-tds.c41
-rw-r--r--drivers/usb/host/fhci.h16
-rw-r--r--drivers/usb/host/isp1362-hcd.c25
-rw-r--r--drivers/usb/host/isp1760-hcd.c6
-rw-r--r--drivers/usb/host/r8a66597-hcd.c58
-rw-r--r--drivers/usb/host/uhci-hcd.c15
-rw-r--r--drivers/usb/host/uhci-hub.c2
-rw-r--r--drivers/usb/misc/appledisplay.c5
-rw-r--r--drivers/usb/misc/emi62.c2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/musb/blackfin.c134
-rw-r--r--drivers/usb/musb/blackfin.h2
-rw-r--r--drivers/usb/musb/cppi_dma.c6
-rw-r--r--drivers/usb/musb/davinci.c2
-rw-r--r--drivers/usb/musb/musb_core.c14
-rw-r--r--drivers/usb/musb/musb_gadget.c13
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c14
-rw-r--r--drivers/usb/otg/Kconfig1
-rw-r--r--drivers/usb/otg/isp1301_omap.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c26
-rw-r--r--drivers/usb/serial/ftdi_sio.h959
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1004
-rw-r--r--drivers/usb/serial/generic.c22
-rw-r--r--drivers/usb/serial/mos7840.c7
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/usb-serial.c7
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/storage/usb.c3
-rw-r--r--drivers/video/atafb.c3
-rw-r--r--drivers/video/backlight/adp5520_bl.c2
-rw-r--r--drivers/video/backlight/adx_bl.c2
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c2
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/cr_bllcd.c4
-rw-r--r--drivers/video/backlight/da903x_bl.c2
-rw-r--r--drivers/video/backlight/generic_bl.c2
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/jornada720_bl.c2
-rw-r--r--drivers/video/backlight/kb3886_bl.c2
-rw-r--r--drivers/video/backlight/locomolcd.c2
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c20
-rw-r--r--drivers/video/backlight/omap1_bl.c4
-rw-r--r--drivers/video/backlight/progear_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c11
-rw-r--r--drivers/video/backlight/tosa_bl.c2
-rw-r--r--drivers/video/backlight/wm831x_bl.c2
-rw-r--r--drivers/video/cyber2000fb.c12
-rw-r--r--drivers/video/efifb.c11
-rw-r--r--drivers/video/imxfb.c6
-rw-r--r--drivers/video/mx3fb.c12
-rw-r--r--drivers/video/omap/dispc.c18
-rw-r--r--drivers/video/omap/lcd_htcherald.c4
-rw-r--r--drivers/video/omap/lcd_ldp.c4
-rw-r--r--drivers/video/omap/lcd_omap2evm.c10
-rw-r--r--drivers/video/omap/lcd_omap3beagle.c2
-rw-r--r--drivers/video/omap/lcd_omap3evm.c10
-rw-r--r--drivers/video/omap/lcd_overo.c2
-rw-r--r--drivers/video/omap/omapfb.h2
-rw-r--r--drivers/video/omap/omapfb_main.c25
-rw-r--r--drivers/video/omap/rfbi.c4
-rw-r--r--drivers/video/omap2/dss/Kconfig7
-rw-r--r--drivers/video/omap2/dss/core.c10
-rw-r--r--drivers/video/omap2/dss/dispc.c74
-rw-r--r--drivers/video/omap2/dss/dsi.c159
-rw-r--r--drivers/video/omap2/dss/dss.c6
-rw-r--r--drivers/video/omap2/dss/dss.h14
-rw-r--r--drivers/video/omap2/dss/rfbi.c30
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c6
-rw-r--r--drivers/video/pxafb.c4
-rw-r--r--drivers/video/s3c-fb.c14
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c10
-rw-r--r--drivers/video/via/accel.c5
-rw-r--r--drivers/video/via/viafbdev.c15
-rw-r--r--drivers/virtio/virtio_balloon.c6
-rw-r--r--drivers/watchdog/Kconfig16
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/adx_wdt.c4
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c2
-rw-r--r--drivers/watchdog/bfin_wdt.c13
-rw-r--r--drivers/watchdog/davinci_wdt.c2
-rw-r--r--drivers/watchdog/geodewdt.c40
-rw-r--r--drivers/watchdog/iTCO_wdt.c49
-rw-r--r--drivers/watchdog/ixp2000_wdt.c1
-rw-r--r--drivers/watchdog/mpcore_wdt.c2
-rw-r--r--drivers/watchdog/mv64x60_wdt.c2
-rw-r--r--drivers/watchdog/omap_wdt.c9
-rw-r--r--drivers/watchdog/pnx4008_wdt.c2
-rw-r--r--drivers/watchdog/rm9k_wdt.c419
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c11
-rw-r--r--drivers/watchdog/txx9wdt.c6
-rw-r--r--drivers/xen/manage.c8
1232 files changed, 71594 insertions, 20321 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 8a07363..368ae6d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -28,7 +28,7 @@ source "drivers/md/Kconfig"
source "drivers/message/fusion/Kconfig"
-source "drivers/ieee1394/Kconfig"
+source "drivers/firewire/Kconfig"
source "drivers/message/i2o/Kconfig"
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index d672cfe..cb423f5 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -21,7 +21,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index c7b10b4..66cc3f3 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -32,6 +32,7 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o
#
acpi-y += bus.o glue.o
acpi-y += scan.o
+acpi-y += processor_pdc.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 0d2cdb8..7e52295 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -100,7 +100,8 @@ static void round_robin_cpu(unsigned int tsk_index)
struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
cpumask_var_t tmp;
int cpu;
- unsigned long min_weight = -1, preferred_cpu;
+ unsigned long min_weight = -1;
+ unsigned long uninitialized_var(preferred_cpu);
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return;
@@ -207,7 +208,7 @@ static int power_saving_thread(void *data)
* the mechanism only works when all CPUs have RT task running,
* as if one CPU hasn't RT task, RT task from other CPUs will
* borrow CPU time from this CPU and cause RT task use > 95%
- * CPU time. To make 'avoid staration' work, takes a nap here.
+ * CPU time. To make 'avoid starvation' work, takes a nap here.
*/
if (do_sleep)
schedule_timeout_killable(HZ * idle_pct / 100);
@@ -221,14 +222,18 @@ static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
+ int rc = -ENOMEM;
+
ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
(void *)(unsigned long)ps_tsk_num,
"power_saving/%d", ps_tsk_num);
- if (ps_tsks[ps_tsk_num]) {
+ rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
+ if (!rc)
ps_tsk_num++;
- return 0;
- }
- return -EINVAL;
+ else
+ ps_tsks[ps_tsk_num] = NULL;
+
+ return rc;
}
static void destroy_power_saving_task(void)
@@ -236,6 +241,7 @@ static void destroy_power_saving_task(void)
if (ps_tsk_num > 0) {
ps_tsk_num--;
kthread_stop(ps_tsks[ps_tsk_num]);
+ ps_tsks[ps_tsk_num] = NULL;
}
}
@@ -252,7 +258,7 @@ static void set_power_saving_task_num(unsigned int num)
}
}
-static int acpi_pad_idle_cpus(unsigned int num_cpus)
+static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
get_online_cpus();
@@ -260,7 +266,6 @@ static int acpi_pad_idle_cpus(unsigned int num_cpus)
set_power_saving_task_num(num_cpus);
put_online_cpus();
- return 0;
}
static uint32_t acpi_pad_idle_cpus_num(void)
@@ -368,19 +373,21 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
union acpi_object *package;
int rev, num, ret = -EINVAL;
- status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
+ return -EINVAL;
+
+ if (!buffer.length || !buffer.pointer)
return -EINVAL;
+
package = buffer.pointer;
if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
goto out;
rev = package->package.elements[0].integer.value;
num = package->package.elements[1].integer.value;
- if (rev != 1)
+ if (rev != 1 || num < 0)
goto out;
*num_cpus = num;
ret = 0;
@@ -409,7 +416,7 @@ static void acpi_pad_ost(acpi_handle handle, int stat,
static void acpi_pad_handle_notify(acpi_handle handle)
{
- int num_cpus, ret;
+ int num_cpus;
uint32_t idle_cpus;
mutex_lock(&isolated_cpus_lock);
@@ -417,12 +424,9 @@ static void acpi_pad_handle_notify(acpi_handle handle)
mutex_unlock(&isolated_cpus_lock);
return;
}
- ret = acpi_pad_idle_cpus(num_cpus);
+ acpi_pad_idle_cpus(num_cpus);
idle_cpus = acpi_pad_idle_cpus_num();
- if (!ret)
- acpi_pad_ost(handle, 0, idle_cpus);
- else
- acpi_pad_ost(handle, 1, 0);
+ acpi_pad_ost(handle, 0, idle_cpus);
mutex_unlock(&isolated_cpus_lock);
}
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index ab83919..61edb15 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -296,6 +296,11 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
acpi_status validate_status,
union acpi_operand_object **return_object_ptr);
+void
+acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
+ u8 package_type,
+ union acpi_operand_object *obj_desc);
+
/*
* nssearch - Namespace searching and entry
*/
@@ -354,9 +359,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
const char *internal_name,
u32 * converted_name_length, char **converted_name);
-struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle);
-
-acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node);
+struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle);
void acpi_ns_terminate(void);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index b39d682..64062b1 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -180,7 +180,11 @@ struct acpi_object_method {
u8 sync_level;
union acpi_operand_object *mutex;
u8 *aml_start;
- ACPI_INTERNAL_METHOD implementation;
+ union {
+ ACPI_INTERNAL_METHOD implementation;
+ union acpi_operand_object *handler;
+ } extra;
+
u32 aml_length;
u8 thread_count;
acpi_owner_id owner_id;
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 567a489..e786f9f 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -414,7 +414,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
/* Invoke an internal method if necessary */
if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
- status = obj_desc->method.implementation(next_walk_state);
+ status = obj_desc->method.extra.implementation(next_walk_state);
if (status == AE_OK) {
status = AE_CTRL_TERMINATE;
}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 10fc785..b40513d 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -212,18 +212,19 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
case ACPI_TYPE_BUFFER:
/*
- * These types we will allow, but we will change the type. This
- * enables some existing code of the form:
+ * These types we will allow, but we will change the type.
+ * This enables some existing code of the form:
*
* Name (DEB, 0)
* Scope (DEB) { ... }
*
- * Note: silently change the type here. On the second pass, we will report
- * a warning
+ * Note: silently change the type here. On the second pass,
+ * we will report a warning
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n",
- path,
+ "Type override - [%4.4s] had invalid type (%s) "
+ "for Scope operator, changed to type ANY\n",
+ acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type)));
node->type = ACPI_TYPE_ANY;
@@ -235,8 +236,10 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
/* All other types are an error */
ACPI_ERROR((AE_INFO,
- "Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)",
- acpi_ut_get_type_name(node->type), path));
+ "Invalid type (%s) for target of "
+ "Scope operator [%4.4s] (Cannot override)",
+ acpi_ut_get_type_name(node->type),
+ acpi_ut_get_node_name(node)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -697,15 +700,16 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
case ACPI_TYPE_BUFFER:
/*
- * These types we will allow, but we will change the type. This
- * enables some existing code of the form:
+ * These types we will allow, but we will change the type.
+ * This enables some existing code of the form:
*
* Name (DEB, 0)
* Scope (DEB) { ... }
*/
ACPI_WARNING((AE_INFO,
- "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",
- buffer_ptr,
+ "Type override - [%4.4s] had invalid type (%s) "
+ "for Scope operator, changed to type ANY\n",
+ acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type)));
node->type = ACPI_TYPE_ANY;
@@ -717,9 +721,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* All other types are an error */
ACPI_ERROR((AE_INFO,
- "Invalid type (%s) for target of Scope operator [%4.4s]",
+ "Invalid type (%s) for target of "
+ "Scope operator [%4.4s] (Cannot override)",
acpi_ut_get_type_name(node->type),
- buffer_ptr));
+ acpi_ut_get_node_name(node)));
return (AE_AML_OPERAND_TYPE);
}
@@ -1047,9 +1052,22 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
}
/*
- * If we are executing a method, initialize the region
+ * The op_region is not fully parsed at this time. The only valid
+ * argument is the space_id. (We must save the address of the
+ * AML of the address and length operands)
+ *
+ * If we have a valid region, initialize it. The namespace is
+ * unlocked at this point.
+ *
+ * Need to unlock interpreter if it is locked (if we are running
+ * a control method), in order to allow _REG methods to be run
+ * during acpi_ev_initialize_region.
*/
if (walk_state->method_node) {
+ /*
+ * Executing a method: initialize the region and unlock
+ * the interpreter
+ */
status =
acpi_ex_create_region(op->named.data,
op->named.length,
@@ -1058,21 +1076,17 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
return (status);
}
- }
- /*
- * The op_region is not fully parsed at this time. Only valid
- * argument is the space_id. (We must save the address of the
- * AML of the address and length operands)
- */
+ acpi_ex_exit_interpreter();
+ }
- /*
- * If we have a valid region, initialize it
- * Namespace is NOT locked at this point.
- */
status =
acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE);
+ if (walk_state->method_node) {
+ acpi_ex_enter_interpreter();
+ }
+
if (ACPI_FAILURE(status)) {
/*
* If AE_NOT_EXIST is returned, it is not fatal
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 0bc807c..5336d91 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -718,7 +718,7 @@ acpi_ev_install_handler(acpi_handle obj_handle,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
return (AE_BAD_PARAMETER);
}
@@ -1087,7 +1087,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
return (AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index cf29c49..ff16805 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -575,6 +575,21 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
handler_obj = obj_desc->thermal_zone.handler;
break;
+ case ACPI_TYPE_METHOD:
+ /*
+ * If we are executing module level code, the original
+ * Node's object was replaced by this Method object and we
+ * saved the handler in the method object.
+ *
+ * See acpi_ns_exec_module_code
+ */
+ if (obj_desc->method.
+ flags & AOPOBJ_MODULE_LEVEL) {
+ handler_obj =
+ obj_desc->method.extra.handler;
+ }
+ break;
+
default:
/* Ignore other objects */
break;
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 10b8543..2fe0809 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -259,7 +259,7 @@ acpi_install_notify_handler(acpi_handle device,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(device);
+ node = acpi_ns_validate_handle(device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -425,7 +425,7 @@ acpi_remove_notify_handler(acpi_handle device,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(device);
+ node = acpi_ns_validate_handle(device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 4721f58..eed7a38 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -610,7 +610,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
return (status);
}
- node = acpi_ns_map_handle_to_node(gpe_device);
+ node = acpi_ns_validate_handle(gpe_device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -698,7 +698,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
return (status);
}
- node = acpi_ns_map_handle_to_node(gpe_device);
+ node = acpi_ns_validate_handle(gpe_device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 7c3d2d3..c98aa7c 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -89,7 +89,7 @@ acpi_install_address_space_handler(acpi_handle device,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(device);
+ node = acpi_ns_validate_handle(device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -155,7 +155,7 @@ acpi_remove_address_space_handler(acpi_handle device,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(device);
+ node = acpi_ns_validate_handle(device);
if (!node ||
((node->type != ACPI_TYPE_DEVICE) &&
(node->type != ACPI_TYPE_PROCESSOR) &&
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 2f01142..3c456bd 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -375,6 +375,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
}
+ /* Must have a valid thread ID */
+
+ if (!walk_state->thread) {
+ ACPI_ERROR((AE_INFO,
+ "Cannot release Mutex [%4.4s], null thread info",
+ acpi_ut_get_node_name(obj_desc->mutex.node)));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
/*
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
@@ -392,15 +401,6 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
- /* Must have a valid thread ID */
-
- if (!walk_state->thread) {
- ACPI_ERROR((AE_INFO,
- "Cannot release Mutex [%4.4s], null thread info",
- acpi_ut_get_node_name(obj_desc->mutex.node)));
- return_ACPI_STATUS(AE_AML_INTERNAL);
- }
-
/*
* The sync level of the mutex must be equal to the current sync level. In
* other words, the current level means that at least one mutex at that
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 9c3cdbe..d622ba7 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -165,7 +165,7 @@ acpi_status acpi_ns_root_initialize(void)
obj_desc->method.method_flags =
AML_METHOD_INTERNAL_ONLY;
- obj_desc->method.implementation =
+ obj_desc->method.extra.implementation =
acpi_ut_osi_implementation;
#endif
break;
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 2deb986..e37836e 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -180,7 +180,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
return (AE_OK);
}
- this_node = acpi_ns_map_handle_to_node(obj_handle);
+ this_node = acpi_ns_validate_handle(obj_handle);
if (!this_node) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid object handle %p\n",
obj_handle));
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index f771e97..af9fe91 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -381,6 +381,18 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
method_obj->method.next_object);
type = acpi_ns_get_type(parent_node);
+ /*
+ * Get the region handler and save it in the method object. We may need
+ * this if an operation region declaration causes a _REG method to be run.
+ *
+ * We can't do this in acpi_ps_link_module_code because
+ * acpi_gbl_root_node->Object is NULL at PASS1.
+ */
+ if ((type == ACPI_TYPE_DEVICE) && parent_node->object) {
+ method_obj->method.extra.handler =
+ parent_node->object->device.handler;
+ }
+
/* Must clear next_object (acpi_ns_attach_object needs the field) */
method_obj->method.next_object = NULL;
@@ -415,6 +427,12 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Executed module-level code at %p\n",
method_obj->method.aml_start));
+ /* Delete a possible implicit return value (in slack mode) */
+
+ if (info->return_object) {
+ acpi_ut_remove_reference(info->return_object);
+ }
+
/* Detach the temporary method object */
acpi_ns_detach_object(parent_node);
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index af8e6bc..8f9a487 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -232,7 +232,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle);
- node = acpi_ns_map_handle_to_node(target_handle);
+ node = acpi_ns_validate_handle(target_handle);
if (!node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index b05f429..d34fa59 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -216,29 +216,38 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
data->pathname = pathname;
/*
- * Check that the type of the return object is what is expected for
- * this predefined name
+ * Check that the type of the main return object is what is expected
+ * for this predefined name
*/
status = acpi_ns_check_object_type(data, return_object_ptr,
predefined->info.expected_btypes,
ACPI_NOT_PACKAGE_ELEMENT);
if (ACPI_FAILURE(status)) {
- goto check_validation_status;
+ goto exit;
}
- /* For returned Package objects, check the type of all sub-objects */
-
- if (return_object->common.type == ACPI_TYPE_PACKAGE) {
+ /*
+ * For returned Package objects, check the type of all sub-objects.
+ * Note: Package may have been newly created by call above.
+ */
+ if ((*return_object_ptr)->common.type == ACPI_TYPE_PACKAGE) {
status = acpi_ns_check_package(data, return_object_ptr);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
}
/*
- * Perform additional, more complicated repairs on a per-name
- * basis.
+ * The return object was OK, or it was successfully repaired above.
+ * Now make some additional checks such as verifying that package
+ * objects are sorted correctly (if required) or buffer objects have
+ * the correct data width (bytes vs. dwords). These repairs are
+ * performed on a per-name basis, i.e., the code is specific to
+ * particular predefined names.
*/
status = acpi_ns_complex_repairs(data, node, status, return_object_ptr);
-check_validation_status:
+exit:
/*
* If the object validation failed or if we successfully repaired one
* or more objects, mark the parent node to suppress further warning
@@ -427,6 +436,13 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
data->pathname, package->ret_info.type,
return_object->package.count));
+ /*
+ * For variable-length Packages, we can safely remove all embedded
+ * and trailing NULL package elements
+ */
+ acpi_ns_remove_null_elements(data, package->ret_info.type,
+ return_object);
+
/* Extract package count and elements array */
elements = return_object->package.elements;
@@ -461,11 +477,11 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
if (count < expected_count) {
goto package_too_small;
} else if (count > expected_count) {
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
- data->node_flags,
- "Return Package is larger than needed - "
- "found %u, expected %u", count,
- expected_count));
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Return Package is larger than needed - "
+ "found %u, expected %u\n",
+ data->pathname, count,
+ expected_count));
}
/* Validate all elements of the returned package */
@@ -680,53 +696,18 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data,
union acpi_operand_object *sub_package;
union acpi_operand_object **sub_elements;
acpi_status status;
- u8 non_trailing_null = FALSE;
u32 expected_count;
u32 i;
u32 j;
- /* Validate each sub-Package in the parent Package */
-
+ /*
+ * Validate each sub-Package in the parent Package
+ *
+ * NOTE: assumes list of sub-packages contains no NULL elements.
+ * Any NULL elements should have been removed by earlier call
+ * to acpi_ns_remove_null_elements.
+ */
for (i = 0; i < count; i++) {
- /*
- * Handling for NULL package elements. For now, we will simply allow
- * a parent package with trailing NULL elements. This can happen if
- * the package was defined to be longer than the initializer list.
- * This is legal as per the ACPI specification. It is often used
- * to allow for dynamic initialization of a Package.
- *
- * A future enhancement may be to simply truncate the package to
- * remove the trailing NULL elements.
- */
- if (!(*elements)) {
- if (!non_trailing_null) {
-
- /* Ensure the remaining elements are all NULL */
-
- for (j = 1; j < (count - i + 1); j++) {
- if (elements[j]) {
- non_trailing_null = TRUE;
- }
- }
-
- if (!non_trailing_null) {
-
- /* Ignore the trailing NULL elements */
-
- return (AE_OK);
- }
- }
-
- /* There are trailing non-null elements, issue warning */
-
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
- data->node_flags,
- "Found NULL element at package index %u",
- i));
- elements++;
- continue;
- }
-
sub_package = *elements;
sub_elements = sub_package->package.elements;
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index d563f1a..4fd1bdb 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -45,13 +45,52 @@
#include "accommon.h"
#include "acnamesp.h"
#include "acinterp.h"
-#include "acpredef.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsrepair")
/*******************************************************************************
*
+ * This module attempts to repair or convert objects returned by the
+ * predefined methods to an object type that is expected, as per the ACPI
+ * specification. The need for this code is dictated by the many machines that
+ * return incorrect types for the standard predefined methods. Performing these
+ * conversions here, in one place, eliminates the need for individual ACPI
+ * device drivers to do the same. Note: Most of these conversions are different
+ * than the internal object conversion routines used for implicit object
+ * conversion.
+ *
+ * The following conversions can be performed as necessary:
+ *
+ * Integer -> String
+ * Integer -> Buffer
+ * String -> Integer
+ * String -> Buffer
+ * Buffer -> Integer
+ * Buffer -> String
+ * Buffer -> Package of Integers
+ * Package -> Package of one Package
+ *
+ ******************************************************************************/
+/* Local prototypes */
+static acpi_status
+acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+static acpi_status
+acpi_ns_convert_to_string(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+static acpi_status
+acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+static acpi_status
+acpi_ns_convert_to_package(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_repair_object
*
* PARAMETERS: Data - Pointer to validation data structure
@@ -68,6 +107,7 @@ ACPI_MODULE_NAME("nsrepair")
* not expected.
*
******************************************************************************/
+
acpi_status
acpi_ns_repair_object(struct acpi_predefined_data *data,
u32 expected_btypes,
@@ -76,32 +116,206 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
{
union acpi_operand_object *return_object = *return_object_ptr;
union acpi_operand_object *new_object;
- acpi_size length;
acpi_status status;
+ ACPI_FUNCTION_NAME(ns_repair_object);
+
/*
* At this point, we know that the type of the returned object was not
* one of the expected types for this predefined name. Attempt to
- * repair the object. Only a limited number of repairs are possible.
+ * repair the object by converting it to one of the expected object
+ * types for this predefined name.
*/
- switch (return_object->common.type) {
+ if (expected_btypes & ACPI_RTYPE_INTEGER) {
+ status = acpi_ns_convert_to_integer(return_object, &new_object);
+ if (ACPI_SUCCESS(status)) {
+ goto object_repaired;
+ }
+ }
+ if (expected_btypes & ACPI_RTYPE_STRING) {
+ status = acpi_ns_convert_to_string(return_object, &new_object);
+ if (ACPI_SUCCESS(status)) {
+ goto object_repaired;
+ }
+ }
+ if (expected_btypes & ACPI_RTYPE_BUFFER) {
+ status = acpi_ns_convert_to_buffer(return_object, &new_object);
+ if (ACPI_SUCCESS(status)) {
+ goto object_repaired;
+ }
+ }
+ if (expected_btypes & ACPI_RTYPE_PACKAGE) {
+ status = acpi_ns_convert_to_package(return_object, &new_object);
+ if (ACPI_SUCCESS(status)) {
+ goto object_repaired;
+ }
+ }
+
+ /* We cannot repair this object */
+
+ return (AE_AML_OPERAND_TYPE);
+
+ object_repaired:
+
+ /* Object was successfully repaired */
+
+ /*
+ * If the original object is a package element, we need to:
+ * 1. Set the reference count of the new object to match the
+ * reference count of the old object.
+ * 2. Decrement the reference count of the original object.
+ */
+ if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ new_object->common.reference_count =
+ return_object->common.reference_count;
+
+ if (return_object->common.reference_count > 1) {
+ return_object->common.reference_count--;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Converted %s to expected %s at index %u\n",
+ data->pathname,
+ acpi_ut_get_object_type_name(return_object),
+ acpi_ut_get_object_type_name(new_object),
+ package_index));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Converted %s to expected %s\n",
+ data->pathname,
+ acpi_ut_get_object_type_name(return_object),
+ acpi_ut_get_object_type_name(new_object)));
+ }
+
+ /* Delete old object, install the new return object */
+
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = new_object;
+ data->flags |= ACPI_OBJECT_REPAIRED;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_integer
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ acpi_status status;
+ u64 value = 0;
+ u32 i;
+
+ switch (original_object->common.type) {
+ case ACPI_TYPE_STRING:
+
+ /* String-to-Integer conversion */
+
+ status = acpi_ut_strtoul64(original_object->string.pointer,
+ ACPI_ANY_BASE, &value);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
case ACPI_TYPE_BUFFER:
- /* Does the method/object legally return a string? */
+ /* Buffer-to-Integer conversion. Max buffer size is 64 bits. */
- if (!(expected_btypes & ACPI_RTYPE_STRING)) {
+ if (original_object->buffer.length > 8) {
return (AE_AML_OPERAND_TYPE);
}
+ /* Extract each buffer byte to create the integer */
+
+ for (i = 0; i < original_object->buffer.length; i++) {
+ value |=
+ ((u64) original_object->buffer.
+ pointer[i] << (i * 8));
+ }
+ break;
+
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ new_object = acpi_ut_create_integer_object(value);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ *return_object = new_object;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_string
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a Integer/Buffer object to a String.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_convert_to_string(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ acpi_size length;
+ acpi_status status;
+
+ switch (original_object->common.type) {
+ case ACPI_TYPE_INTEGER:
+ /*
+ * Integer-to-String conversion. Commonly, convert
+ * an integer of value 0 to a NULL string. The last element of
+ * _BIF and _BIX packages occasionally need this fix.
+ */
+ if (original_object->integer.value == 0) {
+
+ /* Allocate a new NULL string object */
+
+ new_object = acpi_ut_create_string_object(0);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+ } else {
+ status =
+ acpi_ex_convert_to_string(original_object,
+ &new_object,
+ ACPI_IMPLICIT_CONVERT_HEX);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
/*
- * Have a Buffer, expected a String, convert. Use a to_string
+ * Buffer-to-String conversion. Use a to_string
* conversion, no transform performed on the buffer data. The best
* example of this is the _BIF method, where the string data from
* the battery is often (incorrectly) returned as buffer object(s).
*/
length = 0;
- while ((length < return_object->buffer.length) &&
- (return_object->buffer.pointer[length])) {
+ while ((length < original_object->buffer.length) &&
+ (original_object->buffer.pointer[length])) {
length++;
}
@@ -117,94 +331,176 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
* terminated at Length+1.
*/
ACPI_MEMCPY(new_object->string.pointer,
- return_object->buffer.pointer, length);
+ original_object->buffer.pointer, length);
break;
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ *return_object = new_object;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_buffer
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a Integer/String/Package object to a Buffer.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ acpi_status status;
+ union acpi_operand_object **elements;
+ u32 *dword_buffer;
+ u32 count;
+ u32 i;
+
+ switch (original_object->common.type) {
case ACPI_TYPE_INTEGER:
+ /*
+ * Integer-to-Buffer conversion.
+ * Convert the Integer to a packed-byte buffer. _MAT and other
+ * objects need this sometimes, if a read has been performed on a
+ * Field object that is less than or equal to the global integer
+ * size (32 or 64 bits).
+ */
+ status =
+ acpi_ex_convert_to_buffer(original_object, &new_object);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
- /* 1) Does the method/object legally return a buffer? */
+ case ACPI_TYPE_STRING:
- if (expected_btypes & ACPI_RTYPE_BUFFER) {
- /*
- * Convert the Integer to a packed-byte buffer. _MAT needs
- * this sometimes, if a read has been performed on a Field
- * object that is less than or equal to the global integer
- * size (32 or 64 bits).
- */
- status =
- acpi_ex_convert_to_buffer(return_object,
- &new_object);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
+ /* String-to-Buffer conversion. Simple data copy */
+
+ new_object =
+ acpi_ut_create_buffer_object(original_object->string.
+ length);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
}
- /* 2) Does the method/object legally return a string? */
+ ACPI_MEMCPY(new_object->buffer.pointer,
+ original_object->string.pointer,
+ original_object->string.length);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * This case is often seen for predefined names that must return a
+ * Buffer object with multiple DWORD integers within. For example,
+ * _FDE and _GTM. The Package can be converted to a Buffer.
+ */
+
+ /* All elements of the Package must be integers */
- else if (expected_btypes & ACPI_RTYPE_STRING) {
- /*
- * The only supported Integer-to-String conversion is to convert
- * an integer of value 0 to a NULL string. The last element of
- * _BIF and _BIX packages occasionally need this fix.
- */
- if (return_object->integer.value != 0) {
+ elements = original_object->package.elements;
+ count = original_object->package.count;
+
+ for (i = 0; i < count; i++) {
+ if ((!*elements) ||
+ ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
return (AE_AML_OPERAND_TYPE);
}
+ elements++;
+ }
- /* Allocate a new NULL string object */
+ /* Create the new buffer object to replace the Package */
- new_object = acpi_ut_create_string_object(0);
- if (!new_object) {
- return (AE_NO_MEMORY);
- }
- } else {
- return (AE_AML_OPERAND_TYPE);
+ new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count));
+ if (!new_object) {
+ return (AE_NO_MEMORY);
}
- break;
- default:
+ /* Copy the package elements (integers) to the buffer as DWORDs */
- /* We cannot repair this object */
+ elements = original_object->package.elements;
+ dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);
+
+ for (i = 0; i < count; i++) {
+ *dword_buffer = (u32) (*elements)->integer.value;
+ dword_buffer++;
+ elements++;
+ }
+ break;
+ default:
return (AE_AML_OPERAND_TYPE);
}
- /* Object was successfully repaired */
+ *return_object = new_object;
+ return (AE_OK);
+}
- /*
- * If the original object is a package element, we need to:
- * 1. Set the reference count of the new object to match the
- * reference count of the old object.
- * 2. Decrement the reference count of the original object.
- */
- if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
- new_object->common.reference_count =
- return_object->common.reference_count;
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_package
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a Buffer object to a Package. Each byte of
+ * the buffer is converted to a single integer package element.
+ *
+ ******************************************************************************/
- if (return_object->common.reference_count > 1) {
- return_object->common.reference_count--;
+static acpi_status
+acpi_ns_convert_to_package(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ union acpi_operand_object **elements;
+ u32 length;
+ u8 *buffer;
+
+ switch (original_object->common.type) {
+ case ACPI_TYPE_BUFFER:
+
+ /* Buffer-to-Package conversion */
+
+ length = original_object->buffer.length;
+ new_object = acpi_ut_create_package_object(length);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
}
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Converted %s to expected %s at index %u",
- acpi_ut_get_object_type_name
- (return_object),
- acpi_ut_get_object_type_name(new_object),
- package_index));
- } else {
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Converted %s to expected %s",
- acpi_ut_get_object_type_name
- (return_object),
- acpi_ut_get_object_type_name
- (new_object)));
- }
+ /* Convert each buffer byte to an integer package element */
- /* Delete old object, install the new return object */
+ elements = new_object->package.elements;
+ buffer = original_object->buffer.pointer;
- acpi_ut_remove_reference(return_object);
- *return_object_ptr = new_object;
- data->flags |= ACPI_OBJECT_REPAIRED;
+ while (length--) {
+ *elements =
+ acpi_ut_create_integer_object((u64) *buffer);
+ if (!*elements) {
+ acpi_ut_remove_reference(new_object);
+ return (AE_NO_MEMORY);
+ }
+ elements++;
+ buffer++;
+ }
+ break;
+
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ *return_object = new_object;
return (AE_OK);
}
@@ -238,6 +534,8 @@ acpi_ns_repair_package_list(struct acpi_predefined_data *data,
{
union acpi_operand_object *pkg_obj_desc;
+ ACPI_FUNCTION_NAME(ns_repair_package_list);
+
/*
* Create the new outer package and populate it. The new package will
* have a single element, the lone subpackage.
@@ -254,8 +552,9 @@ acpi_ns_repair_package_list(struct acpi_predefined_data *data,
*obj_desc_ptr = pkg_obj_desc;
data->flags |= ACPI_OBJECT_REPAIRED;
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Repaired Incorrectly formed Package"));
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Repaired incorrectly formed Package\n",
+ data->pathname));
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index d07b686..f13691c 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -45,6 +45,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
+#include "acpredef.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsrepair2")
@@ -74,6 +75,10 @@ acpi_ns_repair_ALR(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
static acpi_status
+acpi_ns_repair_FDE(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr);
+
+static acpi_status
acpi_ns_repair_PSS(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
@@ -89,9 +94,6 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
u8 sort_direction, char *sort_key_name);
static acpi_status
-acpi_ns_remove_null_elements(union acpi_operand_object *package);
-
-static acpi_status
acpi_ns_sort_list(union acpi_operand_object **elements,
u32 count, u32 index, u8 sort_direction);
@@ -104,17 +106,27 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
* This table contains the names of the predefined methods for which we can
* perform more complex repairs.
*
- * _ALR: Sort the list ascending by ambient_illuminance if necessary
- * _PSS: Sort the list descending by Power if necessary
- * _TSS: Sort the list descending by Power if necessary
+ * As necessary:
+ *
+ * _ALR: Sort the list ascending by ambient_illuminance
+ * _FDE: Convert Buffer of BYTEs to a Buffer of DWORDs
+ * _GTM: Convert Buffer of BYTEs to a Buffer of DWORDs
+ * _PSS: Sort the list descending by Power
+ * _TSS: Sort the list descending by Power
*/
static const struct acpi_repair_info acpi_ns_repairable_names[] = {
{"_ALR", acpi_ns_repair_ALR},
+ {"_FDE", acpi_ns_repair_FDE},
+ {"_GTM", acpi_ns_repair_FDE}, /* _GTM has same repair as _FDE */
{"_PSS", acpi_ns_repair_PSS},
{"_TSS", acpi_ns_repair_TSS},
{{0, 0, 0, 0}, NULL} /* Table terminator */
};
+#define ACPI_FDE_FIELD_COUNT 5
+#define ACPI_FDE_BYTE_BUFFER_SIZE 5
+#define ACPI_FDE_DWORD_BUFFER_SIZE (ACPI_FDE_FIELD_COUNT * sizeof (u32))
+
/******************************************************************************
*
* FUNCTION: acpi_ns_complex_repairs
@@ -215,6 +227,94 @@ acpi_ns_repair_ALR(struct acpi_predefined_data *data,
/******************************************************************************
*
+ * FUNCTION: acpi_ns_repair_FDE
+ *
+ * PARAMETERS: Data - Pointer to validation data structure
+ * return_object_ptr - Pointer to the object returned from the
+ * evaluation of a method or object
+ *
+ * RETURN: Status. AE_OK if object is OK or was repaired successfully
+ *
+ * DESCRIPTION: Repair for the _FDE and _GTM objects. The expected return
+ * value is a Buffer of 5 DWORDs. This function repairs a common
+ * problem where the return value is a Buffer of BYTEs, not
+ * DWORDs.
+ *
+ *****************************************************************************/
+
+static acpi_status
+acpi_ns_repair_FDE(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr)
+{
+ union acpi_operand_object *return_object = *return_object_ptr;
+ union acpi_operand_object *buffer_object;
+ u8 *byte_buffer;
+ u32 *dword_buffer;
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ns_repair_FDE);
+
+ switch (return_object->common.type) {
+ case ACPI_TYPE_BUFFER:
+
+ /* This is the expected type. Length should be (at least) 5 DWORDs */
+
+ if (return_object->buffer.length >= ACPI_FDE_DWORD_BUFFER_SIZE) {
+ return (AE_OK);
+ }
+
+ /* We can only repair if we have exactly 5 BYTEs */
+
+ if (return_object->buffer.length != ACPI_FDE_BYTE_BUFFER_SIZE) {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
+ data->node_flags,
+ "Incorrect return buffer length %u, expected %u",
+ return_object->buffer.length,
+ ACPI_FDE_DWORD_BUFFER_SIZE));
+
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ /* Create the new (larger) buffer object */
+
+ buffer_object =
+ acpi_ut_create_buffer_object(ACPI_FDE_DWORD_BUFFER_SIZE);
+ if (!buffer_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Expand each byte to a DWORD */
+
+ byte_buffer = return_object->buffer.pointer;
+ dword_buffer =
+ ACPI_CAST_PTR(u32, buffer_object->buffer.pointer);
+
+ for (i = 0; i < ACPI_FDE_FIELD_COUNT; i++) {
+ *dword_buffer = (u32) *byte_buffer;
+ dword_buffer++;
+ byte_buffer++;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s Expanded Byte Buffer to expected DWord Buffer\n",
+ data->pathname));
+ break;
+
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ /* Delete the original return object, return the new buffer object */
+
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = buffer_object;
+
+ data->flags |= ACPI_OBJECT_REPAIRED;
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
* FUNCTION: acpi_ns_repair_TSS
*
* PARAMETERS: Data - Pointer to validation data structure
@@ -345,6 +445,8 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
u32 previous_value;
acpi_status status;
+ ACPI_FUNCTION_NAME(ns_check_sorted_list);
+
/* The top-level object must be a package */
if (return_object->common.type != ACPI_TYPE_PACKAGE) {
@@ -352,24 +454,10 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
}
/*
- * Detect any NULL package elements and remove them from the
- * package.
- *
- * TBD: We may want to do this for all predefined names that
- * return a variable-length package of packages.
+ * NOTE: assumes list of sub-packages contains no NULL elements.
+ * Any NULL elements should have been removed by earlier call
+ * to acpi_ns_remove_null_elements.
*/
- status = acpi_ns_remove_null_elements(return_object);
- if (status == AE_NULL_ENTRY) {
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "NULL elements removed from package"));
-
- /* Exit if package is now zero length */
-
- if (!return_object->package.count) {
- return (AE_NULL_ENTRY);
- }
- }
-
outer_elements = return_object->package.elements;
outer_element_count = return_object->package.count;
if (!outer_element_count) {
@@ -422,10 +510,9 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
data->flags |= ACPI_OBJECT_REPAIRED;
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname,
- data->node_flags,
- "Repaired unsorted list - now sorted by %s",
- sort_key_name));
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Repaired unsorted list - now sorted by %s\n",
+ data->pathname, sort_key_name));
return (AE_OK);
}
@@ -440,36 +527,63 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_remove_null_elements
*
- * PARAMETERS: obj_desc - A Package object
+ * PARAMETERS: Data - Pointer to validation data structure
+ * package_type - An acpi_return_package_types value
+ * obj_desc - A Package object
*
- * RETURN: Status. AE_NULL_ENTRY means that one or more elements were
- * removed.
+ * RETURN: None.
*
- * DESCRIPTION: Remove all NULL package elements and update the package count.
+ * DESCRIPTION: Remove all NULL package elements from packages that contain
+ * a variable number of sub-packages.
*
*****************************************************************************/
-static acpi_status
-acpi_ns_remove_null_elements(union acpi_operand_object *obj_desc)
+void
+acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
+ u8 package_type,
+ union acpi_operand_object *obj_desc)
{
union acpi_operand_object **source;
union acpi_operand_object **dest;
- acpi_status status = AE_OK;
u32 count;
u32 new_count;
u32 i;
+ ACPI_FUNCTION_NAME(ns_remove_null_elements);
+
+ /*
+ * PTYPE1 packages contain no subpackages.
+ * PTYPE2 packages contain a variable number of sub-packages. We can
+ * safely remove all NULL elements from the PTYPE2 packages.
+ */
+ switch (package_type) {
+ case ACPI_PTYPE1_FIXED:
+ case ACPI_PTYPE1_VAR:
+ case ACPI_PTYPE1_OPTION:
+ return;
+
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_COUNT:
+ case ACPI_PTYPE2_PKG_COUNT:
+ case ACPI_PTYPE2_FIXED:
+ case ACPI_PTYPE2_MIN:
+ case ACPI_PTYPE2_REV_FIXED:
+ break;
+
+ default:
+ return;
+ }
+
count = obj_desc->package.count;
new_count = count;
source = obj_desc->package.elements;
dest = source;
- /* Examine all elements of the package object */
+ /* Examine all elements of the package object, remove nulls */
for (i = 0; i < count; i++) {
if (!*source) {
- status = AE_NULL_ENTRY;
new_count--;
} else {
*dest = *source;
@@ -478,15 +592,18 @@ acpi_ns_remove_null_elements(union acpi_operand_object *obj_desc)
source++;
}
- if (status == AE_NULL_ENTRY) {
+ /* Update parent package if any null elements were removed */
+
+ if (new_count < count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Found and removed %u NULL elements\n",
+ data->pathname, (count - new_count)));
/* NULL terminate list and update the package count */
*dest = NULL;
obj_desc->package.count = new_count;
}
-
- return (status);
}
/******************************************************************************
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index ea55ab4..47d91e6 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -671,24 +671,25 @@ acpi_ns_externalize_name(u32 internal_name_length,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_map_handle_to_node
+ * FUNCTION: acpi_ns_validate_handle
*
- * PARAMETERS: Handle - Handle to be converted to an Node
+ * PARAMETERS: Handle - Handle to be validated and typecast to a
+ * namespace node.
*
- * RETURN: A Name table entry pointer
+ * RETURN: A pointer to a namespace node
*
- * DESCRIPTION: Convert a namespace handle to a real Node
+ * DESCRIPTION: Convert a namespace handle to a namespace node. Handles special
+ * cases for the root node.
*
- * Note: Real integer handles would allow for more verification
+ * NOTE: Real integer handles would allow for more verification
* and keep all pointers within this subsystem - however this introduces
- * more (and perhaps unnecessary) overhead.
- *
- * The current implemenation is basically a placeholder until such time comes
- * that it is needed.
+ * more overhead and has not been necessary to this point. Drivers
+ * holding handles are typically notified before a node becomes invalid
+ * due to a table unload.
*
******************************************************************************/
-struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
+struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
{
ACPI_FUNCTION_ENTRY();
@@ -710,42 +711,6 @@ struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
/*******************************************************************************
*
- * FUNCTION: acpi_ns_convert_entry_to_handle
- *
- * PARAMETERS: Node - Node to be converted to a Handle
- *
- * RETURN: A user handle
- *
- * DESCRIPTION: Convert a real Node to a namespace handle
- *
- ******************************************************************************/
-
-acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node)
-{
-
- /*
- * Simple implementation for now;
- */
- return ((acpi_handle) node);
-
-/* Example future implementation ---------------------
-
- if (!Node)
- {
- return (NULL);
- }
-
- if (Node == acpi_gbl_root_node)
- {
- return (ACPI_ROOT_OBJECT);
- }
-
- return ((acpi_handle) Node);
-------------------------------------------------------*/
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_terminate
*
* PARAMETERS: none
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index f2bd1da..f0c0892 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -190,7 +190,7 @@ acpi_evaluate_object(acpi_handle handle,
/* Convert and validate the device handle */
- info->prefix_node = acpi_ns_map_handle_to_node(handle);
+ info->prefix_node = acpi_ns_validate_handle(handle);
if (!info->prefix_node) {
status = AE_BAD_PARAMETER;
goto cleanup;
@@ -552,7 +552,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
return (status);
}
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
@@ -729,7 +729,7 @@ acpi_attach_data(acpi_handle obj_handle,
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -775,7 +775,7 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -822,7 +822,7 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index ddc84af..e611dd9 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -93,7 +93,7 @@ acpi_get_handle(acpi_handle parent,
/* Convert a parent handle to a prefix node */
if (parent) {
- prefix_node = acpi_ns_map_handle_to_node(parent);
+ prefix_node = acpi_ns_validate_handle(parent);
if (!prefix_node) {
return (AE_BAD_PARAMETER);
}
@@ -114,7 +114,7 @@ acpi_get_handle(acpi_handle parent,
if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
*ret_handle =
- acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
+ ACPI_CAST_PTR(acpi_handle, acpi_gbl_root_node);
return (AE_OK);
}
} else if (!prefix_node) {
@@ -129,7 +129,7 @@ acpi_get_handle(acpi_handle parent,
status =
acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH, &node);
if (ACPI_SUCCESS(status)) {
- *ret_handle = acpi_ns_convert_entry_to_handle(node);
+ *ret_handle = ACPI_CAST_PTR(acpi_handle, node);
}
return (status);
@@ -186,7 +186,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
return (status);
}
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -291,7 +291,7 @@ acpi_get_object_info(acpi_handle handle,
goto cleanup;
}
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 4071bad..0cc6ba0 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -79,7 +79,7 @@ acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (AE_BAD_PARAMETER);
@@ -132,7 +132,7 @@ acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (AE_BAD_PARAMETER);
@@ -182,7 +182,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -191,7 +191,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
/* Get the parent entry */
parent_node = acpi_ns_get_parent_node(node);
- *ret_handle = acpi_ns_convert_entry_to_handle(parent_node);
+ *ret_handle = ACPI_CAST_PTR(acpi_handle, parent_node);
/* Return exception if parent is null */
@@ -251,7 +251,7 @@ acpi_get_next_object(acpi_object_type type,
/* Start search at the beginning of the specified scope */
- parent_node = acpi_ns_map_handle_to_node(parent);
+ parent_node = acpi_ns_validate_handle(parent);
if (!parent_node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -260,7 +260,7 @@ acpi_get_next_object(acpi_object_type type,
/* Non-null handle, ignore the parent */
/* Convert and validate the handle */
- child_node = acpi_ns_map_handle_to_node(child);
+ child_node = acpi_ns_validate_handle(child);
if (!child_node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -276,7 +276,7 @@ acpi_get_next_object(acpi_object_type type,
}
if (ret_handle) {
- *ret_handle = acpi_ns_convert_entry_to_handle(node);
+ *ret_handle = ACPI_CAST_PTR(acpi_handle, node);
}
unlock_and_exit:
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 12934ad..d0c1b91 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -287,7 +287,8 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
/* Invoke an internal method if necessary */
if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
- status = info->obj_desc->method.implementation(walk_state);
+ status =
+ info->obj_desc->method.extra.implementation(walk_state);
info->return_object = walk_state->return_desc;
/* Cleanup states */
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 395212b..f27feb4 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -104,7 +104,7 @@ acpi_rs_validate_parameters(acpi_handle device_handle,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- node = acpi_ns_map_handle_to_node(device_handle);
+ node = acpi_ns_validate_handle(device_handle);
if (!node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 0f0c64b..f857c5e 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -323,11 +323,11 @@ acpi_ut_copy_ielement_to_eelement(u8 object_type,
* RETURN: Status
*
* DESCRIPTION: This function is called to place a package object in a user
- * buffer. A package object by definition contains other objects.
+ * buffer. A package object by definition contains other objects.
*
* The buffer is assumed to have sufficient space for the object.
- * The caller must have verified the buffer length needed using the
- * acpi_ut_get_object_size function before calling this function.
+ * The caller must have verified the buffer length needed using
+ * the acpi_ut_get_object_size function before calling this function.
*
******************************************************************************/
@@ -382,12 +382,12 @@ acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
* FUNCTION: acpi_ut_copy_iobject_to_eobject
*
* PARAMETERS: internal_object - The internal object to be converted
- * buffer_ptr - Where the object is returned
+ * ret_buffer - Where the object is returned
*
* RETURN: Status
*
- * DESCRIPTION: This function is called to build an API object to be returned to
- * the caller.
+ * DESCRIPTION: This function is called to build an API object to be returned
+ * to the caller.
*
******************************************************************************/
@@ -626,7 +626,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
* PARAMETERS: external_object - The external object to be converted
* internal_object - Where the internal object is returned
*
- * RETURN: Status - the status of the call
+ * RETURN: Status
*
* DESCRIPTION: Converts an external object to an internal object.
*
@@ -665,7 +665,7 @@ acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
*
* RETURN: Status
*
- * DESCRIPTION: Simple copy of one internal object to another. Reference count
+ * DESCRIPTION: Simple copy of one internal object to another. Reference count
* of the destination object is preserved.
*
******************************************************************************/
@@ -897,10 +897,11 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
*
* FUNCTION: acpi_ut_copy_ipackage_to_ipackage
*
- * PARAMETERS: *source_obj - Pointer to the source package object
- * *dest_obj - Where the internal object is returned
+ * PARAMETERS: source_obj - Pointer to the source package object
+ * dest_obj - Where the internal object is returned
+ * walk_state - Current Walk state descriptor
*
- * RETURN: Status - the status of the call
+ * RETURN: Status
*
* DESCRIPTION: This function is called to copy an internal package object
* into another internal package object.
@@ -953,9 +954,9 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
*
* FUNCTION: acpi_ut_copy_iobject_to_iobject
*
- * PARAMETERS: walk_state - Current walk state
- * source_desc - The internal object to be copied
+ * PARAMETERS: source_desc - The internal object to be copied
* dest_desc - Where the copied object is returned
+ * walk_state - Current walk state
*
* RETURN: Status
*
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 3f4602b..cada73f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -831,7 +831,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
dev_name(&device->dev), event,
acpi_battery_present(battery));
#ifdef CONFIG_ACPI_SYSFS_POWER
- /* acpi_batter_update could remove power_supply object */
+ /* acpi_battery_update could remove power_supply object */
if (battery->bat.dev)
kobject_uevent(&battery->bat.dev->kobj, KOBJ_CHANGE);
#endif
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 23e5a05..2815df6 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -185,6 +185,12 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
acpi_osi_setup("!Windows 2006");
return 0;
}
+static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2009");
+ return 0;
+}
static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
{
@@ -211,6 +217,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
},
},
+ {
+ .callback = dmi_disable_osi_win7,
+ .ident = "ASUS K50IJ",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 7411915..a52126e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -344,6 +344,167 @@ bool acpi_bus_can_wakeup(acpi_handle handle)
EXPORT_SYMBOL(acpi_bus_can_wakeup);
+static void acpi_print_osc_error(acpi_handle handle,
+ struct acpi_osc_context *context, char *error)
+{
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
+ int i;
+
+ if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))
+ printk(KERN_DEBUG "%s\n", error);
+ else {
+ printk(KERN_DEBUG "%s:%s\n", (char *)buffer.pointer, error);
+ kfree(buffer.pointer);
+ }
+ printk(KERN_DEBUG"_OSC request data:");
+ for (i = 0; i < context->cap.length; i += sizeof(u32))
+ printk("%x ", *((u32 *)(context->cap.pointer + i)));
+ printk("\n");
+}
+
+static u8 hex_val(unsigned char c)
+{
+ return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
+}
+
+static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
+{
+ int i;
+ static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
+ 24, 26, 28, 30, 32, 34};
+
+ if (strlen(str) != 36)
+ return AE_BAD_PARAMETER;
+ for (i = 0; i < 36; i++) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) {
+ if (str[i] != '-')
+ return AE_BAD_PARAMETER;
+ } else if (!isxdigit(str[i]))
+ return AE_BAD_PARAMETER;
+ }
+ for (i = 0; i < 16; i++) {
+ uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
+ uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
+ }
+ return AE_OK;
+}
+
+acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
+{
+ acpi_status status;
+ struct acpi_object_list input;
+ union acpi_object in_params[4];
+ union acpi_object *out_obj;
+ u8 uuid[16];
+ u32 errors;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+
+ if (!context)
+ return AE_ERROR;
+ if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
+ return AE_ERROR;
+ context->ret.length = ACPI_ALLOCATE_BUFFER;
+ context->ret.pointer = NULL;
+
+ /* Setting up input parameters */
+ input.count = 4;
+ input.pointer = in_params;
+ in_params[0].type = ACPI_TYPE_BUFFER;
+ in_params[0].buffer.length = 16;
+ in_params[0].buffer.pointer = uuid;
+ in_params[1].type = ACPI_TYPE_INTEGER;
+ in_params[1].integer.value = context->rev;
+ in_params[2].type = ACPI_TYPE_INTEGER;
+ in_params[2].integer.value = context->cap.length/sizeof(u32);
+ in_params[3].type = ACPI_TYPE_BUFFER;
+ in_params[3].buffer.length = context->cap.length;
+ in_params[3].buffer.pointer = context->cap.pointer;
+
+ status = acpi_evaluate_object(handle, "_OSC", &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ if (!output.length)
+ return AE_NULL_OBJECT;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER
+ || out_obj->buffer.length != context->cap.length) {
+ acpi_print_osc_error(handle, context,
+ "_OSC evaluation returned wrong type");
+ status = AE_TYPE;
+ goto out_kfree;
+ }
+ /* Need to ignore bit 0 in the result code */
+ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
+ if (errors) {
+ if (errors & OSC_REQUEST_ERROR)
+ acpi_print_osc_error(handle, context,
+ "_OSC request failed");
+ if (errors & OSC_INVALID_UUID_ERROR)
+ acpi_print_osc_error(handle, context,
+ "_OSC invalid UUID");
+ if (errors & OSC_INVALID_REVISION_ERROR)
+ acpi_print_osc_error(handle, context,
+ "_OSC invalid revision");
+ if (errors & OSC_CAPABILITIES_MASK_ERROR) {
+ if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
+ & OSC_QUERY_ENABLE)
+ goto out_success;
+ status = AE_SUPPORT;
+ goto out_kfree;
+ }
+ status = AE_ERROR;
+ goto out_kfree;
+ }
+out_success:
+ context->ret.length = out_obj->buffer.length;
+ context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL);
+ if (!context->ret.pointer) {
+ status = AE_NO_MEMORY;
+ goto out_kfree;
+ }
+ memcpy(context->ret.pointer, out_obj->buffer.pointer,
+ context->ret.length);
+ status = AE_OK;
+
+out_kfree:
+ kfree(output.pointer);
+ if (status != AE_OK)
+ context->ret.pointer = NULL;
+ return status;
+}
+EXPORT_SYMBOL(acpi_run_osc);
+
+static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
+static void acpi_bus_osc_support(void)
+{
+ u32 capbuf[2];
+ struct acpi_osc_context context = {
+ .uuid_str = sb_uuid_str,
+ .rev = 1,
+ .cap.length = 8,
+ .cap.pointer = capbuf,
+ };
+ acpi_handle handle;
+
+ capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
+#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
+ defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
+ capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
+#endif
+
+#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+ capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
+#endif
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
+ return;
+ if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
+ kfree(context.ret.pointer);
+ /* do we need to check the returned cap? Probably not */
+}
+
/* --------------------------------------------------------------------------
Event Management
-------------------------------------------------------------------------- */
@@ -734,12 +895,16 @@ static int __init acpi_bus_init(void)
status = acpi_ec_ecdt_probe();
/* Ignore result. Not having an ECDT is not fatal. */
+ acpi_bus_osc_support();
+
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
goto error1;
}
+ acpi_early_processor_set_pdc();
+
/*
* Maybe EC region is required at bus_scan/acpi_get_devices. So it
* is necessary to enable it as early as possible.
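The new acpi_run_osc() above is the generic helper later reused by the PCI root code in this same patch: the caller fills an acpi_osc_context with a UUID string (acpi_str_to_uuid() turns it into the 16-byte form _OSC expects; per opc_map_to_uuid the first byte comes from characters 6-7, so "0811B06E-..." yields uuid[0] = 0x6E), a revision, and a capability buffer, and on success owns context.ret.pointer. A minimal sketch of that calling convention, illustrative only and not part of the patch (example_run_osc and the capability values are placeholders):

static acpi_status example_run_osc(acpi_handle handle)
{
	u32 capbuf[2];
	struct acpi_osc_context context = {
		.uuid_str = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48", /* platform-wide UUID used above */
		.rev = 1,
		.cap.length = sizeof(capbuf),
		.cap.pointer = capbuf,
	};
	acpi_status status;

	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;	/* query only, do not commit */
	capbuf[OSC_SUPPORT_TYPE] = 0;			/* placeholder: no extra capabilities */

	status = acpi_run_osc(handle, &context);
	if (ACPI_SUCCESS(status)) {
		/* context.ret is the raw _OSC reply; the caller owns and frees it */
		kfree(context.ret.pointer);
	}
	return status;
}

acpi_bus_osc_support() above follows the same pattern for the \_SB handle.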
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 0c9c6a9..8a95e83 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -282,6 +282,13 @@ static int acpi_lid_send_state(struct acpi_device *device)
if (ret == NOTIFY_DONE)
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
device);
+ if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
+ /*
+ * It is also regarded as success if the notifier_chain
+ * returns NOTIFY_OK or NOTIFY_DONE.
+ */
+ ret = 0;
+ }
return ret;
}
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index 8a690c3..cc421b7 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
#include <asm/uaccess.h>
#include <acpi/acpi_drivers.h>
@@ -196,6 +197,80 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
/* --------------------------------------------------------------------------
+ DebugFS Interface
+ -------------------------------------------------------------------------- */
+
+static ssize_t cm_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char *buf;
+ static int uncopied_bytes;
+ struct acpi_table_header table;
+ acpi_status status;
+
+ if (!(*ppos)) {
+ /* parse the table header to get the table length */
+ if (count <= sizeof(struct acpi_table_header))
+ return -EINVAL;
+ if (copy_from_user(&table, user_buf,
+ sizeof(struct acpi_table_header)))
+ return -EFAULT;
+ uncopied_bytes = table.length;
+ buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ if (uncopied_bytes < count) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(buf + (*ppos), user_buf, count)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ uncopied_bytes -= count;
+ *ppos += count;
+
+ if (!uncopied_bytes) {
+ status = acpi_install_method(buf);
+ kfree(buf);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
+
+ return count;
+}
+
+static const struct file_operations cm_fops = {
+ .write = cm_write,
+};
+
+static int acpi_debugfs_init(void)
+{
+ struct dentry *acpi_dir, *cm_dentry;
+
+ acpi_dir = debugfs_create_dir("acpi", NULL);
+ if (!acpi_dir)
+ goto err;
+
+ cm_dentry = debugfs_create_file("custom_method", S_IWUGO,
+ acpi_dir, NULL, &cm_fops);
+ if (!cm_dentry)
+ goto err;
+
+ return 0;
+
+err:
+ if (acpi_dir)
+ debugfs_remove(acpi_dir);
+ return -EINVAL;
+}
+
+/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_PROCFS
@@ -286,7 +361,7 @@ static const struct file_operations acpi_system_debug_proc_fops = {
};
#endif
-int __init acpi_debug_init(void)
+int __init acpi_procfs_init(void)
{
#ifdef CONFIG_ACPI_PROCFS
struct proc_dir_entry *entry;
@@ -321,3 +396,10 @@ int __init acpi_debug_init(void)
return 0;
#endif
}
+
+int __init acpi_debug_init(void)
+{
+ acpi_debugfs_init();
+ acpi_procfs_init();
+ return 0;
+}
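cm_write() above is stateful across writes: the first write must include a full struct acpi_table_header so it can learn the total image length, subsequent writes append, and once all bytes have arrived the blob is passed to acpi_install_method() and the kernel is tainted. A rough user-space sketch of feeding the file, illustrative only (the program and the method.aml input are hypothetical; it assumes debugfs is mounted at /sys/kernel/debug):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int in, out;

	if (argc != 2) {
		fprintf(stderr, "usage: %s method.aml\n", argv[0]);
		return 1;
	}
	in = open(argv[1], O_RDONLY);
	out = open("/sys/kernel/debug/acpi/custom_method", O_WRONLY);
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	/* cm_write() keeps state across writes, so chunking is fine */
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n) {
			perror("write");
			return 1;
		}
	close(in);
	close(out);
	return 0;
}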
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 30be3c1..b2586f5 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -50,7 +50,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
" before undocking");
static struct atomic_notifier_head dock_notifier_list;
-static char dock_device_name[] = "dock";
static const struct acpi_device_id dock_device_ids[] = {
{"LNXDOCK", 0},
@@ -93,40 +92,30 @@ struct dock_dependent_device {
* Dock Dependent device functions *
*****************************************************************************/
/**
- * alloc_dock_dependent_device - allocate and init a dependent device
- * @handle: the acpi_handle of the dependent device
+ * add_dock_dependent_device - associate a device with the dock station
+ * @ds: The dock station
+ * @handle: handle of the dependent device
*
- * Allocate memory for a dependent device structure for a device referenced
- * by the acpi handle
+ * Add the dependent device to the dock's dependent device list.
*/
-static struct dock_dependent_device *
-alloc_dock_dependent_device(acpi_handle handle)
+static int
+add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
{
struct dock_dependent_device *dd;
dd = kzalloc(sizeof(*dd), GFP_KERNEL);
- if (dd) {
- dd->handle = handle;
- INIT_LIST_HEAD(&dd->list);
- INIT_LIST_HEAD(&dd->hotplug_list);
- }
- return dd;
-}
+ if (!dd)
+ return -ENOMEM;
+
+ dd->handle = handle;
+ INIT_LIST_HEAD(&dd->list);
+ INIT_LIST_HEAD(&dd->hotplug_list);
-/**
- * add_dock_dependent_device - associate a device with the dock station
- * @ds: The dock station
- * @dd: The dependent device
- *
- * Add the dependent device to the dock's dependent device list.
- */
-static void
-add_dock_dependent_device(struct dock_station *ds,
- struct dock_dependent_device *dd)
-{
spin_lock(&ds->dd_lock);
list_add_tail(&dd->list, &ds->dependent_devices);
spin_unlock(&ds->dd_lock);
+
+ return 0;
}
/**
@@ -249,6 +238,7 @@ static int is_battery(acpi_handle handle)
static int is_ejectable_bay(acpi_handle handle)
{
acpi_handle phandle;
+
if (!is_ejectable(handle))
return 0;
if (is_battery(handle) || is_ata(handle))
@@ -275,14 +265,13 @@ int is_dock_device(acpi_handle handle)
if (is_dock(handle))
return 1;
- list_for_each_entry(dock_station, &dock_stations, sibling) {
+
+ list_for_each_entry(dock_station, &dock_stations, sibling)
if (find_dock_dependent_device(dock_station, handle))
return 1;
- }
return 0;
}
-
EXPORT_SYMBOL_GPL(is_dock_device);
/**
@@ -305,8 +294,6 @@ static int dock_present(struct dock_station *ds)
return 0;
}
-
-
/**
* dock_create_acpi_device - add new devices to acpi
* @handle - handle of the device to add
@@ -320,7 +307,7 @@ static int dock_present(struct dock_station *ds)
*/
static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
{
- struct acpi_device *device = NULL;
+ struct acpi_device *device;
struct acpi_device *parent_device;
acpi_handle parent;
int ret;
@@ -337,8 +324,7 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
ret = acpi_bus_add(&device, parent_device, handle,
ACPI_BUS_TYPE_DEVICE);
if (ret) {
- pr_debug("error adding bus, %x\n",
- -ret);
+ pr_debug("error adding bus, %x\n", -ret);
return NULL;
}
}
@@ -364,7 +350,6 @@ static void dock_remove_acpi_device(acpi_handle handle)
}
}
-
/**
* hotplug_dock_devices - insert or remove devices on the dock station
* @ds: the dock station
@@ -384,10 +369,9 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
/*
* First call driver specific hotplug functions
*/
- list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) {
+ list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
if (dd->ops && dd->ops->handler)
dd->ops->handler(dd->handle, event, dd->context);
- }
/*
* Now make sure that an acpi_device is created for each
@@ -426,6 +410,7 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
if (dd->ops && dd->ops->uevent)
dd->ops->uevent(dd->handle, event, dd->context);
+
if (num != DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
@@ -456,8 +441,8 @@ static void eject_dock(struct dock_station *ds)
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = 1;
- if (ACPI_FAILURE(acpi_evaluate_object(ds->handle, "_EJ0",
- &arg_list, NULL)))
+ status = acpi_evaluate_object(ds->handle, "_EJ0", &arg_list, NULL);
+ if (ACPI_FAILURE(status))
pr_debug("Failed to evaluate _EJ0!\n");
}
@@ -577,7 +562,6 @@ int register_dock_notifier(struct notifier_block *nb)
return atomic_notifier_chain_register(&dock_notifier_list, nb);
}
-
EXPORT_SYMBOL_GPL(register_dock_notifier);
/**
@@ -591,7 +575,6 @@ void unregister_dock_notifier(struct notifier_block *nb)
atomic_notifier_chain_unregister(&dock_notifier_list, nb);
}
-
EXPORT_SYMBOL_GPL(unregister_dock_notifier);
/**
@@ -636,7 +619,6 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
return ret;
}
-
EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
/**
@@ -657,7 +639,6 @@ void unregister_hotplug_dock_device(acpi_handle handle)
dock_del_hotplug_device(dock_station, dd);
}
}
-
EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
/**
@@ -772,7 +753,7 @@ struct dock_data {
static void acpi_dock_deferred_cb(void *context)
{
- struct dock_data *data = (struct dock_data *)context;
+ struct dock_data *data = context;
dock_notify(data->handle, data->event, data->ds);
kfree(data);
@@ -782,23 +763,22 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
unsigned long event, void *data)
{
struct dock_station *dock_station;
- acpi_handle handle = (acpi_handle)data;
+ acpi_handle handle = data;
if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
&& event != ACPI_NOTIFY_EJECT_REQUEST)
return 0;
list_for_each_entry(dock_station, &dock_stations, sibling) {
if (dock_station->handle == handle) {
- struct dock_data *dock_data;
+ struct dock_data *dd;
- dock_data = kmalloc(sizeof(*dock_data), GFP_KERNEL);
- if (!dock_data)
+ dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd)
return 0;
- dock_data->handle = handle;
- dock_data->event = event;
- dock_data->ds = dock_station;
- acpi_os_hotplug_execute(acpi_dock_deferred_cb,
- dock_data);
+ dd->handle = handle;
+ dd->event = event;
+ dd->ds = dock_station;
+ acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
return 0 ;
}
}
@@ -826,7 +806,6 @@ find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
acpi_status status;
acpi_handle tmp, parent;
struct dock_station *ds = context;
- struct dock_dependent_device *dd;
status = acpi_bus_get_ejd(handle, &tmp);
if (ACPI_FAILURE(status)) {
@@ -840,11 +819,9 @@ find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
goto fdd_out;
}
- if (tmp == ds->handle) {
- dd = alloc_dock_dependent_device(handle);
- if (dd)
- add_dock_dependent_device(ds, dd);
- }
+ if (tmp == ds->handle)
+ add_dock_dependent_device(ds, handle);
+
fdd_out:
return AE_OK;
}
@@ -857,8 +834,7 @@ static ssize_t show_docked(struct device *dev,
{
struct acpi_device *tmp;
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp)))
return snprintf(buf, PAGE_SIZE, "1\n");
@@ -872,8 +848,7 @@ static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
static ssize_t show_flags(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
@@ -886,8 +861,7 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
if (!count)
return -EINVAL;
@@ -905,8 +879,7 @@ static ssize_t show_dock_uid(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long long lbuf;
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
acpi_status status = acpi_evaluate_integer(dock_station->handle,
"_UID", NULL, &lbuf);
if (ACPI_FAILURE(status))
@@ -919,8 +892,7 @@ static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
static ssize_t show_dock_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
char *type;
if (dock_station->flags & DOCK_IS_DOCK)
@@ -936,6 +908,19 @@ static ssize_t show_dock_type(struct device *dev,
}
static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
+static struct attribute *dock_attributes[] = {
+ &dev_attr_docked.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_undock.attr,
+ &dev_attr_uid.attr,
+ &dev_attr_type.attr,
+ NULL
+};
+
+static struct attribute_group dock_attribute_group = {
+ .attrs = dock_attributes
+};
+
/**
* dock_add - add a new dock station
* @handle: the dock station handle
@@ -945,39 +930,31 @@ static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
*/
static int dock_add(acpi_handle handle)
{
- int ret;
- struct dock_dependent_device *dd;
- struct dock_station *dock_station;
- struct platform_device *dock_device;
+ int ret, id;
+ struct dock_station ds, *dock_station;
+ struct platform_device *dd;
+
+ id = dock_station_count;
+ memset(&ds, 0, sizeof(ds));
+ dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
+ if (IS_ERR(dd))
+ return PTR_ERR(dd);
+
+ dock_station = dd->dev.platform_data;
- /* allocate & initialize the dock_station private data */
- dock_station = kzalloc(sizeof(*dock_station), GFP_KERNEL);
- if (!dock_station)
- return -ENOMEM;
dock_station->handle = handle;
+ dock_station->dock_device = dd;
dock_station->last_dock_time = jiffies - HZ;
- INIT_LIST_HEAD(&dock_station->dependent_devices);
- INIT_LIST_HEAD(&dock_station->hotplug_devices);
- INIT_LIST_HEAD(&dock_station->sibling);
- spin_lock_init(&dock_station->dd_lock);
+
mutex_init(&dock_station->hp_lock);
+ spin_lock_init(&dock_station->dd_lock);
+ INIT_LIST_HEAD(&dock_station->sibling);
+ INIT_LIST_HEAD(&dock_station->hotplug_devices);
ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
-
- /* initialize platform device stuff */
- dock_station->dock_device =
- platform_device_register_simple(dock_device_name,
- dock_station_count, NULL, 0);
- dock_device = dock_station->dock_device;
- if (IS_ERR(dock_device)) {
- kfree(dock_station);
- dock_station = NULL;
- return PTR_ERR(dock_device);
- }
- platform_device_add_data(dock_device, &dock_station,
- sizeof(struct dock_station *));
+ INIT_LIST_HEAD(&dock_station->dependent_devices);
/* we want the dock device to send uevents */
- dev_set_uevent_suppress(&dock_device->dev, 0);
+ dev_set_uevent_suppress(&dd->dev, 0);
if (is_dock(handle))
dock_station->flags |= DOCK_IS_DOCK;
@@ -986,47 +963,9 @@ static int dock_add(acpi_handle handle)
if (is_battery(handle))
dock_station->flags |= DOCK_IS_BAT;
- ret = device_create_file(&dock_device->dev, &dev_attr_docked);
- if (ret) {
- printk(KERN_ERR "Error %d adding sysfs file\n", ret);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
- return ret;
- }
- ret = device_create_file(&dock_device->dev, &dev_attr_undock);
- if (ret) {
- printk(KERN_ERR "Error %d adding sysfs file\n", ret);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
- return ret;
- }
- ret = device_create_file(&dock_device->dev, &dev_attr_uid);
- if (ret) {
- printk(KERN_ERR "Error %d adding sysfs file\n", ret);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- device_remove_file(&dock_device->dev, &dev_attr_undock);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
- return ret;
- }
- ret = device_create_file(&dock_device->dev, &dev_attr_flags);
- if (ret) {
- printk(KERN_ERR "Error %d adding sysfs file\n", ret);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- device_remove_file(&dock_device->dev, &dev_attr_undock);
- device_remove_file(&dock_device->dev, &dev_attr_uid);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
- return ret;
- }
- ret = device_create_file(&dock_device->dev, &dev_attr_type);
+ ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
if (ret)
- printk(KERN_ERR"Error %d adding sysfs file\n", ret);
+ goto err_unregister;
/* Find dependent devices */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
@@ -1034,58 +973,43 @@ static int dock_add(acpi_handle handle)
dock_station, NULL);
/* add the dock station as a device dependent on itself */
- dd = alloc_dock_dependent_device(handle);
- if (!dd) {
- kfree(dock_station);
- dock_station = NULL;
- ret = -ENOMEM;
- goto dock_add_err_unregister;
- }
- add_dock_dependent_device(dock_station, dd);
+ ret = add_dock_dependent_device(dock_station, handle);
+ if (ret)
+ goto err_rmgroup;
dock_station_count++;
list_add(&dock_station->sibling, &dock_stations);
return 0;
-dock_add_err_unregister:
- device_remove_file(&dock_device->dev, &dev_attr_type);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- device_remove_file(&dock_device->dev, &dev_attr_undock);
- device_remove_file(&dock_device->dev, &dev_attr_uid);
- device_remove_file(&dock_device->dev, &dev_attr_flags);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
+err_rmgroup:
+ sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
+err_unregister:
+ platform_device_unregister(dd);
+ printk(KERN_ERR "%s encountered error %d\n", __func__, ret);
return ret;
}
/**
* dock_remove - free up resources related to the dock station
*/
-static int dock_remove(struct dock_station *dock_station)
+static int dock_remove(struct dock_station *ds)
{
struct dock_dependent_device *dd, *tmp;
- struct platform_device *dock_device = dock_station->dock_device;
+ struct platform_device *dock_device = ds->dock_device;
if (!dock_station_count)
return 0;
/* remove dependent devices */
- list_for_each_entry_safe(dd, tmp, &dock_station->dependent_devices,
- list)
- kfree(dd);
+ list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
+ kfree(dd);
+
+ list_del(&ds->sibling);
/* cleanup sysfs */
- device_remove_file(&dock_device->dev, &dev_attr_type);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- device_remove_file(&dock_device->dev, &dev_attr_undock);
- device_remove_file(&dock_device->dev, &dev_attr_uid);
- device_remove_file(&dock_device->dev, &dev_attr_flags);
+ sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
platform_device_unregister(dock_device);
- /* free dock station memory */
- kfree(dock_station);
- dock_station = NULL;
return 0;
}
@@ -1103,11 +1027,10 @@ find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status = AE_OK;
- if (is_dock(handle)) {
- if (dock_add(handle) >= 0) {
+ if (is_dock(handle))
+ if (dock_add(handle) >= 0)
status = AE_CTRL_TERMINATE;
- }
- }
+
return status;
}
@@ -1145,8 +1068,7 @@ static int __init dock_init(void)
static void __exit dock_exit(void)
{
- struct dock_station *dock_station;
- struct dock_station *tmp;
+ struct dock_station *tmp, *dock_station;
unregister_acpi_bus_notifier(&dock_acpi_notifier);
list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 75b147f..d6471bb 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -201,14 +201,13 @@ unlock:
spin_unlock_irqrestore(&ec->curr_lock, flags);
}
-static void acpi_ec_gpe_query(void *ec_cxt);
+static int acpi_ec_sync_query(struct acpi_ec *ec);
-static int ec_check_sci(struct acpi_ec *ec, u8 state)
+static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- return acpi_os_execute(OSL_EC_BURST_HANDLER,
- acpi_ec_gpe_query, ec);
+ return acpi_ec_sync_query(ec);
}
return 0;
}
@@ -249,11 +248,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
{
unsigned long tmp;
int ret = 0;
- pr_debug(PREFIX "transaction start\n");
- /* disable GPE during transaction if storm is detected */
- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- acpi_disable_gpe(NULL, ec->gpe);
- }
if (EC_FLAGS_MSI)
udelay(ACPI_EC_MSI_UDELAY);
/* start transaction */
@@ -265,20 +259,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
spin_unlock_irqrestore(&ec->curr_lock, tmp);
ret = ec_poll(ec);
- pr_debug(PREFIX "transaction end\n");
spin_lock_irqsave(&ec->curr_lock, tmp);
ec->curr = NULL;
spin_unlock_irqrestore(&ec->curr_lock, tmp);
- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- /* check if we received SCI during transaction */
- ec_check_sci(ec, acpi_ec_read_status(ec));
- /* it is safe to enable GPE outside of transaction */
- acpi_enable_gpe(NULL, ec->gpe);
- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
- pr_info(PREFIX "GPE storm detected, "
- "transactions will use polling mode\n");
- set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
- }
return ret;
}
@@ -321,7 +304,26 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
status = -ETIME;
goto end;
}
+ pr_debug(PREFIX "transaction start\n");
+ /* disable GPE during transaction if storm is detected */
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ acpi_disable_gpe(NULL, ec->gpe);
+ }
+
status = acpi_ec_transaction_unlocked(ec, t);
+
+ /* check if we received SCI during transaction */
+ ec_check_sci_sync(ec, acpi_ec_read_status(ec));
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ msleep(1);
+ /* it is safe to enable GPE outside of transaction */
+ acpi_enable_gpe(NULL, ec->gpe);
+ } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+ pr_info(PREFIX "GPE storm detected, "
+ "transactions will use polling mode\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+ }
+ pr_debug(PREFIX "transaction end\n");
end:
if (ec->global_lock)
acpi_release_global_lock(glk);
@@ -443,7 +445,7 @@ int ec_transaction(u8 command,
EXPORT_SYMBOL(ec_transaction);
-static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
{
int result;
u8 d;
@@ -452,20 +454,16 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
.wlen = 0, .rlen = 1};
if (!ec || !data)
return -EINVAL;
-
/*
* Query the EC to find out which _Qxx method we need to evaluate.
* Note that successful completion of the query causes the ACPI_EC_SCI
* bit to be cleared (and thus clearing the interrupt source).
*/
-
- result = acpi_ec_transaction(ec, &t);
+ result = acpi_ec_transaction_unlocked(ec, &t);
if (result)
return result;
-
if (!d)
return -ENODATA;
-
*data = d;
return 0;
}
@@ -509,43 +507,79 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
-static void acpi_ec_gpe_query(void *ec_cxt)
+static void acpi_ec_run(void *cxt)
{
- struct acpi_ec *ec = ec_cxt;
- u8 value = 0;
- struct acpi_ec_query_handler *handler, copy;
-
- if (!ec || acpi_ec_query(ec, &value))
+ struct acpi_ec_query_handler *handler = cxt;
+ if (!handler)
return;
- mutex_lock(&ec->lock);
+ pr_debug(PREFIX "start query execution\n");
+ if (handler->func)
+ handler->func(handler->data);
+ else if (handler->handle)
+ acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
+ pr_debug(PREFIX "stop query execution\n");
+ kfree(handler);
+}
+
+static int acpi_ec_sync_query(struct acpi_ec *ec)
+{
+ u8 value = 0;
+ int status;
+ struct acpi_ec_query_handler *handler, *copy;
+ if ((status = acpi_ec_query_unlocked(ec, &value)))
+ return status;
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
- memcpy(&copy, handler, sizeof(copy));
- mutex_unlock(&ec->lock);
- if (copy.func) {
- copy.func(copy.data);
- } else if (copy.handle) {
- acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
- }
- return;
+ copy = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ memcpy(copy, handler, sizeof(*copy));
+ pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+ return acpi_os_execute((copy->func) ?
+ OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
+ acpi_ec_run, copy);
}
}
+ return 0;
+}
+
+static void acpi_ec_gpe_query(void *ec_cxt)
+{
+ struct acpi_ec *ec = ec_cxt;
+ if (!ec)
+ return;
+ mutex_lock(&ec->lock);
+ acpi_ec_sync_query(ec);
mutex_unlock(&ec->lock);
}
+static void acpi_ec_gpe_query(void *ec_cxt);
+
+static int ec_check_sci(struct acpi_ec *ec, u8 state)
+{
+ if (state & ACPI_EC_FLAG_SCI) {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
+ pr_debug(PREFIX "push gpe query to the queue\n");
+ return acpi_os_execute(OSL_NOTIFY_HANDLER,
+ acpi_ec_gpe_query, ec);
+ }
+ }
+ return 0;
+}
+
static u32 acpi_ec_gpe_handler(void *data)
{
struct acpi_ec *ec = data;
- u8 status;
pr_debug(PREFIX "~~~> interrupt\n");
- status = acpi_ec_read_status(ec);
- advance_transaction(ec, status);
- if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
+ advance_transaction(ec, acpi_ec_read_status(ec));
+ if (ec_transaction_done(ec) &&
+ (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
wake_up(&ec->wait);
- ec_check_sci(ec, status);
+ ec_check_sci(ec, acpi_ec_read_status(ec));
+ }
return ACPI_INTERRUPT_HANDLED;
}
@@ -916,6 +950,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
/* MSI EC needs special treatment, enable it */
static int ec_flag_msi(const struct dmi_system_id *id)
{
+ printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
EC_FLAGS_MSI = 1;
EC_FLAGS_VALIDATE_ECDT = 1;
return 0;
@@ -928,8 +963,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
{
ec_flag_msi, "MSI hardware", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
- DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
+ DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
+ {
+ ec_flag_msi, "MSI hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
+ {
+ ec_flag_msi, "MSI hardware", {
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
{
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index f419849..acf2ab2 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -267,7 +267,7 @@ static int acpi_fan_add(struct acpi_device *device)
goto end;
}
- dev_info(&device->dev, "registered as cooling_device%d\n", cdev->id);
+ dev_dbg(&device->dev, "registered as cooling_device%d\n", cdev->id);
device->driver_data = cdev;
result = sysfs_create_link(&device->dev.kobj,
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 074cf86..cb28e05 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -43,6 +43,7 @@ int acpi_power_transition(struct acpi_device *device, int state);
extern int acpi_power_nocheck;
int acpi_wakeup_device_init(void);
+void acpi_early_processor_set_pdc(void);
/* --------------------------------------------------------------------------
Embedded Controller
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 2be2fb6..7ad48df 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
+#include <linux/numa.h>
#include <acpi/acpi_bus.h>
#define PREFIX "ACPI: "
@@ -40,14 +41,14 @@ static nodemask_t nodes_found_map = NODE_MASK_NONE;
/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
- = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
- = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
int pxm_to_node(int pxm)
{
if (pxm < 0)
- return NID_INVAL;
+ return NUMA_NO_NODE;
return pxm_to_node_map[pxm];
}
@@ -68,9 +69,9 @@ int acpi_map_pxm_to_node(int pxm)
{
int node = pxm_to_node_map[pxm];
- if (node < 0){
+ if (node < 0) {
if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
- return NID_INVAL;
+ return NUMA_NO_NODE;
node = first_unset_node(nodes_found_map);
__acpi_map_pxm_to_node(pxm, node);
node_set(node, nodes_found_map);
@@ -79,16 +80,6 @@ int acpi_map_pxm_to_node(int pxm)
return node;
}
-#if 0
-void __cpuinit acpi_unmap_pxm_to_node(int node)
-{
- int pxm = node_to_pxm_map[node];
- pxm_to_node_map[pxm] = NID_INVAL;
- node_to_pxm_map[node] = PXM_INVAL;
- node_clear(node, nodes_found_map);
-}
-#endif /* 0 */
-
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7c1c59e..02e8464 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1118,7 +1118,7 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
/* Check for resource conflicts between ACPI OperationRegions and native
* drivers */
-int acpi_check_resource_conflict(struct resource *res)
+int acpi_check_resource_conflict(const struct resource *res)
{
struct acpi_res_list *res_list_elem;
int ioport;
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 394ae89..04b0f00 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -56,7 +56,7 @@ ACPI_MODULE_NAME("pci_link");
static int acpi_pci_link_add(struct acpi_device *device);
static int acpi_pci_link_remove(struct acpi_device *device, int type);
-static struct acpi_device_id link_device_ids[] = {
+static const struct acpi_device_id link_device_ids[] = {
{"PNP0C0F", 0},
{"", 0},
};
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1af8081..64f55b6 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -46,7 +46,7 @@ static int acpi_pci_root_add(struct acpi_device *device);
static int acpi_pci_root_remove(struct acpi_device *device, int type);
static int acpi_pci_root_start(struct acpi_device *device);
-static struct acpi_device_id root_device_ids[] = {
+static const struct acpi_device_id root_device_ids[] = {
{"PNP0A03", 0},
{"", 0},
};
@@ -202,72 +202,24 @@ static void acpi_pci_bridge_scan(struct acpi_device *device)
}
}
-static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
- 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
+static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
static acpi_status acpi_pci_run_osc(acpi_handle handle,
const u32 *capbuf, u32 *retval)
{
+ struct acpi_osc_context context = {
+ .uuid_str = pci_osc_uuid_str,
+ .rev = 1,
+ .cap.length = 12,
+ .cap.pointer = (void *)capbuf,
+ };
acpi_status status;
- struct acpi_object_list input;
- union acpi_object in_params[4];
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
- union acpi_object *out_obj;
- u32 errors;
-
- /* Setting up input parameters */
- input.count = 4;
- input.pointer = in_params;
- in_params[0].type = ACPI_TYPE_BUFFER;
- in_params[0].buffer.length = 16;
- in_params[0].buffer.pointer = OSC_UUID;
- in_params[1].type = ACPI_TYPE_INTEGER;
- in_params[1].integer.value = 1;
- in_params[2].type = ACPI_TYPE_INTEGER;
- in_params[2].integer.value = 3;
- in_params[3].type = ACPI_TYPE_BUFFER;
- in_params[3].buffer.length = 12;
- in_params[3].buffer.pointer = (u8 *)capbuf;
-
- status = acpi_evaluate_object(handle, "_OSC", &input, &output);
- if (ACPI_FAILURE(status))
- return status;
- if (!output.length)
- return AE_NULL_OBJECT;
-
- out_obj = output.pointer;
- if (out_obj->type != ACPI_TYPE_BUFFER) {
- printk(KERN_DEBUG "_OSC evaluation returned wrong type\n");
- status = AE_TYPE;
- goto out_kfree;
- }
- /* Need to ignore the bit0 in result code */
- errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
- if (errors) {
- if (errors & OSC_REQUEST_ERROR)
- printk(KERN_DEBUG "_OSC request failed\n");
- if (errors & OSC_INVALID_UUID_ERROR)
- printk(KERN_DEBUG "_OSC invalid UUID\n");
- if (errors & OSC_INVALID_REVISION_ERROR)
- printk(KERN_DEBUG "_OSC invalid revision\n");
- if (errors & OSC_CAPABILITIES_MASK_ERROR) {
- if (capbuf[OSC_QUERY_TYPE] & OSC_QUERY_ENABLE)
- goto out_success;
- printk(KERN_DEBUG
- "Firmware did not grant requested _OSC control\n");
- status = AE_SUPPORT;
- goto out_kfree;
- }
- status = AE_ERROR;
- goto out_kfree;
+ status = acpi_run_osc(handle, &context);
+ if (ACPI_SUCCESS(status)) {
+ *retval = *((u32 *)(context.ret.pointer + 8));
+ kfree(context.ret.pointer);
}
-out_success:
- *retval = *((u32 *)(out_obj->buffer.pointer + 8));
- status = AE_OK;
-
-out_kfree:
- kfree(output.pointer);
return status;
}
@@ -277,10 +229,10 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
u32 support_set, result, capbuf[3];
/* do _OSC query for all possible controls */
- support_set = root->osc_support_set | (flags & OSC_SUPPORT_MASKS);
+ support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
capbuf[OSC_SUPPORT_TYPE] = support_set;
- capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
+ capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
if (ACPI_SUCCESS(status)) {
@@ -427,7 +379,7 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
if (ACPI_FAILURE(status))
return status;
- control_req = (flags & OSC_CONTROL_MASKS);
+ control_req = (flags & OSC_PCI_CONTROL_MASKS);
if (!control_req)
return AE_TYPE;
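acpi_pci_run_osc() now leans on the fact that the buffer _OSC hands back mirrors the three-DWORD capability buffer that was passed in: the first DWORD carries the error/status bits (bit 0 is reserved, which is why acpi_run_osc() masks it off) and the third DWORD carries the controls granted to the OS, hence the read at byte offset 8. Illustrative sketch of that view (example_pci_osc_granted is a hypothetical helper, not part of the patch):

static u32 example_pci_osc_granted(struct acpi_osc_context *context)
{
	u32 *ret = context->ret.pointer;

	/* ret[0]: error/status bits, ret[1]: supported features,
	 * ret[2]: controls granted to the OS (byte offset 8) */
	return ret[OSC_CONTROL_TYPE];
}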
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 22b2979..0f30c3c 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -65,7 +65,7 @@ static int acpi_power_remove(struct acpi_device *device, int type);
static int acpi_power_resume(struct acpi_device *device);
static int acpi_power_open_fs(struct inode *inode, struct file *file);
-static struct acpi_device_id power_device_ids[] = {
+static const struct acpi_device_id power_device_ids[] = {
{ACPI_POWER_HID, 0},
{"", 0},
};
diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
index 2ef7030..dc4ffad 100644
--- a/drivers/acpi/power_meter.c
+++ b/drivers/acpi/power_meter.c
@@ -64,7 +64,7 @@ static int can_cap_in_hardware(void)
return force_cap_on || cap_in_hardware;
}
-static struct acpi_device_id power_meter_ids[] = {
+static const struct acpi_device_id power_meter_ids[] = {
{"ACPI000D", 0},
{"", 0},
};
@@ -534,6 +534,7 @@ static void remove_domain_devices(struct acpi_power_meter_resource *resource)
kfree(resource->domain_devices);
kobject_put(resource->holders_dir);
+ resource->num_domain_devices = 0;
}
static int read_domain_devices(struct acpi_power_meter_resource *resource)
@@ -740,7 +741,6 @@ skip_unsafe_cap:
return res;
error:
- remove_domain_devices(resource);
remove_attrs(resource);
return res;
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index cb4283f..9863c98 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -124,29 +124,6 @@ static const struct file_operations acpi_processor_info_fops = {
DEFINE_PER_CPU(struct acpi_processor *, processors);
struct acpi_processor_errata errata __read_mostly;
-static int set_no_mwait(const struct dmi_system_id *id)
-{
- printk(KERN_NOTICE PREFIX "%s detected - "
- "disabling mwait for CPU C-states\n", id->ident);
- idle_nomwait = 1;
- return 0;
-}
-
-static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
- {
- set_no_mwait, "IFL91 board", {
- DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
- DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
- {
- set_no_mwait, "Extensa 5220", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
- DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
- {},
-};
/* --------------------------------------------------------------------------
Errata Handling
@@ -277,45 +254,6 @@ static int acpi_processor_errata(struct acpi_processor *pr)
}
/* --------------------------------------------------------------------------
- Common ACPI processor functions
- -------------------------------------------------------------------------- */
-
-/*
- * _PDC is required for a BIOS-OS handshake for most of the newer
- * ACPI processor features.
- */
-static int acpi_processor_set_pdc(struct acpi_processor *pr)
-{
- struct acpi_object_list *pdc_in = pr->pdc;
- acpi_status status = AE_OK;
-
-
- if (!pdc_in)
- return status;
- if (idle_nomwait) {
- /*
- * If mwait is disabled for CPU C-states, the C2C3_FFH access
- * mode will be disabled in the parameter of _PDC object.
- * Of course C1_FFH access mode will also be disabled.
- */
- union acpi_object *obj;
- u32 *buffer = NULL;
-
- obj = pdc_in->pointer;
- buffer = (u32 *)(obj->buffer.pointer);
- buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
-
- }
- status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);
-
- if (ACPI_FAILURE(status))
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Could not evaluate _PDC, using legacy perf. control...\n"));
-
- return status;
-}
-
-/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
@@ -353,7 +291,7 @@ static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
PDE(inode)->data);
}
-static int acpi_processor_add_fs(struct acpi_device *device)
+static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *entry = NULL;
@@ -722,7 +660,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event)
switch (event) {
case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
saved = pr->performance_platform_limit;
- acpi_processor_ppc_has_changed(pr);
+ acpi_processor_ppc_has_changed(pr, 1);
if (saved == pr->performance_platform_limit)
break;
acpi_bus_generate_proc_event(device, event,
@@ -758,7 +696,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
struct acpi_processor *pr = per_cpu(processors, cpu);
if (action == CPU_ONLINE && pr) {
- acpi_processor_ppc_has_changed(pr);
+ acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_cst_has_changed(pr);
acpi_processor_tstate_has_changed(pr);
}
@@ -825,12 +763,10 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
}
/* _PDC call should be done before doing anything else (if reqd.). */
- arch_acpi_processor_init_pdc(pr);
- acpi_processor_set_pdc(pr);
- arch_acpi_processor_cleanup_pdc(pr);
+ acpi_processor_set_pdc(pr->handle);
#ifdef CONFIG_CPU_FREQ
- acpi_processor_ppc_has_changed(pr);
+ acpi_processor_ppc_has_changed(pr, 0);
#endif
acpi_processor_get_throttling_info(pr);
acpi_processor_get_limit_info(pr);
@@ -845,7 +781,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
goto err_power_exit;
}
- dev_info(&device->dev, "registered as cooling_device%d\n",
+ dev_dbg(&device->dev, "registered as cooling_device%d\n",
pr->cdev->id);
result = sysfs_create_link(&device->dev.kobj,
@@ -1145,11 +1081,6 @@ static int __init acpi_processor_init(void)
if (!acpi_processor_dir)
return -ENOMEM;
#endif
- /*
- * Check whether the system is DMI table. If yes, OSPM
- * should not use mwait for CPU-states.
- */
- dmi_check_system(processor_idle_dmi_table);
result = cpuidle_register_driver(&acpi_idle_driver);
if (result < 0)
goto out_proc;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index bbd066e..cc978a8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
(void *)2},
+ { set_max_cstate, "Pavilion zv5000", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
+ (void *)1},
+ { set_max_cstate, "Asus L8400B", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+ (void *)1},
{},
};
@@ -164,7 +172,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
pr->power.timer_broadcast_on_state = state;
}
-static void lapic_timer_propagate_broadcast(void *arg)
+static void __lapic_timer_propagate_broadcast(void *arg)
{
struct acpi_processor *pr = (struct acpi_processor *) arg;
unsigned long reason;
@@ -175,6 +183,12 @@ static void lapic_timer_propagate_broadcast(void *arg)
clockevents_notify(reason, &pr->id);
}
+static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
+{
+ smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
+ (void *)pr, 1);
+}
+
/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
@@ -299,6 +313,28 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
+ /*
+ * FADT specified C2 latency must be less than or equal to
+ * 100 microseconds.
+ */
+ if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
+ /* invalidate C2 */
+ pr->power.states[ACPI_STATE_C2].address = 0;
+ }
+
+ /*
+ * FADT supplied C3 latency must be less than or equal to
+ * 1000 microseconds.
+ */
+ if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
+ /* invalidate C3 */
+ pr->power.states[ACPI_STATE_C3].address = 0;
+ }
+
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"lvl2[0x%08x] lvl3[0x%08x]\n",
pr->power.states[ACPI_STATE_C2].address,
@@ -488,33 +524,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
return status;
}
-static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
-{
-
- if (!cx->address)
- return;
-
- /*
- * C2 latency must be less than or equal to 100
- * microseconds.
- */
- else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "latency too large [%d]\n", cx->latency));
- return;
- }
-
- /*
- * Otherwise we've met all of our C2 requirements.
- * Normalize the C2 latency to expidite policy
- */
- cx->valid = 1;
-
- cx->latency_ticks = cx->latency;
-
- return;
-}
-
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
@@ -526,16 +535,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
return;
/*
- * C3 latency must be less than or equal to 1000
- * microseconds.
- */
- else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "latency too large [%d]\n", cx->latency));
- return;
- }
-
- /*
* PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
* DMA transfers are used by any ISA device to avoid livelock.
* Note that we could disable Type-F DMA (as recommended by
@@ -623,7 +622,10 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
break;
case ACPI_STATE_C2:
- acpi_processor_power_verify_c2(cx);
+ if (!cx->address)
+ break;
+ cx->valid = 1;
+ cx->latency_ticks = cx->latency; /* Normalize latency */
break;
case ACPI_STATE_C3:
@@ -638,8 +640,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
working++;
}
- smp_call_function_single(pr->id, lapic_timer_propagate_broadcast,
- pr, 1);
+ lapic_timer_propagate_broadcast(pr);
return (working);
}
@@ -879,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
return(acpi_idle_enter_c1(dev, state));
local_irq_disable();
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
+ if (cx->entry_method != ACPI_CSTATE_FFH) {
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we test
+ * NEED_RESCHED:
+ */
+ smp_mb();
+ }
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
@@ -964,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
local_irq_disable();
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
+ if (cx->entry_method != ACPI_CSTATE_FFH) {
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we test
+ * NEED_RESCHED:
+ */
+ smp_mb();
+ }
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
new file mode 100644
index 0000000..e306ba9
--- /dev/null
+++ b/drivers/acpi/processor_pdc.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2005 Intel Corporation
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * Alex Chiang <achiang@hp.com>
+ * - Unified x86/ia64 implementations
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+#include <linux/dmi.h>
+
+#include <acpi/acpi_drivers.h>
+#include <acpi/processor.h>
+
+#include "internal.h"
+
+#define PREFIX "ACPI: "
+#define _COMPONENT ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_pdc");
+
+static int set_no_mwait(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE PREFIX "%s detected - "
+ "disabling mwait for CPU C-states\n", id->ident);
+ idle_nomwait = 1;
+ return 0;
+}
+
+static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
+ {
+ set_no_mwait, "IFL91 board", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
+ DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
+ {
+ set_no_mwait, "Extensa 5220", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+ {},
+};
+
+static void acpi_set_pdc_bits(u32 *buf)
+{
+ buf[0] = ACPI_PDC_REVISION_ID;
+ buf[1] = 1;
+
+ /* Enable coordination with firmware's _TSD info */
+ buf[2] = ACPI_PDC_SMP_T_SWCOORD;
+
+ /* Twiddle arch-specific bits needed for _PDC */
+ arch_acpi_set_pdc_bits(buf);
+}
+
+static struct acpi_object_list *acpi_processor_alloc_pdc(void)
+{
+ struct acpi_object_list *obj_list;
+ union acpi_object *obj;
+ u32 *buf;
+
+ /* allocate and initialize pdc. It will be used later. */
+ obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+ if (!obj_list) {
+ printk(KERN_ERR "Memory allocation error\n");
+ return NULL;
+ }
+
+ obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+ if (!obj) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj_list);
+ return NULL;
+ }
+
+ buf = kmalloc(12, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(obj);
+ kfree(obj_list);
+ return NULL;
+ }
+
+ acpi_set_pdc_bits(buf);
+
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = 12;
+ obj->buffer.pointer = (u8 *) buf;
+ obj_list->count = 1;
+ obj_list->pointer = obj;
+
+ return obj_list;
+}
+
+/*
+ * _PDC is required for a BIOS-OS handshake for most of the newer
+ * ACPI processor features.
+ */
+static int
+acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
+{
+ acpi_status status = AE_OK;
+
+ if (idle_nomwait) {
+ /*
+ * If mwait is disabled for CPU C-states, the C2C3_FFH access
+ * mode will be disabled in the parameter of _PDC object.
+ * Of course C1_FFH access mode will also be disabled.
+ */
+ union acpi_object *obj;
+ u32 *buffer = NULL;
+
+ obj = pdc_in->pointer;
+ buffer = (u32 *)(obj->buffer.pointer);
+ buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
+
+ }
+ status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
+
+ if (ACPI_FAILURE(status))
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Could not evaluate _PDC, using legacy perf. control.\n"));
+
+ return status;
+}
+
+static int early_pdc_done;
+
+void acpi_processor_set_pdc(acpi_handle handle)
+{
+ struct acpi_object_list *obj_list;
+
+ if (arch_has_acpi_pdc() == false)
+ return;
+
+ if (early_pdc_done)
+ return;
+
+ obj_list = acpi_processor_alloc_pdc();
+ if (!obj_list)
+ return;
+
+ acpi_processor_eval_pdc(handle, obj_list);
+
+ kfree(obj_list->pointer->buffer.pointer);
+ kfree(obj_list->pointer);
+ kfree(obj_list);
+}
+EXPORT_SYMBOL_GPL(acpi_processor_set_pdc);
+
+static int early_pdc_optin;
+static int set_early_pdc_optin(const struct dmi_system_id *id)
+{
+ early_pdc_optin = 1;
+ return 0;
+}
+
+static int param_early_pdc_optin(char *s)
+{
+ early_pdc_optin = 1;
+ return 1;
+}
+__setup("acpi_early_pdc_eval", param_early_pdc_optin);
+
+static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
+ {
+ set_early_pdc_optin, "HP Envy", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Envy") }, NULL},
+ {
+ set_early_pdc_optin, "HP Pavilion dv6", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6") }, NULL},
+ {
+ set_early_pdc_optin, "HP Pavilion dv7", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7") }, NULL},
+ {},
+};
+
+static acpi_status
+early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ acpi_processor_set_pdc(handle);
+ return AE_OK;
+}
+
+void __init acpi_early_processor_set_pdc(void)
+{
+ /*
+ * Check whether the system matches an entry in the DMI table.
+ * If it does, OSPM should not use mwait for CPU C-states.
+ */
+ dmi_check_system(processor_idle_dmi_table);
+
+ /*
+ * Allow systems to opt-in to early _PDC evaluation.
+ */
+ dmi_check_system(early_pdc_optin_table);
+ if (!early_pdc_optin)
+ return;
+
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ early_init_pdc, NULL, NULL, NULL);
+
+ early_pdc_done = 1;
+}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 01e366d..a959f6a 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -152,15 +152,59 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
return 0;
}
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
+/*
+ * acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status
+ * @handle: ACPI processor handle
+ * @status: the status code of _PPC evaluation
+ * 0: success. OSPM is now using the performance state specified.
+ * 1: failure. OSPM has not changed the number of P-states in use
+ */
+static void acpi_processor_ppc_ost(acpi_handle handle, int status)
+{
+ union acpi_object params[2] = {
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ };
+ struct acpi_object_list arg_list = {2, params};
+ acpi_handle temp;
+
+ params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
+ params[1].integer.value = status;
+
+ /* when there is no _OST, skip it */
+ if (ACPI_FAILURE(acpi_get_handle(handle, "_OST", &temp)))
+ return;
+
+ acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
+ return;
+}
+
+int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
int ret;
- if (ignore_ppc)
+ if (ignore_ppc) {
+ /*
+ * The _OST object is evaluated only for a notification event;
+ * otherwise it is skipped.
+ */
+ if (event_flag)
+ acpi_processor_ppc_ost(pr->handle, 1);
return 0;
+ }
ret = acpi_processor_get_platform_limit(pr);
-
+ /*
+ * The _OST object is evaluated only for a notification event;
+ * otherwise it is skipped.
+ */
+ if (event_flag) {
+ if (ret < 0)
+ acpi_processor_ppc_ost(pr->handle, 1);
+ else
+ acpi_processor_ppc_ost(pr->handle, 0);
+ }
if (ret < 0)
return (ret);
else
@@ -369,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
if (result)
goto update_bios;
- return 0;
+ /* We need to call _PPC once when cpufreq starts */
+ if (ignore_ppc != 1)
+ result = acpi_processor_get_platform_limit(pr);
+
+ return result;
/*
* Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
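
The new event_flag argument lets callers tell a firmware Notify(0x80) apart from an internal re-evaluation, since _OST feedback is only meaningful for the former. A hedged, illustrative sketch of a caller (the handler below is not part of this patch):

/* Illustrative notify handler; ACPI_PROCESSOR_NOTIFY_PERFORMANCE is 0x80. */
static void example_processor_notify(acpi_handle handle, u32 event, void *data)
{
        struct acpi_processor *pr = data;

        if (!pr)
                return;

        if (event == ACPI_PROCESSOR_NOTIFY_PERFORMANCE)
                /* event_flag = 1: report the _PPC result back through _OST */
                acpi_processor_ppc_has_changed(pr, 1);

        /* Internal callers (e.g. cpufreq setup) pass 0, so _OST is skipped. */
}
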
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 140c5c5..6deafb4 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -443,8 +443,7 @@ struct thermal_cooling_device_ops processor_cooling_ops = {
#ifdef CONFIG_ACPI_PROCFS
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
- struct acpi_processor *pr = (struct acpi_processor *)seq->private;
-
+ struct acpi_processor *pr = seq->private;
if (!pr)
goto end;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 52b9db8..b16ddbf 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -822,7 +822,10 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
{
+#if defined(CONFIG_ACPI_SYSFS_POWER) || defined(CONFIG_ACPI_PROCFS_POWER)
struct acpi_battery *battery = &sbs->battery[id];
+#endif
+
#ifdef CONFIG_ACPI_SYSFS_POWER
if (battery->bat.dev) {
if (battery->have_sysfs_alarm)
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index d933980..fd09229 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -242,7 +242,7 @@ static int smbus_alarm(void *context)
case ACPI_SBS_CHARGER:
case ACPI_SBS_MANAGER:
case ACPI_SBS_BATTERY:
- acpi_os_execute(OSL_GPE_HANDLER,
+ acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_smbus_callback, hc);
default:;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ff9f622..3e00967 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
if (child)
*child = device;
- return 0;
+
+ if (device)
+ return 0;
+ else
+ return -ENODEV;
}
+/*
+ * acpi_bus_add and acpi_bus_start
+ *
+ * Scan a given (probably recently hot-plugged) ACPI tree
+ * and create and add, or start, the devices found there.
+ *
+ * If no devices were found, -ENODEV is returned. This does not
+ * mean that a real error occurred; there simply were no suitable
+ * ACPI objects in the table trunk from which the kernel could create
+ * a device and add/start an appropriate driver.
+ */
+
int
acpi_bus_add(struct acpi_device **child,
struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
memset(&ops, 0, sizeof(ops));
ops.acpi_op_add = 1;
- acpi_bus_scan(handle, &ops, child);
- return 0;
+ return acpi_bus_scan(handle, &ops, child);
}
EXPORT_SYMBOL(acpi_bus_add);
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
{
struct acpi_bus_ops ops;
+ if (!device)
+ return -EINVAL;
+
memset(&ops, 0, sizeof(ops));
ops.acpi_op_start = 1;
- acpi_bus_scan(device->handle, &ops, NULL);
- return 0;
+ return acpi_bus_scan(device->handle, &ops, NULL);
}
EXPORT_SYMBOL(acpi_bus_start);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5f2c379..79d33d9 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -81,6 +81,23 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
/*
+ * According to the ACPI specification the BIOS should make sure that ACPI is
+ * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
+ * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
+ * on such systems during resume. Unfortunately that doesn't help in
+ * particularly pathological cases in which SCI_EN has to be set directly on
+ * resume, although the specification states very clearly that this flag is
+ * owned by the hardware. The set_sci_en_on_resume variable will be set in such
+ * cases.
+ */
+static bool set_sci_en_on_resume;
+
+void __init acpi_set_sci_en_on_resume(void)
+{
+ set_sci_en_on_resume = true;
+}
+
+/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
* kernel command line option that causes the following variable to be set.
@@ -170,18 +187,6 @@ static void acpi_pm_end(void)
#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
-/*
- * According to the ACPI specification the BIOS should make sure that ACPI is
- * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
- * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
- * on such systems during resume. Unfortunately that doesn't help in
- * particularly pathological cases in which SCI_EN has to be set directly on
- * resume, although the specification states very clearly that this flag is
- * owned by the hardware. The set_sci_en_on_resume variable will be set in such
- * cases.
- */
-static bool set_sci_en_on_resume;
-
extern void do_suspend_lowlevel(void);
static u32 acpi_suspend_states[] = {
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f336bca7..8a0ed28 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
- if (acpi_disabled)
+ if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
- if (acpi_disabled)
+ if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 65f6781..9073ada 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1052,6 +1052,13 @@ static int acpi_thermal_trip_seq_show(struct seq_file *seq, void *offset)
acpi_device_bid(device));
}
seq_puts(seq, "\n");
+ } else {
+ seq_printf(seq, "passive (forced):");
+ if (tz->thermal_zone->forced_passive)
+ seq_printf(seq, " %i C\n",
+ tz->thermal_zone->forced_passive / 1000);
+ else
+ seq_printf(seq, "<not set>\n");
}
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 05dff63..b765790 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -78,6 +78,13 @@ MODULE_LICENSE("GPL");
static int brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
+/*
+ * By default, we don't allow duplicate ACPI video bus devices
+ * under the same VGA controller
+ */
+static int allow_duplicates;
+module_param(allow_duplicates, bool, 0644);
+
static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
@@ -999,8 +1006,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
sprintf(name, "acpi_video%d", count++);
device->backlight = backlight_device_register(name,
NULL, device, &acpi_backlight_ops);
- device->backlight->props.max_brightness = device->brightness->count-3;
kfree(name);
+ if (IS_ERR(device->backlight))
+ return;
+ device->backlight->props.max_brightness = device->brightness->count-3;
result = sysfs_create_link(&device->backlight->dev.kobj,
&device->dev->dev.kobj, "device");
@@ -1979,6 +1988,10 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
unsigned long long level_current, level_next;
int result = -EINVAL;
+ /* no warning message if acpi_backlight=vendor is used */
+ if (!acpi_video_backlight_support())
+ return 0;
+
if (!device->brightness)
goto out;
@@ -2233,11 +2246,47 @@ static int acpi_video_resume(struct acpi_device *device)
return AE_OK;
}
+static acpi_status
+acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
+ void **return_value)
+{
+ struct acpi_device *device = context;
+ struct acpi_device *sibling;
+ int result;
+
+ if (handle == device->handle)
+ return AE_CTRL_TERMINATE;
+
+ result = acpi_bus_get_device(handle, &sibling);
+ if (result)
+ return AE_OK;
+
+ if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
+ return AE_ALREADY_EXISTS;
+
+ return AE_OK;
+}
+
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
struct input_dev *input;
int error;
+ acpi_status status;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
+ device->parent->handle, 1,
+ acpi_video_bus_match, NULL,
+ device, NULL);
+ if (status == AE_ALREADY_EXISTS) {
+ printk(KERN_WARNING FW_BUG
+ "Duplicate ACPI video bus devices for the"
+ " same VGA controller, please try module "
+ "parameter \"video.allow_duplicates=1\""
+ "if the current driver doesn't work.\n");
+ if (!allow_duplicates)
+ return -ENODEV;
+ }
video = kzalloc(sizeof(struct acpi_video_bus), GFP_KERNEL);
if (!video)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 85844d0..56c6374 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -40,7 +40,6 @@ config ATA_VERBOSE_ERROR
config ATA_ACPI
bool "ATA ACPI Support"
depends on ACPI && PCI
- select ACPI_DOCK
default y
help
This option adds support for ATA-related ACPI objects.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea10..b343903 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
},
.driver_data = "F.23", /* cutoff BIOS version */
},
+ /*
+ * Acer eMachines G725 has the same problem. BIOS
+ * V1.03 is known to be broken. V3.04 is known to
+ * work. In between, there are V1.06, V2.06 and V3.03,
+ * about which we don't know much. For now,
+ * blacklist anything older than V3.04.
+ */
+ {
+ .ident = "G725",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
+ },
+ .driver_data = "V3.04", /* cutoff BIOS version */
+ },
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(sysids);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 19136a7..6f3f225 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -329,7 +329,7 @@ static struct ata_port_operations ich_pata_ops = {
};
static struct ata_port_operations piix_sata_ops = {
- .inherits = &ata_bmdma_port_ops,
+ .inherits = &ata_bmdma32_port_ops,
};
static struct ata_port_operations piix_sidpr_sata_ops = {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 22ff51b..6728328 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3790,21 +3790,45 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
int sata_link_resume(struct ata_link *link, const unsigned long *params,
unsigned long deadline)
{
+ int tries = ATA_LINK_RESUME_TRIES;
u32 scontrol, serror;
int rc;
if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
return rc;
- scontrol = (scontrol & 0x0f0) | 0x300;
+ /*
+ * Writes to SControl sometimes get ignored under certain
+ * controllers (ata_piix SIDPR). Make sure DET actually is
+ * cleared.
+ */
+ do {
+ scontrol = (scontrol & 0x0f0) | 0x300;
+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+ return rc;
+ /*
+ * Some PHYs react badly if SStatus is pounded
+ * immediately after resuming. Delay 200ms before
+ * debouncing.
+ */
+ msleep(200);
- if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
- return rc;
+ /* is SControl restored correctly? */
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ return rc;
+ } while ((scontrol & 0xf0f) != 0x300 && --tries);
- /* Some PHYs react badly if SStatus is pounded immediately
- * after resuming. Delay 200ms before debouncing.
- */
- msleep(200);
+ if ((scontrol & 0xf0f) != 0x300) {
+ ata_link_printk(link, KERN_ERR,
+ "failed to resume link (SControl %X)\n",
+ scontrol);
+ return 0;
+ }
+
+ if (tries < ATA_LINK_RESUME_TRIES)
+ ata_link_printk(link, KERN_WARNING,
+ "link resume succeeded after %d retries\n",
+ ATA_LINK_RESUME_TRIES - tries);
if ((rc = sata_link_debounce(link, params, deadline)))
return rc;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 0ea97c9..9f6cfac 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2028,8 +2028,9 @@ static void ata_eh_link_autopsy(struct ata_link *link)
qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
/* determine whether the command is worth retrying */
- if (!(qc->err_mask & AC_ERR_INVALID) &&
- ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
+ if (qc->flags & ATA_QCFLAG_IO ||
+ (!(qc->err_mask & AC_ERR_INVALID) &&
+ qc->err_mask != AC_ERR_DEV))
qc->flags |= ATA_QCFLAG_RETRY;
/* accumulate error info */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1683ebd..d096fbc 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* write indication (used for PIO/DMA setup), result TF is
* copied back and we don't whine too much about its failure.
*/
- tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE)
tf->flags |= ATA_TFLAG_WRITE;
@@ -3022,7 +3022,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
case WRITE_16:
return ata_scsi_rw_xlat;
- case 0x93 /*WRITE_SAME_16*/:
+ case WRITE_SAME_16:
return ata_scsi_write_same_xlat;
case SYNCHRONIZE_CACHE:
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index efa8773..730ef3c 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
}
+ if (!do_write)
+ flush_dcache_page(page);
+
qc->curbytes += qc->sect_size;
qc->cursg_ofs += qc->sect_size;
@@ -2275,7 +2278,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
ap = qc->ap;
/* Drain up to 64K of data before we give up this recovery method */
for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
- && count < 32768; count++)
+ && count < 65536; count += 2)
ioread16(ap->ioaddr.data_addr);
/* Can become DEBUG later */
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index c4b47a3..02c81f1 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1557,6 +1557,25 @@ static unsigned short atapi_io_port[] = {
P_ATAPI_DMARQ,
P_ATAPI_INTRQ,
P_ATAPI_IORDY,
+ P_ATAPI_D0A,
+ P_ATAPI_D1A,
+ P_ATAPI_D2A,
+ P_ATAPI_D3A,
+ P_ATAPI_D4A,
+ P_ATAPI_D5A,
+ P_ATAPI_D6A,
+ P_ATAPI_D7A,
+ P_ATAPI_D8A,
+ P_ATAPI_D9A,
+ P_ATAPI_D10A,
+ P_ATAPI_D11A,
+ P_ATAPI_D12A,
+ P_ATAPI_D13A,
+ P_ATAPI_D14A,
+ P_ATAPI_D15A,
+ P_ATAPI_A0A,
+ P_ATAPI_A1A,
+ P_ATAPI_A2A,
0
};
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index dadfc35..0efb1f5 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_cmd64x"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.2.5"
/*
* CMD64x specific registers definition.
@@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
/* Merge the control bits */
regU |= 1 << adev->devno; /* UDMA on */
- if (adev->dma_mode > 2) /* 15nS timing */
+ if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
regU |= 4 << adev->devno;
} else {
regU &= ~ (1 << adev->devno); /* UDMA off */
@@ -254,109 +254,17 @@ static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
}
/**
- * cmd64x_bmdma_stop - DMA stop callback
+ * cmd646r1_dma_stop - DMA stop callback
* @qc: Command in progress
*
- * Track the completion of live DMA commands and clear the
- * host->private_data DMA tracking flag as we do.
+ * Stub for now while investigating the r1 quirk in the old driver.
*/
-static void cmd64x_bmdma_stop(struct ata_queued_cmd *qc)
+static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
{
- struct ata_port *ap = qc->ap;
ata_bmdma_stop(qc);
- WARN_ON(ap->host->private_data != ap);
- ap->host->private_data = NULL;
-}
-
-/**
- * cmd64x_qc_defer - Defer logic for chip limits
- * @qc: queued command
- *
- * Decide whether we can issue the command. Called under the host lock.
- */
-
-static int cmd64x_qc_defer(struct ata_queued_cmd *qc)
-{
- struct ata_host *host = qc->ap->host;
- struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
- int rc;
- int dma = 0;
-
- /* Apply the ATA rules first */
- rc = ata_std_qc_defer(qc);
- if (rc)
- return rc;
-
- if (qc->tf.protocol == ATAPI_PROT_DMA ||
- qc->tf.protocol == ATA_PROT_DMA)
- dma = 1;
-
- /* If the other port is not live then issue the command */
- if (alt == NULL || !alt->qc_active) {
- if (dma)
- host->private_data = qc->ap;
- return 0;
- }
- /* If there is a live DMA command then wait */
- if (host->private_data != NULL)
- return ATA_DEFER_PORT;
- if (dma)
- /* Cannot overlap our DMA command */
- return ATA_DEFER_PORT;
- return 0;
}
-/**
- * cmd64x_interrupt - ATA host interrupt handler
- * @irq: irq line (unused)
- * @dev_instance: pointer to our ata_host information structure
- *
- * Our interrupt handler for PCI IDE devices. Calls
- * ata_sff_host_intr() for each port that is flagging an IRQ. We cannot
- * use the defaults as we need to avoid touching status/altstatus during
- * a DMA.
- *
- * LOCKING:
- * Obtains host lock during operation.
- *
- * RETURNS:
- * IRQ_NONE or IRQ_HANDLED.
- */
-irqreturn_t cmd64x_interrupt(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct pci_dev *pdev = to_pci_dev(host->dev);
- unsigned int i;
- unsigned int handled = 0;
- unsigned long flags;
- static const u8 irq_reg[2] = { CFR, ARTTIM23 };
- static const u8 irq_mask[2] = { 1 << 2, 1 << 4 };
-
- /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
- spin_lock_irqsave(&host->lock, flags);
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
- u8 reg;
-
- pci_read_config_byte(pdev, irq_reg[i], &reg);
- ap = host->ports[i];
- if (ap && (reg & irq_mask[i]) &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
-
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
- (qc->flags & ATA_QCFLAG_ACTIVE))
- handled |= ata_sff_host_intr(ap, qc);
- }
- }
-
- spin_unlock_irqrestore(&host->lock, flags);
-
- return IRQ_RETVAL(handled);
-}
static struct scsi_host_template cmd64x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
@@ -365,8 +273,6 @@ static const struct ata_port_operations cmd64x_base_ops = {
.inherits = &ata_bmdma_port_ops,
.set_piomode = cmd64x_set_piomode,
.set_dmamode = cmd64x_set_dmamode,
- .bmdma_stop = cmd64x_bmdma_stop,
- .qc_defer = cmd64x_qc_defer,
};
static struct ata_port_operations cmd64x_port_ops = {
@@ -376,6 +282,7 @@ static struct ata_port_operations cmd64x_port_ops = {
static struct ata_port_operations cmd646r1_port_ops = {
.inherits = &cmd64x_base_ops,
+ .bmdma_stop = cmd646r1_bmdma_stop,
.cable_detect = ata_cable_40wire,
};
@@ -383,7 +290,6 @@ static struct ata_port_operations cmd648_port_ops = {
.inherits = &cmd64x_base_ops,
.bmdma_stop = cmd648_bmdma_stop,
.cable_detect = cmd648_cable_detect,
- .qc_defer = ata_std_qc_defer
};
static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -432,7 +338,6 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
u8 mrdmode;
int rc;
- struct ata_host *host;
rc = pcim_enable_device(pdev);
if (rc)
@@ -450,25 +355,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[0] = &cmd_info[3];
}
-
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
pci_read_config_byte(pdev, MRDMODE, &mrdmode);
mrdmode &= ~ 0x30; /* IRQ set up */
mrdmode |= 0x02; /* Memory read line enable */
pci_write_config_byte(pdev, MRDMODE, mrdmode);
+ /* Force PIO 0 here. */
+
/* PPC specific fixup copied from old driver */
#ifdef CONFIG_PPC
pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
#endif
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
- if (rc)
- return rc;
- /* We use this pointer to track the AP which has DMA running */
- host->private_data = NULL;
- pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, cmd64x_interrupt, &cmd64x_sht);
+ return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 9a09a1b..dd26bc7 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -8,7 +8,7 @@
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
*
*
* TODO
@@ -25,7 +25,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.7"
+#define DRV_VERSION "0.3.8"
enum {
HPT_PCI_FAST = (1 << 31),
@@ -264,7 +264,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
static void hpt3x2n_set_clock(struct ata_port *ap, int source)
{
- void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+ void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
/* Tristate the bus */
iowrite8(0x80, bmdma+0x73);
@@ -274,9 +274,9 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
iowrite8(source, bmdma+0x7B);
iowrite8(0xC0, bmdma+0x79);
- /* Reset state machines */
- iowrite8(0x37, bmdma+0x70);
- iowrite8(0x37, bmdma+0x74);
+ /* Reset state machines, avoid enabling the disabled channels */
+ iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
+ iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
/* Complete reset */
iowrite8(0x00, bmdma+0x79);
@@ -286,21 +286,10 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
iowrite8(0x00, bmdma+0x77);
}
-/* Check if our partner interface is busy */
-
-static int hpt3x2n_pair_idle(struct ata_port *ap)
-{
- struct ata_host *host = ap->host;
- struct ata_port *pair = host->ports[ap->port_no ^ 1];
-
- if (pair->hsm_task_state == HSM_ST_IDLE)
- return 1;
- return 0;
-}
-
static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
{
long flags = (long)ap->host->private_data;
+
/* See if we should use the DPLL */
if (writing)
return USE_DPLL; /* Needed for write */
@@ -309,20 +298,35 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
return 0;
}
+static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
+ int rc, flags = (long)ap->host->private_data;
+ int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
+
+ /* First apply the usual rules */
+ rc = ata_std_qc_defer(qc);
+ if (rc != 0)
+ return rc;
+
+ if ((flags & USE_DPLL) != dpll && alt->qc_active)
+ return ATA_DEFER_PORT;
+ return 0;
+}
+
static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
{
- struct ata_taskfile *tf = &qc->tf;
struct ata_port *ap = qc->ap;
int flags = (long)ap->host->private_data;
+ int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
- if (hpt3x2n_pair_idle(ap)) {
- int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
- if ((flags & USE_DPLL) != dpll) {
- if (dpll == 1)
- hpt3x2n_set_clock(ap, 0x21);
- else
- hpt3x2n_set_clock(ap, 0x23);
- }
+ if ((flags & USE_DPLL) != dpll) {
+ flags &= ~USE_DPLL;
+ flags |= dpll;
+ ap->host->private_data = (void *)(long)flags;
+
+ hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
}
return ata_sff_qc_issue(qc);
}
@@ -339,6 +343,8 @@ static struct ata_port_operations hpt3x2n_port_ops = {
.inherits = &ata_bmdma_port_ops,
.bmdma_stop = hpt3x2n_bmdma_stop,
+
+ .qc_defer = hpt3x2n_qc_defer,
.qc_issue = hpt3x2n_qc_issue,
.cable_detect = hpt3x2n_cable_detect,
@@ -454,7 +460,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
unsigned int f_low, f_high;
int adjust;
unsigned long iobase = pci_resource_start(dev, 4);
- void *hpriv = NULL;
+ void *hpriv = (void *)USE_DPLL;
int rc;
rc = pcim_enable_device(dev);
@@ -539,7 +545,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* Set our private data up. We only need a few flags so we use
it directly */
if (pci_mhz > 60) {
- hpriv = (void *)PCI66;
+ hpriv = (void *)(PCI66 | USE_DPLL);
/*
* On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
* the MISC. register to stretch the UltraDMA Tss timing.
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index d6f6956..37ef416 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -853,7 +853,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
return -EINVAL;
cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
- res_cs0->end - res_cs1->start + 1);
+ resource_size(res_cs1));
if (!cs1)
return -ENOMEM;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index a8a7be0..df8ee32 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -59,6 +59,7 @@
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
+#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
@@ -538,6 +539,7 @@ struct mv_port_signal {
struct mv_host_priv {
u32 hp_flags;
+ unsigned int board_idx;
u32 main_irq_mask;
struct mv_port_signal signal[8];
const struct mv_hw_ops *ops;
@@ -548,6 +550,10 @@ struct mv_host_priv {
u32 irq_cause_offset;
u32 irq_mask_offset;
u32 unmask_all_irqs;
+
+#if defined(CONFIG_HAVE_CLK)
+ struct clk *clk;
+#endif
/*
* These consistent DMA memory pools give us guaranteed
* alignment for hardware-accessed data structures,
@@ -2775,7 +2781,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
struct mv_port_priv *pp;
int edma_was_enabled;
- if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
+ if (ap->flags & ATA_FLAG_DISABLED) {
mv_unexpected_intr(ap, 0);
return;
}
@@ -3393,7 +3399,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
- writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
+ writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
@@ -3854,7 +3860,6 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
/**
* mv_init_host - Perform some early initialization of the host.
* @host: ATA host to initialize
- * @board_idx: controller index
*
* If possible, do an early global reset of the host. Then do
* our port init and clear/unmask all/relevant host interrupts.
@@ -3862,13 +3867,13 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
* LOCKING:
* Inherited from caller.
*/
-static int mv_init_host(struct ata_host *host, unsigned int board_idx)
+static int mv_init_host(struct ata_host *host)
{
int rc = 0, n_hc, port, hc;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
- rc = mv_chip_id(host, board_idx);
+ rc = mv_chip_id(host, hpriv->board_idx);
if (rc)
goto done;
@@ -3905,14 +3910,6 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_port_init(&ap->ioaddr, port_mmio);
-
-#ifdef CONFIG_PCI
- if (!IS_SOC(hpriv)) {
- unsigned int offset = port_mmio - mmio;
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
- }
-#endif
}
for (hc = 0; hc < n_hc; hc++) {
@@ -4035,12 +4032,21 @@ static int mv_platform_probe(struct platform_device *pdev)
return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
+ hpriv->board_idx = chip_soc;
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
hpriv->base -= SATAHC0_REG_BASE;
+#if defined(CONFIG_HAVE_CLK)
+ hpriv->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hpriv->clk))
+ dev_notice(&pdev->dev, "cannot get clkdev\n");
+ else
+ clk_enable(hpriv->clk);
+#endif
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
@@ -4049,12 +4055,12 @@ static int mv_platform_probe(struct platform_device *pdev)
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
- return rc;
+ goto err;
/* initialize adapter */
- rc = mv_init_host(host, chip_soc);
+ rc = mv_init_host(host);
if (rc)
- return rc;
+ goto err;
dev_printk(KERN_INFO, &pdev->dev,
"slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
@@ -4062,6 +4068,15 @@ static int mv_platform_probe(struct platform_device *pdev)
return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
IRQF_SHARED, &mv6_sht);
+err:
+#if defined(CONFIG_HAVE_CLK)
+ if (!IS_ERR(hpriv->clk)) {
+ clk_disable(hpriv->clk);
+ clk_put(hpriv->clk);
+ }
+#endif
+
+ return rc;
}
/*
@@ -4076,14 +4091,66 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
-
+#if defined(CONFIG_HAVE_CLK)
+ struct mv_host_priv *hpriv = host->private_data;
+#endif
ata_host_detach(host);
+
+#if defined(CONFIG_HAVE_CLK)
+ if (!IS_ERR(hpriv->clk)) {
+ clk_disable(hpriv->clk);
+ clk_put(hpriv->clk);
+ }
+#endif
return 0;
}
+#ifdef CONFIG_PM
+static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ if (host)
+ return ata_host_suspend(host, state);
+ else
+ return 0;
+}
+
+static int mv_platform_resume(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ if (host) {
+ struct mv_host_priv *hpriv = host->private_data;
+ const struct mv_sata_platform_data *mv_platform_data = \
+ pdev->dev.platform_data;
+ /*
+ * (Re-)program MBUS remapping windows if we are asked to.
+ */
+ if (mv_platform_data->dram != NULL)
+ mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+
+ /* initialize adapter */
+ ret = mv_init_host(host);
+ if (ret) {
+ printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ return ret;
+ }
+ ata_host_resume(host);
+ }
+
+ return 0;
+}
+#else
+#define mv_platform_suspend NULL
+#define mv_platform_resume NULL
+#endif
+
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
.remove = __devexit_p(mv_platform_remove),
+ .suspend = mv_platform_suspend,
+ .resume = mv_platform_resume,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -4094,6 +4161,9 @@ static struct platform_driver mv_platform_driver = {
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
+#ifdef CONFIG_PM
+static int mv_pci_device_resume(struct pci_dev *pdev);
+#endif
static struct pci_driver mv_pci_driver = {
@@ -4101,6 +4171,11 @@ static struct pci_driver mv_pci_driver = {
.id_table = mv_pci_tbl,
.probe = mv_pci_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = mv_pci_device_resume,
+#endif
+
};
/* move to PCI layer or libata core? */
@@ -4194,7 +4269,7 @@ static int mv_pci_init_one(struct pci_dev *pdev,
const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
- int n_ports, rc;
+ int n_ports, port, rc;
if (!printed_version++)
dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
@@ -4208,6 +4283,7 @@ static int mv_pci_init_one(struct pci_dev *pdev,
return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
+ hpriv->board_idx = board_idx;
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -4230,8 +4306,17 @@ static int mv_pci_init_one(struct pci_dev *pdev,
if (rc)
return rc;
+ for (port = 0; port < host->n_ports; port++) {
+ struct ata_port *ap = host->ports[port];
+ void __iomem *port_mmio = mv_port_base(hpriv->base, port);
+ unsigned int offset = port_mmio - hpriv->base;
+
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+ }
+
/* initialize adapter */
- rc = mv_init_host(host, board_idx);
+ rc = mv_init_host(host);
if (rc)
return rc;
@@ -4247,6 +4332,27 @@ static int mv_pci_init_one(struct pci_dev *pdev,
return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
+
+#ifdef CONFIG_PM
+static int mv_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ /* initialize adapter */
+ rc = mv_init_host(host);
+ if (rc)
+ return rc;
+
+ ata_host_resume(host);
+
+ return 0;
+}
+#endif
#endif
static int mv_platform_probe(struct platform_device *pdev);
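
The clock handling added to mv_platform_probe()/mv_platform_remove() follows the common optional-clock idiom: a failed clk_get() is non-fatal, and every successful clk_get()/clk_enable() is balanced on the error and remove paths. The pattern in isolation (illustrative names, CONFIG_HAVE_CLK only):

#if defined(CONFIG_HAVE_CLK)
static int example_probe(struct platform_device *pdev)
{
        struct clk *clk = clk_get(&pdev->dev, NULL);

        if (IS_ERR(clk))
                dev_notice(&pdev->dev, "no clock, continuing without it\n");
        else
                clk_enable(clk);

        /* ... later, on an error path or in remove(): */
        if (!IS_ERR(clk)) {
                clk_disable(clk);
                clk_put(clk);
        }
        return 0;
}
#endif
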
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 07d8d00..6330628 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -862,7 +862,7 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
if (port_status & PDC_DRIVE_ERR)
ac_err_mask |= AC_ERR_DEV;
if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
- ac_err_mask |= AC_ERR_HSM;
+ ac_err_mask |= AC_ERR_OTHER;
if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
ac_err_mask |= AC_ERR_ATA_BUS;
if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index f734b34..25a4c86 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -557,7 +557,7 @@ static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
} /* while */
// Move this VCI number into this location of the CBR Sched table.
- memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(TstSchedTbl));
+ memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
dev->CbrRemEntries--;
toBeAssigned--;
} /* while */
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 63c143e..c0c5a43 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -703,9 +703,9 @@ int bus_add_driver(struct device_driver *drv)
return 0;
out_unregister:
+ kobject_put(&priv->kobj);
kfree(drv->p);
drv->p = NULL;
- kobject_put(&priv->kobj);
out_put_bus:
bus_put(bus);
return error;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746d..6e2c3b0 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
else
pr_debug("class '%s' does not have a release() function, "
"be careful\n", class->name);
+
+ kfree(cp);
}
static struct sysfs_ops class_sysfs_ops = {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f1290cb..2820257 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -446,7 +446,8 @@ struct kset *devices_kset;
* @dev: device.
* @attr: device attribute descriptor.
*/
-int device_create_file(struct device *dev, struct device_attribute *attr)
+int device_create_file(struct device *dev,
+ const struct device_attribute *attr)
{
int error = 0;
if (dev)
@@ -459,7 +460,8 @@ int device_create_file(struct device *dev, struct device_attribute *attr)
* @dev: device.
* @attr: device attribute descriptor.
*/
-void device_remove_file(struct device *dev, struct device_attribute *attr)
+void device_remove_file(struct device *dev,
+ const struct device_attribute *attr)
{
if (dev)
sysfs_remove_file(&dev->kobj, &attr->attr);
@@ -470,7 +472,8 @@ void device_remove_file(struct device *dev, struct device_attribute *attr)
* @dev: device.
* @attr: device binary attribute descriptor.
*/
-int device_create_bin_file(struct device *dev, struct bin_attribute *attr)
+int device_create_bin_file(struct device *dev,
+ const struct bin_attribute *attr)
{
int error = -EINVAL;
if (dev)
@@ -484,7 +487,8 @@ EXPORT_SYMBOL_GPL(device_create_bin_file);
* @dev: device.
* @attr: device binary attribute descriptor.
*/
-void device_remove_bin_file(struct device *dev, struct bin_attribute *attr)
+void device_remove_bin_file(struct device *dev,
+ const struct bin_attribute *attr)
{
if (dev)
sysfs_remove_bin_file(&dev->kobj, attr);
@@ -905,8 +909,10 @@ int device_add(struct device *dev)
dev->init_name = NULL;
}
- if (!dev_name(dev))
+ if (!dev_name(dev)) {
+ error = -EINVAL;
goto name_error;
+ }
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
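
With device_create_file()/device_remove_file() now taking const pointers, drivers can declare their attributes const (or keep using DEVICE_ATTR) without casts. A minimal, illustrative attribute:

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

/* in probe():  device_create_file(dev, &dev_attr_example);  */
/* in remove(): device_remove_file(dev, &dev_attr_example);  */
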
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 50375bb..42ae452 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -32,7 +32,7 @@ static int dev_mount = 1;
static int dev_mount;
#endif
-static rwlock_t dirlock;
+static DEFINE_MUTEX(dirlock);
static int __init mount_param(char *str)
{
@@ -93,7 +93,7 @@ static int create_path(const char *nodepath)
{
int err;
- read_lock(&dirlock);
+ mutex_lock(&dirlock);
err = dev_mkdir(nodepath, 0755);
if (err == -ENOENT) {
char *path;
@@ -101,8 +101,10 @@ static int create_path(const char *nodepath)
/* parent directories do not exist, create them */
path = kstrdup(nodepath, GFP_KERNEL);
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ err = -ENOMEM;
+ goto out;
+ }
s = path;
for (;;) {
s = strchr(s, '/');
@@ -117,7 +119,8 @@ static int create_path(const char *nodepath)
}
kfree(path);
}
- read_unlock(&dirlock);
+out:
+ mutex_unlock(&dirlock);
return err;
}
@@ -229,7 +232,7 @@ static int delete_path(const char *nodepath)
if (!path)
return -ENOMEM;
- write_lock(&dirlock);
+ mutex_lock(&dirlock);
for (;;) {
char *base;
@@ -241,7 +244,7 @@ static int delete_path(const char *nodepath)
if (err)
break;
}
- write_unlock(&dirlock);
+ mutex_unlock(&dirlock);
kfree(path);
return err;
@@ -351,8 +354,7 @@ int __init devtmpfs_init(void)
{
int err;
struct vfsmount *mnt;
-
- rwlock_init(&dirlock);
+ char options[] = "mode=0755";
err = register_filesystem(&dev_fs_type);
if (err) {
@@ -361,7 +363,7 @@ int __init devtmpfs_init(void)
return err;
}
- mnt = kern_mount_data(&dev_fs_type, "mode=0755");
+ mnt = kern_mount_data(&dev_fs_type, options);
if (IS_ERR(mnt)) {
err = PTR_ERR(mnt);
printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index f367885..90c9fff 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(driver_find_device);
* @attr: driver attribute descriptor.
*/
int driver_create_file(struct device_driver *drv,
- struct driver_attribute *attr)
+ const struct driver_attribute *attr)
{
int error;
if (drv)
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_create_file);
* @attr: driver attribute descriptor.
*/
void driver_remove_file(struct device_driver *drv,
- struct driver_attribute *attr)
+ const struct driver_attribute *attr)
{
if (drv)
sysfs_remove_file(&drv->p->kobj, &attr->attr);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 989429c..bd02505 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -63,6 +63,20 @@ void unregister_memory_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_memory_notifier);
+static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
+
+int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(register_memory_isolate_notifier);
+
+void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(unregister_memory_isolate_notifier);
+
/*
* register_memory - Setup a sysfs device for a memory block
*/
@@ -157,6 +171,11 @@ int memory_notify(unsigned long val, void *v)
return blocking_notifier_call_chain(&memory_chain, val, v);
}
+int memory_isolate_notify(unsigned long val, void *v)
+{
+ return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
+}
+
/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
@@ -292,7 +311,7 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
static ssize_t
print_block_size(struct class *class, char *buf)
{
- return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
+ return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
}
static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
@@ -341,6 +360,64 @@ static inline int memory_probe_init(void)
}
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Support for offlining pages of memory
+ */
+
+/* Soft offline a page */
+static ssize_t
+store_soft_offline_page(struct class *class, const char *buf, size_t count)
+{
+ int ret;
+ u64 pfn;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (strict_strtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+ ret = soft_offline_page(pfn_to_page(pfn), 0);
+ return ret == 0 ? count : ret;
+}
+
+/* Forcibly offline a page, including killing processes. */
+static ssize_t
+store_hard_offline_page(struct class *class, const char *buf, size_t count)
+{
+ int ret;
+ u64 pfn;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (strict_strtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ ret = __memory_failure(pfn, 0, 0);
+ return ret ? ret : count;
+}
+
+static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+
+static __init int memory_fail_init(void)
+{
+ int err;
+
+ err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+ &class_attr_soft_offline_page.attr);
+ if (!err)
+ err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+ &class_attr_hard_offline_page.attr);
+ return err;
+}
+#else
+static inline int memory_fail_init(void)
+{
+ return 0;
+}
+#endif
+
/*
* Note that phys_device is optional. It is here to allow for
* differentiation between which *physical* devices each
@@ -473,6 +550,9 @@ int __init memory_dev_init(void)
err = memory_probe_init();
if (!ret)
ret = err;
+ err = memory_fail_init();
+ if (!ret)
+ ret = err;
err = block_size_init();
if (!ret)
ret = err;
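
The isolate chain is used like the existing memory hotplug notifier: a consumer registers a notifier_block and is called back through memory_isolate_notify(). A hedged sketch of such a consumer (the callback body is illustrative):

static int example_isolate_cb(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        /* 'data' describes the page range being isolated; just acknowledge it. */
        return NOTIFY_OK;
}

static struct notifier_block example_isolate_nb = {
        .notifier_call = example_isolate_cb,
};

static int __init example_init(void)
{
        return register_memory_isolate_notifier(&example_isolate_nb);
}
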
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9d2ee25..58efaf2 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -441,6 +441,7 @@ error:
platform_device_put(pdev);
return ERR_PTR(retval);
}
+EXPORT_SYMBOL_GPL(platform_device_register_data);
static int platform_drv_probe(struct device *_dev)
{
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 8aa2443..a5142bd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -23,8 +23,8 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
-#include <linux/rwsem.h>
#include <linux/interrupt.h>
+#include <linux/sched.h>
#include "../base.h"
#include "power.h"
@@ -161,6 +161,32 @@ void device_pm_move_last(struct device *dev)
list_move_tail(&dev->power.entry, &dpm_list);
}
+static ktime_t initcall_debug_start(struct device *dev)
+{
+ ktime_t calltime = ktime_set(0, 0);
+
+ if (initcall_debug) {
+ pr_info("calling %s+ @ %i\n",
+ dev_name(dev), task_pid_nr(current));
+ calltime = ktime_get();
+ }
+
+ return calltime;
+}
+
+static void initcall_debug_report(struct device *dev, ktime_t calltime,
+ int error)
+{
+ ktime_t delta, rettime;
+
+ if (initcall_debug) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
+ error, (unsigned long long)ktime_to_ns(delta) >> 10);
+ }
+}
+
/**
* pm_op - Execute the PM operation appropriate for given PM event.
* @dev: Device to handle.
@@ -172,6 +198,9 @@ static int pm_op(struct device *dev,
pm_message_t state)
{
int error = 0;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev);
switch (state.event) {
#ifdef CONFIG_SUSPEND
@@ -219,6 +248,9 @@ static int pm_op(struct device *dev,
default:
error = -EINVAL;
}
+
+ initcall_debug_report(dev, calltime, error);
+
return error;
}
@@ -236,6 +268,13 @@ static int pm_noirq_op(struct device *dev,
pm_message_t state)
{
int error = 0;
+ ktime_t calltime, delta, rettime;
+
+ if (initcall_debug) {
+ pr_info("calling %s_i+ @ %i\n",
+ dev_name(dev), task_pid_nr(current));
+ calltime = ktime_get();
+ }
switch (state.event) {
#ifdef CONFIG_SUSPEND
@@ -283,6 +322,15 @@ static int pm_noirq_op(struct device *dev,
default:
error = -EINVAL;
}
+
+ if (initcall_debug) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ printk("initcall %s_i+ returned %d after %Ld usecs\n",
+ dev_name(dev), error,
+ (unsigned long long)ktime_to_ns(delta) >> 10);
+ }
+
return error;
}
@@ -324,6 +372,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
+static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
+{
+ ktime_t calltime;
+ s64 usecs64;
+ int usecs;
+
+ calltime = ktime_get();
+ usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
+ do_div(usecs64, NSEC_PER_USEC);
+ usecs = usecs64;
+ if (usecs == 0)
+ usecs = 1;
+ pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
+ info ?: "", info ? " " : "", pm_verb(state.event),
+ usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+}
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -341,14 +406,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (!dev->bus)
- goto End;
-
- if (dev->bus->pm) {
+ if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "EARLY ");
error = pm_noirq_op(dev, dev->bus->pm, state);
}
- End:
+
TRACE_RESUME(error);
return error;
}
@@ -363,6 +425,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
void dpm_resume_noirq(pm_message_t state)
{
struct device *dev;
+ ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
transition_started = false;
@@ -376,11 +439,32 @@ void dpm_resume_noirq(pm_message_t state)
pm_dev_err(dev, state, " early", error);
}
mutex_unlock(&dpm_list_mtx);
+ dpm_show_time(starttime, state, "early");
resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
+ * legacy_resume - Execute a legacy (bus or class) resume callback for device.
+ * @dev: Device to resume.
+ * @cb: Resume callback to execute.
+ */
+static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
+{
+ int error;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev);
+
+ error = cb(dev);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
+
+/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -400,7 +484,7 @@ static int device_resume(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->bus->pm, state);
} else if (dev->bus->resume) {
pm_dev_dbg(dev, state, "legacy ");
- error = dev->bus->resume(dev);
+ error = legacy_resume(dev, dev->bus->resume);
}
if (error)
goto End;
@@ -421,7 +505,7 @@ static int device_resume(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->class->pm, state);
} else if (dev->class->resume) {
pm_dev_dbg(dev, state, "legacy class ");
- error = dev->class->resume(dev);
+ error = legacy_resume(dev, dev->class->resume);
}
}
End:
@@ -441,6 +525,7 @@ static int device_resume(struct device *dev, pm_message_t state)
static void dpm_resume(pm_message_t state)
{
struct list_head list;
+ ktime_t starttime = ktime_get();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
@@ -469,6 +554,7 @@ static void dpm_resume(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
+ dpm_show_time(starttime, state, NULL);
}
/**
@@ -521,7 +607,7 @@ static void dpm_complete(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
device_complete(dev, state);
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
mutex_lock(&dpm_list_mtx);
}
@@ -584,10 +670,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error = 0;
- if (!dev->bus)
- return 0;
-
- if (dev->bus->pm) {
+ if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "LATE ");
error = pm_noirq_op(dev, dev->bus->pm, state);
}
@@ -604,6 +687,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
int dpm_suspend_noirq(pm_message_t state)
{
struct device *dev;
+ ktime_t starttime = ktime_get();
int error = 0;
suspend_device_irqs();
@@ -619,11 +703,35 @@ int dpm_suspend_noirq(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
if (error)
dpm_resume_noirq(resume_event(state));
+ else
+ dpm_show_time(starttime, state, "late");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
+ * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
+ * @dev: Device to suspend.
+ * @state: PM transition of the system being carried out.
+ * @cb: Suspend callback to execute.
+ */
+static int legacy_suspend(struct device *dev, pm_message_t state,
+ int (*cb)(struct device *dev, pm_message_t state))
+{
+ int error;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev);
+
+ error = cb(dev, state);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
+
+/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -640,8 +748,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->class->pm, state);
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
- error = dev->class->suspend(dev, state);
- suspend_report_result(dev->class->suspend, error);
+ error = legacy_suspend(dev, state, dev->class->suspend);
}
if (error)
goto End;
@@ -662,8 +769,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->bus->pm, state);
} else if (dev->bus->suspend) {
pm_dev_dbg(dev, state, "legacy ");
- error = dev->bus->suspend(dev, state);
- suspend_report_result(dev->bus->suspend, error);
+ error = legacy_suspend(dev, state, dev->bus->suspend);
}
}
End:
@@ -679,6 +785,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
static int dpm_suspend(pm_message_t state)
{
struct list_head list;
+ ktime_t starttime = ktime_get();
int error = 0;
INIT_LIST_HEAD(&list);
@@ -704,6 +811,8 @@ static int dpm_suspend(pm_message_t state)
}
list_splice(&list, dpm_list.prev);
mutex_unlock(&dpm_list_mtx);
+ if (!error)
+ dpm_show_time(starttime, state, NULL);
return error;
}
@@ -772,7 +881,7 @@ static int dpm_prepare(pm_message_t state)
pm_runtime_get_noresume(dev);
if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
/* Wake-up requested during system sleep transition. */
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
error = -EBUSY;
} else {
error = device_prepare(dev, state);
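
All of the timing added here follows one ktime pattern: sample ktime_get() before the callback, subtract afterwards, and shift the nanosecond delta right by 10 as a cheap approximation of microseconds (the usual initcall_debug convention). A standalone sketch, assuming an arbitrary callback:

static int example_timed_call(struct device *dev, int (*cb)(struct device *))
{
        ktime_t start = ktime_get();
        int error = cb(dev);
        s64 ns = ktime_to_ns(ktime_sub(ktime_get(), start));

        pr_info("call %s returned %d after %lld usecs (approx)\n",
                dev_name(dev), error, (long long)(ns >> 10));
        return error;
}
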
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 5a01ece..f8b044e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev)
dev->bus->pm->runtime_idle(dev);
spin_lock_irq(&dev->power.lock);
+ } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
+ spin_unlock_irq(&dev->power.lock);
+
+ dev->type->pm->runtime_idle(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ } else if (dev->class && dev->class->pm
+ && dev->class->pm->runtime_idle) {
+ spin_unlock_irq(&dev->power.lock);
+
+ dev->class->pm->runtime_idle(dev);
+
+ spin_lock_irq(&dev->power.lock);
}
dev->power.idle_notification = false;
@@ -194,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
spin_lock_irq(&dev->power.lock);
dev->power.runtime_error = retval;
+ } else if (dev->type && dev->type->pm
+ && dev->type->pm->runtime_suspend) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->type->pm->runtime_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
+ } else if (dev->class && dev->class->pm
+ && dev->class->pm->runtime_suspend) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->class->pm->runtime_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
} else {
retval = -ENOSYS;
}
@@ -359,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
spin_lock_irq(&dev->power.lock);
dev->power.runtime_error = retval;
+ } else if (dev->type && dev->type->pm
+ && dev->type->pm->runtime_resume) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->type->pm->runtime_resume(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
+ } else if (dev->class && dev->class->pm
+ && dev->class->pm->runtime_resume) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->class->pm->runtime_resume(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
} else {
retval = -ENOSYS;
}
@@ -701,15 +746,15 @@ EXPORT_SYMBOL_GPL(pm_request_resume);
* @dev: Device to handle.
* @sync: If set and the device is suspended, resume it synchronously.
*
- * Increment the usage count of the device and if it was zero previously,
- * resume it or submit a resume request for it, depending on the value of @sync.
+ * Increment the usage count of the device and resume it or submit a resume
+ * request for it, depending on the value of @sync.
*/
int __pm_runtime_get(struct device *dev, bool sync)
{
- int retval = 1;
+ int retval;
- if (atomic_add_return(1, &dev->power.usage_count) == 1)
- retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
+ atomic_inc(&dev->power.usage_count);
+ retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
return retval;
}
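
With these fallbacks, the runtime PM core now tries bus, then device type, then class callbacks, so a class can provide runtime hooks through its dev_pm_ops. A minimal, hypothetical class wiring (names are illustrative):

static int example_runtime_suspend(struct device *dev)
{
        /* quiesce the device; return 0 on success */
        return 0;
}

static int example_runtime_resume(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops example_class_pm_ops = {
        .runtime_suspend = example_runtime_suspend,
        .runtime_resume  = example_runtime_resume,
};

static struct class example_class = {
        .name = "example",
        .pm   = &example_class_pm_ops,
};
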
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index eb4fa19..ce1fa92 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7101,7 +7101,7 @@ static struct DAC960_privdata DAC960_BA_privdata = {
static struct DAC960_privdata DAC960_LP_privdata = {
.HardwareType = DAC960_LP_Controller,
- .FirmwareType = DAC960_LP_Controller,
+ .FirmwareType = DAC960_V2_Controller,
.InterruptHandler = DAC960_LP_InterruptHandler,
.MemoryWindowSize = DAC960_LP_RegisterWindowSize,
};
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 13bb69d..64a223b 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -735,21 +735,6 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
part_stat_unlock();
}
-/*
- * Ensure we don't create aliases in VI caches
- */
-static inline void
-killalias(struct bio *bio)
-{
- struct bio_vec *bv;
- int i;
-
- if (bio_data_dir(bio) == READ)
- __bio_for_each_segment(bv, bio, i, 0) {
- flush_dcache_page(bv->bv_page);
- }
-}
-
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
@@ -871,7 +856,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
if (buf->flags & BUFFL_FAIL)
bio_endio(buf->bio, -EIO);
else {
- killalias(buf->bio);
+ bio_flush_dcache_pages(buf->bio);
bio_endio(buf->bio, 0);
}
mempool_free(buf, d->bufpool);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 873e594..9291614 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
if (*pos > h->highest_lun)
return 0;
+ if (drv == NULL) /* it's possible for h->drv[] to have holes. */
+ return 0;
+
if (drv->heads == 0)
return 0;
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index f4acd04..df09837 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -3,7 +3,7 @@
#
comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
- depends on !PROC_FS || !INET || !CONNECTOR
+ depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
config BLK_DEV_DRBD
tristate "DRBD Distributed Replicated Block Device support"
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2312d78..2bf3a6e 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1275,7 +1275,7 @@ struct bm_extent {
#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32
+#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
@@ -1371,10 +1371,9 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *,
- struct drbd_backing_dev *);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
@@ -1490,7 +1489,7 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
-extern struct file_operations drbd_proc_fops;
+extern const struct file_operations drbd_proc_fops;
extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 157d1e4..ab871e0 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -27,7 +27,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
@@ -151,7 +150,7 @@ wait_queue_head_t drbd_pp_wait;
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
-static struct block_device_operations drbd_ops = {
+static const struct block_device_operations drbd_ops = {
.owner = THIS_MODULE,
.open = drbd_open,
.release = drbd_release,
@@ -1299,6 +1298,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
dev_err(DEV, "Sending state in drbd_io_error() failed\n");
}
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
lc_destroy(mdev->resync);
mdev->resync = NULL;
lc_destroy(mdev->act_log);
@@ -2973,7 +2973,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
goto out_no_q;
mdev->rq_queue = q;
q->queuedata = mdev;
- blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
disk = alloc_disk(1);
if (!disk)
@@ -2997,6 +2996,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
q->backing_dev_info.congested_data = mdev;
blk_queue_make_request(q, drbd_make_request_26);
+ blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
q->queue_lock = &mdev->req_lock; /* needed since we use */
@@ -3623,7 +3623,7 @@ _drbd_fault_random(struct fault_random_state *rsp)
{
long refresh;
- if (--rsp->count < 0) {
+ if (!rsp->count--) {
get_random_bytes(&refresh, sizeof(refresh));
rsp->state += refresh;
rsp->count = FAULT_RANDOM_REFRESH;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 4e0726a..1292e06 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
* Returns 0 on success, negative return values indicate errors.
* You should call drbd_md_sync() after calling this function.
*/
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size;
@@ -541,7 +541,7 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_ho
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
- size = drbd_new_dev_size(mdev, mdev->ldev);
+ size = drbd_new_dev_size(mdev, mdev->ldev, force);
if (drbd_get_capacity(mdev->this_bdev) != size ||
drbd_bm_capacity(mdev) != size) {
@@ -596,7 +596,7 @@ out:
}
sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
sector_t p_size = mdev->p_size; /* partner's disk size. */
sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
@@ -606,6 +606,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
m_size = drbd_get_max_capacity(bdev);
+ if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
+ dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+ p_size = m_size;
+ }
+
if (p_size && m_size) {
size = min_t(sector_t, p_size, m_size);
} else {
@@ -965,7 +970,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* Prevent shrinking of consistent devices ! */
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
- drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
+ drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
retcode = ERR_DISK_TO_SMALL;
goto force_diskless_dec;
@@ -1052,7 +1057,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
!drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
set_bit(USE_DEGR_WFC_T, &mdev->flags);
- dd = drbd_determin_dev_size(mdev);
+ dd = drbd_determin_dev_size(mdev, 0);
if (dd == dev_size_error) {
retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
@@ -1271,7 +1276,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
+ if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
retcode = ERR_AUTH_ALG_ND;
goto fail;
}
@@ -1504,7 +1509,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
- dd = drbd_determin_dev_size(mdev);
+ dd = drbd_determin_dev_size(mdev, rs.resize_force);
drbd_md_sync(mdev);
put_ldev(mdev);
if (dd == dev_size_error) {
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index bdd0b49..df8ad96 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -38,7 +38,7 @@ static int drbd_proc_open(struct inode *inode, struct file *file);
struct proc_dir_entry *drbd_proc;
-struct file_operations drbd_proc_fops = {
+const struct file_operations drbd_proc_fops = {
.owner = THIS_MODULE,
.open = drbd_proc_open,
.read = seq_read,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c548f24..d065c64 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -28,7 +28,6 @@
#include <asm/uaccess.h>
#include <net/sock.h>
-#include <linux/version.h>
#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
@@ -879,9 +878,13 @@ retry:
if (mdev->cram_hmac_tfm) {
/* drbd_request_state(mdev, NS(conn, WFAuth)); */
- if (!drbd_do_auth(mdev)) {
+ switch (drbd_do_auth(mdev)) {
+ case -1:
dev_err(DEV, "Authentication of peer failed\n");
return -1;
+ case 0:
+ dev_err(DEV, "Authentication of peer failed, trying again.\n");
+ return 0;
}
}
@@ -1202,10 +1205,11 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
case WO_bdev_flush:
case WO_drain_io:
- D_ASSERT(rv == FE_STILL_LIVE);
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+ if (rv == FE_STILL_LIVE) {
+ set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
+ drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+ rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+ }
if (rv == FE_RECYCLED)
return TRUE;
@@ -1220,7 +1224,7 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
if (!epoch) {
dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
- issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
+ issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
if (issue_flush) {
rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
@@ -2866,7 +2870,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(mdev, mdev->ldev) <
+ if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
drbd_get_capacity(mdev->this_bdev) &&
mdev->state.disk >= D_OUTDATED &&
mdev->state.conn < C_CONNECTED) {
@@ -2881,7 +2885,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
#undef min_not_zero
if (get_ldev(mdev)) {
- dd = drbd_determin_dev_size(mdev);
+ dd = drbd_determin_dev_size(mdev, 0);
put_ldev(mdev);
if (dd == dev_size_error)
return FALSE;
@@ -3831,10 +3835,17 @@ static int drbd_do_auth(struct drbd_conf *mdev)
{
dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
- return 0;
+ return -1;
}
#else
#define CHALLENGE_LEN 64
+
+/* Return value:
+ 1 - auth succeeded,
+ 0 - failed, try again (network error),
+ -1 - auth failed, don't try again.
+*/
+
static int drbd_do_auth(struct drbd_conf *mdev)
{
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
@@ -3855,7 +3866,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
(u8 *)mdev->net_conf->shared_secret, key_len);
if (rv) {
dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3878,14 +3889,14 @@ static int drbd_do_auth(struct drbd_conf *mdev)
if (p.length > CHALLENGE_LEN*2) {
dev_err(DEV, "expected AuthChallenge payload too big.\n");
- rv = 0;
+ rv = -1;
goto fail;
}
peers_ch = kmalloc(p.length, GFP_NOIO);
if (peers_ch == NULL) {
dev_err(DEV, "kmalloc of peers_ch failed\n");
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3901,7 +3912,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
dev_err(DEV, "kmalloc of response failed\n");
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3911,7 +3922,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = crypto_hash_digest(&desc, &sg, sg.length, response);
if (rv) {
dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3945,9 +3956,9 @@ static int drbd_do_auth(struct drbd_conf *mdev)
}
right_response = kmalloc(resp_size, GFP_NOIO);
- if (response == NULL) {
+ if (right_response == NULL) {
dev_err(DEV, "kmalloc of right_response failed\n");
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3956,7 +3967,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
if (rv) {
dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3965,6 +3976,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
if (rv)
dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
resp_size, mdev->net_conf->cram_hmac_alg);
+ else
+ rv = -1;
fail:
kfree(peers_ch);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index ed8796f..b453c2b 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -24,7 +24,6 @@
*/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
@@ -34,7 +33,6 @@
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
-#include <linux/mm.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index e0339aa..02b2583 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -860,7 +860,7 @@ static int mg_probe(struct platform_device *plat_dev)
err = -EINVAL;
goto probe_err_2;
}
- host->dev_base = ioremap(rsc->start , rsc->end + 1);
+ host->dev_base = ioremap(rsc->start, resource_size(rsc));
if (!host->dev_base) {
printk(KERN_ERR "%s:%d ioremap fail\n",
__func__, __LINE__);
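The mg_disk fix above matters because ioremap() takes a length, not an end address: passing rsc->end + 1 only happens to work when the region starts at physical address 0, while resource_size() is end - start + 1. A standalone, illustrative calculation with assumed register values:

#include <stdio.h>

struct resource { unsigned long start, end; };

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* same formula as the kernel helper */
}

int main(void)
{
	struct resource rsc = { .start = 0x30000000, .end = 0x30000fff };

	printf("map length %#lx, old code would have asked for %#lx\n",
	       resource_size(&rsc), rsc.end + 1);	/* 0x1000 vs 0x30001000 */
	return 0;
}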
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2ddf03a..68b5957 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
pkt_kobj_remove(pd->kobj_stat);
pkt_kobj_remove(pd->kobj_wqueue);
if (class_pktcdvd)
- device_destroy(class_pktcdvd, pd->pkt_dev);
+ device_unregister(pd->dev);
}
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 652367a..058fbcc 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -195,5 +195,16 @@ config BT_MRVL_SDIO
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
-endmenu
+config BT_ATH3K
+ tristate "Atheros firmware download driver"
+ depends on BT_HCIBTUSB
+ select FW_LOADER
+ help
+ Bluetooth firmware download driver.
+ This driver loads the firmware into the Atheros Bluetooth
+ chipset.
+ Say Y here to compile support for "Atheros firmware download driver"
+ into the kernel or say M to compile it as module (ath3k).
+
+endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b3f57d2..7e5aed5 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o
obj-$(CONFIG_BT_HCIBTUSB) += btusb.o
obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o
+obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
new file mode 100644
index 0000000..add9485
--- /dev/null
+++ b/drivers/bluetooth/ath3k.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/usb.h>
+#include <net/bluetooth/bluetooth.h>
+
+#define VERSION "1.0"
+
+
+static struct usb_device_id ath3k_table[] = {
+ /* Atheros AR3011 */
+ { USB_DEVICE(0x0CF3, 0x3000) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ath3k_table);
+
+#define USB_REQ_DFU_DNLOAD 1
+#define BULK_SIZE 4096
+
+struct ath3k_data {
+ struct usb_device *udev;
+ u8 *fw_data;
+ u32 fw_size;
+ u32 fw_sent;
+};
+
+static int ath3k_load_firmware(struct ath3k_data *data,
+ unsigned char *firmware,
+ int count)
+{
+ u8 *send_buf;
+ int err, pipe, len, size, sent = 0;
+
+ BT_DBG("ath3k %p udev %p", data, data->udev);
+
+ pipe = usb_sndctrlpipe(data->udev, 0);
+
+ if ((usb_control_msg(data->udev, pipe,
+ USB_REQ_DFU_DNLOAD,
+ USB_TYPE_VENDOR, 0, 0,
+ firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
+ BT_ERR("Can't change to loading configuration err");
+ return -EBUSY;
+ }
+ sent += 20;
+ count -= 20;
+
+ send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+ if (!send_buf) {
+ BT_ERR("Can't allocate memory chunk for firmware");
+ return -ENOMEM;
+ }
+
+ while (count) {
+ size = min_t(uint, count, BULK_SIZE);
+ pipe = usb_sndbulkpipe(data->udev, 0x02);
+ memcpy(send_buf, firmware + sent, size);
+
+ err = usb_bulk_msg(data->udev, pipe, send_buf, size,
+ &len, 3000);
+
+ if (err || (len != size)) {
+ BT_ERR("Error in firmware loading err = %d,"
+ "len = %d, size = %d", err, len, size);
+ goto error;
+ }
+
+ sent += size;
+ count -= size;
+ }
+
+ kfree(send_buf);
+ return 0;
+
+error:
+ kfree(send_buf);
+ return err;
+}
+
+static int ath3k_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ const struct firmware *firmware;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct ath3k_data *data;
+ int size;
+
+ BT_DBG("intf %p id %p", intf, id);
+
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->udev = udev;
+
+ if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
+ kfree(data);
+ return -EIO;
+ }
+
+ size = max_t(uint, firmware->size, 4096);
+ data->fw_data = kmalloc(size, GFP_KERNEL);
+ if (!data->fw_data) {
+ release_firmware(firmware);
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ memcpy(data->fw_data, firmware->data, firmware->size);
+ data->fw_size = firmware->size;
+ data->fw_sent = 0;
+ release_firmware(firmware);
+
+ usb_set_intfdata(intf, data);
+ if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) {
+ usb_set_intfdata(intf, NULL);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ath3k_disconnect(struct usb_interface *intf)
+{
+ struct ath3k_data *data = usb_get_intfdata(intf);
+
+ BT_DBG("ath3k_disconnect intf %p", intf);
+
+ kfree(data->fw_data);
+ kfree(data);
+}
+
+static struct usb_driver ath3k_driver = {
+ .name = "ath3k",
+ .probe = ath3k_probe,
+ .disconnect = ath3k_disconnect,
+ .id_table = ath3k_table,
+};
+
+static int __init ath3k_init(void)
+{
+ BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION);
+ return usb_register(&ath3k_driver);
+}
+
+static void __exit ath3k_exit(void)
+{
+ usb_deregister(&ath3k_driver);
+}
+
+module_init(ath3k_init);
+module_exit(ath3k_exit);
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("ath3k-1.fw");
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 2acdc60..c2cf811 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -503,7 +503,9 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst)
unsigned int iobase;
unsigned char reg;
- BUG_ON(!info->hdev);
+ if (!info || !info->hdev)
+ /* our irq handler is shared */
+ return IRQ_NONE;
if (!test_bit(CARD_READY, &(info->hw_state)))
return IRQ_HANDLED;
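This and the following bt3c_cs/btuart_cs/dtl1_cs hunks replace a BUG_ON with a quiet IRQ_NONE because the interrupt line is shared: the handler can run for another device's interrupt, or before this card's hci device exists. The general pattern for a handler registered with IRQF_SHARED, sketched with hypothetical foo_* names:

#include <linux/interrupt.h>

struct foo_info {
	void *hdev;		/* NULL until the device is fully registered */
	int irq_pending;	/* stands in for reading the card's IRQ status */
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_info *info = dev_id;

	if (!info || !info->hdev)	/* shared line: may not be ours yet */
		return IRQ_NONE;

	if (!info->irq_pending)		/* raised by the other device on the line */
		return IRQ_NONE;

	/* ... acknowledge and service the interrupt ... */
	return IRQ_HANDLED;
}

/* registered with: request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", info); */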
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index d814a27..9f5926a 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -345,7 +345,9 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
int iir;
irqreturn_t r = IRQ_NONE;
- BUG_ON(!info->hdev);
+ if (!info || !info->hdev)
+ /* our irq handler is shared */
+ return IRQ_NONE;
iobase = info->p_dev->io.BasePort1;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa..57d965b 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
exit:
sdio_release_host(card->func);
+ kfree(tmpbuf);
return ret;
}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index d339464..91c5230 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -295,7 +295,9 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
int iir, lsr;
irqreturn_t r = IRQ_NONE;
- BUG_ON(!info->hdev);
+ if (!info || !info->hdev)
+ /* our irq handler is shared */
+ return IRQ_NONE;
iobase = info->p_dev->io.BasePort1;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 4d29059..a699f09 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -307,6 +307,7 @@ static void btusb_bulk_complete(struct urb *urb)
return;
usb_anchor_urb(urb, &data->bulk_anchor);
+ usb_mark_last_busy(data->udev);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 4f02a6f..6975919 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -299,7 +299,9 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
int iir, lsr;
irqreturn_t r = IRQ_NONE;
- BUG_ON(!info->hdev);
+ if (!info || !info->hdev)
+ /* our irq handler is shared */
+ return IRQ_NONE;
iobase = info->p_dev->io.BasePort1;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 6f31c94..e023682 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -502,7 +502,7 @@ config BRIQ_PANEL
config BFIN_OTP
tristate "Blackfin On-Chip OTP Memory Support"
- depends on BLACKFIN && (BF52x || BF54x)
+ depends on BLACKFIN && (BF51x || BF52x || BF54x)
default y
help
If you say Y here, you will get support for a character device
@@ -669,7 +669,7 @@ config VIRTIO_CONSOLE
config HVCS
tristate "IBM Hypervisor Virtual Console Server support"
- depends on PPC_PSERIES
+ depends on PPC_PSERIES && HVC_CONSOLE
help
Partitionable IBM Power5 ppc64 machines allow hosting of
firmware virtual consoles from one Linux partition by
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 2fb2e6c..fd50ead 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -728,6 +728,7 @@ int __init agp_amd64_init(void)
if (agp_off)
return -EINVAL;
+
err = pci_register_driver(&agp_amd64_pci_driver);
if (err < 0)
return err;
@@ -764,19 +765,28 @@ int __init agp_amd64_init(void)
return err;
}
+static int __init agp_amd64_mod_init(void)
+{
+#ifndef MODULE
+ if (gart_iommu_aperture)
+ return agp_bridges_found ? 0 : -ENODEV;
+#endif
+ return agp_amd64_init();
+}
+
static void __exit agp_amd64_cleanup(void)
{
+#ifndef MODULE
+ if (gart_iommu_aperture)
+ return;
+#endif
if (aperture_resource)
release_resource(aperture_resource);
pci_unregister_driver(&agp_amd64_pci_driver);
}
-/* On AMD64 the PCI driver needs to initialize this driver early
- for the IOMMU, so it has to be called via a backdoor. */
-#ifndef CONFIG_GART_IOMMU
-module_init(agp_amd64_init);
+module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);
-#endif
MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index a56ca08..c3ab46d 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -285,18 +285,22 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
{
int error;
- if (agp_off)
- return -ENODEV;
+ if (agp_off) {
+ error = -ENODEV;
+ goto err_put_bridge;
+ }
if (!bridge->dev) {
printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_put_bridge;
}
/* Grab reference on the chipset driver. */
if (!try_module_get(bridge->driver->owner)) {
dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_put_bridge;
}
error = agp_backend_initialize(bridge);
@@ -326,6 +330,7 @@ frontend_err:
agp_backend_cleanup(bridge);
err_out:
module_put(bridge->driver->owner);
+err_put_bridge:
agp_put_bridge(bridge);
return error;
}
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 9047b27..58752b7 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -488,9 +488,8 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
handle = obj;
do {
status = acpi_get_object_info(handle, &info);
- if (ACPI_SUCCESS(status)) {
+ if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
/* TBD check _CID also */
- info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0';
match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
kfree(info);
if (match) {
@@ -509,6 +508,9 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
handle = parent;
} while (ACPI_SUCCESS(status));
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* found no enclosing IOC */
+
if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
return AE_OK;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 30c36ac..3999a5f 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2460,10 +2460,14 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
&bridge->mode);
}
- if (bridge->driver->mask_memory == intel_i965_mask_memory)
+ if (bridge->driver->mask_memory == intel_i965_mask_memory) {
if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
dev_err(&intel_private.pcidev->dev,
"set gfx device dma mask 36bit failed!\n");
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(36));
+ }
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
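The intel-agp hunk above sets the coherent (consistent) DMA mask alongside the streaming mask: pci_set_dma_mask() alone leaves the coherent mask at its 32-bit default, so dma_alloc_coherent() buffers would still be forced below 4 GB. The usual pairing, as a sketch (the 36-bit limit mirrors the i965 case above; pdev is assumed valid and enabled):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_set_masks(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err)
		return err;	/* caller falls back to 32-bit addressing */

	/* the coherent mask does not follow the streaming mask automatically */
	return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
}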
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index e989f67..3d9c61e 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -158,10 +158,11 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
goto out;
}
}
-out_unlock:
- mutex_unlock(&rng_mutex);
out:
return ret ? : err;
+out_unlock:
+ mutex_unlock(&rng_mutex);
+ goto out;
}
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index bdaef8e9..64fe0a7 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -114,7 +114,7 @@ static struct virtio_device_id id_table[] = {
{ 0 },
};
-static struct virtio_driver virtio_rng = {
+static struct virtio_driver virtio_rng_driver = {
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
@@ -124,12 +124,12 @@ static struct virtio_driver virtio_rng = {
static int __init init(void)
{
- return register_virtio_driver(&virtio_rng);
+ return register_virtio_driver(&virtio_rng_driver);
}
static void __exit fini(void)
{
- unregister_virtio_driver(&virtio_rng);
+ unregister_virtio_driver(&virtio_rng_driver);
}
module_init(init);
module_exit(fini);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index d2e6980..176f175 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -64,6 +64,7 @@
#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/ctype.h>
+#include <linux/pnp.h>
#ifdef CONFIG_PPC_OF
#include <linux/of_device.h>
@@ -1919,7 +1920,7 @@ struct SPMITable {
s8 spmi_id[1]; /* A '\0' terminated array starts here. */
};
-static __devinit int try_init_acpi(struct SPMITable *spmi)
+static __devinit int try_init_spmi(struct SPMITable *spmi)
{
struct smi_info *info;
u8 addr_space;
@@ -1940,7 +1941,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
return -ENOMEM;
}
- info->addr_source = "ACPI";
+ info->addr_source = "SPMI";
/* Figure out the interface type. */
switch (spmi->InterfaceType) {
@@ -2002,7 +2003,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
return 0;
}
-static __devinit void acpi_find_bmc(void)
+static __devinit void spmi_find_bmc(void)
{
acpi_status status;
struct SPMITable *spmi;
@@ -2020,9 +2021,106 @@ static __devinit void acpi_find_bmc(void)
if (status != AE_OK)
return;
- try_init_acpi(spmi);
+ try_init_spmi(spmi);
}
}
+
+static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+{
+ struct acpi_device *acpi_dev;
+ struct smi_info *info;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long tmp;
+
+ acpi_dev = pnp_acpi_device(dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->addr_source = "ACPI";
+
+ handle = acpi_dev->handle;
+
+ /* _IFT tells us the interface type: KCS, BT, etc */
+ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ goto err_free;
+
+ switch (tmp) {
+ case 1:
+ info->si_type = SI_KCS;
+ break;
+ case 2:
+ info->si_type = SI_SMIC;
+ break;
+ case 3:
+ info->si_type = SI_BT;
+ break;
+ default:
+ dev_info(&dev->dev, "unknown interface type %lld\n", tmp);
+ goto err_free;
+ }
+
+ if (pnp_port_valid(dev, 0)) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ info->io.addr_data = pnp_port_start(dev, 0);
+ } else if (pnp_mem_valid(dev, 0)) {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ info->io.addr_data = pnp_mem_start(dev, 0);
+ } else {
+ dev_err(&dev->dev, "no I/O or memory address\n");
+ goto err_free;
+ }
+
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = 0;
+
+ /* If _GPE exists, use it; otherwise use standard interrupts */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ info->irq = tmp;
+ info->irq_setup = acpi_gpe_irq_setup;
+ } else if (pnp_irq_valid(dev, 0)) {
+ info->irq = pnp_irq(dev, 0);
+ info->irq_setup = std_irq_setup;
+ }
+
+ info->dev = &acpi_dev->dev;
+ pnp_set_drvdata(dev, info);
+
+ return try_smi_init(info);
+
+err_free:
+ kfree(info);
+ return -EINVAL;
+}
+
+static void __devexit ipmi_pnp_remove(struct pnp_dev *dev)
+{
+ struct smi_info *info = pnp_get_drvdata(dev);
+
+ cleanup_one_si(info);
+}
+
+static const struct pnp_device_id pnp_dev_table[] = {
+ {"IPI0001", 0},
+ {"", 0},
+};
+
+static struct pnp_driver ipmi_pnp_driver = {
+ .name = DEVICE_NAME,
+ .probe = ipmi_pnp_probe,
+ .remove = __devexit_p(ipmi_pnp_remove),
+ .id_table = pnp_dev_table,
+};
#endif
#ifdef CONFIG_DMI
@@ -2202,7 +2300,6 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
int rv;
int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
struct smi_info *info;
- int first_reg_offset = 0;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -2241,9 +2338,6 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
info->addr_source_cleanup = ipmi_pci_cleanup;
info->addr_source_data = pdev;
- if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
- first_reg_offset = 1;
-
if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
info->io_setup = port_setup;
info->io.addr_type = IPMI_IO_ADDR_SPACE;
@@ -3108,7 +3202,10 @@ static __devinit int init_ipmi_si(void)
#endif
#ifdef CONFIG_ACPI
- acpi_find_bmc();
+ spmi_find_bmc();
+#endif
+#ifdef CONFIG_ACPI
+ pnp_register_driver(&ipmi_pnp_driver);
#endif
#ifdef CONFIG_PCI
@@ -3233,6 +3330,9 @@ static __exit void cleanup_ipmi_si(void)
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
#endif
+#ifdef CONFIG_ACPI
+ pnp_unregister_driver(&ipmi_pnp_driver);
+#endif
#ifdef CONFIG_PPC_OF
of_unregister_platform_driver(&ipmi_of_platform_driver);
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 5619007..f706b1d 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -233,7 +233,8 @@ int setkeycode(unsigned int scancode, unsigned int keycode)
}
/*
- * Making beeps and bells.
+ * Making beeps and bells. Note that we prefer beeps to bells, but when
+ * shutting the sound off we do both.
*/
static int kd_sound_helper(struct input_handle *handle, void *data)
@@ -242,9 +243,12 @@ static int kd_sound_helper(struct input_handle *handle, void *data)
struct input_dev *dev = handle->dev;
if (test_bit(EV_SND, dev->evbit)) {
- if (test_bit(SND_TONE, dev->sndbit))
+ if (test_bit(SND_TONE, dev->sndbit)) {
input_inject_event(handle, EV_SND, SND_TONE, *hz);
- if (test_bit(SND_BELL, handle->dev->sndbit))
+ if (*hz)
+ return 0;
+ }
+ if (test_bit(SND_BELL, dev->sndbit))
input_inject_event(handle, EV_SND, SND_BELL, *hz ? 1 : 0);
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index be832b6..48788db 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -395,6 +395,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
unsigned long p = *ppos;
ssize_t low_count, read, sz;
char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+ int err = 0;
read = 0;
if (p < (unsigned long) high_memory) {
@@ -441,12 +442,16 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
return -ENOMEM;
while (count > 0) {
sz = size_inside_page(p, count);
+ if (!is_vmalloc_or_module_addr((void *)p)) {
+ err = -ENXIO;
+ break;
+ }
sz = vread(kbuf, (char *)p, sz);
if (!sz)
break;
if (copy_to_user(buf, kbuf, sz)) {
- free_page((unsigned long)kbuf);
- return -EFAULT;
+ err = -EFAULT;
+ break;
}
count -= sz;
buf += sz;
@@ -455,8 +460,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
}
free_page((unsigned long)kbuf);
}
- *ppos = p;
- return read;
+ *ppos = p;
+ return read ? read : err;
}
@@ -520,6 +525,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
ssize_t wrote = 0;
ssize_t virtr = 0;
char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+ int err = 0;
if (p < (unsigned long) high_memory) {
unsigned long to_write = min_t(unsigned long, count,
@@ -540,14 +546,16 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
unsigned long sz = size_inside_page(p, count);
unsigned long n;
+ if (!is_vmalloc_or_module_addr((void *)p)) {
+ err = -ENXIO;
+ break;
+ }
n = copy_from_user(kbuf, buf, sz);
if (n) {
- if (wrote + virtr)
- break;
- free_page((unsigned long)kbuf);
- return -EFAULT;
+ err = -EFAULT;
+ break;
}
- sz = vwrite(kbuf, (char *)p, sz);
+ vwrite(kbuf, (char *)p, sz);
count -= sz;
buf += sz;
virtr += sz;
@@ -556,8 +564,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
free_page((unsigned long)kbuf);
}
- *ppos = p;
- return virtr + wrote;
+ *ppos = p;
+ return virtr + wrote ? : err;
}
#endif
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index d3400b2..2ad7d37 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -358,7 +358,7 @@ struct port {
u8 update_flow_control;
struct ctrl_ul ctrl_ul;
struct ctrl_dl ctrl_dl;
- struct kfifo *fifo_ul;
+ struct kfifo fifo_ul;
void __iomem *dl_addr[2];
u32 dl_size[2];
u8 toggle_dl;
@@ -685,8 +685,6 @@ static int nozomi_read_config_table(struct nozomi *dc)
dump_table(dc);
for (i = PORT_MDM; i < MAX_PORT; i++) {
- dc->port[i].fifo_ul =
- kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL);
memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
}
@@ -798,7 +796,7 @@ static int send_data(enum port_type index, struct nozomi *dc)
struct tty_struct *tty = tty_port_tty_get(&port->port);
/* Get data from tty and place in buf for now */
- size = __kfifo_get(port->fifo_ul, dc->send_buf,
+ size = kfifo_out(&port->fifo_ul, dc->send_buf,
ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
if (size == 0) {
@@ -988,11 +986,11 @@ static int receive_flow_control(struct nozomi *dc)
} else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
- if (__kfifo_len(dc->port[port].fifo_ul)) {
+ if (kfifo_len(&dc->port[port].fifo_ul)) {
DBG1("Enable interrupt (0x%04X) on port: %d",
enable_ier, port);
DBG1("Data in buffer [%d], enable transmit! ",
- __kfifo_len(dc->port[port].fifo_ul));
+ kfifo_len(&dc->port[port].fifo_ul));
enable_transmit_ul(port, dc);
} else {
DBG1("No data in buffer...");
@@ -1433,6 +1431,16 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
goto err_free_sbuf;
}
+ for (i = PORT_MDM; i < MAX_PORT; i++) {
+ if (kfifo_alloc(&dc->port[i].fifo_ul,
+ FIFO_BUFFER_SIZE_UL, GFP_ATOMIC)) {
+ dev_err(&pdev->dev,
+ "Could not allocate kfifo buffer\n");
+ ret = -ENOMEM;
+ goto err_free_kfifo;
+ }
+ }
+
spin_lock_init(&dc->spin_mutex);
nozomi_setup_private_data(dc);
@@ -1445,7 +1453,7 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
NOZOMI_NAME, dc);
if (unlikely(ret)) {
dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
- goto err_free_sbuf;
+ goto err_free_kfifo;
}
DBG1("base_addr: %p", dc->base_addr);
@@ -1464,13 +1472,28 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
dc->state = NOZOMI_STATE_ENABLED;
for (i = 0; i < MAX_PORT; i++) {
+ struct device *tty_dev;
+
mutex_init(&dc->port[i].tty_sem);
tty_port_init(&dc->port[i].port);
- tty_register_device(ntty_driver, dc->index_start + i,
+ tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
&pdev->dev);
+
+ if (IS_ERR(tty_dev)) {
+ ret = PTR_ERR(tty_dev);
+ dev_err(&pdev->dev, "Could not allocate tty?\n");
+ goto err_free_tty;
+ }
}
+
return 0;
+err_free_tty:
+ for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
+ tty_unregister_device(ntty_driver, i);
+err_free_kfifo:
+ for (i = 0; i < MAX_PORT; i++)
+ kfifo_free(&dc->port[i].fifo_ul);
err_free_sbuf:
kfree(dc->send_buf);
iounmap(dc->base_addr);
@@ -1536,8 +1559,7 @@ static void __devexit nozomi_card_exit(struct pci_dev *pdev)
free_irq(pdev->irq, dc);
for (i = 0; i < MAX_PORT; i++)
- if (dc->port[i].fifo_ul)
- kfifo_free(dc->port[i].fifo_ul);
+ kfifo_free(&dc->port[i].fifo_ul);
kfree(dc->send_buf);
@@ -1629,10 +1651,10 @@ static void ntty_close(struct tty_struct *tty, struct file *file)
dc->open_ttys--;
port->count--;
- tty_port_tty_set(port, NULL);
if (port->count == 0) {
DBG1("close: %d", nport->token_dl);
+ tty_port_tty_set(port, NULL);
spin_lock_irqsave(&dc->spin_mutex, flags);
dc->last_ier &= ~(nport->token_dl);
writew(dc->last_ier, dc->reg_ier);
@@ -1673,7 +1695,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
goto exit;
}
- rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count);
+ rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count);
/* notify card */
if (unlikely(dc == NULL)) {
@@ -1721,7 +1743,7 @@ static int ntty_write_room(struct tty_struct *tty)
if (!port->port.count)
goto exit;
- room = port->fifo_ul->size - __kfifo_len(port->fifo_ul);
+ room = port->fifo_ul.size - kfifo_len(&port->fifo_ul);
exit:
mutex_unlock(&port->tty_sem);
@@ -1878,7 +1900,7 @@ static s32 ntty_chars_in_buffer(struct tty_struct *tty)
goto exit_in_buffer;
}
- rval = __kfifo_len(port->fifo_ul);
+ rval = kfifo_len(&port->fifo_ul);
exit_in_buffer:
return rval;
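The nozomi changes above are part of the tree-wide move to the 2.6.33 kfifo API: struct kfifo is now embedded in the owning structure, kfifo_alloc() returns an error code instead of a pointer, and kfifo_in()/kfifo_out() replace __kfifo_put()/__kfifo_get(). A minimal sketch of the new usage (struct foo and the sizes are made up):

#include <linux/kfifo.h>
#include <linux/slab.h>

struct foo {
	struct kfifo fifo;		/* embedded, no longer a pointer */
};

static int foo_init(struct foo *f)
{
	return kfifo_alloc(&f->fifo, 4096, GFP_KERNEL);	/* 0 on success */
}

static void foo_move_data(struct foo *f)
{
	unsigned char buf[64] = { 0 };
	unsigned int copied;

	kfifo_in(&f->fifo, buf, sizeof(buf));		/* was __kfifo_put() */
	copied = kfifo_out(&f->fifo, buf, sizeof(buf));	/* was __kfifo_get() */
	(void)copied;
}

static void foo_exit(struct foo *f)
{
	kfifo_free(&f->fifo);
}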
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index 8c7df5b..f808109 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/mutex.h>
+#include <linux/jiffies.h>
#include <asm/hardware/dec21285.h>
#include <asm/io.h>
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8258982..2849713 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1051,12 +1051,6 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
/* like a named pipe */
}
- /*
- * If we gave the user some bytes, update the access time.
- */
- if (count)
- file_accessed(file);
-
return (count ? count : retval);
}
@@ -1107,7 +1101,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
size_t ret;
- struct inode *inode = file->f_path.dentry->d_inode;
ret = write_pool(&blocking_pool, buffer, count);
if (ret)
@@ -1116,8 +1109,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
if (ret)
return ret;
- inode->i_mtime = current_fs_time(inode->i_sb);
- mark_inode_dirty(inode);
return (ssize_t)count;
}
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 8c262aaf7..bba727c 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -50,7 +50,6 @@
#include <linux/err.h>
#include <linux/kfifo.h>
#include <linux/platform_device.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -487,7 +486,7 @@ static struct sonypi_device {
int camera_power;
int bluetooth_power;
struct mutex lock;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
wait_queue_head_t fifo_proc_list;
struct fasync_struct *fifo_async;
@@ -496,7 +495,7 @@ static struct sonypi_device {
struct input_dev *input_jog_dev;
struct input_dev *input_key_dev;
struct work_struct input_work;
- struct kfifo *input_fifo;
+ struct kfifo input_fifo;
spinlock_t input_fifo_lock;
} sonypi_device;
@@ -777,8 +776,9 @@ static void input_keyrelease(struct work_struct *work)
{
struct sonypi_keypress kp;
- while (kfifo_get(sonypi_device.input_fifo, (unsigned char *)&kp,
- sizeof(kp)) == sizeof(kp)) {
+ while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp,
+ sizeof(kp), &sonypi_device.input_fifo_lock)
+ == sizeof(kp)) {
msleep(10);
input_report_key(kp.dev, kp.key, 0);
input_sync(kp.dev);
@@ -827,8 +827,9 @@ static void sonypi_report_input_event(u8 event)
if (kp.dev) {
input_report_key(kp.dev, kp.key, 1);
input_sync(kp.dev);
- kfifo_put(sonypi_device.input_fifo,
- (unsigned char *)&kp, sizeof(kp));
+ kfifo_in_locked(&sonypi_device.input_fifo,
+ (unsigned char *)&kp, sizeof(kp),
+ &sonypi_device.input_fifo_lock);
schedule_work(&sonypi_device.input_work);
}
}
@@ -880,7 +881,8 @@ found:
acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event);
#endif
- kfifo_put(sonypi_device.fifo, (unsigned char *)&event, sizeof(event));
+ kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
+ sizeof(event), &sonypi_device.fifo_lock);
kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
wake_up_interruptible(&sonypi_device.fifo_proc_list);
@@ -902,14 +904,13 @@ static int sonypi_misc_release(struct inode *inode, struct file *file)
static int sonypi_misc_open(struct inode *inode, struct file *file)
{
- lock_kernel();
mutex_lock(&sonypi_device.lock);
/* Flush input queue on first open */
if (!sonypi_device.open_count)
- kfifo_reset(sonypi_device.fifo);
+ kfifo_reset(&sonypi_device.fifo);
sonypi_device.open_count++;
mutex_unlock(&sonypi_device.lock);
- unlock_kernel();
+
return 0;
}
@@ -919,17 +920,18 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
ssize_t ret;
unsigned char c;
- if ((kfifo_len(sonypi_device.fifo) == 0) &&
+ if ((kfifo_len(&sonypi_device.fifo) == 0) &&
(file->f_flags & O_NONBLOCK))
return -EAGAIN;
ret = wait_event_interruptible(sonypi_device.fifo_proc_list,
- kfifo_len(sonypi_device.fifo) != 0);
+ kfifo_len(&sonypi_device.fifo) != 0);
if (ret)
return ret;
while (ret < count &&
- (kfifo_get(sonypi_device.fifo, &c, sizeof(c)) == sizeof(c))) {
+ (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c),
+ &sonypi_device.fifo_lock) == sizeof(c))) {
if (put_user(c, buf++))
return -EFAULT;
ret++;
@@ -946,15 +948,15 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_device.fifo_proc_list, wait);
- if (kfifo_len(sonypi_device.fifo))
+ if (kfifo_len(&sonypi_device.fifo))
return POLLIN | POLLRDNORM;
return 0;
}
-static int sonypi_misc_ioctl(struct inode *ip, struct file *fp,
+static long sonypi_misc_ioctl(struct file *fp,
unsigned int cmd, unsigned long arg)
{
- int ret = 0;
+ long ret = 0;
void __user *argp = (void __user *)arg;
u8 val8;
u16 val16;
@@ -1070,7 +1072,8 @@ static const struct file_operations sonypi_misc_fops = {
.open = sonypi_misc_open,
.release = sonypi_misc_release,
.fasync = sonypi_misc_fasync,
- .ioctl = sonypi_misc_ioctl,
+ .unlocked_ioctl = sonypi_misc_ioctl,
+ .llseek = no_llseek,
};
static struct miscdevice sonypi_misc_device = {
@@ -1313,11 +1316,10 @@ static int __devinit sonypi_probe(struct platform_device *dev)
"http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n");
spin_lock_init(&sonypi_device.fifo_lock);
- sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL,
- &sonypi_device.fifo_lock);
- if (IS_ERR(sonypi_device.fifo)) {
+ error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL);
+ if (error) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
- return PTR_ERR(sonypi_device.fifo);
+ return error;
}
init_waitqueue_head(&sonypi_device.fifo_proc_list);
@@ -1393,12 +1395,10 @@ static int __devinit sonypi_probe(struct platform_device *dev)
}
spin_lock_init(&sonypi_device.input_fifo_lock);
- sonypi_device.input_fifo =
- kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL,
- &sonypi_device.input_fifo_lock);
- if (IS_ERR(sonypi_device.input_fifo)) {
+ error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (error) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
- error = PTR_ERR(sonypi_device.input_fifo);
goto err_inpdev_unregister;
}
@@ -1423,7 +1423,7 @@ static int __devinit sonypi_probe(struct platform_device *dev)
pci_disable_device(pcidev);
err_put_pcidev:
pci_dev_put(pcidev);
- kfifo_free(sonypi_device.fifo);
+ kfifo_free(&sonypi_device.fifo);
return error;
}
@@ -1438,7 +1438,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
if (useinput) {
input_unregister_device(sonypi_device.input_key_dev);
input_unregister_device(sonypi_device.input_jog_dev);
- kfifo_free(sonypi_device.input_fifo);
+ kfifo_free(&sonypi_device.input_fifo);
}
misc_deregister(&sonypi_misc_device);
@@ -1451,7 +1451,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
pci_dev_put(sonypi_device.dev);
}
- kfifo_free(sonypi_device.fifo);
+ kfifo_free(&sonypi_device.fifo);
return 0;
}
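sonypi shares both fifos between its interrupt path and process context, so the conversion above uses the _locked kfifo helpers, which take the spinlock that the old pointer-returning kfifo_alloc() used to embed. A sketch of that pairing (event_fifo is assumed to have been set up with kfifo_alloc() at probe time):

#include <linux/kfifo.h>
#include <linux/spinlock.h>

static struct kfifo event_fifo;			/* kfifo_alloc()'d at probe */
static DEFINE_SPINLOCK(event_lock);

static void push_event(unsigned char event)	/* e.g. from the IRQ handler */
{
	kfifo_in_locked(&event_fifo, &event, sizeof(event), &event_lock);
}

static int pop_event(unsigned char *event)	/* e.g. from read() */
{
	return kfifo_out_locked(&event_fifo, event, sizeof(*event),
				&event_lock) == sizeof(*event);
}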
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 663cd15d..f8bc79f 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -68,7 +68,7 @@
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-
+#include <linux/smp_lock.h>
#include <linux/toshiba.h>
#define TOSH_MINOR_DEV 181
@@ -88,13 +88,13 @@ static int tosh_date;
static int tosh_sci;
static int tosh_fan;
-static int tosh_ioctl(struct inode *, struct file *, unsigned int,
+static long tosh_ioctl(struct file *, unsigned int,
unsigned long);
static const struct file_operations tosh_fops = {
.owner = THIS_MODULE,
- .ioctl = tosh_ioctl,
+ .unlocked_ioctl = tosh_ioctl,
};
static struct miscdevice tosh_device = {
@@ -252,8 +252,7 @@ int tosh_smm(SMMRegisters *regs)
EXPORT_SYMBOL(tosh_smm);
-static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
- unsigned long arg)
+static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
SMMRegisters regs;
SMMRegisters __user *argp = (SMMRegisters __user *)arg;
@@ -275,13 +274,16 @@ static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
return -EINVAL;
/* do we need to emulate the fan ? */
+ lock_kernel();
if (tosh_fan==1) {
if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) {
err = tosh_emulate_fan(&regs);
+ unlock_kernel();
break;
}
}
err = tosh_smm(&regs);
+ unlock_kernel();
break;
default:
return -EINVAL;
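The toshiba.c hunks above are a typical .ioctl to .unlocked_ioctl conversion: the handler loses the inode argument, returns long, and is no longer called with the big kernel lock held, so lock_kernel()/unlock_kernel() are pushed down around only the code that still needs them. The shape of such a conversion, with hypothetical foo_* names:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>

static long foo_do_cmd(struct file *fp, unsigned int cmd, unsigned long arg)
{
	return cmd ? 0 : -EINVAL;	/* stand-in for the real command dispatch */
}

static long foo_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	long ret;

	lock_kernel();			/* only while the body still relies on the BKL */
	ret = foo_do_cmd(fp, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,	/* was: .ioctl = foo_old_ioctl */
};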
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ecba494..f584407 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -39,12 +39,12 @@
struct tpm_inf_dev {
int iotype;
- void __iomem *mem_base; /* MMIO ioremap'd addr */
- unsigned long map_base; /* phys MMIO base */
- unsigned long map_size; /* MMIO region size */
- unsigned int index_off; /* index register offset */
+ void __iomem *mem_base; /* MMIO ioremap'd addr */
+ unsigned long map_base; /* phys MMIO base */
+ unsigned long map_size; /* MMIO region size */
+ unsigned int index_off; /* index register offset */
- unsigned int data_regs; /* Data registers */
+ unsigned int data_regs; /* Data registers */
unsigned int data_size;
unsigned int config_port; /* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
.miscdev = {.fops = &inf_ops,},
};
-static const struct pnp_device_id tpm_pnp_tbl[] = {
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
/* Infineon TPMs */
{"IFX0101", 0},
{"IFX0102", 0},
{"", 0}
};
-MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
!(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
- tpm_dev.iotype = TPM_INF_IO_PORT;
+ tpm_dev.iotype = TPM_INF_IO_PORT;
tpm_dev.config_port = pnp_port_start(dev, 0);
tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
goto err_last;
}
} else if (pnp_mem_valid(dev, 0) &&
- !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
- tpm_dev.iotype = TPM_INF_IO_MEM;
+ tpm_dev.iotype = TPM_INF_IO_MEM;
tpm_dev.map_base = pnp_mem_start(dev, 0);
tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
"product id 0x%02x%02x"
"%s\n",
tpm_dev.iotype == TPM_INF_IO_PORT ?
- tpm_dev.config_port :
- tpm_dev.map_base + tpm_dev.index_off,
+ tpm_dev.config_port :
+ tpm_dev.map_base + tpm_dev.index_off,
tpm_dev.iotype == TPM_INF_IO_PORT ?
- tpm_dev.data_regs :
- tpm_dev.map_base + tpm_dev.data_regs,
+ tpm_dev.data_regs :
+ tpm_dev.map_base + tpm_dev.data_regs,
version[0], version[1],
vendorid[0], vendorid[1],
productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
iounmap(tpm_dev.mem_base);
release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
}
+ tpm_dev_vendor_release(chip);
tpm_remove_hardware(chip->dev);
}
}
+static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+ int rc;
+ if (chip) {
+ u8 savestate[] = {
+ 0, 193, /* TPM_TAG_RQU_COMMAND */
+ 0, 0, 0, 10, /* blob length (in bytes) */
+ 0, 0, 0, 152 /* TPM_ORD_SaveState */
+ };
+ dev_info(&dev->dev, "saving TPM state\n");
+ rc = tpm_inf_send(chip, savestate, sizeof(savestate));
+ if (rc < 0) {
+ dev_err(&dev->dev, "error while saving TPM state\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+{
+ /* Re-configure TPM after suspending */
+ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+ /* activate register */
+ tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+ tpm_config_out(0x01, TPM_INF_DATA);
+ tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ /* disable RESET, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+ return tpm_pm_resume(&dev->dev);
+}
+
static struct pnp_driver tpm_inf_pnp_driver = {
.name = "tpm_inf_pnp",
- .driver = {
- .owner = THIS_MODULE,
- .suspend = tpm_pm_suspend,
- .resume = tpm_pm_resume,
- },
- .id_table = tpm_pnp_tbl,
+ .id_table = tpm_inf_pnp_tbl,
.probe = tpm_inf_pnp_probe,
- .remove = __devexit_p(tpm_inf_pnp_remove),
+ .suspend = tpm_inf_pnp_suspend,
+ .resume = tpm_inf_pnp_resume,
+ .remove = __devexit_p(tpm_inf_pnp_remove)
};
static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.9");
+MODULE_VERSION("1.9.2");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index f15df40..dcb9083 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
pid = task_pid(current);
type = PIDTYPE_PID;
}
+ get_pid(pid);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
retval = __f_setown(filp, pid, type, 0);
+ put_pid(pid);
if (retval)
goto out;
} else {
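The tty_fasync change above pins the struct pid with get_pid() before ctrl_lock is dropped, because __f_setown() runs outside the lock and the pid found under it could otherwise be freed in that window; put_pid() drops the temporary reference afterwards. The same pattern in isolation (illustrative helper):

#include <linux/pid.h>
#include <linux/spinlock.h>

static void use_pid_outside_lock(spinlock_t *lock, struct pid **slot)
{
	struct pid *pid;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	pid = get_pid(*slot);		/* take a reference while *slot is stable */
	spin_unlock_irqrestore(lock, flags);

	/* ... safe to hand 'pid' to code that may sleep or re-take locks ... */

	put_pid(pid);			/* drop the temporary reference */
}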
diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c
index 867b67b..c7072ba 100644
--- a/drivers/char/uv_mmtimer.c
+++ b/drivers/char/uv_mmtimer.c
@@ -89,13 +89,17 @@ static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case MMTIMER_GETOFFSET: /* offset of the counter */
/*
- * UV RTC register is on its own page
+ * Starting with HUB rev 2.0, the UV RTC register is
+ * replicated across all cachelines of its own page.
+ * This allows faster simultaneous reads from a given socket.
+ *
+ * The offset returned is in 64 bit units.
*/
- if (PAGE_SIZE <= (1 << 16))
- ret = ((UV_LOCAL_MMR_BASE | UVH_RTC) & (PAGE_SIZE-1))
- / 8;
+ if (uv_get_min_hub_revision_id() == 1)
+ ret = 0;
else
- ret = -ENOSYS;
+ ret = ((uv_blade_processor_id() * L1_CACHE_BYTES) %
+ PAGE_SIZE) / 8;
break;
case MMTIMER_GETRES: /* resolution of the clock in 10^-15 s */
@@ -115,8 +119,8 @@ static long uv_mmtimer_ioctl(struct file *file, unsigned int cmd,
ret = hweight64(UVH_RTC_REAL_TIME_CLOCK_MASK);
break;
- case MMTIMER_MMAPAVAIL: /* can we mmap the clock into userspace? */
- ret = (PAGE_SIZE <= (1 << 16)) ? 1 : 0;
+ case MMTIMER_MMAPAVAIL:
+ ret = 1;
break;
case MMTIMER_GETCOUNTER:
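With the uv_mmtimer change above, MMTIMER_GETOFFSET reports where in the mmap'ed page this CPU should read the RTC, in 64-bit-word units, with one cacheline per CPU on the blade. A standalone illustration of the arithmetic (cacheline size, page size and CPU number are assumed example values):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* PAGE_SIZE */
	unsigned long cacheline = 64;	/* L1_CACHE_BYTES */
	unsigned long cpu = 5;		/* uv_blade_processor_id() */

	/* one cacheline per CPU, wrapped to the page, reported in u64 words */
	unsigned long words = ((cpu * cacheline) % page_size) / 8;

	printf("word offset %lu, byte offset %lu\n", words, words * 8);
	return 0;
}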
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 27d20fa..b314a99 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -21,7 +21,7 @@
#define DRV_NAME "cs5535-clockevt"
-static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+static int timer_irq;
module_param_named(irq, timer_irq, int, 0644);
MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f060246..537c29a 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -36,17 +36,6 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
-static u32 cn_idx = CN_IDX_CONNECTOR;
-static u32 cn_val = CN_VAL_CONNECTOR;
-
-module_param(cn_idx, uint, 0);
-module_param(cn_val, uint, 0);
-MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
-MODULE_PARM_DESC(cn_val, "Connector's main device val.");
-
-static DEFINE_MUTEX(notify_lock);
-static LIST_HEAD(notify_list);
-
static struct cn_dev cdev;
static int cn_already_initialized;
@@ -210,54 +199,6 @@ static void cn_rx_skb(struct sk_buff *__skb)
}
/*
- * Notification routing.
- *
- * Gets id and checks if there are notification request for it's idx
- * and val. If there are such requests notify the listeners with the
- * given notify event.
- *
- */
-static void cn_notify(struct cb_id *id, u32 notify_event)
-{
- struct cn_ctl_entry *ent;
-
- mutex_lock(&notify_lock);
- list_for_each_entry(ent, &notify_list, notify_entry) {
- int i;
- struct cn_notify_req *req;
- struct cn_ctl_msg *ctl = ent->msg;
- int idx_found, val_found;
-
- idx_found = val_found = 0;
-
- req = (struct cn_notify_req *)ctl->data;
- for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
- if (id->idx >= req->first &&
- id->idx < req->first + req->range) {
- idx_found = 1;
- break;
- }
- }
-
- for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
- if (id->val >= req->first &&
- id->val < req->first + req->range) {
- val_found = 1;
- break;
- }
- }
-
- if (idx_found && val_found) {
- struct cn_msg m = { .ack = notify_event, };
-
- memcpy(&m.id, id, sizeof(m.id));
- cn_netlink_send(&m, ctl->group, GFP_KERNEL);
- }
- }
- mutex_unlock(&notify_lock);
-}
-
-/*
* Callback add routing - adds callback with given ID and name.
* If there is registered callback with the same ID it will not be added.
*
@@ -276,8 +217,6 @@ int cn_add_callback(struct cb_id *id, char *name,
if (err)
return err;
- cn_notify(id, 0);
-
return 0;
}
EXPORT_SYMBOL_GPL(cn_add_callback);
@@ -295,111 +234,9 @@ void cn_del_callback(struct cb_id *id)
struct cn_dev *dev = &cdev;
cn_queue_del_callback(dev->cbdev, id);
- cn_notify(id, 1);
}
EXPORT_SYMBOL_GPL(cn_del_callback);
-/*
- * Checks two connector's control messages to be the same.
- * Returns 1 if they are the same or if the first one is corrupted.
- */
-static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
-{
- int i;
- struct cn_notify_req *req1, *req2;
-
- if (m1->idx_notify_num != m2->idx_notify_num)
- return 0;
-
- if (m1->val_notify_num != m2->val_notify_num)
- return 0;
-
- if (m1->len != m2->len)
- return 0;
-
- if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
- m1->len)
- return 1;
-
- req1 = (struct cn_notify_req *)m1->data;
- req2 = (struct cn_notify_req *)m2->data;
-
- for (i = 0; i < m1->idx_notify_num; ++i) {
- if (req1->first != req2->first || req1->range != req2->range)
- return 0;
- req1++;
- req2++;
- }
-
- for (i = 0; i < m1->val_notify_num; ++i) {
- if (req1->first != req2->first || req1->range != req2->range)
- return 0;
- req1++;
- req2++;
- }
-
- return 1;
-}
-
-/*
- * Main connector device's callback.
- *
- * Used for notification of a request's processing.
- */
-static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
-{
- struct cn_ctl_msg *ctl;
- struct cn_ctl_entry *ent;
- u32 size;
-
- if (msg->len < sizeof(*ctl))
- return;
-
- ctl = (struct cn_ctl_msg *)msg->data;
-
- size = (sizeof(*ctl) + ((ctl->idx_notify_num +
- ctl->val_notify_num) *
- sizeof(struct cn_notify_req)));
-
- if (msg->len != size)
- return;
-
- if (ctl->len + sizeof(*ctl) != msg->len)
- return;
-
- /*
- * Remove notification.
- */
- if (ctl->group == 0) {
- struct cn_ctl_entry *n;
-
- mutex_lock(&notify_lock);
- list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
- if (cn_ctl_msg_equals(ent->msg, ctl)) {
- list_del(&ent->notify_entry);
- kfree(ent);
- }
- }
- mutex_unlock(&notify_lock);
-
- return;
- }
-
- size += sizeof(*ent);
-
- ent = kzalloc(size, GFP_KERNEL);
- if (!ent)
- return;
-
- ent->msg = (struct cn_ctl_msg *)(ent + 1);
-
- memcpy(ent->msg, ctl, size - sizeof(*ent));
-
- mutex_lock(&notify_lock);
- list_add(&ent->notify_entry, &notify_list);
- mutex_unlock(&notify_lock);
-}
-
static int cn_proc_show(struct seq_file *m, void *v)
{
struct cn_queue_dev *dev = cdev.cbdev;
@@ -437,11 +274,8 @@ static const struct file_operations cn_file_ops = {
static int __devinit cn_init(void)
{
struct cn_dev *dev = &cdev;
- int err;
dev->input = cn_rx_skb;
- dev->id.idx = cn_idx;
- dev->id.val = cn_val;
dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
CN_NETLINK_USERS + 0xf,
@@ -457,14 +291,6 @@ static int __devinit cn_init(void)
cn_already_initialized = 1;
- err = cn_add_callback(&dev->id, "connector", &cn_callback);
- if (err) {
- cn_already_initialized = 0;
- cn_queue_free_dev(dev->cbdev);
- netlink_kernel_release(dev->nls);
- return -EINVAL;
- }
-
proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
return 0;
@@ -478,7 +304,6 @@ static void __devexit cn_fini(void)
proc_net_remove(&init_net, "connector");
- cn_del_callback(&dev->id);
cn_queue_free_dev(dev->cbdev);
netlink_kernel_release(dev->nls);
}
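With the built-in notification machinery removed, cn_add_callback()/cn_del_callback() remain the whole kernel-side interface of the connector. A minimal sketch of a user after this change (the idx/val pair and the names are hypothetical examples, not values allocated by this patch):

    #include <linux/connector.h>

    static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };	/* hypothetical */

    static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
    {
    	pr_info("connector: got %u bytes\n", msg->len);
    }

    static int __init example_init(void)
    {
    	/* Registration no longer triggers a cn_notify() broadcast. */
    	return cn_add_callback(&example_id, "example", example_callback);
    }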
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade..bd444dc 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
(dbs_tuners_ins.up_threshold -
dbs_tuners_ins.down_differential);
+ if (freq_next < policy->min)
+ freq_next = policy->min;
+
if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 6810443..73655ae 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -18,6 +18,7 @@
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
+#include <linux/math64.h>
#define BUCKETS 12
#define RESOLUTION 1024
@@ -169,6 +170,12 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
static void menu_update(struct cpuidle_device *dev);
+/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
+static u64 div_round64(u64 dividend, u32 divisor)
+{
+ return div_u64(dividend + (divisor / 2), divisor);
+}
+
/**
* menu_select - selects the next idle state to enter
* @dev: the CPU
@@ -209,9 +216,8 @@ static int menu_select(struct cpuidle_device *dev)
data->correction_factor[data->bucket] = RESOLUTION * DECAY;
/* Make sure to round up for half microseconds */
- data->predicted_us = DIV_ROUND_CLOSEST(
- data->expected_us * data->correction_factor[data->bucket],
- RESOLUTION * DECAY);
+ data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
+ RESOLUTION * DECAY);
/*
* We want to default to C1 (hlt), not to busy polling
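DIV_ROUND_CLOSEST expands to a plain C division; with a u64 dividend that would need the 64-by-64 libgcc division helper, which the 32-bit kernel does not provide, so the helper above routes it through div_u64() with a 32-bit divisor. A small sketch of the equivalence, with arbitrary example values and assuming the divisor fits in 32 bits and the pre-add does not overflow:

    u64 a = 1000000ULL * 1536;		/* arbitrary dividend */
    u32 b = 1024 * 8;			/* arbitrary 32-bit divisor */
    u64 rounded = div_u64(a + b / 2, b);	/* same result as DIV_ROUND_CLOSEST(a, b) */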
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 0af8057..d3a27e0 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -57,6 +57,23 @@ static int padlock_sha_update(struct shash_desc *desc,
return crypto_shash_update(&dctx->fallback, data, length);
}
+static int padlock_sha_export(struct shash_desc *desc, void *out)
+{
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+
+ return crypto_shash_export(&dctx->fallback, out);
+}
+
+static int padlock_sha_import(struct shash_desc *desc, const void *in)
+{
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
+
+ dctx->fallback.tfm = ctx->fallback;
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_shash_import(&dctx->fallback, in);
+}
+
static inline void padlock_output_block(uint32_t *src,
uint32_t *dst, size_t count)
{
@@ -235,7 +252,10 @@ static struct shash_alg sha1_alg = {
.update = padlock_sha_update,
.finup = padlock_sha1_finup,
.final = padlock_sha1_final,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
.descsize = sizeof(struct padlock_sha_desc),
+ .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
@@ -256,7 +276,10 @@ static struct shash_alg sha256_alg = {
.update = padlock_sha_update,
.finup = padlock_sha256_finup,
.final = padlock_sha256_final,
+ .export = padlock_sha_export,
+ .import = padlock_sha_import,
.descsize = sizeof(struct padlock_sha_desc),
+ .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index eb140ff..e02d74b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -111,6 +111,24 @@ config SH_DMAE
help
Enable support for the Renesas SuperH DMA controllers.
+config COH901318
+ bool "ST-Ericsson COH901318 DMA support"
+ select DMA_ENGINE
+ depends on ARCH_U300
+ help
+ Enable support for ST-Ericsson COH 901 318 DMA.
+
+config AMCC_PPC440SPE_ADMA
+ tristate "AMCC PPC440SPe ADMA support"
+ depends on 440SPe || 440SP
+ select DMA_ENGINE
+ select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ help
+ Enable support for the AMCC PPC440SPe RAID engines.
+
+config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ bool
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index eca71ba..807053d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -10,3 +10,5 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
+obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
+obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index f151125..efc1a61 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan,
dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
cookie, done ? *done : 0, used ? *used : 0);
- spin_lock_bh(atchan->lock);
+ spin_lock_bh(&atchan->lock);
last_complete = atchan->completed_cookie;
last_used = chan->cookie;
@@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- spin_unlock_bh(atchan->lock);
+ spin_unlock_bh(&atchan->lock);
if (done)
*done = last_complete;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
new file mode 100644
index 0000000..64a9372
--- /dev/null
+++ b/drivers/dma/coh901318.c
@@ -0,0 +1,1323 @@
+/*
+ * drivers/dma/coh901318.c

+ *
+ * Copyright (C) 2007-2009 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * DMA driver for COH 901 318
+ * Author: Per Friden <per.friden@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <mach/coh901318.h>
+
+#include "coh901318_lli.h"
+
+#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
+
+#ifdef VERBOSE_DEBUG
+#define COH_DBG(x) ({ if (1) x; 0; })
+#else
+#define COH_DBG(x) ({ if (0) x; 0; })
+#endif
+
+struct coh901318_desc {
+ struct dma_async_tx_descriptor desc;
+ struct list_head node;
+ struct scatterlist *sg;
+ unsigned int sg_len;
+ struct coh901318_lli *data;
+ enum dma_data_direction dir;
+ int pending_irqs;
+ unsigned long flags;
+};
+
+struct coh901318_base {
+ struct device *dev;
+ void __iomem *virtbase;
+ struct coh901318_pool pool;
+ struct powersave pm;
+ struct dma_device dma_slave;
+ struct dma_device dma_memcpy;
+ struct coh901318_chan *chans;
+ struct coh901318_platform *platform;
+};
+
+struct coh901318_chan {
+ spinlock_t lock;
+ int allocated;
+ int completed;
+ int id;
+ int stopped;
+
+ struct work_struct free_work;
+ struct dma_chan chan;
+
+ struct tasklet_struct tasklet;
+
+ struct list_head active;
+ struct list_head queue;
+ struct list_head free;
+
+ unsigned long nbr_active_done;
+ unsigned long busy;
+ int pending_irqs;
+
+ struct coh901318_base *base;
+};
+
+static void coh901318_list_print(struct coh901318_chan *cohc,
+ struct coh901318_lli *lli)
+{
+ struct coh901318_lli *l;
+ dma_addr_t addr = virt_to_phys(lli);
+ int i = 0;
+
+ while (addr) {
+ l = phys_to_virt(addr);
+ dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
+ ", dst 0x%x, link 0x%x link_virt 0x%p\n",
+ i, l, l->control, l->src_addr, l->dst_addr,
+ l->link_addr, phys_to_virt(l->link_addr));
+ i++;
+ addr = l->link_addr;
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)
+
+static struct coh901318_base *debugfs_dma_base;
+static struct dentry *dma_dentry;
+
+static int coh901318_debugfs_open(struct inode *inode, struct file *file)
+{
+
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int coh901318_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ u64 started_channels = debugfs_dma_base->pm.started_channels;
+ int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
+ int i;
+ int ret = 0;
+ char *dev_buf;
+ char *tmp;
+ int dev_size;
+
+ dev_buf = kmalloc(4*1024, GFP_KERNEL);
+ if (dev_buf == NULL)
+ goto err_kmalloc;
+ tmp = dev_buf;
+
+ tmp += sprintf(tmp, "DMA -- enable dma channels\n");
+
+ for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
+ if (started_channels & (1 << i))
+ tmp += sprintf(tmp, "channel %d\n", i);
+
+ tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
+ dev_size = tmp - dev_buf;
+
+ /* No more to read if offset != 0 */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (count > dev_size - *f_pos)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, dev_buf + *f_pos, count))
+ ret = -EINVAL;
+ ret = count;
+ *f_pos += count;
+
+ out:
+ kfree(dev_buf);
+ return ret;
+
+ err_kmalloc:
+ return 0;
+}
+
+static const struct file_operations coh901318_debugfs_status_operations = {
+ .owner = THIS_MODULE,
+ .open = coh901318_debugfs_open,
+ .read = coh901318_debugfs_read,
+};
+
+
+static int __init init_coh901318_debugfs(void)
+{
+
+ dma_dentry = debugfs_create_dir("dma", NULL);
+
+ (void) debugfs_create_file("status",
+ S_IFREG | S_IRUGO,
+ dma_dentry, NULL,
+ &coh901318_debugfs_status_operations);
+ return 0;
+}
+
+static void __exit exit_coh901318_debugfs(void)
+{
+ debugfs_remove_recursive(dma_dentry);
+}
+
+module_init(init_coh901318_debugfs);
+module_exit(exit_coh901318_debugfs);
+#else
+
+#define COH901318_DEBUGFS_ASSIGN(x, y)
+
+#endif /* CONFIG_DEBUG_FS */
+
+static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct coh901318_chan, chan);
+}
+
+static inline dma_addr_t
+cohc_dev_addr(struct coh901318_chan *cohc)
+{
+ return cohc->base->platform->chan_conf[cohc->id].dev_addr;
+}
+
+static inline const struct coh901318_params *
+cohc_chan_param(struct coh901318_chan *cohc)
+{
+ return &cohc->base->platform->chan_conf[cohc->id].param;
+}
+
+static inline const struct coh_dma_channel *
+cohc_chan_conf(struct coh901318_chan *cohc)
+{
+ return &cohc->base->platform->chan_conf[cohc->id];
+}
+
+static void enable_powersave(struct coh901318_chan *cohc)
+{
+ unsigned long flags;
+ struct powersave *pm = &cohc->base->pm;
+
+ spin_lock_irqsave(&pm->lock, flags);
+
+ pm->started_channels &= ~(1ULL << cohc->id);
+
+ if (!pm->started_channels) {
+ /* DMA no longer intends to access memory */
+ cohc->base->platform->access_memory_state(cohc->base->dev,
+ false);
+ }
+
+ spin_unlock_irqrestore(&pm->lock, flags);
+}
+static void disable_powersave(struct coh901318_chan *cohc)
+{
+ unsigned long flags;
+ struct powersave *pm = &cohc->base->pm;
+
+ spin_lock_irqsave(&pm->lock, flags);
+
+ if (!pm->started_channels) {
+ /* DMA intends to access memory */
+ cohc->base->platform->access_memory_state(cohc->base->dev,
+ true);
+ }
+
+ pm->started_channels |= (1ULL << cohc->id);
+
+ spin_unlock_irqrestore(&pm->lock, flags);
+}
+
+static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
+{
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ writel(control,
+ virtbase + COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING * channel);
+ return 0;
+}
+
+static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
+{
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ writel(conf,
+ virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING*channel);
+ return 0;
+}
+
+
+static int coh901318_start(struct coh901318_chan *cohc)
+{
+ u32 val;
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ disable_powersave(cohc);
+
+ val = readl(virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+ /* Enable channel */
+ val |= COH901318_CX_CFG_CH_ENABLE;
+ writel(val, virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+ return 0;
+}
+
+static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
+ struct coh901318_lli *data)
+{
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ BUG_ON(readl(virtbase + COH901318_CX_STAT +
+ COH901318_CX_STAT_SPACING*channel) &
+ COH901318_CX_STAT_ACTIVE);
+
+ writel(data->src_addr,
+ virtbase + COH901318_CX_SRC_ADDR +
+ COH901318_CX_SRC_ADDR_SPACING * channel);
+
+ writel(data->dst_addr, virtbase +
+ COH901318_CX_DST_ADDR +
+ COH901318_CX_DST_ADDR_SPACING * channel);
+
+ writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
+ COH901318_CX_LNK_ADDR_SPACING * channel);
+
+ writel(data->control, virtbase + COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING * channel);
+
+ return 0;
+}
+static dma_cookie_t
+coh901318_assign_cookie(struct coh901318_chan *cohc,
+ struct coh901318_desc *cohd)
+{
+ dma_cookie_t cookie = cohc->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ cohc->chan.cookie = cookie;
+ cohd->desc.cookie = cookie;
+
+ return cookie;
+}
+
+static struct coh901318_desc *
+coh901318_desc_get(struct coh901318_chan *cohc)
+{
+ struct coh901318_desc *desc;
+
+ if (list_empty(&cohc->free)) {
+		/* alloc a new desc because we're out of free ones
+		 * TODO: alloc a pile of descs instead of just one,
+		 * to avoid many small allocations.
+ */
+ desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
+ if (desc == NULL)
+ goto out;
+ INIT_LIST_HEAD(&desc->node);
+ } else {
+ /* Reuse an old desc. */
+ desc = list_first_entry(&cohc->free,
+ struct coh901318_desc,
+ node);
+ list_del(&desc->node);
+ }
+
+ out:
+ return desc;
+}
+
+static void
+coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
+{
+ list_add_tail(&cohd->node, &cohc->free);
+}
+
+/* call with irq lock held */
+static void
+coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
+{
+ list_add_tail(&desc->node, &cohc->active);
+
+ BUG_ON(cohc->pending_irqs != 0);
+
+ cohc->pending_irqs = desc->pending_irqs;
+}
+
+static struct coh901318_desc *
+coh901318_first_active_get(struct coh901318_chan *cohc)
+{
+ struct coh901318_desc *d;
+
+ if (list_empty(&cohc->active))
+ return NULL;
+
+ d = list_first_entry(&cohc->active,
+ struct coh901318_desc,
+ node);
+ return d;
+}
+
+static void
+coh901318_desc_remove(struct coh901318_desc *cohd)
+{
+ list_del(&cohd->node);
+}
+
+static void
+coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
+{
+ list_add_tail(&desc->node, &cohc->queue);
+}
+
+static struct coh901318_desc *
+coh901318_first_queued(struct coh901318_chan *cohc)
+{
+ struct coh901318_desc *d;
+
+ if (list_empty(&cohc->queue))
+ return NULL;
+
+ d = list_first_entry(&cohc->queue,
+ struct coh901318_desc,
+ node);
+ return d;
+}
+
+/*
+ * DMA start/stop controls
+ */
+u32 coh901318_get_bytes_left(struct dma_chan *chan)
+{
+ unsigned long flags;
+ u32 ret;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Read transfer count value */
+ ret = readl(cohc->base->virtbase +
+ COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
+ cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(coh901318_get_bytes_left);
+
+
+/* Stops a transfer without losing data. Enables power save.
+ Use this function in conjunction with coh901318_continue(..)
+*/
+void coh901318_stop(struct dma_chan *chan)
+{
+ u32 val;
+ unsigned long flags;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Disable channel in HW */
+ val = readl(virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+	/* Stopping an infinite transfer */
+ if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
+ (val & COH901318_CX_CFG_CH_ENABLE))
+ cohc->stopped = 1;
+
+
+ val &= ~COH901318_CX_CFG_CH_ENABLE;
+ /* Enable twice, HW bug work around */
+ writel(val, virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+ writel(val, virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+ /* Spin-wait for it to actually go inactive */
+ while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING *
+ channel) & COH901318_CX_STAT_ACTIVE)
+ cpu_relax();
+
+ /* Check if we stopped an active job */
+ if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
+ channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
+ cohc->stopped = 1;
+
+ enable_powersave(cohc);
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+}
+EXPORT_SYMBOL(coh901318_stop);
+
+/* Continues a transfer that has been stopped via coh901318_stop(..).
+ Power save is handled.
+*/
+void coh901318_continue(struct dma_chan *chan)
+{
+ u32 val;
+ unsigned long flags;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ int channel = cohc->id;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ disable_powersave(cohc);
+
+ if (cohc->stopped) {
+ /* Enable channel in HW */
+ val = readl(cohc->base->virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+ val |= COH901318_CX_CFG_CH_ENABLE;
+
+ writel(val, cohc->base->virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING*channel);
+
+ cohc->stopped = 0;
+ }
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+}
+EXPORT_SYMBOL(coh901318_continue);
+
+bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ unsigned int ch_nr = (unsigned int) chan_id;
+
+ if (ch_nr == to_coh901318_chan(chan)->id)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(coh901318_filter_id);
+
+/*
+ * DMA channel allocation
+ */
+static int coh901318_config(struct coh901318_chan *cohc,
+ struct coh901318_params *param)
+{
+ unsigned long flags;
+ const struct coh901318_params *p;
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ if (param)
+ p = param;
+ else
+ p = &cohc->base->platform->chan_conf[channel].param;
+
+ /* Clear any pending BE or TC interrupt */
+ if (channel < 32) {
+ writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
+ writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
+ } else {
+ writel(1 << (channel - 32), virtbase +
+ COH901318_BE_INT_CLEAR2);
+ writel(1 << (channel - 32), virtbase +
+ COH901318_TC_INT_CLEAR2);
+ }
+
+ coh901318_set_conf(cohc, p->config);
+ coh901318_set_ctrl(cohc, p->ctrl_lli_last);
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return 0;
+}
+
+/* must lock when calling this function
+ * start queued jobs, if any
+ * TODO: start all queued jobs in one go
+ *
+ * Returns descriptor if queued job is started otherwise NULL.
+ * If the queue is empty NULL is returned.
+ */
+static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
+{
+ struct coh901318_desc *cohd_que;
+
+ /* start queued jobs, if any
+ * TODO: transmit all queued jobs in one go
+ */
+ cohd_que = coh901318_first_queued(cohc);
+
+ if (cohd_que != NULL) {
+ /* Remove from queue */
+ coh901318_desc_remove(cohd_que);
+ /* initiate DMA job */
+ cohc->busy = 1;
+
+ coh901318_desc_submit(cohc, cohd_que);
+
+ coh901318_prep_linked_list(cohc, cohd_que->data);
+
+ /* start dma job */
+ coh901318_start(cohc);
+
+ }
+
+ return cohd_que;
+}
+
+static void dma_tasklet(unsigned long data)
+{
+ struct coh901318_chan *cohc = (struct coh901318_chan *) data;
+ struct coh901318_desc *cohd_fin;
+ unsigned long flags;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* get first active entry from list */
+ cohd_fin = coh901318_first_active_get(cohc);
+
+ BUG_ON(cohd_fin->pending_irqs == 0);
+
+ if (cohd_fin == NULL)
+ goto err;
+
+ cohd_fin->pending_irqs--;
+ cohc->completed = cohd_fin->desc.cookie;
+
+ if (cohc->nbr_active_done == 0)
+ return;
+
+ if (!cohd_fin->pending_irqs) {
+ /* release the lli allocation*/
+ coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
+ }
+
+ dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
+ " nbr_active_done %ld\n", __func__,
+ cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
+
+ /* callback to client */
+ callback = cohd_fin->desc.callback;
+ callback_param = cohd_fin->desc.callback_param;
+
+ if (!cohd_fin->pending_irqs) {
+ coh901318_desc_remove(cohd_fin);
+
+ /* return desc to free-list */
+ coh901318_desc_free(cohc, cohd_fin);
+ }
+
+ if (cohc->nbr_active_done)
+ cohc->nbr_active_done--;
+
+ if (cohc->nbr_active_done) {
+ if (cohc_chan_conf(cohc)->priority_high)
+ tasklet_hi_schedule(&cohc->tasklet);
+ else
+ tasklet_schedule(&cohc->tasklet);
+ }
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ if (callback)
+ callback(callback_param);
+
+ return;
+
+ err:
+ spin_unlock_irqrestore(&cohc->lock, flags);
+ dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
+}
+
+
+/* called from interrupt context */
+static void dma_tc_handle(struct coh901318_chan *cohc)
+{
+ BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
+ list_empty(&cohc->queue)));
+
+ if (!cohc->allocated)
+ return;
+
+ BUG_ON(cohc->pending_irqs == 0);
+
+ cohc->pending_irqs--;
+ cohc->nbr_active_done++;
+
+ if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
+ cohc->busy = 0;
+
+ BUG_ON(list_empty(&cohc->active));
+
+ if (cohc_chan_conf(cohc)->priority_high)
+ tasklet_hi_schedule(&cohc->tasklet);
+ else
+ tasklet_schedule(&cohc->tasklet);
+}
+
+
+static irqreturn_t dma_irq_handler(int irq, void *dev_id)
+{
+ u32 status1;
+ u32 status2;
+ int i;
+ int ch;
+ struct coh901318_base *base = dev_id;
+ struct coh901318_chan *cohc;
+ void __iomem *virtbase = base->virtbase;
+
+ status1 = readl(virtbase + COH901318_INT_STATUS1);
+ status2 = readl(virtbase + COH901318_INT_STATUS2);
+
+ if (unlikely(status1 == 0 && status2 == 0)) {
+ dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
+ return IRQ_HANDLED;
+ }
+
+	/* TODO: consider handling the IRQ in a tasklet here to
+ * minimize interrupt latency */
+
+ /* Check the first 32 DMA channels for IRQ */
+ while (status1) {
+ /* Find first bit set, return as a number. */
+ i = ffs(status1) - 1;
+ ch = i;
+
+ cohc = &base->chans[ch];
+ spin_lock(&cohc->lock);
+
+ /* Mask off this bit */
+ status1 &= ~(1 << i);
+ /* Check the individual channel bits */
+ if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
+ dev_crit(COHC_2_DEV(cohc),
+ "DMA bus error on channel %d!\n", ch);
+ BUG_ON(1);
+ /* Clear BE interrupt */
+ __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
+ } else {
+ /* Caused by TC, really? */
+ if (unlikely(!test_bit(i, virtbase +
+ COH901318_TC_INT_STATUS1))) {
+ dev_warn(COHC_2_DEV(cohc),
+ "ignoring interrupt not caused by terminal count on channel %d\n", ch);
+ /* Clear TC interrupt */
+ BUG_ON(1);
+ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
+ } else {
+ /* Enable powersave if transfer has finished */
+ if (!(readl(virtbase + COH901318_CX_STAT +
+ COH901318_CX_STAT_SPACING*ch) &
+ COH901318_CX_STAT_ENABLED)) {
+ enable_powersave(cohc);
+ }
+
+ /* Must clear TC interrupt before calling
+ * dma_tc_handle
+				 * in case tc_handle initiates a new dma job
+ */
+ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
+
+ dma_tc_handle(cohc);
+ }
+ }
+ spin_unlock(&cohc->lock);
+ }
+
+ /* Check the remaining 32 DMA channels for IRQ */
+ while (status2) {
+ /* Find first bit set, return as a number. */
+ i = ffs(status2) - 1;
+ ch = i + 32;
+ cohc = &base->chans[ch];
+ spin_lock(&cohc->lock);
+
+ /* Mask off this bit */
+ status2 &= ~(1 << i);
+ /* Check the individual channel bits */
+ if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
+ dev_crit(COHC_2_DEV(cohc),
+ "DMA bus error on channel %d!\n", ch);
+ /* Clear BE interrupt */
+ BUG_ON(1);
+ __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
+ } else {
+ /* Caused by TC, really? */
+ if (unlikely(!test_bit(i, virtbase +
+ COH901318_TC_INT_STATUS2))) {
+ dev_warn(COHC_2_DEV(cohc),
+ "ignoring interrupt not caused by terminal count on channel %d\n", ch);
+ /* Clear TC interrupt */
+ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
+ BUG_ON(1);
+ } else {
+ /* Enable powersave if transfer has finished */
+ if (!(readl(virtbase + COH901318_CX_STAT +
+ COH901318_CX_STAT_SPACING*ch) &
+ COH901318_CX_STAT_ENABLED)) {
+ enable_powersave(cohc);
+ }
+ /* Must clear TC interrupt before calling
+ * dma_tc_handle
+			 * in case tc_handle initiates a new dma job
+ */
+ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
+
+ dma_tc_handle(cohc);
+ }
+ }
+ spin_unlock(&cohc->lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int coh901318_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+
+ dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
+ __func__, cohc->id);
+
+ if (chan->client_count > 1)
+ return -EBUSY;
+
+ coh901318_config(cohc, NULL);
+
+ cohc->allocated = 1;
+ cohc->completed = chan->cookie = 1;
+
+ return 1;
+}
+
+static void
+coh901318_free_chan_resources(struct dma_chan *chan)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ int channel = cohc->id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Disable HW */
+ writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING*channel);
+ writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING*channel);
+
+ cohc->allocated = 0;
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ chan->device->device_terminate_all(chan);
+}
+
+
+static dma_cookie_t
+coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
+ desc);
+ struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ tx->cookie = coh901318_assign_cookie(cohc, cohd);
+
+ coh901318_desc_queue(cohc, cohd);
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return tx->cookie;
+}
+
+static struct dma_async_tx_descriptor *
+coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t size, unsigned long flags)
+{
+ struct coh901318_lli *data;
+ struct coh901318_desc *cohd;
+ unsigned long flg;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ int lli_len;
+ u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+
+ spin_lock_irqsave(&cohc->lock, flg);
+
+ dev_vdbg(COHC_2_DEV(cohc),
+ "[%s] channel %d src 0x%x dest 0x%x size %d\n",
+ __func__, cohc->id, src, dest, size);
+
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Trigger interrupt after last lli */
+ ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
+
+ lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
+ if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
+ lli_len++;
+
+ data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
+
+ if (data == NULL)
+ goto err;
+
+ cohd = coh901318_desc_get(cohc);
+ cohd->sg = NULL;
+ cohd->sg_len = 0;
+ cohd->data = data;
+
+ cohd->pending_irqs =
+ coh901318_lli_fill_memcpy(
+ &cohc->base->pool, data, src, size, dest,
+ cohc_chan_param(cohc)->ctrl_lli_chained,
+ ctrl_last);
+ cohd->flags = flags;
+
+ COH_DBG(coh901318_list_print(cohc, data));
+
+ dma_async_tx_descriptor_init(&cohd->desc, chan);
+
+ cohd->desc.tx_submit = coh901318_tx_submit;
+
+ spin_unlock_irqrestore(&cohc->lock, flg);
+
+ return &cohd->desc;
+ err:
+ spin_unlock_irqrestore(&cohc->lock, flg);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ struct coh901318_lli *data;
+ struct coh901318_desc *cohd;
+ struct scatterlist *sg;
+ int len = 0;
+ int size;
+ int i;
+ u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
+ u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
+ u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+ unsigned long flg;
+
+ if (!sgl)
+ goto out;
+ if (sgl->length == 0)
+ goto out;
+
+ spin_lock_irqsave(&cohc->lock, flg);
+
+ dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
+ __func__, sg_len, direction);
+
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Trigger interrupt after last lli */
+ ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
+
+ cohd = coh901318_desc_get(cohc);
+ cohd->sg = NULL;
+ cohd->sg_len = 0;
+ cohd->dir = direction;
+
+ if (direction == DMA_TO_DEVICE) {
+ u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
+
+ ctrl_chained |= tx_flags;
+ ctrl_last |= tx_flags;
+ ctrl |= tx_flags;
+ } else if (direction == DMA_FROM_DEVICE) {
+ u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
+
+ ctrl_chained |= rx_flags;
+ ctrl_last |= rx_flags;
+ ctrl |= rx_flags;
+ } else
+ goto err_direction;
+
+ dma_async_tx_descriptor_init(&cohd->desc, chan);
+
+ cohd->desc.tx_submit = coh901318_tx_submit;
+
+
+	/* The DMA controller only supports transferring packets of up to
+	 * MAX_DMA_PACKET_SIZE. Calculate the total number of
+	 * dma elements required to send the entire sg list
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ unsigned int factor;
+ size = sg_dma_len(sg);
+
+ if (size <= MAX_DMA_PACKET_SIZE) {
+ len++;
+ continue;
+ }
+
+ factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
+ if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
+ factor++;
+
+ len += factor;
+ }
+
+ data = coh901318_lli_alloc(&cohc->base->pool, len);
+
+ if (data == NULL)
+ goto err_dma_alloc;
+
+	/* initialize the allocated data list */
+ cohd->pending_irqs =
+ coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+ cohc_dev_addr(cohc),
+ ctrl_chained,
+ ctrl,
+ ctrl_last,
+ direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
+ cohd->data = data;
+
+ cohd->flags = flags;
+
+ COH_DBG(coh901318_list_print(cohc, data));
+
+ spin_unlock_irqrestore(&cohc->lock, flg);
+
+ return &cohd->desc;
+ err_dma_alloc:
+ err_direction:
+ coh901318_desc_remove(cohd);
+ coh901318_desc_free(cohc, cohd);
+ spin_unlock_irqrestore(&cohc->lock, flg);
+ out:
+ return NULL;
+}
+
+static enum dma_status
+coh901318_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *done,
+ dma_cookie_t *used)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ last_complete = cohc->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return ret;
+}
+
+static void
+coh901318_issue_pending(struct dma_chan *chan)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Busy means that pending jobs are already being processed */
+ if (!cohc->busy)
+ coh901318_queue_start(cohc);
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+}
+
+static void
+coh901318_terminate_all(struct dma_chan *chan)
+{
+ unsigned long flags;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ struct coh901318_desc *cohd;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ coh901318_stop(chan);
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Clear any pending BE or TC interrupt */
+ if (cohc->id < 32) {
+ writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
+ writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
+ } else {
+ writel(1 << (cohc->id - 32), virtbase +
+ COH901318_BE_INT_CLEAR2);
+ writel(1 << (cohc->id - 32), virtbase +
+ COH901318_TC_INT_CLEAR2);
+ }
+
+ enable_powersave(cohc);
+
+ while ((cohd = coh901318_first_active_get(cohc))) {
+ /* release the lli allocation*/
+ coh901318_lli_free(&cohc->base->pool, &cohd->data);
+
+ coh901318_desc_remove(cohd);
+
+ /* return desc to free-list */
+ coh901318_desc_free(cohc, cohd);
+ }
+
+ while ((cohd = coh901318_first_queued(cohc))) {
+ /* release the lli allocation*/
+ coh901318_lli_free(&cohc->base->pool, &cohd->data);
+
+ coh901318_desc_remove(cohd);
+
+ /* return desc to free-list */
+ coh901318_desc_free(cohc, cohd);
+ }
+
+
+ cohc->nbr_active_done = 0;
+ cohc->busy = 0;
+ cohc->pending_irqs = 0;
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+}
+void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
+ struct coh901318_base *base)
+{
+ int chans_i;
+ int i = 0;
+ struct coh901318_chan *cohc;
+
+ INIT_LIST_HEAD(&dma->channels);
+
+ for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
+ for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
+ cohc = &base->chans[i];
+
+ cohc->base = base;
+ cohc->chan.device = dma;
+ cohc->id = i;
+
+ /* TODO: do we really need this lock if only one
+ * client is connected to each channel?
+ */
+
+ spin_lock_init(&cohc->lock);
+
+ cohc->pending_irqs = 0;
+ cohc->nbr_active_done = 0;
+ cohc->busy = 0;
+ INIT_LIST_HEAD(&cohc->free);
+ INIT_LIST_HEAD(&cohc->active);
+ INIT_LIST_HEAD(&cohc->queue);
+
+ tasklet_init(&cohc->tasklet, dma_tasklet,
+ (unsigned long) cohc);
+
+ list_add_tail(&cohc->chan.device_node,
+ &dma->channels);
+ }
+ }
+}
+
+static int __init coh901318_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct coh901318_platform *pdata;
+ struct coh901318_base *base;
+ int irq;
+ struct resource *io;
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!io)
+ goto err_get_resource;
+
+ /* Map DMA controller registers to virtual memory */
+ if (request_mem_region(io->start,
+ resource_size(io),
+ pdev->dev.driver->name) == NULL) {
+ err = -EBUSY;
+ goto err_request_mem;
+ }
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ goto err_no_platformdata;
+
+ base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
+ pdata->max_channels *
+ sizeof(struct coh901318_chan),
+ GFP_KERNEL);
+ if (!base)
+ goto err_alloc_coh_dma_channels;
+
+ base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
+
+ base->virtbase = ioremap(io->start, resource_size(io));
+ if (!base->virtbase) {
+ err = -ENOMEM;
+ goto err_no_ioremap;
+ }
+
+ base->dev = &pdev->dev;
+ base->platform = pdata;
+ spin_lock_init(&base->pm.lock);
+ base->pm.started_channels = 0;
+
+ COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
+
+ platform_set_drvdata(pdev, base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto err_no_irq;
+
+ err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
+ "coh901318", base);
+ if (err) {
+ dev_crit(&pdev->dev,
+ "Cannot allocate IRQ for DMA controller!\n");
+ goto err_request_irq;
+ }
+
+ err = coh901318_pool_create(&base->pool, &pdev->dev,
+ sizeof(struct coh901318_lli),
+ 32);
+ if (err)
+ goto err_pool_create;
+
+ /* init channels for device transfers */
+ coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
+ base);
+
+ dma_cap_zero(base->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+
+ base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
+ base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
+ base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
+ base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_slave.device_issue_pending = coh901318_issue_pending;
+ base->dma_slave.device_terminate_all = coh901318_terminate_all;
+ base->dma_slave.dev = &pdev->dev;
+
+ err = dma_async_device_register(&base->dma_slave);
+
+ if (err)
+ goto err_register_slave;
+
+ /* init channels for memcpy */
+ coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
+ base);
+
+ dma_cap_zero(base->dma_memcpy.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
+
+ base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
+ base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
+ base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
+ base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
+ base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+ base->dma_memcpy.dev = &pdev->dev;
+ err = dma_async_device_register(&base->dma_memcpy);
+
+ if (err)
+ goto err_register_memcpy;
+
+ dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
+ (u32) base->virtbase);
+
+ return err;
+
+ err_register_memcpy:
+ dma_async_device_unregister(&base->dma_slave);
+ err_register_slave:
+ coh901318_pool_destroy(&base->pool);
+ err_pool_create:
+ free_irq(platform_get_irq(pdev, 0), base);
+ err_request_irq:
+ err_no_irq:
+ iounmap(base->virtbase);
+ err_no_ioremap:
+ kfree(base);
+ err_alloc_coh_dma_channels:
+ err_no_platformdata:
+ release_mem_region(pdev->resource->start,
+ resource_size(pdev->resource));
+ err_request_mem:
+ err_get_resource:
+ return err;
+}
+
+static int __exit coh901318_remove(struct platform_device *pdev)
+{
+ struct coh901318_base *base = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&base->dma_memcpy);
+ dma_async_device_unregister(&base->dma_slave);
+ coh901318_pool_destroy(&base->pool);
+ free_irq(platform_get_irq(pdev, 0), base);
+ iounmap(base->virtbase);
+ kfree(base);
+ release_mem_region(pdev->resource->start,
+ resource_size(pdev->resource));
+ return 0;
+}
+
+
+static struct platform_driver coh901318_driver = {
+ .remove = __exit_p(coh901318_remove),
+ .driver = {
+ .name = "coh901318",
+ },
+};
+
+int __init coh901318_init(void)
+{
+ return platform_driver_probe(&coh901318_driver, coh901318_probe);
+}
+subsys_initcall(coh901318_init);
+
+void __exit coh901318_exit(void)
+{
+ platform_driver_unregister(&coh901318_driver);
+}
+module_exit(coh901318_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Per Friden");
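The new driver registers two dma_device instances (slave and memcpy) with the generic dmaengine framework, so clients go through the standard API rather than calling the driver directly. A rough sketch of a memcpy client, assuming already DMA-mapped src/dst addresses and a purely hypothetical channel number for coh901318_filter_id():

    dma_cap_mask_t mask;
    struct dma_chan *chan;
    struct dma_async_tx_descriptor *tx;

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask);

    /* pick a specific physical channel; 39 is only an example */
    chan = dma_request_channel(mask, coh901318_filter_id, (void *) 39);
    if (chan) {
    	tx = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
    						   len, DMA_PREP_INTERRUPT);
    	if (tx) {
    		tx->tx_submit(tx);
    		chan->device->device_issue_pending(chan);
    	}
    }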
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
new file mode 100644
index 0000000..f5120f2
--- /dev/null
+++ b/drivers/dma/coh901318_lli.c
@@ -0,0 +1,318 @@
+/*
+ * drivers/dma/coh901318_lli.c
+ *
+ * Copyright (C) 2007-2009 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Support functions for handling lli for dma
+ * Author: Per Friden <per.friden@stericsson.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/dmapool.h>
+#include <linux/memory.h>
+#include <mach/coh901318.h>
+
+#include "coh901318_lli.h"
+
+#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
+#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
+#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
+#else
+#define DEBUGFS_POOL_COUNTER_RESET(pool)
+#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
+#endif
+
+static struct coh901318_lli *
+coh901318_lli_next(struct coh901318_lli *data)
+{
+ if (data == NULL || data->link_addr == 0)
+ return NULL;
+
+ return (struct coh901318_lli *) data->virt_link_addr;
+}
+
+int coh901318_pool_create(struct coh901318_pool *pool,
+ struct device *dev,
+ size_t size, size_t align)
+{
+ spin_lock_init(&pool->lock);
+ pool->dev = dev;
+ pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
+
+ DEBUGFS_POOL_COUNTER_RESET(pool);
+ return 0;
+}
+
+int coh901318_pool_destroy(struct coh901318_pool *pool)
+{
+
+ dma_pool_destroy(pool->dmapool);
+ return 0;
+}
+
+struct coh901318_lli *
+coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
+{
+ int i;
+ struct coh901318_lli *head;
+ struct coh901318_lli *lli;
+ struct coh901318_lli *lli_prev;
+ dma_addr_t phy;
+
+ if (len == 0)
+ goto err;
+
+ spin_lock(&pool->lock);
+
+ head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
+
+ if (head == NULL)
+ goto err;
+
+ DEBUGFS_POOL_COUNTER_ADD(pool, 1);
+
+ lli = head;
+ lli->phy_this = phy;
+
+ for (i = 1; i < len; i++) {
+ lli_prev = lli;
+
+ lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
+
+ if (lli == NULL)
+ goto err_clean_up;
+
+ DEBUGFS_POOL_COUNTER_ADD(pool, 1);
+ lli->phy_this = phy;
+
+ lli_prev->link_addr = phy;
+ lli_prev->virt_link_addr = lli;
+ }
+
+ lli->link_addr = 0x00000000U;
+
+ spin_unlock(&pool->lock);
+
+ return head;
+
+ err:
+ spin_unlock(&pool->lock);
+ return NULL;
+
+ err_clean_up:
+ lli_prev->link_addr = 0x00000000U;
+ spin_unlock(&pool->lock);
+ coh901318_lli_free(pool, &head);
+ return NULL;
+}
+
+void coh901318_lli_free(struct coh901318_pool *pool,
+ struct coh901318_lli **lli)
+{
+ struct coh901318_lli *l;
+ struct coh901318_lli *next;
+
+ if (lli == NULL)
+ return;
+
+ l = *lli;
+
+ if (l == NULL)
+ return;
+
+ spin_lock(&pool->lock);
+
+ while (l->link_addr) {
+ next = l->virt_link_addr;
+ dma_pool_free(pool->dmapool, l, l->phy_this);
+ DEBUGFS_POOL_COUNTER_ADD(pool, -1);
+ l = next;
+ }
+ dma_pool_free(pool->dmapool, l, l->phy_this);
+ DEBUGFS_POOL_COUNTER_ADD(pool, -1);
+
+ spin_unlock(&pool->lock);
+ *lli = NULL;
+}
+
+int
+coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ dma_addr_t source, unsigned int size,
+ dma_addr_t destination, u32 ctrl_chained,
+ u32 ctrl_eom)
+{
+ int s = size;
+ dma_addr_t src = source;
+ dma_addr_t dst = destination;
+
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ while (lli->link_addr) {
+ lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ s -= MAX_DMA_PACKET_SIZE;
+ lli = coh901318_lli_next(lli);
+
+ src += MAX_DMA_PACKET_SIZE;
+ dst += MAX_DMA_PACKET_SIZE;
+ }
+
+ lli->control = ctrl_eom | s;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ /* One irq per single transfer */
+ return 1;
+}
+
+int
+coh901318_lli_fill_single(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ dma_addr_t buf, unsigned int size,
+ dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
+ enum dma_data_direction dir)
+{
+ int s = size;
+ dma_addr_t src;
+ dma_addr_t dst;
+
+
+ if (dir == DMA_TO_DEVICE) {
+ src = buf;
+ dst = dev_addr;
+
+ } else if (dir == DMA_FROM_DEVICE) {
+
+ src = dev_addr;
+ dst = buf;
+ } else {
+ return -EINVAL;
+ }
+
+ while (lli->link_addr) {
+ size_t block_size = MAX_DMA_PACKET_SIZE;
+ lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
+
+ /* If we are on the next-to-final block and there will
+ * be less than half a DMA packet left for the last
+ * block, then we want to make this block a little
+ * smaller to balance the sizes. This is meant to
+ * avoid too small transfers if the buffer size is
+ * (MAX_DMA_PACKET_SIZE*N + 1) */
+ if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
+ block_size = MAX_DMA_PACKET_SIZE/2;
+
+ s -= block_size;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ lli = coh901318_lli_next(lli);
+
+ if (dir == DMA_TO_DEVICE)
+ src += block_size;
+ else if (dir == DMA_FROM_DEVICE)
+ dst += block_size;
+ }
+
+ lli->control = ctrl_eom | s;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ /* One irq per single transfer */
+ return 1;
+}
+
+int
+coh901318_lli_fill_sg(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ struct scatterlist *sgl, unsigned int nents,
+ dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
+ u32 ctrl_last,
+ enum dma_data_direction dir, u32 ctrl_irq_mask)
+{
+ int i;
+ struct scatterlist *sg;
+ u32 ctrl_sg;
+ dma_addr_t src = 0;
+ dma_addr_t dst = 0;
+ int nbr_of_irq = 0;
+ u32 bytes_to_transfer;
+ u32 elem_size;
+
+ if (lli == NULL)
+ goto err;
+
+ spin_lock(&pool->lock);
+
+ if (dir == DMA_TO_DEVICE)
+ dst = dev_addr;
+ else if (dir == DMA_FROM_DEVICE)
+ src = dev_addr;
+ else
+ goto err;
+
+ for_each_sg(sgl, sg, nents, i) {
+ if (sg_is_chain(sg)) {
+			/* sg continues into the next sg-element; don't
+ * send ctrl_finish until the last
+ * sg-element in the chain
+ */
+ ctrl_sg = ctrl_chained;
+ } else if (i == nents - 1)
+ ctrl_sg = ctrl_last;
+ else
+ ctrl_sg = ctrl ? ctrl : ctrl_last;
+
+
+ if ((ctrl_sg & ctrl_irq_mask))
+ nbr_of_irq++;
+
+ if (dir == DMA_TO_DEVICE)
+			/* the source address advances through the sg list */
+ src = sg_dma_address(sg);
+ else
+			/* the destination address advances through the sg list */
+ dst = sg_dma_address(sg);
+
+ bytes_to_transfer = sg_dma_len(sg);
+
+ while (bytes_to_transfer) {
+ u32 val;
+
+ if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
+ elem_size = MAX_DMA_PACKET_SIZE;
+ val = ctrl_chained;
+ } else {
+ elem_size = bytes_to_transfer;
+ val = ctrl_sg;
+ }
+
+ lli->control = val | elem_size;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ if (dir == DMA_FROM_DEVICE)
+ dst += elem_size;
+ else
+ src += elem_size;
+
+ BUG_ON(lli->link_addr & 3);
+
+ bytes_to_transfer -= elem_size;
+ lli = coh901318_lli_next(lli);
+ }
+
+ }
+ spin_unlock(&pool->lock);
+
+ /* There can be many IRQs per sg transfer */
+ return nbr_of_irq;
+ err:
+ spin_unlock(&pool->lock);
+ return -EINVAL;
+}
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
new file mode 100644
index 0000000..7bf713b
--- /dev/null
+++ b/drivers/dma/coh901318_lli.h
@@ -0,0 +1,124 @@
+/*
+ * drivers/dma/coh901318_lli.h
+ *
+ * Copyright (C) 2007-2009 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Support functions for handling lli for coh901318
+ * Author: Per Friden <per.friden@stericsson.com>
+ */
+
+#ifndef COH901318_LLI_H
+#define COH901318_LLI_H
+
+#include <mach/coh901318.h>
+
+struct device;
+
+struct coh901318_pool {
+ spinlock_t lock;
+ struct dma_pool *dmapool;
+ struct device *dev;
+
+#ifdef CONFIG_DEBUG_FS
+ int debugfs_pool_counter;
+#endif
+};
+
+struct device;
+/**
+ * coh901318_pool_create() - Creates a dma pool for lli:s
+ * @pool: pool handle
+ * @dev: dma device
+ * @lli_nbr: number of lli:s in the pool
+ * @align: address alignment of lli:s
+ * returns 0 on success, otherwise non-zero
+ */
+int coh901318_pool_create(struct coh901318_pool *pool,
+ struct device *dev,
+ size_t lli_nbr, size_t align);
+
+/**
+ * coh901318_pool_destroy() - Destroys the dma pool
+ * @pool: pool handle
+ * returns 0 on success, otherwise non-zero
+ */
+int coh901318_pool_destroy(struct coh901318_pool *pool);
+
+/**
+ * coh901318_lli_alloc() - Allocates a linked list
+ *
+ * @pool: pool handle
+ * @len: length of the list
+ * return: non-NULL on success, otherwise NULL
+ */
+struct coh901318_lli *
+coh901318_lli_alloc(struct coh901318_pool *pool,
+ unsigned int len);
+
+/**
+ * coh901318_lli_free() - Returns the linked list items to the pool
+ * @pool: pool handle
+ * @lli: reference to lli pointer to be freed
+ */
+void coh901318_lli_free(struct coh901318_pool *pool,
+ struct coh901318_lli **lli);
+
+/**
+ * coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy
+ * @pool: pool handle
+ * @lli: allocated lli
+ * @src: src address
+ * @size: transfer size
+ * @dst: destination address
+ * @ctrl_chained: ctrl for chained lli
+ * @ctrl_last: ctrl for the last lli
+ * returns number of CPU interrupts for the lli, negative on error.
+ */
+int
+coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ dma_addr_t src, unsigned int size,
+ dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last);
+
+/**
+ * coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer
+ * @pool: pool handle
+ * @lli: allocated lli
+ * @buf: transfer buffer
+ * @size: transfer size
+ * @dev_addr: address of the peripheral
+ * @ctrl_chained: ctrl for chained lli
+ * @ctrl_last: ctrl for the last lli
+ * @dir: direction of transfer (to or from device)
+ * returns number of CPU interrupts for the lli, negative on error.
+ */
+int
+coh901318_lli_fill_single(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ dma_addr_t buf, unsigned int size,
+ dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
+ enum dma_data_direction dir);
+
+/**
+ * coh901318_lli_fill_sg() - Prepares the lli:s for a dma scatter list transfer
+ * @pool: pool handle
+ * @lli: allocated lli
+ * @sg: scatter gather list
+ * @nents: number of entries in sg
+ * @dev_addr: address of the peripheral
+ * @ctrl_chained: ctrl for chained lli
+ * @ctrl: ctrl of middle lli
+ * @ctrl_last: ctrl for the last lli
+ * @dir: direction of transfer (to or from device)
+ * @ctrl_irq_mask: ctrl mask for CPU interrupt
+ * returns number of CPU interrupts for the lli, negative on error.
+ */
+int
+coh901318_lli_fill_sg(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ struct scatterlist *sg, unsigned int nents,
+ dma_addr_t dev_addr, u32 ctrl_chained,
+ u32 ctrl, u32 ctrl_last,
+ enum dma_data_direction dir, u32 ctrl_irq_mask);
+
+#endif /* COH901318_LLI_H */
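The coh901318.c driver above is the only expected user of this pool API, but the intended calling sequence follows from the declarations: create a pool per controller, allocate an lli chain per transfer, fill it, and hand it back. A sketch under that assumption (sizes, addresses and control words are placeholders):

    struct coh901318_pool pool;
    struct coh901318_lli *lli;

    coh901318_pool_create(&pool, dev, sizeof(struct coh901318_lli), 32);

    lli = coh901318_lli_alloc(&pool, 4);		/* chain of 4 items */
    if (lli) {
    	coh901318_lli_fill_memcpy(&pool, lli, src_phys, len, dst_phys,
    				  ctrl_chained, ctrl_last);
    	/* ...program the chain into the channel and run the transfer... */
    	coh901318_lli_free(&pool, &lli);
    }
    coh901318_pool_destroy(&pool);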
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6f51a0a..e7a3230 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
device_unregister(&chan->dev->device);
+ free_percpu(chan->local);
}
}
EXPORT_SYMBOL(dma_async_device_unregister);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a32a4cf..948d563 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -298,10 +298,6 @@ static int dmatest_func(void *data)
total_tests++;
- len = dmatest_random() % test_buf_size + 1;
- src_off = dmatest_random() % (test_buf_size - len + 1);
- dst_off = dmatest_random() % (test_buf_size - len + 1);
-
/* honor alignment restrictions */
if (thread->type == DMA_MEMCPY)
align = dev->copy_align;
@@ -310,7 +306,19 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_PQ)
align = dev->pq_align;
+ if (1 << align > test_buf_size) {
+ pr_err("%u-byte buffer too small for %d-byte alignment\n",
+ test_buf_size, 1 << align);
+ break;
+ }
+
+ len = dmatest_random() % test_buf_size + 1;
len = (len >> align) << align;
+ if (!len)
+ len = 1 << align;
+ src_off = dmatest_random() % (test_buf_size - len + 1);
+ dst_off = dmatest_random() % (test_buf_size - len + 1);
+
src_off = (src_off >> align) << align;
dst_off = (dst_off >> align) << align;
@@ -459,7 +467,7 @@ err_srcs:
if (iterations > 0)
while (!kthread_should_stop()) {
- DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
interruptible_sleep_on(&wait_dmatest_exit);
}
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 285bed0..d28369f 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1270,8 +1270,6 @@ static int __init dw_probe(struct platform_device *pdev)
goto err_kfree;
}
- memset(dw, 0, sizeof *dw);
-
dw->regs = ioremap(io->start, DW_REGLEN);
if (!dw->regs) {
err = -ENOMEM;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c524d36..dcc4ab7 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
dma->dev = &pdev->dev;
if (!dma->chancnt) {
- dev_err(dev, "zero channels detected\n");
+ dev_err(dev, "channel enumeration error\n");
goto err_setup_interrupts;
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 45edde9..bbc3e78 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -60,6 +60,7 @@
* @dca: direct cache access context
* @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
* @enumerate_channels: hw version specific channel enumeration
+ * @reset_hw: hw version specific channel (re)initialization
* @cleanup_tasklet: select between the v2 and v3 cleanup routines
* @timer_fn: select between the v2 and v3 timer watchdog routines
* @self_test: hardware version specific self test for each supported op type
@@ -78,6 +79,7 @@ struct ioatdma_device {
struct dca_provider *dca;
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
+ int (*reset_hw)(struct ioat_chan_common *chan);
void (*cleanup_tasklet)(unsigned long data);
void (*timer_fn)(unsigned long data);
int (*self_test)(struct ioatdma_device *device);
@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
+static inline void ioat_reset(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+
+ writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+ u8 cmd;
+
+ cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+ return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
+}
+
static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
{
struct ioat_chan_common *chan = &ioat->base;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8f1f7f0..5cc37af 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
__ioat2_start_null_desc(ioat);
}
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
- struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ unsigned long end = jiffies + tmo;
+ int err = 0;
u32 status;
status = ioat_chansts(chan);
if (is_ioat_active(status) || is_ioat_idle(status))
ioat_suspend(chan);
while (is_ioat_active(status) || is_ioat_idle(status)) {
+ if (tmo && time_after(jiffies, end)) {
+ err = -ETIMEDOUT;
+ break;
+ }
status = ioat_chansts(chan);
cpu_relax();
}
+ return err;
+}
+
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
+{
+ unsigned long end = jiffies + tmo;
+ int err = 0;
+
+ ioat_reset(chan);
+ while (ioat_reset_pending(chan)) {
+ if (end && time_after(jiffies, end)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ cpu_relax();
+ }
+
+ return err;
+}
+
+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+
+ ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
@@ -318,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
spin_unlock_bh(&chan->cleanup_lock);
}
+static int ioat2_reset_hw(struct ioat_chan_common *chan)
+{
+ /* throw away whatever the channel was doing and get it initialized */
+ u32 chanerr;
+
+ ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
/**
* ioat2_enumerate_channels - find and initialize the device's channels
* @device: the device to be enumerated
@@ -360,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
(unsigned long) ioat);
ioat->xfercap_log = xfercap_log;
spin_lock_init(&ioat->ring_lock);
+ if (device->reset_hw(&ioat->base)) {
+ i = 0;
+ break;
+ }
}
dma->chancnt = i;
return i;
@@ -467,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent **ring;
- u32 chanerr;
int order;
/* have we already been set up? */
@@ -477,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
/* Setup register to interrupt and write completion status on error */
writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- if (chanerr) {
- dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- }
-
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
chan->completion = pci_pool_alloc(chan->device->completion_pool,
@@ -746,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer);
device->cleanup_tasklet((unsigned long) ioat);
-
- /* Delay 100ms after reset to allow internal DMA logic to quiesce
- * before removing DMA descriptor resources.
- */
- writeb(IOAT_CHANCMD_RESET,
- chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
- mdelay(100);
+ device->reset_hw(chan);
spin_lock_bh(&ioat->ring_lock);
descs = ioat2_ring_space(ioat);
@@ -839,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
int err;
device->enumerate_channels = ioat2_enumerate_channels;
+ device->reset_hw = ioat2_reset_hw;
device->cleanup_tasklet = ioat2_cleanup_tasklet;
device->timer_fn = ioat2_timer_event;
device->self_test = ioat_dma_self_test;
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 1d849ef..3afad8d 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
void ioat2_cleanup_tasklet(unsigned long data);
void ioat2_timer_event(unsigned long data);
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 42f6f10..9908c9e 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -650,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
num_descs = ioat2_xferlen_to_descs(ioat, len);
/* we need 2x the number of descriptors to cover greater than 3
- * sources
+ * sources (we need 1 extra source in the q-only continuation
+ * case and 3 extra sources in the p+q continuation case).
*/
- if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
+ if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+ (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
with_ext = 1;
num_descs *= 2;
} else
@@ -1128,6 +1130,45 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
return 0;
}
+static int ioat3_reset_hw(struct ioat_chan_common *chan)
+{
+ /* throw away whatever the channel was doing and get it
+ * initialized, with ioat3 specific workarounds
+ */
+ struct ioatdma_device *device = chan->device;
+ struct pci_dev *pdev = device->pdev;
+ u32 chanerr;
+ u16 dev_id;
+ int err;
+
+ ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ /* -= IOAT ver.3 workarounds =- */
+ /* Write CHANERRMSK_INT with 3E07h to mask out the errors
+ * that can cause stability issues for IOAT ver.3, and clear any
+ * pending errors
+ */
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+ err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+ if (err) {
+ dev_err(&pdev->dev, "channel error register unreachable\n");
+ return err;
+ }
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+ pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
@@ -1137,10 +1178,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
struct ioat_chan_common *chan;
bool is_raid_device = false;
int err;
- u16 dev_id;
u32 cap;
device->enumerate_channels = ioat2_enumerate_channels;
+ device->reset_hw = ioat3_reset_hw;
device->self_test = ioat3_dma_self_test;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
@@ -1216,19 +1257,6 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_prep_dma_xor_val = NULL;
#endif
- /* -= IOAT ver.3 workarounds =- */
- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
- * that can cause stability issues for IOAT ver.3
- */
- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
-
- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
- * (workaround for spurious config parity error after restart)
- */
- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
-
err = ioat_probe(device);
if (err)
return err;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index f015ec1..e8ae63b 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -27,6 +27,7 @@
#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
+#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
/* MMIO Device Registers */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 645ca8d..ca6e6a0 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1470,7 +1470,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
return -ENODEV;
if (!devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start, pdev->name))
+ resource_size(res), pdev->name))
return -EBUSY;
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
@@ -1542,7 +1542,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
iop_chan->device = adev;
iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
- res->end - res->start);
+ resource_size(res));
if (!iop_chan->mmr_base) {
ret = -ENOMEM;
goto err_free_iop_chan;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 9a5bc1a..e80bae1 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
* @buffer_n: buffer number to update.
* 0 or 1 are the only valid values.
* @phyaddr: buffer physical address.
- * @return: Returns 0 on success or negative error code on failure. This
- * function will fail if the buffer is set to ready.
*/
/* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
- int buffer_n, dma_addr_t phyaddr)
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
+ int buffer_n, dma_addr_t phyaddr)
{
enum ipu_channel channel = ichan->dma_chan.chan_id;
uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
}
spin_unlock_irqrestore(&ipu_data.lock, flags);
-
- return 0;
}
/* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
{
unsigned int chan_id = ichan->dma_chan.chan_id;
struct device *dev = &ichan->dma_chan.dev->device;
- int ret;
if (async_tx_test_ack(&desc->txd))
return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
* could make it conditional on status >= IPU_CHANNEL_ENABLED, but
* doing it again shouldn't hurt either.
*/
- ret = ipu_update_channel_buffer(ichan, buf_idx,
- sg_dma_address(sg));
-
- if (ret < 0) {
- dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
- sg, chan_id, buf_idx);
- return ret;
- }
+ ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
ipu_select_buffer(chan_id, buf_idx);
dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (likely(sgnew) &&
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ callback = descnew->txd.callback;
+ callback_param = descnew->txd.callback_param;
spin_unlock(&ichan->lock);
- callback(callback_param);
+ if (callback)
+ callback(callback_param);
spin_lock(&ichan->lock);
}
diff --git a/drivers/dma/ppc4xx/Makefile b/drivers/dma/ppc4xx/Makefile
new file mode 100644
index 0000000..b3d259b
--- /dev/null
+++ b/drivers/dma/ppc4xx/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += adma.o
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
new file mode 100644
index 0000000..0a3478e
--- /dev/null
+++ b/drivers/dma/ppc4xx/adma.c
@@ -0,0 +1,5027 @@
+/*
+ * Copyright (C) 2006-2009 DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * Further porting to arch/powerpc by
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This driver supports the asynchronous DMA copy and RAID engines available
+ * on the AMCC PPC440SPe Processors.
+ * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
+ * ADMA driver written by D.Williams.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include "adma.h"
+
+enum ppc_adma_init_code {
+ PPC_ADMA_INIT_OK = 0,
+ PPC_ADMA_INIT_MEMRES,
+ PPC_ADMA_INIT_MEMREG,
+ PPC_ADMA_INIT_ALLOC,
+ PPC_ADMA_INIT_COHERENT,
+ PPC_ADMA_INIT_CHANNEL,
+ PPC_ADMA_INIT_IRQ1,
+ PPC_ADMA_INIT_IRQ2,
+ PPC_ADMA_INIT_REGISTER
+};
+
+static char *ppc_adma_errors[] = {
+ [PPC_ADMA_INIT_OK] = "ok",
+ [PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
+ [PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
+ [PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
+ "structure",
+ [PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
+ "hardware descriptors",
+ [PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
+ [PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
+ [PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
+ [PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
+};
+
+static enum ppc_adma_init_code
+ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];
+
+struct ppc_dma_chan_ref {
+ struct dma_chan *chan;
+ struct list_head node;
+};
+
+/* The list of channels exported by ppc440spe ADMA */
+struct list_head
+ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);
+
+/* This flag is set when we want to refetch the xor chain in the interrupt
+ * handler
+ */
+static u32 do_xor_refetch;
+
+/* Pointer to DMA0, DMA1 CP/CS FIFO */
+static void *ppc440spe_dma_fifo_buf;
+
+/* Pointers to last submitted to DMA0, DMA1 CDBs */
+static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
+static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];
+
+/* Pointer to last linked and submitted xor CB */
+static struct ppc440spe_adma_desc_slot *xor_last_linked;
+static struct ppc440spe_adma_desc_slot *xor_last_submit;
+
+/* This array is used in data-check operations for storing a pattern */
+static char ppc440spe_qword[16];
+
+static atomic_t ppc440spe_adma_err_irq_ref;
+static dcr_host_t ppc440spe_mq_dcr_host;
+static unsigned int ppc440spe_mq_dcr_len;
+
+/* Since RXOR operations use the common register (MQ0_CF2H) for setting up
+ * the block size in transactions, we do not allow more than one RXOR
+ * transaction to be active at a time. This variable tracks whether RXOR
+ * is currently active (the PPC440SPE_RXOR_RUN bit is set) or not
+ * (PPC440SPE_RXOR_RUN is clear).
+ */
+static unsigned long ppc440spe_rxor_state;
+
+/* These are used in enable & check routines
+ */
+static u32 ppc440spe_r6_enabled;
+static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
+static struct completion ppc440spe_r6_test_comp;
+
+static int ppc440spe_adma_dma2rxor_prep_src(
+ struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_rxor *cursor, int index,
+ int src_cnt, u32 addr);
+static void ppc440spe_adma_dma2rxor_set_src(
+ struct ppc440spe_adma_desc_slot *desc,
+ int index, dma_addr_t addr);
+static void ppc440spe_adma_dma2rxor_set_mult(
+ struct ppc440spe_adma_desc_slot *desc,
+ int index, u8 mult);
+
+#ifdef ADMA_LL_DEBUG
+#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
+#else
+#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
+#endif
+
+static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
+{
+ struct dma_cdb *cdb;
+ struct xor_cb *cb;
+ int i;
+
+ switch (chan->device->id) {
+ case 0:
+ case 1:
+ cdb = block;
+
+ pr_debug("CDB at %p [%d]:\n"
+ "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
+ "\t sg1u 0x%08x sg1l 0x%08x\n"
+ "\t sg2u 0x%08x sg2l 0x%08x\n"
+ "\t sg3u 0x%08x sg3l 0x%08x\n",
+ cdb, chan->device->id,
+ cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
+ le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
+ le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
+ le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
+ );
+ break;
+ case 2:
+ cb = block;
+
+ pr_debug("CB at %p [%d]:\n"
+ "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
+ "\t cbtah 0x%08x cbtal 0x%08x\n"
+ "\t cblah 0x%08x cblal 0x%08x\n",
+ cb, chan->device->id,
+ cb->cbc, cb->cbbc, cb->cbs,
+ cb->cbtah, cb->cbtal,
+ cb->cblah, cb->cblal);
+ for (i = 0; i < 16; i++) {
+ if (i && !cb->ops[i].h && !cb->ops[i].l)
+ continue;
+ pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
+ i, cb->ops[i].h, cb->ops[i].l);
+ }
+ break;
+ }
+}
+
+static void print_cb_list(struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *iter)
+{
+ for (; iter; iter = iter->hw_next)
+ print_cb(chan, iter->hw_desc);
+}
+
+static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt)
+{
+ int i;
+
+ pr_debug("\n%s(%d):\nsrc: ", __func__, id);
+ for (i = 0; i < src_cnt; i++)
+ pr_debug("\t0x%016llx ", src[i]);
+ pr_debug("dst:\n\t0x%016llx\n", dst);
+}
+
+static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt)
+{
+ int i;
+
+ pr_debug("\n%s(%d):\nsrc: ", __func__, id);
+ for (i = 0; i < src_cnt; i++)
+ pr_debug("\t0x%016llx ", src[i]);
+ pr_debug("dst: ");
+ for (i = 0; i < 2; i++)
+ pr_debug("\t0x%016llx ", dst[i]);
+}
+
+static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
+ unsigned int src_cnt,
+ const unsigned char *scf)
+{
+ int i;
+
+ pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
+ if (scf) {
+ for (i = 0; i < src_cnt; i++)
+ pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
+ } else {
+ for (i = 0; i < src_cnt; i++)
+ pr_debug("\t0x%016llx(no) ", src[i]);
+ }
+
+ pr_debug("dst: ");
+ for (i = 0; i < 2; i++)
+ pr_debug("\t0x%016llx ", src[src_cnt + i]);
+}
+
+/******************************************************************************
+ * Command (Descriptor) Blocks low-level routines
+ ******************************************************************************/
+/**
+ * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
+ * pseudo operation
+ */
+static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ struct xor_cb *p;
+
+ switch (chan->device->id) {
+ case PPC440SPE_XOR_ID:
+ p = desc->hw_desc;
+ memset(desc->hw_desc, 0, sizeof(struct xor_cb));
+ /* NOP with Command Block Complete Enable */
+ p->cbc = XOR_CBCR_CBCE_BIT;
+ break;
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
+ /* NOP with interrupt */
+ set_bit(PPC440SPE_DESC_INT, &desc->flags);
+ break;
+ default:
+ printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
+ __func__);
+ break;
+ }
+}
+
+/**
+ * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
+ * pseudo operation
+ */
+static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
+{
+ memset(desc->hw_desc, 0, sizeof(struct xor_cb));
+ desc->hw_next = NULL;
+ desc->src_cnt = 0;
+ desc->dst_cnt = 1;
+}
+
+/**
+ * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
+ */
+static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
+ int src_cnt, unsigned long flags)
+{
+ struct xor_cb *hw_desc = desc->hw_desc;
+
+ memset(desc->hw_desc, 0, sizeof(struct xor_cb));
+ desc->hw_next = NULL;
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = 1;
+
+ hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Enable interrupt on completion */
+ hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
+}
+
+/**
+ * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
+ * operation in DMA2 controller
+ */
+static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
+ int dst_cnt, int src_cnt, unsigned long flags)
+{
+ struct xor_cb *hw_desc = desc->hw_desc;
+
+ memset(desc->hw_desc, 0, sizeof(struct xor_cb));
+ desc->hw_next = NULL;
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = dst_cnt;
+ memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
+ desc->descs_per_op = 0;
+
+ hw_desc->cbc = XOR_CBCR_TGT_BIT;
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Enable interrupt on completion */
+ hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
+}
+
+#define DMA_CTRL_FLAGS_LAST DMA_PREP_FENCE
+#define DMA_PREP_ZERO_P (DMA_CTRL_FLAGS_LAST << 1)
+#define DMA_PREP_ZERO_Q (DMA_PREP_ZERO_P << 1)
+
+/**
+ * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
+ * with DMA0/1
+ */
+static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
+ int dst_cnt, int src_cnt, unsigned long flags,
+ unsigned long op)
+{
+ struct dma_cdb *hw_desc;
+ struct ppc440spe_adma_desc_slot *iter;
+ u8 dopc;
+
+ /* Common initialization of a PQ descriptors chain */
+ set_bits(op, &desc->flags);
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = dst_cnt;
+
+ /* WXOR MULTICAST if both P and Q are being computed
+ * MV_SG1_SG2 if Q only
+ */
+ dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
+ DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;
+
+ list_for_each_entry(iter, &desc->group_list, chain_node) {
+ hw_desc = iter->hw_desc;
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+
+ if (likely(!list_is_last(&iter->chain_node,
+ &desc->group_list))) {
+ /* set 'next' pointer */
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+ } else {
+ /* this is the last descriptor.
+ * this slot will be pasted from ADMA level
+ * each time it wants to configure parameters
+ * of the transaction (src, dst, ...)
+ */
+ iter->hw_next = NULL;
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+ }
+ }
+
+ /* Set OPS depending on WXOR/RXOR type of operation */
+ if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
+ /* This is a WXOR only chain:
+ * - first descriptors are for zeroing destinations
+ * if PPC440SPE_ZERO_P/Q set;
+ * - the remaining descriptors are for GF-XOR operations.
+ */
+ iter = list_first_entry(&desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+
+ if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
+ if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
+ list_for_each_entry_from(iter, &desc->group_list, chain_node) {
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = dopc;
+ }
+ } else {
+ /* This is either RXOR-only or mixed RXOR/WXOR */
+
+ /* The first 1 or 2 slots in chain are always RXOR,
+ * if need to calculate P & Q, then there are two
+ * RXOR slots; if only P or only Q, then there is one
+ */
+ iter = list_first_entry(&desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+
+ if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ }
+
+ /* The remaining descs (if any) are WXORs */
+ if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ list_for_each_entry_from(iter, &desc->group_list,
+ chain_node) {
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = dopc;
+ }
+ }
+ }
+}
+
+/**
+ * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
+ * for PQ_ZERO_SUM operation
+ */
+static void ppc440spe_desc_init_dma01pqzero_sum(
+ struct ppc440spe_adma_desc_slot *desc,
+ int dst_cnt, int src_cnt)
+{
+ struct dma_cdb *hw_desc;
+ struct ppc440spe_adma_desc_slot *iter;
+ int i = 0;
+ u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
+ DMA_CDB_OPC_MV_SG1_SG2;
+ /*
+ * Initialize starting from the 2nd or 3rd descriptor, depending
+ * on dst_cnt. The first one or two slots are for cloning P
+ * and/or Q to chan->pdest and/or chan->qdest as we have
+ * to preserve original P/Q.
+ */
+ iter = list_first_entry(&desc->group_list,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+
+ if (dst_cnt > 1) {
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ }
+ /* initialize each source descriptor in chain */
+ list_for_each_entry_from(iter, &desc->group_list, chain_node) {
+ hw_desc = iter->hw_desc;
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->src_cnt = 0;
+ iter->dst_cnt = 0;
+
+ /* This is a ZERO_SUM operation:
+ * - <src_cnt> descriptors starting from 2nd or 3rd
+ * descriptor are for GF-XOR operations;
+ * - remaining <dst_cnt> descriptors are for checking the result
+ */
+ if (i++ < src_cnt)
+ /* MV_SG1_SG2 if only Q is being verified
+ * MULTICAST if both P and Q are being verified
+ */
+ hw_desc->opc = dopc;
+ else
+ /* DMA_CDB_OPC_DCHECK128 operation */
+ hw_desc->opc = DMA_CDB_OPC_DCHECK128;
+
+ if (likely(!list_is_last(&iter->chain_node,
+ &desc->group_list))) {
+ /* set 'next' pointer */
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ } else {
+ /* this is the last descriptor.
+ * this slot will be pasted from ADMA level
+ * each time it wants to configure parameters
+ * of the transaction (src, dst, ...)
+ */
+ iter->hw_next = NULL;
+ /* always enable interrupt generation since we get
+ * the status of pqzero from the handler
+ */
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ }
+ }
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = dst_cnt;
+}
+
+/**
+ * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
+ */
+static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
+ unsigned long flags)
+{
+ struct dma_cdb *hw_desc = desc->hw_desc;
+
+ memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &desc->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &desc->flags);
+
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+}
+
+/**
+ * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
+ */
+static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
+ int value, unsigned long flags)
+{
+ struct dma_cdb *hw_desc = desc->hw_desc;
+
+ memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &desc->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &desc->flags);
+
+ hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
+ hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
+ hw_desc->opc = DMA_CDB_OPC_DFILL128;
+}
+
+/**
+ * ppc440spe_desc_set_src_addr - set source address into the descriptor
+ */
+static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan,
+ int src_idx, dma_addr_t addrh,
+ dma_addr_t addrl)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+ phys_addr_t addr64, tmplow, tmphi;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ if (!addrh) {
+ addr64 = addrl;
+ tmphi = (addr64 >> 32);
+ tmplow = (addr64 & 0xFFFFFFFF);
+ } else {
+ tmphi = addrh;
+ tmplow = addrl;
+ }
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
+ dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
+ break;
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->ops[src_idx].l = addrl;
+ xor_hw_desc->ops[src_idx].h |= addrh;
+ break;
+ }
+}
+
+/**
+ * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
+ */
+static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan, u32 mult_index,
+ int sg_index, unsigned char mult_value)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+ u32 *psgu;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+
+ switch (sg_index) {
+ /* for RXOR operations set multiplier
+ * into source cued address
+ */
+ case DMA_CDB_SG_SRC:
+ psgu = &dma_hw_desc->sg1u;
+ break;
+ /* for WXOR operations set multiplier
+ * into destination cued address(es)
+ */
+ case DMA_CDB_SG_DST1:
+ psgu = &dma_hw_desc->sg2u;
+ break;
+ case DMA_CDB_SG_DST2:
+ psgu = &dma_hw_desc->sg3u;
+ break;
+ default:
+ BUG();
+ }
+
+ *psgu |= cpu_to_le32(mult_value << mult_index);
+ break;
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
+ */
+static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan,
+ dma_addr_t addrh, dma_addr_t addrl,
+ u32 dst_idx)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+ phys_addr_t addr64, tmphi, tmplow;
+ u32 *psgu, *psgl;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ if (!addrh) {
+ addr64 = addrl;
+ tmphi = (addr64 >> 32);
+ tmplow = (addr64 & 0xFFFFFFFF);
+ } else {
+ tmphi = addrh;
+ tmplow = addrl;
+ }
+ dma_hw_desc = desc->hw_desc;
+
+ psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
+ psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;
+
+ *psgl = cpu_to_le32((u32)tmplow);
+ *psgu |= cpu_to_le32((u32)tmphi);
+ break;
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->cbtal = addrl;
+ xor_hw_desc->cbtah |= addrh;
+ break;
+ }
+}
+
+/**
+ * ppc440spe_desc_set_byte_count - set number of data bytes involved
+ * into the operation
+ */
+static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan,
+ u32 byte_count)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->cnt = cpu_to_le32(byte_count);
+ break;
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->cbbc = byte_count;
+ break;
+ }
+}
+
+/**
+ * ppc440spe_desc_set_rxor_block_size - set RXOR block size
+ */
+static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
+{
+ /* assume that byte_count is aligned on the 512-boundary;
+ * thus write it directly to the register (bits 23:31 are
+ * reserved there).
+ */
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
+}
+
+/**
+ * ppc440spe_desc_set_dcheck - set CHECK pattern
+ */
+static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan, u8 *qword)
+{
+ struct dma_cdb *dma_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+ iowrite32(qword[0], &dma_hw_desc->sg3l);
+ iowrite32(qword[4], &dma_hw_desc->sg3u);
+ iowrite32(qword[8], &dma_hw_desc->sg2l);
+ iowrite32(qword[12], &dma_hw_desc->sg2u);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * ppc440spe_xor_set_link - set link address in xor CB
+ */
+static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
+ struct ppc440spe_adma_desc_slot *next_desc)
+{
+ struct xor_cb *xor_hw_desc = prev_desc->hw_desc;
+
+ if (unlikely(!next_desc || !(next_desc->phys))) {
+ printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
+ __func__, next_desc,
+ next_desc ? next_desc->phys : 0);
+ BUG();
+ }
+
+ xor_hw_desc->cbs = 0;
+ xor_hw_desc->cblal = next_desc->phys;
+ xor_hw_desc->cblah = 0;
+ xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
+}
+
+/**
+ * ppc440spe_desc_set_link - set the address of descriptor following this
+ * descriptor in chain
+ */
+static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *prev_desc,
+ struct ppc440spe_adma_desc_slot *next_desc)
+{
+ unsigned long flags;
+ struct ppc440spe_adma_desc_slot *tail = next_desc;
+
+ if (unlikely(!prev_desc || !next_desc ||
+ (prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
+ /* If the previous 'next' pointer is being overwritten, something is
+ * wrong; though we may refetch from append to initiate list
+ * processing, and in that case it's ok.
+ */
+ printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
+ "prev->hw_next=0x%p\n", __func__, prev_desc,
+ next_desc, prev_desc ? prev_desc->hw_next : 0);
+ BUG();
+ }
+
+ local_irq_save(flags);
+
+ /* do s/w chaining both for DMA and XOR descriptors */
+ prev_desc->hw_next = next_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ break;
+ case PPC440SPE_XOR_ID:
+ /* bind descriptor to the chain */
+ while (tail->hw_next)
+ tail = tail->hw_next;
+ xor_last_linked = tail;
+
+ if (prev_desc == xor_last_submit)
+ /* do not link to the last submitted CB */
+ break;
+ ppc440spe_xor_set_link(prev_desc, next_desc);
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
+ */
+static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan, int src_idx)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+ /* May have 0, 1, 2, or 3 sources */
+ switch (dma_hw_desc->opc) {
+ case DMA_CDB_OPC_NO_OP:
+ case DMA_CDB_OPC_DFILL128:
+ return 0;
+ case DMA_CDB_OPC_DCHECK128:
+ if (unlikely(src_idx)) {
+ printk(KERN_ERR "%s: try to get %d source for"
+ " DCHECK128\n", __func__, src_idx);
+ BUG();
+ }
+ return le32_to_cpu(dma_hw_desc->sg1l);
+ case DMA_CDB_OPC_MULTICAST:
+ case DMA_CDB_OPC_MV_SG1_SG2:
+ if (unlikely(src_idx > 2)) {
+ printk(KERN_ERR "%s: try to get %d source from"
+ " DMA descr\n", __func__, src_idx);
+ BUG();
+ }
+ if (src_idx) {
+ if (le32_to_cpu(dma_hw_desc->sg1u) &
+ DMA_CUED_XOR_WIN_MSK) {
+ u8 region;
+
+ if (src_idx == 1)
+ return le32_to_cpu(
+ dma_hw_desc->sg1l) +
+ desc->unmap_len;
+
+ region = (le32_to_cpu(
+ dma_hw_desc->sg1u)) >>
+ DMA_CUED_REGION_OFF;
+
+ region &= DMA_CUED_REGION_MSK;
+ switch (region) {
+ case DMA_RXOR123:
+ return le32_to_cpu(
+ dma_hw_desc->sg1l) +
+ (desc->unmap_len << 1);
+ case DMA_RXOR124:
+ return le32_to_cpu(
+ dma_hw_desc->sg1l) +
+ (desc->unmap_len * 3);
+ case DMA_RXOR125:
+ return le32_to_cpu(
+ dma_hw_desc->sg1l) +
+ (desc->unmap_len << 2);
+ default:
+ printk(KERN_ERR
+ "%s: try to"
+ " get src3 for region %02x"
+ "PPC440SPE_DESC_RXOR12?\n",
+ __func__, region);
+ BUG();
+ }
+ } else {
+ printk(KERN_ERR
+ "%s: try to get %d"
+ " source for non-cued descr\n",
+ __func__, src_idx);
+ BUG();
+ }
+ }
+ return le32_to_cpu(dma_hw_desc->sg1l);
+ default:
+ printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
+ __func__, dma_hw_desc->opc);
+ BUG();
+ }
+ return le32_to_cpu(dma_hw_desc->sg1l);
+ case PPC440SPE_XOR_ID:
+ /* May have up to 16 sources */
+ xor_hw_desc = desc->hw_desc;
+ return xor_hw_desc->ops[src_idx].l;
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_desc_get_dest_addr - extract the destination address from the
+ * descriptor
+ */
+static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan, int idx)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+
+ if (likely(!idx))
+ return le32_to_cpu(dma_hw_desc->sg2l);
+ return le32_to_cpu(dma_hw_desc->sg3l);
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ return xor_hw_desc->cbtal;
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_desc_get_src_num - extract the number of source addresses from
+ * the descriptor
+ */
+static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+
+ switch (dma_hw_desc->opc) {
+ case DMA_CDB_OPC_NO_OP:
+ case DMA_CDB_OPC_DFILL128:
+ return 0;
+ case DMA_CDB_OPC_DCHECK128:
+ return 1;
+ case DMA_CDB_OPC_MV_SG1_SG2:
+ case DMA_CDB_OPC_MULTICAST:
+ /*
+ * Only RXOR operations have more than
+ * one source
+ */
+ if (le32_to_cpu(dma_hw_desc->sg1u) &
+ DMA_CUED_XOR_WIN_MSK) {
+ /* RXOR op, there are 2 or 3 sources */
+ if (((le32_to_cpu(dma_hw_desc->sg1u) >>
+ DMA_CUED_REGION_OFF) &
+ DMA_CUED_REGION_MSK) == DMA_RXOR12) {
+ /* RXOR 1-2 */
+ return 2;
+ } else {
+ /* RXOR 1-2-3/1-2-4/1-2-5 */
+ return 3;
+ }
+ }
+ return 1;
+ default:
+ printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
+ __func__, dma_hw_desc->opc);
+ BUG();
+ }
+ case PPC440SPE_XOR_ID:
+ /* up to 16 sources */
+ xor_hw_desc = desc->hw_desc;
+ return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_desc_get_dst_num - get the number of destination addresses in
+ * this descriptor
+ */
+static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ struct dma_cdb *dma_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* May be 1 or 2 destinations */
+ dma_hw_desc = desc->hw_desc;
+ switch (dma_hw_desc->opc) {
+ case DMA_CDB_OPC_NO_OP:
+ case DMA_CDB_OPC_DCHECK128:
+ return 0;
+ case DMA_CDB_OPC_MV_SG1_SG2:
+ case DMA_CDB_OPC_DFILL128:
+ return 1;
+ case DMA_CDB_OPC_MULTICAST:
+ if (desc->dst_cnt == 2)
+ return 2;
+ else
+ return 1;
+ default:
+ printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
+ __func__, dma_hw_desc->opc);
+ BUG();
+ }
+ case PPC440SPE_XOR_ID:
+ /* Always only 1 destination */
+ return 1;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_desc_get_link - get the address of the descriptor that
+ * follows this one
+ */
+static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ if (!desc->hw_next)
+ return 0;
+
+ return desc->hw_next->phys;
+}
+
+/**
+ * ppc440spe_desc_is_aligned - check alignment
+ */
+static inline int ppc440spe_desc_is_aligned(
+ struct ppc440spe_adma_desc_slot *desc, int num_slots)
+{
+ return (desc->idx & (num_slots - 1)) ? 0 : 1;
+}
+
+/**
+ * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
+ * XOR operation
+ */
+static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ int slot_cnt;
+
+ /* each XOR descriptor provides up to 16 source operands */
+ slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;
+
+ if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
+ return slot_cnt;
+
+ printk(KERN_ERR "%s: len %d > max %d !!\n",
+ __func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
+ BUG();
+ return slot_cnt;
+}
+
+/**
+ * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
+ * DMA2 PQ operation
+ */
+static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
+ int src_cnt, size_t len)
+{
+ signed long long order = 0;
+ int state = 0;
+ int addr_count = 0;
+ int i;
+ for (i = 1; i < src_cnt; i++) {
+ dma_addr_t cur_addr = srcs[i];
+ dma_addr_t old_addr = srcs[i-1];
+ switch (state) {
+ case 0:
+ if (cur_addr == old_addr + len) {
+ /* direct RXOR */
+ order = 1;
+ state = 1;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else if (old_addr == cur_addr + len) {
+ /* reverse RXOR */
+ order = -1;
+ state = 1;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else {
+ state = 3;
+ }
+ break;
+ case 1:
+ if (i == src_cnt-2 || (order == -1
+ && cur_addr != old_addr - len)) {
+ order = 0;
+ state = 0;
+ addr_count++;
+ } else if (cur_addr == old_addr + len*order) {
+ state = 2;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else if (cur_addr == old_addr + 2*len) {
+ state = 2;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else if (cur_addr == old_addr + 3*len) {
+ state = 2;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else {
+ order = 0;
+ state = 0;
+ addr_count++;
+ }
+ break;
+ case 2:
+ order = 0;
+ state = 0;
+ addr_count++;
+ break;
+ }
+ if (state == 3)
+ break;
+ }
+ if (src_cnt <= 1 || (state != 1 && state != 2)) {
+ pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
+ __func__, src_cnt, state, addr_count, order);
+ for (i = 0; i < src_cnt; i++)
+ pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
+ BUG();
+ }
+
+ return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
+}
+
+
+/******************************************************************************
+ * ADMA channel low-level routines
+ ******************************************************************************/
+
+static u32
+ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
+static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);
+
+/**
+ * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
+ */
+static void ppc440spe_adma_device_clear_eot_status(
+ struct ppc440spe_adma_chan *chan)
+{
+ struct dma_regs *dma_reg;
+ struct xor_regs *xor_reg;
+ u8 *p = chan->device->dma_desc_pool_virt;
+ struct dma_cdb *cdb;
+ u32 rv, i;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* read FIFO to ack */
+ dma_reg = chan->device->dma_reg;
+ while ((rv = ioread32(&dma_reg->csfpl))) {
+ i = rv & DMA_CDB_ADDR_MSK;
+ cdb = (struct dma_cdb *)&p[i -
+ (u32)chan->device->dma_desc_pool];
+
+ /* Clear opcode to ack. This is necessary for
+ * ZeroSum operations only
+ */
+ cdb->opc = 0;
+
+ if (test_bit(PPC440SPE_RXOR_RUN,
+ &ppc440spe_rxor_state)) {
+ /* probably this is a completed RXOR op,
+ * get pointer to CDB using the fact that
+ * physical and virtual addresses of CDB
+ * in pools have the same offsets
+ */
+ if (le32_to_cpu(cdb->sg1u) &
+ DMA_CUED_XOR_BASE) {
+ /* this is a RXOR */
+ clear_bit(PPC440SPE_RXOR_RUN,
+ &ppc440spe_rxor_state);
+ }
+ }
+
+ if (rv & DMA_CDB_STATUS_MSK) {
+ /* ZeroSum check failed
+ */
+ struct ppc440spe_adma_desc_slot *iter;
+ dma_addr_t phys = rv & ~DMA_CDB_MSK;
+
+ /*
+ * Update the status of corresponding
+ * descriptor.
+ */
+ list_for_each_entry(iter, &chan->chain,
+ chain_node) {
+ if (iter->phys == phys)
+ break;
+ }
+ /*
+ * if we cannot find the corresponding
+ * slot, it's a bug
+ */
+ BUG_ON(&iter->chain_node == &chan->chain);
+
+ if (iter->xor_check_result) {
+ if (test_bit(PPC440SPE_DESC_PCHECK,
+ &iter->flags)) {
+ *iter->xor_check_result |=
+ SUM_CHECK_P_RESULT;
+ } else
+ if (test_bit(PPC440SPE_DESC_QCHECK,
+ &iter->flags)) {
+ *iter->xor_check_result |=
+ SUM_CHECK_Q_RESULT;
+ } else
+ BUG();
+ }
+ }
+ }
+
+ rv = ioread32(&dma_reg->dsts);
+ if (rv) {
+ pr_err("DMA%d err status: 0x%x\n",
+ chan->device->id, rv);
+ /* write back to clear */
+ iowrite32(rv, &dma_reg->dsts);
+ }
+ break;
+ case PPC440SPE_XOR_ID:
+ /* reset status bits to ack */
+ xor_reg = chan->device->xor_reg;
+ rv = ioread32be(&xor_reg->sr);
+ iowrite32be(rv, &xor_reg->sr);
+
+ if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
+ if (rv & XOR_IE_RPTIE_BIT) {
+ /* Read PLB Timeout Error.
+ * Try to resubmit the CB
+ */
+ u32 val = ioread32be(&xor_reg->ccbalr);
+
+ iowrite32be(val, &xor_reg->cblalr);
+
+ val = ioread32be(&xor_reg->crsr);
+ iowrite32be(val | XOR_CRSR_XAE_BIT,
+ &xor_reg->crsr);
+ } else
+ pr_err("XOR ERR 0x%x status\n", rv);
+ break;
+ }
+
+ /* if the XORcore is idle, but there are unprocessed CBs
+ * then refetch the s/w chain here
+ */
+ if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
+ do_xor_refetch)
+ ppc440spe_chan_append(chan);
+ break;
+ }
+}
+
+/**
+ * ppc440spe_chan_is_busy - get the channel status
+ */
+static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
+{
+ struct dma_regs *dma_reg;
+ struct xor_regs *xor_reg;
+ int busy = 0;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_reg = chan->device->dma_reg;
+ /* if command FIFO's head and tail pointers are equal and
+ * status tail is the same as command, then channel is free
+ */
+ if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
+ ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
+ busy = 1;
+ break;
+ case PPC440SPE_XOR_ID:
+ /* use the special status bit for the XORcore
+ */
+ xor_reg = chan->device->xor_reg;
+ busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
+ break;
+ }
+
+ return busy;
+}
+
+/**
+ * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
+ */
+static void ppc440spe_chan_set_first_xor_descriptor(
+ struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *next_desc)
+{
+ struct xor_regs *xor_reg = chan->device->xor_reg;
+
+ if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
+ printk(KERN_INFO "%s: Warn: XORcore is running "
+ "when try to set the first CDB!\n",
+ __func__);
+
+ xor_last_submit = xor_last_linked = next_desc;
+
+ iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);
+
+ iowrite32be(next_desc->phys, &xor_reg->cblalr);
+ iowrite32be(0, &xor_reg->cblahr);
+ iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
+ &xor_reg->cbcr);
+
+ chan->hw_chain_inited = 1;
+}
+
+/**
+ * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
+ * called with irqs disabled
+ */
+static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *desc)
+{
+ u32 pcdb;
+ struct dma_regs *dma_reg = chan->device->dma_reg;
+
+ pcdb = desc->phys;
+ if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
+ pcdb |= DMA_CDB_NO_INT;
+
+ chan_last_sub[chan->device->id] = desc;
+
+ ADMA_LL_DBG(print_cb(chan, desc->hw_desc));
+
+ iowrite32(pcdb, &dma_reg->cpfpl);
+}
+
+/**
+ * ppc440spe_chan_append - update the h/w chain in the channel
+ */
+static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
+{
+ struct xor_regs *xor_reg;
+ struct ppc440spe_adma_desc_slot *iter;
+ struct xor_cb *xcb;
+ u32 cur_desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ cur_desc = ppc440spe_chan_get_current_descriptor(chan);
+
+ if (likely(cur_desc)) {
+ iter = chan_last_sub[chan->device->id];
+ BUG_ON(!iter);
+ } else {
+ /* first peer */
+ iter = chan_first_cdb[chan->device->id];
+ BUG_ON(!iter);
+ ppc440spe_dma_put_desc(chan, iter);
+ chan->hw_chain_inited = 1;
+ }
+
+ /* is there something new to append */
+ if (!iter->hw_next)
+ break;
+
+ /* flush descriptors from the s/w queue to fifo */
+ list_for_each_entry_continue(iter, &chan->chain, chain_node) {
+ ppc440spe_dma_put_desc(chan, iter);
+ if (!iter->hw_next)
+ break;
+ }
+ break;
+ case PPC440SPE_XOR_ID:
+ /* update h/w links and refetch */
+ if (!xor_last_submit->hw_next)
+ break;
+
+ xor_reg = chan->device->xor_reg;
+ /* the last linked CDB has to generate an interrupt
+ * so that we are able to append the next lists to h/w
+ * regardless of the XOR engine state at the moment
+ * these next lists are appended
+ */
+ xcb = xor_last_linked->hw_desc;
+ xcb->cbc |= XOR_CBCR_CBCE_BIT;
+
+ if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
+ /* XORcore is idle. Refetch now */
+ do_xor_refetch = 0;
+ ppc440spe_xor_set_link(xor_last_submit,
+ xor_last_submit->hw_next);
+
+ ADMA_LL_DBG(print_cb_list(chan,
+ xor_last_submit->hw_next));
+
+ xor_last_submit = xor_last_linked;
+ iowrite32be(ioread32be(&xor_reg->crsr) |
+ XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
+ &xor_reg->crsr);
+ } else {
+ /* XORcore is running. Refetch later in the handler */
+ do_xor_refetch = 1;
+ }
+
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
+ */
+static u32
+ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
+{
+ struct dma_regs *dma_reg;
+ struct xor_regs *xor_reg;
+
+ if (unlikely(!chan->hw_chain_inited))
+ /* h/w descriptor chain is not initialized yet */
+ return 0;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_reg = chan->device->dma_reg;
+ return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
+ case PPC440SPE_XOR_ID:
+ xor_reg = chan->device->xor_reg;
+ return ioread32be(&xor_reg->ccbalr);
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_chan_run - enable the channel
+ */
+static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
+{
+ struct xor_regs *xor_reg;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* DMAs are always enabled, do nothing */
+ break;
+ case PPC440SPE_XOR_ID:
+ /* drain write buffer */
+ xor_reg = chan->device->xor_reg;
+
+ /* fetch descriptor pointed to in <link> */
+ iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
+ &xor_reg->crsr);
+ break;
+ }
+}
+
+/******************************************************************************
+ * ADMA device level
+ ******************************************************************************/
+
+static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
+static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);
+
+static dma_cookie_t
+ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);
+
+static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t addr, int index);
+static void
+ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t addr, int index);
+
+static void
+ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t *paddr, unsigned long flags);
+static void
+ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t addr, int index);
+static void
+ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
+ unsigned char mult, int index, int dst_pos);
+static void
+ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t paddr, dma_addr_t qaddr);
+
+static struct page *ppc440spe_rxor_srcs[32];
+
+/**
+ * ppc440spe_can_rxor - check if the operands may be processed with RXOR
+ */
+static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
+{
+ int i, order = 0, state = 0;
+ int idx = 0;
+
+ if (unlikely(!(src_cnt > 1)))
+ return 0;
+
+ BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));
+
+ /* Skip holes in the source list before checking */
+ for (i = 0; i < src_cnt; i++) {
+ if (!srcs[i])
+ continue;
+ ppc440spe_rxor_srcs[idx++] = srcs[i];
+ }
+ src_cnt = idx;
+
+ for (i = 1; i < src_cnt; i++) {
+ char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
+ char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);
+
+ switch (state) {
+ case 0:
+ if (cur_addr == old_addr + len) {
+ /* direct RXOR */
+ order = 1;
+ state = 1;
+ } else if (old_addr == cur_addr + len) {
+ /* reverse RXOR */
+ order = -1;
+ state = 1;
+ } else
+ goto out;
+ break;
+ case 1:
+ if ((i == src_cnt - 2) ||
+ (order == -1 && cur_addr != old_addr - len)) {
+ order = 0;
+ state = 0;
+ } else if ((cur_addr == old_addr + len * order) ||
+ (cur_addr == old_addr + 2 * len) ||
+ (cur_addr == old_addr + 3 * len)) {
+ state = 2;
+ } else {
+ order = 0;
+ state = 0;
+ }
+ break;
+ case 2:
+ order = 0;
+ state = 0;
+ break;
+ }
+ }
+
+out:
+ if (state == 1 || state == 2)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ppc440spe_adma_device_estimate - estimate the efficiency of processing
+ * the operation given on this channel. It's assumed that 'chan' is
+ * capable of processing the 'cap' type of operation.
+ * @chan: channel to use
+ * @cap: type of transaction
+ * @dst_lst: array of destination pointers
+ * @dst_cnt: number of destination operands
+ * @src_lst: array of source pointers
+ * @src_cnt: number of source operands
+ * @src_sz: size of each source operand
+ */
+static int ppc440spe_adma_estimate(struct dma_chan *chan,
+ enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
+ struct page **src_lst, int src_cnt, size_t src_sz)
+{
+ int ef = 1;
+
+ if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
+ /* If RAID-6 capabilities were not activated don't try
+ * to use them
+ */
+ if (unlikely(!ppc440spe_r6_enabled))
+ return -1;
+ }
+ /* In the current implementation of the ppc440spe ADMA driver it
+ * makes sense to pick out only the pq case, because it may be
+ * processed:
+ * (1) either using the Biskup method on DMA2;
+ * (2) or on DMA0/1.
+ * Thus we favour (1) if the sources are suitable;
+ * otherwise let it be processed on one of the DMA0/1 engines.
+ * In the sum_product case, where the destination is also a
+ * source, process it on DMA0/1 only.
+ */
+ if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {
+
+ if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
+ ef = 0; /* sum_product case, process on DMA0/1 */
+ else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
+ ef = 3; /* override (DMA0/1 + idle) */
+ else
+ ef = 0; /* can't process on DMA2 if !rxor */
+ }
+
+ /* channel idleness increases the priority */
+ if (likely(ef) &&
+ !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
+ ef++;
+
+ return ef;
+}
+
+struct dma_chan *
+ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
+ struct page **dst_lst, int dst_cnt, struct page **src_lst,
+ int src_cnt, size_t src_sz)
+{
+ struct dma_chan *best_chan = NULL;
+ struct ppc_dma_chan_ref *ref;
+ int best_rank = -1;
+
+ if (unlikely(!src_sz))
+ return NULL;
+ if (src_sz > PAGE_SIZE) {
+ /*
+ * should a user of the API ever pass > PAGE_SIZE requests,
+ * we sort out cases where temporary page-sized buffers
+ * are used.
+ */
+ switch (cap) {
+ case DMA_PQ:
+ if (src_cnt == 1 && dst_lst[1] == src_lst[0])
+ return NULL;
+ if (src_cnt == 2 && dst_lst[1] == src_lst[1])
+ return NULL;
+ break;
+ case DMA_PQ_VAL:
+ case DMA_XOR_VAL:
+ return NULL;
+ default:
+ break;
+ }
+ }
+
+ list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
+ if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
+ int rank;
+
+ rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
+ dst_cnt, src_lst, src_cnt, src_sz);
+ if (rank > best_rank) {
+ best_rank = rank;
+ best_chan = ref->chan;
+ }
+ }
+ }
+
+ return best_chan;
+}
+EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);
+
+/**
+ * ppc440spe_get_group_entry - get group entry with index idx
+ * @tdesc: is the last allocated slot in the group.
+ */
+static struct ppc440spe_adma_desc_slot *
+ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
+{
+ struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
+ int i = 0;
+
+ if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
+ printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
+ __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
+ BUG();
+ }
+
+ list_for_each_entry(iter, &tdesc->group_list, chain_node) {
+ if (i++ == entry_idx)
+ break;
+ }
+ return iter;
+}
+
+/**
+ * ppc440spe_adma_free_slots - flags descriptor slots for reuse
+ * @slot: Slot to free
+ * Caller must hold &ppc440spe_chan->lock while calling this function
+ */
+static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
+ struct ppc440spe_adma_chan *chan)
+{
+ int stride = slot->slots_per_op;
+
+ while (stride--) {
+ slot->slots_per_op = 0;
+ slot = list_entry(slot->slot_node.next,
+ struct ppc440spe_adma_desc_slot,
+ slot_node);
+ }
+}
+
+static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *desc)
+{
+ u32 src_cnt, dst_cnt;
+ dma_addr_t addr;
+
+ /*
+ * get the number of sources & destinations
+ * included in this descriptor and unmap
+ * them all
+ */
+ src_cnt = ppc440spe_desc_get_src_num(desc, chan);
+ dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
+
+ /* unmap destinations */
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ while (dst_cnt--) {
+ addr = ppc440spe_desc_get_dest_addr(
+ desc, chan, dst_cnt);
+ dma_unmap_page(chan->device->dev,
+ addr, desc->unmap_len,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ /* unmap sources */
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ while (src_cnt--) {
+ addr = ppc440spe_desc_get_src_addr(
+ desc, chan, src_cnt);
+ dma_unmap_page(chan->device->dev,
+ addr, desc->unmap_len,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
+/**
+ * ppc440spe_adma_run_tx_complete_actions - call functions to be called
+ * upon completion
+ */
+static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
+ struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan,
+ dma_cookie_t cookie)
+{
+ int i;
+
+ BUG_ON(desc->async_tx.cookie < 0);
+ if (desc->async_tx.cookie > 0) {
+ cookie = desc->async_tx.cookie;
+ desc->async_tx.cookie = 0;
+
+ /* call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (desc->async_tx.callback)
+ desc->async_tx.callback(
+ desc->async_tx.callback_param);
+
+ /* unmap dma addresses
+ * (unmap_single vs unmap_page?)
+ *
+ * actually, ppc's dma_unmap_page() functions are empty, so
+ * the following code is just for the sake of completeness
+ */
+ if (chan && chan->needs_unmap && desc->group_head &&
+ desc->unmap_len) {
+ struct ppc440spe_adma_desc_slot *unmap =
+ desc->group_head;
+ /* assume 1 slot per op always */
+ u32 slot_count = unmap->slot_cnt;
+
+ /* Run through the group list and unmap addresses */
+ for (i = 0; i < slot_count; i++) {
+ BUG_ON(!unmap);
+ ppc440spe_adma_unmap(chan, unmap);
+ unmap = unmap->hw_next;
+ }
+ }
+ }
+
+ /* run dependent operations */
+ dma_run_dependencies(&desc->async_tx);
+
+ return cookie;
+}
+
+/**
+ * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
+ */
+static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ /* the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx))
+ return 0;
+
+ /* leave the last descriptor in the chain
+ * so we can append to it
+ */
+ if (list_is_last(&desc->chain_node, &chan->chain) ||
+ desc->phys == ppc440spe_chan_get_current_descriptor(chan))
+ return 1;
+
+ if (chan->device->id != PPC440SPE_XOR_ID) {
+ /* our DMA interrupt handler clears the opc field of
+ * each processed descriptor. For all operation types
+ * except ZeroSum we do not actually need the ack from
+ * the interrupt handler. ZeroSum is a special case
+ * since its result is available from the handler only,
+ * so if we see such a descriptor that has not been
+ * processed yet, leave it in the chain.
+ */
+ struct dma_cdb *cdb = desc->hw_desc;
+ if (cdb->opc == DMA_CDB_OPC_DCHECK128)
+ return 1;
+ }
+
+ dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
+ desc->phys, desc->idx, desc->slots_per_op);
+
+ list_del(&desc->chain_node);
+ ppc440spe_adma_free_slots(desc, chan);
+ return 0;
+}
+
+/**
+ * __ppc440spe_adma_slot_cleanup - the common clean-up routine which runs
+ * through the channel's CDB list until it reaches the descriptor currently
+ * being processed. When the routine determines that all CDBs of a group
+ * have completed, the corresponding callbacks (if any) are called and the
+ * slots are freed.
+ */
+static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
+{
+ struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
+ dma_cookie_t cookie = 0;
+ u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
+ int busy = ppc440spe_chan_is_busy(chan);
+ int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
+
+ dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
+ chan->device->id, __func__);
+
+ if (!current_desc) {
+ /* There were no transactions yet, so
+ * nothing to clean
+ */
+ return;
+ }
+
+ /* free completed slots from the chain starting with
+ * the oldest descriptor
+ */
+ list_for_each_entry_safe(iter, _iter, &chan->chain,
+ chain_node) {
+ dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
+ "busy: %d this_desc: %#llx next_desc: %#x "
+ "cur: %#x ack: %d\n",
+ iter->async_tx.cookie, iter->idx, busy, iter->phys,
+ ppc440spe_desc_get_link(iter, chan), current_desc,
+ async_tx_test_ack(&iter->async_tx));
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+
+ /* do not advance past the current descriptor loaded into the
+ * hardware channel; subsequent descriptors are either in
+ * process or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /* stop the search if we reach the current descriptor and the
+ * channel is busy, or if it appears that the current descriptor
+ * needs to be re-read (i.e. has been appended to)
+ */
+ if (iter->phys == current_desc) {
+ BUG_ON(seen_current++);
+ if (busy || ppc440spe_desc_get_link(iter, chan)) {
+ /* not all descriptors of the group have
+ * been completed; exit.
+ */
+ break;
+ }
+ }
+
+ /* detect the start of a group transaction */
+ if (!slot_cnt && !slots_per_op) {
+ slot_cnt = iter->slot_cnt;
+ slots_per_op = iter->slots_per_op;
+ if (slot_cnt <= slots_per_op) {
+ slot_cnt = 0;
+ slots_per_op = 0;
+ }
+ }
+
+ if (slot_cnt) {
+ if (!group_start)
+ group_start = iter;
+ slot_cnt -= slots_per_op;
+ }
+
+ /* all the members of a group are complete */
+ if (slots_per_op != 0 && slot_cnt == 0) {
+ struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
+ int end_of_chain = 0;
+
+ /* clean up the group */
+ slot_cnt = group_start->slot_cnt;
+ grp_iter = group_start;
+ list_for_each_entry_safe_from(grp_iter, _grp_iter,
+ &chan->chain, chain_node) {
+
+ cookie = ppc440spe_adma_run_tx_complete_actions(
+ grp_iter, chan, cookie);
+
+ slot_cnt -= slots_per_op;
+ end_of_chain = ppc440spe_adma_clean_slot(
+ grp_iter, chan);
+ if (end_of_chain && slot_cnt) {
+ /* Should wait for ZeroSum completion */
+ if (cookie > 0)
+ chan->completed_cookie = cookie;
+ return;
+ }
+
+ if (slot_cnt == 0 || end_of_chain)
+ break;
+ }
+
+ /* the group should be complete at this point */
+ BUG_ON(slot_cnt);
+
+ slots_per_op = 0;
+ group_start = NULL;
+ if (end_of_chain)
+ break;
+ else
+ continue;
+ } else if (slots_per_op) /* wait for group completion */
+ continue;
+
+ cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
+ cookie);
+
+ if (ppc440spe_adma_clean_slot(iter, chan))
+ break;
+ }
+
+ BUG_ON(!seen_current);
+
+ if (cookie > 0) {
+ chan->completed_cookie = cookie;
+ pr_debug("\tcompleted cookie %d\n", cookie);
+ }
+
+}
+
+/**
+ * ppc440spe_adma_tasklet - clean up watch-dog initiator
+ */
+static void ppc440spe_adma_tasklet(unsigned long data)
+{
+ struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;
+
+ spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
+ __ppc440spe_adma_slot_cleanup(chan);
+ spin_unlock(&chan->lock);
+}
+
+/**
+ * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
+ */
+static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
+{
+ spin_lock_bh(&chan->lock);
+ __ppc440spe_adma_slot_cleanup(chan);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * ppc440spe_adma_alloc_slots - allocate free slots (if any)
+ */
+static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
+ struct ppc440spe_adma_chan *chan, int num_slots,
+ int slots_per_op)
+{
+ struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
+ struct ppc440spe_adma_desc_slot *alloc_start = NULL;
+ struct list_head chain = LIST_HEAD_INIT(chain);
+ int slots_found, retry = 0;
+
+
+ BUG_ON(!num_slots || !slots_per_op);
+ /* start the search from the last allocated descriptor;
+ * if a contiguous allocation cannot be found, start searching
+ * from the beginning of the list
+ */
+retry:
+ slots_found = 0;
+ if (retry == 0)
+ iter = chan->last_used;
+ else
+ iter = list_entry(&chan->all_slots,
+ struct ppc440spe_adma_desc_slot,
+ slot_node);
+ list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
+ slot_node) {
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+ if (iter->slots_per_op) {
+ slots_found = 0;
+ continue;
+ }
+
+ /* start the allocation if the slot is correctly aligned */
+ if (!slots_found++)
+ alloc_start = iter;
+
+ if (slots_found == num_slots) {
+ struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
+ struct ppc440spe_adma_desc_slot *last_used = NULL;
+
+ iter = alloc_start;
+ while (num_slots) {
+ int i;
+ /* pre-ack all but the last descriptor */
+ if (num_slots != slots_per_op)
+ async_tx_ack(&iter->async_tx);
+
+ list_add_tail(&iter->chain_node, &chain);
+ alloc_tail = iter;
+ iter->async_tx.cookie = 0;
+ iter->hw_next = NULL;
+ iter->flags = 0;
+ iter->slot_cnt = num_slots;
+ iter->xor_check_result = NULL;
+ for (i = 0; i < slots_per_op; i++) {
+ iter->slots_per_op = slots_per_op - i;
+ last_used = iter;
+ iter = list_entry(iter->slot_node.next,
+ struct ppc440spe_adma_desc_slot,
+ slot_node);
+ }
+ num_slots -= slots_per_op;
+ }
+ alloc_tail->group_head = alloc_start;
+ alloc_tail->async_tx.cookie = -EBUSY;
+ list_splice(&chain, &alloc_tail->group_list);
+ chan->last_used = last_used;
+ return alloc_tail;
+ }
+ }
+ if (!retry++)
+ goto retry;
+
+ /* try to free some slots if the allocation fails */
+ tasklet_schedule(&chan->irq_tasklet);
+ return NULL;
+}
+
+/**
+ * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
+ */
+static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *slot = NULL;
+ char *hw_desc;
+ int i, db_sz;
+ int init;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ init = ppc440spe_chan->slots_allocated ? 0 : 1;
+ chan->chan_id = ppc440spe_chan->device->id;
+
+ /* Allocate descriptor slots */
+ i = ppc440spe_chan->slots_allocated;
+ if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
+ db_sz = sizeof(struct dma_cdb);
+ else
+ db_sz = sizeof(struct xor_cb);
+
+ for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
+ slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
+ GFP_KERNEL);
+ if (!slot) {
+ printk(KERN_INFO "SPE ADMA Channel only initialized"
+ " %d descriptor slots", i--);
+ break;
+ }
+
+ hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
+ slot->hw_desc = (void *) &hw_desc[i * db_sz];
+ dma_async_tx_descriptor_init(&slot->async_tx, chan);
+ slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
+ INIT_LIST_HEAD(&slot->chain_node);
+ INIT_LIST_HEAD(&slot->slot_node);
+ INIT_LIST_HEAD(&slot->group_list);
+ slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
+ slot->idx = i;
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ ppc440spe_chan->slots_allocated++;
+ list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
+ spin_unlock_bh(&ppc440spe_chan->lock);
+ }
+
+ if (i && !ppc440spe_chan->last_used) {
+ ppc440spe_chan->last_used =
+ list_entry(ppc440spe_chan->all_slots.next,
+ struct ppc440spe_adma_desc_slot,
+ slot_node);
+ }
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: allocated %d descriptor slots\n",
+ ppc440spe_chan->device->id, i);
+
+ /* initialize the channel and the chain with a null operation */
+ if (init) {
+ switch (ppc440spe_chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ ppc440spe_chan->hw_chain_inited = 0;
+ /* Use WXOR for self-testing */
+ if (!ppc440spe_r6_tchan)
+ ppc440spe_r6_tchan = ppc440spe_chan;
+ break;
+ case PPC440SPE_XOR_ID:
+ ppc440spe_chan_start_null_xor(ppc440spe_chan);
+ break;
+ default:
+ BUG();
+ }
+ ppc440spe_chan->needs_unmap = 1;
+ }
+
+ return (i > 0) ? i : -ENOMEM;
+}
+
+/**
+ * ppc440spe_desc_assign_cookie - assign a cookie
+ */
+static dma_cookie_t ppc440spe_desc_assign_cookie(
+ struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *desc)
+{
+ dma_cookie_t cookie = chan->common.cookie;
+
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ chan->common.cookie = desc->async_tx.cookie = cookie;
+ return cookie;
+}
+
+/**
+ * ppc440spe_rxor_set_region - set the RXOR region mask into the XOR CB operand
+ */
+static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
+ u8 xor_arg_no, u32 mask)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+
+ xcb->ops[xor_arg_no].h |= mask;
+}
+
+/**
+ * ppc440spe_rxor_set_src - set the RXOR source address into the XOR CB operand
+ */
+static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
+ u8 xor_arg_no, dma_addr_t addr)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+
+ xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
+ xcb->ops[xor_arg_no].l = addr;
+}
+
+/**
+ * ppc440spe_rxor_set_mult - set the RXOR multiplier into the XOR CB operand
+ */
+static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
+ u8 xor_arg_no, u8 idx, u8 mult)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+
+ xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
+}
+
+/**
+ * ppc440spe_adma_check_threshold - append CDBs to the h/w chain if the
+ * threshold has been reached
+ */
+static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
+{
+ dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
+ chan->device->id, chan->pending);
+
+ if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
+ chan->pending = 0;
+ ppc440spe_chan_append(chan);
+ }
+}
+
+/**
+ * ppc440spe_adma_tx_submit - submit a new descriptor group to the channel
+ * (the descriptors are not necessarily submitted to the h/w chain
+ * right away)
+ */
+static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc;
+ struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
+ struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
+ int slot_cnt;
+ int slots_per_op;
+ dma_cookie_t cookie;
+
+ sw_desc = tx_to_ppc440spe_adma_slot(tx);
+
+ group_start = sw_desc->group_head;
+ slot_cnt = group_start->slot_cnt;
+ slots_per_op = group_start->slots_per_op;
+
+ spin_lock_bh(&chan->lock);
+
+ cookie = ppc440spe_desc_assign_cookie(chan, sw_desc);
+
+ if (unlikely(list_empty(&chan->chain))) {
+ /* first descriptor in the chain */
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ chan_first_cdb[chan->device->id] = group_start;
+ } else {
+ /* not the first descriptor; bind the CDBs to the existing chain */
+ old_chain_tail = list_entry(chan->chain.prev,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ list_splice_init(&sw_desc->group_list,
+ &old_chain_tail->chain_node);
+ /* fix up the hardware chain */
+ ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
+ }
+
+ /* increment the pending count by the number of operations */
+ chan->pending += slot_cnt / slots_per_op;
+ ppc440spe_adma_check_threshold(chan);
+ spin_unlock_bh(&chan->lock);
+
+ dev_dbg(chan->device->common.dev,
+ "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
+ chan->device->id, __func__,
+ sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
+
+ return cookie;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
+ struct dma_chan *chan, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
+ __func__);
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ slot_cnt = slots_per_op = 1;
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
+ group_start->unmap_len = 0;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ if (unlikely(!len))
+ return NULL;
+
+ BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s len: %u int_en %d\n",
+ ppc440spe_chan->device->id, __func__, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+ slot_cnt = slots_per_op = 1;
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc440spe_desc_init_memcpy(group_start, flags);
+ ppc440spe_adma_set_dest(group_start, dma_dest, 0);
+ ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
+ ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
+ struct dma_chan *chan, dma_addr_t dma_dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ if (unlikely(!len))
+ return NULL;
+
+ BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s cal: %u len: %u int_en %d\n",
+ ppc440spe_chan->device->id, __func__, value, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ slot_cnt = slots_per_op = 1;
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc440spe_desc_init_memset(group_start, value, flags);
+ ppc440spe_adma_set_dest(group_start, dma_dest, 0);
+ ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
+ struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t *dma_src, u32 src_cnt, size_t len,
+ unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
+ dma_dest, dma_src, src_cnt));
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
+ ppc440spe_chan->device->id, __func__, src_cnt, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc440spe_desc_init_xor(group_start, src_cnt, flags);
+ ppc440spe_adma_set_dest(group_start, dma_dest, 0);
+ while (src_cnt--)
+ ppc440spe_adma_memcpy_xor_set_src(group_start,
+ dma_src[src_cnt], src_cnt);
+ ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static inline void
+ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
+ int src_cnt);
+static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
+
+/**
+ * ppc440spe_adma_init_dma2rxor_slot - initialize the CDB sources of a DMA2 RXOR slot
+ */
+static void ppc440spe_adma_init_dma2rxor_slot(
+ struct ppc440spe_adma_desc_slot *desc,
+ dma_addr_t *src, int src_cnt)
+{
+ int i;
+
+ /* initialize CDB */
+ for (i = 0; i < src_cnt; i++) {
+ ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
+ desc->src_cnt, (u32)src[i]);
+ }
+}
+
+/**
+ * ppc440spe_dma01_prep_mult - prepare descriptors for a Q operation
+ * where the destination is also a source
+ */
+static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
+ struct ppc440spe_adma_chan *ppc440spe_chan,
+ dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
+ const unsigned char *scf, size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL;
+ unsigned long op = 0;
+ int slot_cnt;
+
+ set_bit(PPC440SPE_DESC_WXOR, &op);
+ slot_cnt = 2;
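+ /* Note: two CDBs are chained below - a MULTICAST that copies the
+ * source into the real destination and the q page, followed by an
+ * MV_SG1_SG2 that multiplies the q page data by scf[0] back into
+ * the real destination.
+ */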
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+
+ /* use WXOR, each descriptor occupies one slot */
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
+ if (sw_desc) {
+ struct ppc440spe_adma_chan *chan;
+ struct ppc440spe_adma_desc_slot *iter;
+ struct dma_cdb *hw_desc;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+ set_bits(op, &sw_desc->flags);
+ sw_desc->src_cnt = src_cnt;
+ sw_desc->dst_cnt = dst_cnt;
+ /* First descriptor: zero the data in the destination and copy
+ * it to the q page using a MULTICAST transfer.
+ */
+ iter = list_first_entry(&sw_desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ /* set 'next' pointer */
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MULTICAST;
+
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, dst[0], 0);
+ ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
+ src[0]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+
+ /*
+ * Second descriptor: multiply the data from the q page
+ * and store the result in the real destination.
+ */
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = NULL;
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ ppc440spe_desc_set_src_addr(iter, chan, 0,
+ DMA_CUED_XOR_HB, dst[1]);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, dst[0], 0);
+
+ ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
+ DMA_CDB_SG_DST1, scf[0]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc;
+}
+
+/**
+ * ppc440spe_dma01_prep_sum_product -
+ * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where destination is also
+ * the source.
+ */
+static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
+ struct ppc440spe_adma_chan *ppc440spe_chan,
+ dma_addr_t *dst, dma_addr_t *src, int src_cnt,
+ const unsigned char *scf, size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL;
+ unsigned long op = 0;
+ int slot_cnt;
+
+ set_bit(PPC440SPE_DESC_WXOR, &op);
+ slot_cnt = 3;
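+ /* Note: three CDBs are chained below - a MULTICAST of src[1] into the
+ * destination and the channel's q page, an MV_SG1_SG2 multiply of the
+ * q page by scf[1] into the destination, and an MV_SG1_SG2 multiply of
+ * src[0] by scf[0] xor-ed into the destination.
+ */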
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+
+ /* WXOR, each descriptor occupies one slot */
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
+ if (sw_desc) {
+ struct ppc440spe_adma_chan *chan;
+ struct ppc440spe_adma_desc_slot *iter;
+ struct dma_cdb *hw_desc;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+ set_bits(op, &sw_desc->flags);
+ sw_desc->src_cnt = src_cnt;
+ sw_desc->dst_cnt = 1;
+ /* 1st descriptor, src[1] data to q page and zero destination */
+ iter = list_first_entry(&sw_desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MULTICAST;
+
+ ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
+ *dst, 0);
+ ppc440spe_desc_set_dest_addr(iter, chan, 0,
+ ppc440spe_chan->qdest, 1);
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
+ src[1]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+
+ /* 2nd descriptor, multiply src[1] data and store the
+ * result in destination */
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ /* set 'next' pointer */
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
+ ppc440spe_chan->qdest);
+ ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
+ *dst, 0);
+ ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
+ DMA_CDB_SG_DST1, scf[1]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+
+ /*
+ * 3rd descriptor, multiply src[0] data and xor it
+ * with destination
+ */
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = NULL;
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
+ src[0]);
+ ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
+ *dst, 0);
+ ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
+ DMA_CDB_SG_DST1, scf[0]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc;
+}
+
+static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
+ struct ppc440spe_adma_chan *ppc440spe_chan,
+ dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
+ const unsigned char *scf, size_t len, unsigned long flags)
+{
+ int slot_cnt;
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
+ unsigned long op = 0;
+ unsigned char mult = 1;
+
+ pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
+ __func__, dst_cnt, src_cnt, len);
+ /* select WXOR/RXOR operation depending on the
+ * source addresses of the operands and the number
+ * of destinations (RXOR supports only Q-parity calculations)
+ */
+ set_bit(PPC440SPE_DESC_WXOR, &op);
+ if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
+ /* no active RXOR;
+ * do RXOR if:
+ * - there is more than one source,
+ * - len is aligned on a 512-byte boundary,
+ * - the source addresses fit into one of the 4 possible regions.
+ */
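+ /* Illustrative example (hypothetical addresses): with len = 0x1000
+ * and src[] = { 0x10000, 0x11000, 0x12000 }, src[0] + len == src[1]
+ * and src[1] + len == src[2], so the RXOR R1-2-3 region is selected
+ * below.
+ */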
+ if (src_cnt > 1 &&
+ !(len & MQ0_CF2H_RXOR_BS_MASK) &&
+ (src[0] + len) == src[1]) {
+ /* may do RXOR R1 R2 */
+ set_bit(PPC440SPE_DESC_RXOR, &op);
+ if (src_cnt != 2) {
+ /* may try to extend the RXOR region */
+ if ((src[1] + len) == src[2]) {
+ /* do RXOR R1 R2 R3 */
+ set_bit(PPC440SPE_DESC_RXOR123,
+ &op);
+ } else if ((src[1] + len * 2) == src[2]) {
+ /* do RXOR R1 R2 R4 */
+ set_bit(PPC440SPE_DESC_RXOR124, &op);
+ } else if ((src[1] + len * 3) == src[2]) {
+ /* do RXOR R1 R2 R5 */
+ set_bit(PPC440SPE_DESC_RXOR125,
+ &op);
+ } else {
+ /* do RXOR R1 R2 */
+ set_bit(PPC440SPE_DESC_RXOR12,
+ &op);
+ }
+ } else {
+ /* do RXOR R1 R2 */
+ set_bit(PPC440SPE_DESC_RXOR12, &op);
+ }
+ }
+
+ if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
+ /* cannot do this operation with RXOR */
+ clear_bit(PPC440SPE_RXOR_RUN,
+ &ppc440spe_rxor_state);
+ } else {
+ /* can do; set block size right now */
+ ppc440spe_desc_set_rxor_block_size(len);
+ }
+ }
+
+ /* Number of necessary slots depends on operation type selected */
+ if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
+ /* This is a WXOR only chain. Need descriptors for each
+ * source to GF-XOR them with WXOR, and need descriptors
+ * for each destination to zero them with WXOR
+ */
+ slot_cnt = src_cnt;
+
+ if (flags & DMA_PREP_ZERO_P) {
+ slot_cnt++;
+ set_bit(PPC440SPE_ZERO_P, &op);
+ }
+ if (flags & DMA_PREP_ZERO_Q) {
+ slot_cnt++;
+ set_bit(PPC440SPE_ZERO_Q, &op);
+ }
+ } else {
+ /* Need 1 or 2 descriptors for the RXOR operation, and
+ * (src_cnt - (2 or 3)) descriptors for WXOR of the
+ * remaining sources (if any)
+ */
+ slot_cnt = dst_cnt;
+
+ if (flags & DMA_PREP_ZERO_P)
+ set_bit(PPC440SPE_ZERO_P, &op);
+ if (flags & DMA_PREP_ZERO_Q)
+ set_bit(PPC440SPE_ZERO_Q, &op);
+
+ if (test_bit(PPC440SPE_DESC_RXOR12, &op))
+ slot_cnt += src_cnt - 2;
+ else
+ slot_cnt += src_cnt - 3;
+
+ /* Thus we have either an RXOR-only chain or
+ * a mixed RXOR/WXOR chain
+ */
+ if (slot_cnt == dst_cnt)
+ /* RXOR only chain */
+ clear_bit(PPC440SPE_DESC_WXOR, &op);
+ }
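+ /* Example of the slot counting above: a WXOR-only chain with
+ * src_cnt = 4 and both DMA_PREP_ZERO_P and DMA_PREP_ZERO_Q set needs
+ * 4 + 2 = 6 slots; an RXOR12 chain with the same sources and two
+ * destinations needs 2 + (4 - 2) = 4 slots.
+ */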
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ /* for both RXOR/WXOR each descriptor occupies one slot */
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
+ if (sw_desc) {
+ ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
+ flags, op);
+
+ /* setup dst/src/mult */
+ pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
+ __func__, dst[0], dst[1]);
+ ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
+ while (src_cnt--) {
+ ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
+ src_cnt);
+
+ /* NOTE: "Multi = 0 is equivalent to = 1", as
+ * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf,
+ * doesn't work for RXOR with DMA0/1! Instead, multi=0
+ * leads to zeroing the source data after RXOR.
+ * So, for the P case, set mult=1 explicitly.
+ */
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ mult = scf[src_cnt];
+ ppc440spe_adma_pq_set_src_mult(sw_desc,
+ mult, src_cnt, dst_cnt - 1);
+ }
+
+ /* Set up the byte count for each slot just allocated */
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list,
+ chain_node) {
+ ppc440spe_desc_set_byte_count(iter,
+ ppc440spe_chan, len);
+ iter->unmap_len = len;
+ }
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc;
+}
+
+static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
+ struct ppc440spe_adma_chan *ppc440spe_chan,
+ dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
+ const unsigned char *scf, size_t len, unsigned long flags)
+{
+ int slot_cnt, descs_per_op;
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
+ unsigned long op = 0;
+ unsigned char mult = 1;
+
+ BUG_ON(!dst_cnt);
+ /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
+ __func__, dst_cnt, src_cnt, len);*/
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
+ if (descs_per_op < 0) {
+ spin_unlock_bh(&ppc440spe_chan->lock);
+ return NULL;
+ }
+
+ /* depending on the number of destinations we have 1 or 2 RXOR chains */
+ slot_cnt = descs_per_op * dst_cnt;
+
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
+ if (sw_desc) {
+ op = slot_cnt;
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
+ --op ? 0 : flags);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
+ len);
+ iter->unmap_len = len;
+
+ ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
+ iter->rxor_cursor.len = len;
+ iter->descs_per_op = descs_per_op;
+ }
+ op = 0;
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ op++;
+ if (op % descs_per_op == 0)
+ ppc440spe_adma_init_dma2rxor_slot(iter, src,
+ src_cnt);
+ if (likely(!list_is_last(&iter->chain_node,
+ &sw_desc->group_list))) {
+ /* set 'next' pointer */
+ iter->hw_next =
+ list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ ppc440spe_xor_set_link(iter, iter->hw_next);
+ } else {
+ /* this is the last descriptor. */
+ iter->hw_next = NULL;
+ }
+ }
+
+ /* fixup head descriptor */
+ sw_desc->dst_cnt = dst_cnt;
+ if (flags & DMA_PREP_ZERO_P)
+ set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
+ if (flags & DMA_PREP_ZERO_Q)
+ set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
+
+ /* setup dst/src/mult */
+ ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
+
+ while (src_cnt--) {
+ /* handle descriptors (if dst_cnt == 2) inside
+ * the ppc440spe_adma_pq_set_srcxxx() functions
+ */
+ ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
+ src_cnt);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ mult = scf[src_cnt];
+ ppc440spe_adma_pq_set_src_mult(sw_desc,
+ mult, src_cnt, dst_cnt - 1);
+ }
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+ ppc440spe_desc_set_rxor_block_size(len);
+ return sw_desc;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
+ struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL;
+ int dst_cnt = 0;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
+ dst, src, src_cnt));
+ BUG_ON(!len);
+ BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(!src_cnt);
+
+ if (src_cnt == 1 && dst[1] == src[0]) {
+ dma_addr_t dest[2];
+
+ /* dst[1] is real destination (Q) */
+ dest[0] = dst[1];
+ /* this is the page to multicast source data to */
+ dest[1] = ppc440spe_chan->qdest;
+ sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
+ dest, 2, src, src_cnt, scf, len, flags);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+ }
+
+ if (src_cnt == 2 && dst[1] == src[1]) {
+ sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
+ &dst[1], src, 2, scf, len, flags);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+ }
+
+ if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
+ BUG_ON(!dst[0]);
+ dst_cnt++;
+ flags |= DMA_PREP_ZERO_P;
+ }
+
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
+ BUG_ON(!dst[1]);
+ dst_cnt++;
+ flags |= DMA_PREP_ZERO_Q;
+ }
+
+ BUG_ON(!dst_cnt);
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
+ ppc440spe_chan->device->id, __func__, src_cnt, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ switch (ppc440spe_chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
+ dst, dst_cnt, src, src_cnt, scf,
+ len, flags);
+ break;
+
+ case PPC440SPE_XOR_ID:
+ sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
+ dst, dst_cnt, src, src_cnt, scf,
+ len, flags);
+ break;
+ }
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
+ * a PQ_ZERO_SUM operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
+ struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *iter;
+ dma_addr_t pdest, qdest;
+ int slot_cnt, slots_per_op, idst, dst_cnt;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ pdest = 0;
+ else
+ pdest = pq[0];
+
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ qdest = 0;
+ else
+ qdest = pq[1];
+
+ ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
+ src, src_cnt, scf));
+
+ /* Always use WXOR for P/Q calculations (two destinations).
+ * Need 1 or 2 extra slots to verify results are zero.
+ */
+ idst = dst_cnt = (pdest && qdest) ? 2 : 1;
+
+ /* One additional slot per destination to clone P/Q
+ * before calculation (we have to preserve destinations).
+ */
+ slot_cnt = src_cnt + dst_cnt * 2;
+ slots_per_op = 1;
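+ /* Example per the comments above: with both P and Q present
+ * (dst_cnt = 2) and src_cnt = 3, slot_cnt = 3 + 2 * 2 = 7, i.e.
+ * three WXOR slots, two clone slots and two DCHECK slots.
+ */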
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
+
+ /* Setup byte count for each slot just allocated */
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
+ len);
+ iter->unmap_len = len;
+ }
+
+ if (pdest) {
+ struct dma_cdb *hw_desc;
+ struct ppc440spe_adma_chan *chan;
+
+ iter = sw_desc->group_head;
+ chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ iter->src_cnt = 0;
+ iter->dst_cnt = 0;
+ ppc440spe_desc_set_dest_addr(iter, chan, 0,
+ ppc440spe_chan->pdest, 0);
+ ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
+ len);
+ iter->unmap_len = 0;
+ /* override pdest to preserve original P */
+ pdest = ppc440spe_chan->pdest;
+ }
+ if (qdest) {
+ struct dma_cdb *hw_desc;
+ struct ppc440spe_adma_chan *chan;
+
+ iter = list_first_entry(&sw_desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
+
+ if (pdest) {
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ iter->src_cnt = 0;
+ iter->dst_cnt = 0;
+ ppc440spe_desc_set_dest_addr(iter, chan, 0,
+ ppc440spe_chan->qdest, 0);
+ ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
+ len);
+ iter->unmap_len = 0;
+ /* override qdest to preserve original Q */
+ qdest = ppc440spe_chan->qdest;
+ }
+
+ /* Setup destinations for P/Q ops */
+ ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
+
+ /* Setup zero QWORDs into DCHECK CDBs */
+ idst = dst_cnt;
+ list_for_each_entry_reverse(iter, &sw_desc->group_list,
+ chain_node) {
+ /*
+ * The last CDB corresponds to the Q-parity check,
+ * the one before the last corresponds to the
+ * P-parity check
+ */
+ if (idst == DMA_DEST_MAX_NUM) {
+ if (idst == dst_cnt) {
+ set_bit(PPC440SPE_DESC_QCHECK,
+ &iter->flags);
+ } else {
+ set_bit(PPC440SPE_DESC_PCHECK,
+ &iter->flags);
+ }
+ } else {
+ if (qdest) {
+ set_bit(PPC440SPE_DESC_QCHECK,
+ &iter->flags);
+ } else {
+ set_bit(PPC440SPE_DESC_PCHECK,
+ &iter->flags);
+ }
+ }
+ iter->xor_check_result = pqres;
+
+ /*
+ * set it to zero; if the check fails, the result
+ * will be updated
+ */
+ *iter->xor_check_result = 0;
+ ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
+ ppc440spe_qword);
+
+ if (!(--dst_cnt))
+ break;
+ }
+
+ /* Setup sources and mults for P/Q ops */
+ list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
+ chain_node) {
+ struct ppc440spe_adma_chan *chan;
+ u32 mult_dst;
+
+ chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
+ ppc440spe_desc_set_src_addr(iter, chan, 0,
+ DMA_CUED_XOR_HB,
+ src[src_cnt - 1]);
+ if (qdest) {
+ mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
+ DMA_CDB_SG_DST1;
+ ppc440spe_desc_set_src_mult(iter, chan,
+ DMA_CUED_MULT1_OFF,
+ mult_dst,
+ scf[src_cnt - 1]);
+ }
+ if (!(--src_cnt))
+ break;
+ }
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
+ * XOR ZERO_SUM operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
+ struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, enum sum_check_flags *result, unsigned long flags)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t pq[2];
+
+ /* validate P, disable Q */
+ pq[0] = src[0];
+ pq[1] = 0;
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+
+ tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
+ src_cnt - 1, 0, len,
+ result, flags);
+ return tx;
+}
+
+/**
+ * ppc440spe_adma_set_dest - set destination address into descriptor
+ */
+static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t addr, int index)
+{
+ struct ppc440spe_adma_chan *chan;
+
+ BUG_ON(index >= sw_desc->dst_cnt);
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* TODO: support transfer lengths >
+ * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
+ */
+ ppc440spe_desc_set_dest_addr(sw_desc->group_head,
+ chan, 0, addr, index);
+ break;
+ case PPC440SPE_XOR_ID:
+ sw_desc = ppc440spe_get_group_entry(sw_desc, index);
+ ppc440spe_desc_set_dest_addr(sw_desc,
+ chan, 0, addr, index);
+ break;
+ }
+}
+
+static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
+ struct ppc440spe_adma_chan *chan, dma_addr_t addr)
+{
+ /* To clear the destination, update the descriptor
+ * (P or Q depending on the index) as follows:
+ * addr is the destination (0 corresponds to SG2):
+ */
+ ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
+
+ /* ... and the addr is source: */
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
+
+ /* addr always goes to SG2, so the mult is always DST1 */
+ ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
+ DMA_CDB_SG_DST1, 1);
+}
+
+/**
+ * ppc440spe_adma_pq_set_dest - set destination address into descriptor
+ * for the PQXOR operation
+ */
+static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t *addrs, unsigned long flags)
+{
+ struct ppc440spe_adma_desc_slot *iter;
+ struct ppc440spe_adma_chan *chan;
+ dma_addr_t paddr, qaddr;
+ dma_addr_t addr = 0, ppath, qpath;
+ int index = 0, i;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ paddr = 0;
+ else
+ paddr = addrs[0];
+
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ qaddr = 0;
+ else
+ qaddr = addrs[1];
+
+ if (!paddr || !qaddr)
+ addr = paddr ? paddr : qaddr;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* walk through the WXOR source list and set P/Q-destinations
+ * for each slot:
+ */
+ if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
+ /* This is a WXOR-only chain; it may have 1 or 2 zeroing descs */
+ if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
+ index++;
+ if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
+ index++;
+
+ iter = ppc440spe_get_group_entry(sw_desc, index);
+ if (addr) {
+ /* one destination */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list, chain_node)
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, addr, 0);
+ } else {
+ /* two destinations */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list, chain_node) {
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, paddr, 0);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, qaddr, 1);
+ }
+ }
+
+ if (index) {
+ /* To clear the destinations, update the descriptor
+ * (1st, 2nd, or both depending on the flags)
+ */
+ index = 0;
+ if (test_bit(PPC440SPE_ZERO_P,
+ &sw_desc->flags)) {
+ iter = ppc440spe_get_group_entry(
+ sw_desc, index++);
+ ppc440spe_adma_pq_zero_op(iter, chan,
+ paddr);
+ }
+
+ if (test_bit(PPC440SPE_ZERO_Q,
+ &sw_desc->flags)) {
+ iter = ppc440spe_get_group_entry(
+ sw_desc, index++);
+ ppc440spe_adma_pq_zero_op(iter, chan,
+ qaddr);
+ }
+
+ return;
+ }
+ } else {
+ /* This is an RXOR-only or a mixed RXOR/WXOR chain */
+
+ /* If we want to include destination into calculations,
+ * then make dest addresses cued with mult=1 (XOR).
+ */
+ ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+ qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ /* Setup destination(s) in RXOR slot(s) */
+ iter = ppc440spe_get_group_entry(sw_desc, index++);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ paddr ? ppath : qpath,
+ paddr ? paddr : qaddr, 0);
+ if (!addr) {
+ /* two destinations */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index++);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ qpath, qaddr, 0);
+ }
+
+ if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
+ /* Setup destination(s) in remaining WXOR
+ * slots
+ */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index);
+ if (addr) {
+ /* one destination */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list,
+ chain_node)
+ ppc440spe_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ addr, 0);
+
+ } else {
+ /* two destinations */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list,
+ chain_node) {
+ ppc440spe_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ paddr, 0);
+ ppc440spe_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ qaddr, 1);
+ }
+ }
+ }
+
+ }
+ break;
+
+ case PPC440SPE_XOR_ID:
+ /* DMA2 descriptors have only 1 destination, so there are
+ * two chains - one for each dest.
+ * If we want to include destination into calculations,
+ * then make dest addresses cued with mult=1 (XOR).
+ */
+ ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ iter = ppc440spe_get_group_entry(sw_desc, 0);
+ for (i = 0; i < sw_desc->descs_per_op; i++) {
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ paddr ? ppath : qpath,
+ paddr ? paddr : qaddr, 0);
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
+ if (!addr) {
+ /* Two destinations; setup Q here */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ sw_desc->descs_per_op);
+ for (i = 0; i < sw_desc->descs_per_op; i++) {
+ ppc440spe_desc_set_dest_addr(iter,
+ chan, qpath, qaddr, 0);
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+ }
+
+ break;
+ }
+}
+
+/**
+ * ppc440spe_adma_pqzero_sum_set_dest - set destination address into descriptor
+ * for the PQ_ZERO_SUM operation
+ */
+static void ppc440spe_adma_pqzero_sum_set_dest(
+ struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t paddr, dma_addr_t qaddr)
+{
+ struct ppc440spe_adma_desc_slot *iter, *end;
+ struct ppc440spe_adma_chan *chan;
+ dma_addr_t addr = 0;
+ int idx;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ /* walk through the WXOR source list and set P/Q-destinations
+ * for each slot
+ */
+ idx = (paddr && qaddr) ? 2 : 1;
+ /* set end */
+ list_for_each_entry_reverse(end, &sw_desc->group_list,
+ chain_node) {
+ if (!(--idx))
+ break;
+ }
+ /* set start */
+ idx = (paddr && qaddr) ? 2 : 1;
+ iter = ppc440spe_get_group_entry(sw_desc, idx);
+
+ if (paddr && qaddr) {
+ /* two destinations */
+ list_for_each_entry_from(iter, &sw_desc->group_list,
+ chain_node) {
+ if (unlikely(iter == end))
+ break;
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, paddr, 0);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, qaddr, 1);
+ }
+ } else {
+ /* one destination */
+ addr = paddr ? paddr : qaddr;
+ list_for_each_entry_from(iter, &sw_desc->group_list,
+ chain_node) {
+ if (unlikely(iter == end))
+ break;
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, addr, 0);
+ }
+ }
+
+ /* The remaining descriptors are DATACHECK ones. They do not need a
+ * destination; the destination addresses are instead used as
+ * sources for the check operation. So, set addr as a source.
+ */
+ ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
+
+ if (!addr) {
+ end = list_entry(end->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
+ }
+}
+
+/**
+ * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
+ */
+static inline void ppc440spe_desc_set_xor_src_cnt(
+ struct ppc440spe_adma_desc_slot *desc,
+ int src_cnt)
+{
+ struct xor_cb *hw_desc = desc->hw_desc;
+
+ hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
+ hw_desc->cbc |= src_cnt;
+}
+
+/**
+ * ppc440spe_adma_pq_set_src - set source address into descriptor
+ */
+static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t addr, int index)
+{
+ struct ppc440spe_adma_chan *chan;
+ dma_addr_t haddr = 0;
+ struct ppc440spe_adma_desc_slot *iter = NULL;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
+ */
+ if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
+ /* RXOR-only or RXOR/WXOR operation */
+ int iskip = test_bit(PPC440SPE_DESC_RXOR12,
+ &sw_desc->flags) ? 2 : 3;
+
+ if (index == 0) {
+ /* 1st slot (RXOR) */
+ /* setup sources region (R1-2-3, R1-2-4,
+ * or R1-2-5)
+ */
+ if (test_bit(PPC440SPE_DESC_RXOR12,
+ &sw_desc->flags))
+ haddr = DMA_RXOR12 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC440SPE_DESC_RXOR123,
+ &sw_desc->flags))
+ haddr = DMA_RXOR123 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC440SPE_DESC_RXOR124,
+ &sw_desc->flags))
+ haddr = DMA_RXOR124 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC440SPE_DESC_RXOR125,
+ &sw_desc->flags))
+ haddr = DMA_RXOR125 <<
+ DMA_CUED_REGION_OFF;
+ else
+ BUG();
+ haddr |= DMA_CUED_XOR_BASE;
+ iter = ppc440spe_get_group_entry(sw_desc, 0);
+ } else if (index < iskip) {
+ /* 1st slot (RXOR)
+ * needs its source address set only once (at index 0)
+ * rather than for each of the first <iskip> sources
+ */
+ iter = NULL;
+ } else {
+ /* 2nd/3rd and following slots (WXOR);
+ * skip the first slot holding the RXOR
+ */
+ haddr = DMA_CUED_XOR_HB;
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index - iskip + sw_desc->dst_cnt);
+ }
+ } else {
+ int znum = 0;
+
+ /* WXOR-only operation; skip first slots with
+ * zeroing destinations
+ */
+ if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
+ znum++;
+ if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
+ znum++;
+
+ haddr = DMA_CUED_XOR_HB;
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index + znum);
+ }
+
+ if (likely(iter)) {
+ ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
+
+ if (!index &&
+ test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
+ sw_desc->dst_cnt == 2) {
+ /* if we have two destinations for RXOR, then
+ * setup source in the second descr too
+ */
+ iter = ppc440spe_get_group_entry(sw_desc, 1);
+ ppc440spe_desc_set_src_addr(iter, chan, 0,
+ haddr, addr);
+ }
+ }
+ break;
+
+ case PPC440SPE_XOR_ID:
+ /* DMA2 may do Biskup */
+ iter = sw_desc->group_head;
+ if (iter->dst_cnt == 2) {
+ /* both P & Q calculations required; set P src here */
+ ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
+
+ /* this is for Q */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ sw_desc->descs_per_op);
+ }
+ ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
+ break;
+ }
+}
+
+/**
+ * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
+ */
+static void ppc440spe_adma_memcpy_xor_set_src(
+ struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t addr, int index)
+{
+ struct ppc440spe_adma_chan *chan;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+ sw_desc = sw_desc->group_head;
+
+ if (likely(sw_desc))
+ ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
+}
+
+/**
+ * ppc440spe_adma_dma2rxor_inc_addr - account one more operand address in the
+ * RXOR cursor; set the CDB source count when the CDB is full or the last
+ * source has been processed
+ */
+static void ppc440spe_adma_dma2rxor_inc_addr(
+ struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_rxor *cursor, int index, int src_cnt)
+{
+ cursor->addr_count++;
+ if (index == src_cnt - 1) {
+ ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
+ } else if (cursor->addr_count == XOR_MAX_OPS) {
+ ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
+ cursor->addr_count = 0;
+ cursor->desc_count++;
+ }
+}
+
+/**
+ * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
+ */
+static int ppc440spe_adma_dma2rxor_prep_src(
+ struct ppc440spe_adma_desc_slot *hdesc,
+ struct ppc440spe_rxor *cursor, int index,
+ int src_cnt, u32 addr)
+{
+ int rval = 0;
+ u32 sign;
+ struct ppc440spe_adma_desc_slot *desc = hdesc;
+ int i;
+
+ for (i = 0; i < cursor->desc_count; i++) {
+ desc = list_entry(hdesc->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
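+ /* A rough sketch of the cursor state machine handled below:
+ * state 0 - start a new region; the new address must be +/- len away
+ * from cursor->addrl (direct or reverse RXOR), else we BUG();
+ * state 1 - second operand seen; widen the region to R1-2-3/R1-2-4/
+ * R1-2-5 or close it as R1-2 and start over;
+ * state 2 - region closed; record the first address of the next one.
+ */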
+ switch (cursor->state) {
+ case 0:
+ if (addr == cursor->addrl + cursor->len) {
+ /* direct RXOR */
+ cursor->state = 1;
+ cursor->xor_count++;
+ if (index == src_cnt-1) {
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR12 << DMA_CUED_REGION_OFF);
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else if (cursor->addrl == addr + cursor->len) {
+ /* reverse RXOR */
+ cursor->state = 1;
+ cursor->xor_count++;
+ set_bit(cursor->addr_count, &desc->reverse_flags[0]);
+ if (index == src_cnt-1) {
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR12 << DMA_CUED_REGION_OFF);
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else {
+ printk(KERN_ERR "Cannot build "
+ "DMA2 RXOR command block.\n");
+ BUG();
+ }
+ break;
+ case 1:
+ sign = test_bit(cursor->addr_count,
+ desc->reverse_flags)
+ ? -1 : 1;
+ if (index == src_cnt-2 || (sign == -1
+ && addr != cursor->addrl - 2*cursor->len)) {
+ cursor->state = 0;
+ cursor->xor_count = 1;
+ cursor->addrl = addr;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR12 << DMA_CUED_REGION_OFF);
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ } else if (addr == cursor->addrl + 2*sign*cursor->len) {
+ cursor->state = 2;
+ cursor->xor_count = 0;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR123 << DMA_CUED_REGION_OFF);
+ if (index == src_cnt-1) {
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else if (addr == cursor->addrl + 3*cursor->len) {
+ cursor->state = 2;
+ cursor->xor_count = 0;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR124 << DMA_CUED_REGION_OFF);
+ if (index == src_cnt-1) {
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else if (addr == cursor->addrl + 4*cursor->len) {
+ cursor->state = 2;
+ cursor->xor_count = 0;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR125 << DMA_CUED_REGION_OFF);
+ if (index == src_cnt-1) {
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else {
+ cursor->state = 0;
+ cursor->xor_count = 1;
+ cursor->addrl = addr;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR12 << DMA_CUED_REGION_OFF);
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ break;
+ case 2:
+ cursor->state = 0;
+ cursor->addrl = addr;
+ cursor->xor_count++;
+ if (index) {
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ break;
+ }
+
+ return rval;
+}
+
+/**
+ * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that
+ * ppc440spe_adma_dma2rxor_prep_src() has already been called before this
+ */
+static void ppc440spe_adma_dma2rxor_set_src(
+ struct ppc440spe_adma_desc_slot *desc,
+ int index, dma_addr_t addr)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+ int k = 0, op = 0, lop = 0;
+
+ /* get the RXOR operand which corresponds to index addr */
+ while (op <= index) {
+ lop = op;
+ if (k == XOR_MAX_OPS) {
+ k = 0;
+ desc = list_entry(desc->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ xcb = desc->hw_desc;
+
+ }
+ if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
+ (DMA_RXOR12 << DMA_CUED_REGION_OFF))
+ op += 2;
+ else
+ op += 3;
+ }
+
+ BUG_ON(k < 1);
+
+ if (test_bit(k-1, desc->reverse_flags)) {
+ /* reverse operand order; put last op in RXOR group */
+ if (index == op - 1)
+ ppc440spe_rxor_set_src(desc, k - 1, addr);
+ } else {
+ /* direct operand order; put first op in RXOR group */
+ if (index == lop)
+ ppc440spe_rxor_set_src(desc, k - 1, addr);
+ }
+}
+
+/**
+ * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that
+ * ppc440spe_adma_dma2rxor_prep_src() has already been called before this
+ */
+static void ppc440spe_adma_dma2rxor_set_mult(
+ struct ppc440spe_adma_desc_slot *desc,
+ int index, u8 mult)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+ int k = 0, op = 0, lop = 0;
+
+ /* get the RXOR operand which corresponds to index mult */
+ while (op <= index) {
+ lop = op;
+ if (k == XOR_MAX_OPS) {
+ k = 0;
+ desc = list_entry(desc->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ xcb = desc->hw_desc;
+
+ }
+ if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
+ (DMA_RXOR12 << DMA_CUED_REGION_OFF))
+ op += 2;
+ else
+ op += 3;
+ }
+
+ BUG_ON(k < 1);
+ if (test_bit(k-1, desc->reverse_flags)) {
+ /* reverse order */
+ ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
+ } else {
+ /* direct order */
+ ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
+ }
+}
+
+/**
+ * ppc440spe_init_rxor_cursor - initialize the cursor used to parse source
+ * addresses into RXOR regions
+ */
+static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
+{
+ memset(cursor, 0, sizeof(struct ppc440spe_rxor));
+ cursor->state = 2;
+}
+
+/**
+ * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
+ * descriptor for the PQXOR operation
+ */
+static void ppc440spe_adma_pq_set_src_mult(
+ struct ppc440spe_adma_desc_slot *sw_desc,
+ unsigned char mult, int index, int dst_pos)
+{
+ struct ppc440spe_adma_chan *chan;
+ u32 mult_idx, mult_dst;
+ struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
+ int region = test_bit(PPC440SPE_DESC_RXOR12,
+ &sw_desc->flags) ? 2 : 3;
+
+ if (index < region) {
+ /* RXOR multipliers */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ sw_desc->dst_cnt - 1);
+ if (sw_desc->dst_cnt == 2)
+ iter1 = ppc440spe_get_group_entry(
+ sw_desc, 0);
+
+ mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
+ mult_dst = DMA_CDB_SG_SRC;
+ } else {
+ /* WXOR multiplier */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index - region +
+ sw_desc->dst_cnt);
+ mult_idx = DMA_CUED_MULT1_OFF;
+ mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
+ DMA_CDB_SG_DST1;
+ }
+ } else {
+ int znum = 0;
+
+ /* WXOR-only;
+ * skip the first slots with destinations (if destination
+ * zeroing takes place)
+ */
+ if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
+ znum++;
+ if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
+ znum++;
+
+ iter = ppc440spe_get_group_entry(sw_desc, index + znum);
+ mult_idx = DMA_CUED_MULT1_OFF;
+ mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
+ }
+
+ if (likely(iter)) {
+ ppc440spe_desc_set_src_mult(iter, chan,
+ mult_idx, mult_dst, mult);
+
+ if (unlikely(iter1)) {
+ /* if we have two destinations for RXOR, then
+ * we've just set Q mult. Set-up P now.
+ */
+ ppc440spe_desc_set_src_mult(iter1, chan,
+ mult_idx, mult_dst, 1);
+ }
+
+ }
+ break;
+
+ case PPC440SPE_XOR_ID:
+ iter = sw_desc->group_head;
+ if (sw_desc->dst_cnt == 2) {
+ /* both P & Q calculations required; set P mult here */
+ ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
+
+ /* and then set Q mult */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ sw_desc->descs_per_op);
+ }
+ ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
+ break;
+ }
+}
+
+/**
+ * ppc440spe_adma_free_chan_resources - free the resources allocated
+ */
+static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *iter, *_iter;
+ int in_use_descs = 0;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ ppc440spe_adma_slot_cleanup(ppc440spe_chan);
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
+ chain_node) {
+ in_use_descs++;
+ list_del(&iter->chain_node);
+ }
+ list_for_each_entry_safe_reverse(iter, _iter,
+ &ppc440spe_chan->all_slots, slot_node) {
+ list_del(&iter->slot_node);
+ kfree(iter);
+ ppc440spe_chan->slots_allocated--;
+ }
+ ppc440spe_chan->last_used = NULL;
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d %s slots_allocated %d\n",
+ ppc440spe_chan->device->id,
+ __func__, ppc440spe_chan->slots_allocated);
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ /* one is ok since we left it on there on purpose */
+ if (in_use_descs > 1)
+ printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
+ in_use_descs - 1);
+}
+
+/**
+ * ppc440spe_adma_is_complete - poll the status of an ADMA transaction
+ * @chan: ADMA channel handle
+ * @cookie: ADMA transaction identifier
+ */
+static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ enum dma_status ret;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ last_used = chan->cookie;
+ last_complete = ppc440spe_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret == DMA_SUCCESS)
+ return ret;
+
+ ppc440spe_adma_slot_cleanup(ppc440spe_chan);
+
+ last_used = chan->cookie;
+ last_complete = ppc440spe_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/**
+ * ppc440spe_adma_eot_handler - end of transfer interrupt handler
+ */
+static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
+{
+ struct ppc440spe_adma_chan *chan = data;
+
+ dev_dbg(chan->device->common.dev,
+ "ppc440spe adma%d: %s\n", chan->device->id, __func__);
+
+ tasklet_schedule(&chan->irq_tasklet);
+ ppc440spe_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ppc440spe_adma_err_handler - DMA error interrupt handler;
+ * do the same things as an EOT handler
+ */
+static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
+{
+ struct ppc440spe_adma_chan *chan = data;
+
+ dev_dbg(chan->device->common.dev,
+ "ppc440spe adma%d: %s\n", chan->device->id, __func__);
+
+ tasklet_schedule(&chan->irq_tasklet);
+ ppc440spe_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ppc440spe_test_callback - called when the test operation has completed
+ */
+static void ppc440spe_test_callback(void *unused)
+{
+ complete(&ppc440spe_r6_test_comp);
+}
+
+/**
+ * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
+ */
+static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
+ __func__, ppc440spe_chan->pending);
+
+ if (ppc440spe_chan->pending) {
+ ppc440spe_chan->pending = 0;
+ ppc440spe_chan_append(ppc440spe_chan);
+ }
+}
+
+/**
+ * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines
+ * use FIFOs (as opposed to the chains used in XOR) so this is an
+ * XOR-specific operation)
+ */
+static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ dma_cookie_t cookie;
+ int slot_cnt, slots_per_op;
+
+ dev_dbg(chan->device->common.dev,
+ "ppc440spe adma%d: %s\n", chan->device->id, __func__);
+
+ spin_lock_bh(&chan->lock);
+ slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
+ sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ async_tx_ack(&sw_desc->async_tx);
+ ppc440spe_desc_init_null_xor(group_start);
+
+ cookie = chan->common.cookie;
+ cookie++;
+ if (cookie <= 1)
+ cookie = 2;
+
+ /* initialize the completed cookie to be less than
+ * the most recently used cookie
+ */
+ chan->completed_cookie = cookie - 1;
+ chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+
+ /* channel should not be busy */
+ BUG_ON(ppc440spe_chan_is_busy(chan));
+
+ /* set the descriptor address */
+ ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
+
+ /* run the descriptor */
+ ppc440spe_chan_run(chan);
+ } else
+ printk(KERN_ERR "ppc440spe adma%d"
+ " failed to allocate null descriptor\n",
+ chan->device->id);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * ppc440spe_test_raid6 - test whether RAID-6 capabilities are enabled successfully.
+ * For this we just perform one WXOR operation with the same source
+ * and destination addresses and a GF-multiplier of 1; so if RAID-6
+ * capabilities are enabled then we'll get src/dst filled with zeros.
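+ * That is, the destination becomes src XOR (1 * src), i.e. zero.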
+ */
+static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc, *iter;
+ struct page *pg;
+ char *a;
+ dma_addr_t dma_addr, addrs[2];
+ unsigned long op = 0;
+ int rval = 0;
+
+ set_bit(PPC440SPE_DESC_WXOR, &op);
+
+ pg = alloc_page(GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ spin_lock_bh(&chan->lock);
+ sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
+ if (sw_desc) {
+ /* 1 src, 1 dst, int_ena, WXOR */
+ ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
+ iter->unmap_len = PAGE_SIZE;
+ }
+ } else {
+ rval = -EFAULT;
+ spin_unlock_bh(&chan->lock);
+ goto exit;
+ }
+ spin_unlock_bh(&chan->lock);
+
+ /* Fill the test page with ones */
+ memset(page_address(pg), 0xFF, PAGE_SIZE);
+ dma_addr = dma_map_page(chan->device->dev, pg, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ /* Setup addresses */
+ ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
+ ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
+ addrs[0] = dma_addr;
+ addrs[1] = 0;
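+ /* only the P destination is used here; Q is disabled for this check */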
+ ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
+
+ async_tx_ack(&sw_desc->async_tx);
+ sw_desc->async_tx.callback = ppc440spe_test_callback;
+ sw_desc->async_tx.callback_param = NULL;
+
+ init_completion(&ppc440spe_r6_test_comp);
+
+ ppc440spe_adma_tx_submit(&sw_desc->async_tx);
+ ppc440spe_adma_issue_pending(&chan->common);
+
+ wait_for_completion(&ppc440spe_r6_test_comp);
+
+ /* Now check if the test page is zeroed */
+ a = page_address(pg);
+ if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
+ /* page is zero - RAID-6 enabled */
+ rval = 0;
+ } else {
+ /* RAID-6 was not enabled */
+ rval = -EINVAL;
+ }
+exit:
+ __free_page(pg);
+ return rval;
+}
+
+static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
+{
+ switch (adev->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
+ dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
+ dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
+ dma_cap_set(DMA_PQ, adev->common.cap_mask);
+ dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
+ dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
+ break;
+ case PPC440SPE_XOR_ID:
+ dma_cap_set(DMA_XOR, adev->common.cap_mask);
+ dma_cap_set(DMA_PQ, adev->common.cap_mask);
+ dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
+ break;
+ }
+
+ /* Set base routines */
+ adev->common.device_alloc_chan_resources =
+ ppc440spe_adma_alloc_chan_resources;
+ adev->common.device_free_chan_resources =
+ ppc440spe_adma_free_chan_resources;
+ adev->common.device_is_tx_complete = ppc440spe_adma_is_complete;
+ adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
+
+ /* Set prep routines based on capability */
+ if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_memcpy =
+ ppc440spe_adma_prep_dma_memcpy;
+ }
+ if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_memset =
+ ppc440spe_adma_prep_dma_memset;
+ }
+ if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
+ adev->common.max_xor = XOR_MAX_OPS;
+ adev->common.device_prep_dma_xor =
+ ppc440spe_adma_prep_dma_xor;
+ }
+ if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
+ switch (adev->id) {
+ case PPC440SPE_DMA0_ID:
+ dma_set_maxpq(&adev->common,
+ DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
+ break;
+ case PPC440SPE_DMA1_ID:
+ dma_set_maxpq(&adev->common,
+ DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
+ break;
+ case PPC440SPE_XOR_ID:
+ adev->common.max_pq = XOR_MAX_OPS * 3;
+ break;
+ }
+ adev->common.device_prep_dma_pq =
+ ppc440spe_adma_prep_dma_pq;
+ }
+ if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
+ switch (adev->id) {
+ case PPC440SPE_DMA0_ID:
+ adev->common.max_pq = DMA0_FIFO_SIZE /
+ sizeof(struct dma_cdb);
+ break;
+ case PPC440SPE_DMA1_ID:
+ adev->common.max_pq = DMA1_FIFO_SIZE /
+ sizeof(struct dma_cdb);
+ break;
+ }
+ adev->common.device_prep_dma_pq_val =
+ ppc440spe_adma_prep_dma_pqzero_sum;
+ }
+ if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
+ switch (adev->id) {
+ case PPC440SPE_DMA0_ID:
+ adev->common.max_xor = DMA0_FIFO_SIZE /
+ sizeof(struct dma_cdb);
+ break;
+ case PPC440SPE_DMA1_ID:
+ adev->common.max_xor = DMA1_FIFO_SIZE /
+ sizeof(struct dma_cdb);
+ break;
+ }
+ adev->common.device_prep_dma_xor_val =
+ ppc440spe_adma_prep_dma_xor_zero_sum;
+ }
+ if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_interrupt =
+ ppc440spe_adma_prep_dma_interrupt;
+ }
+ pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
+ "( %s%s%s%s%s%s%s)\n",
+ dev_name(adev->dev),
+ dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
+ dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
+ dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
+ dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
+ dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
+ dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
+}
+
+static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
+ struct ppc440spe_adma_chan *chan,
+ int *initcode)
+{
+ struct device_node *np;
+ int ret;
+
+ np = container_of(adev->dev, struct of_device, dev)->node;
+ if (adev->id != PPC440SPE_XOR_ID) {
+ adev->err_irq = irq_of_parse_and_map(np, 1);
+ if (adev->err_irq == NO_IRQ) {
+ dev_warn(adev->dev, "no err irq resource?\n");
+ *initcode = PPC_ADMA_INIT_IRQ2;
+ adev->err_irq = -ENXIO;
+ } else
+ atomic_inc(&ppc440spe_adma_err_irq_ref);
+ } else {
+ adev->err_irq = -ENXIO;
+ }
+
+ adev->irq = irq_of_parse_and_map(np, 0);
+ if (adev->irq == NO_IRQ) {
+ dev_err(adev->dev, "no irq resource\n");
+ *initcode = PPC_ADMA_INIT_IRQ1;
+ ret = -ENXIO;
+ goto err_irq_map;
+ }
+ dev_dbg(adev->dev, "irq %d, err irq %d\n",
+ adev->irq, adev->err_irq);
+
+ ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
+ 0, dev_driver_string(adev->dev), chan);
+ if (ret) {
+ dev_err(adev->dev, "can't request irq %d\n",
+ adev->irq);
+ *initcode = PPC_ADMA_INIT_IRQ1;
+ ret = -EIO;
+ goto err_req1;
+ }
+
+ /* only DMA engines have a separate error IRQ
+ * so it's OK if err_irq < 0 in the XOR engine case.
+ */
+ if (adev->err_irq > 0) {
+ /* both DMA engines share common error IRQ */
+ ret = request_irq(adev->err_irq,
+ ppc440spe_adma_err_handler,
+ IRQF_SHARED,
+ dev_driver_string(adev->dev),
+ chan);
+ if (ret) {
+ dev_err(adev->dev, "can't request irq %d\n",
+ adev->err_irq);
+ *initcode = PPC_ADMA_INIT_IRQ2;
+ ret = -EIO;
+ goto err_req2;
+ }
+ }
+
+ if (adev->id == PPC440SPE_XOR_ID) {
+ /* enable XOR engine interrupts */
+ iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
+ XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
+ &adev->xor_reg->ier);
+ } else {
+ u32 mask, enable;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
+ if (!np) {
+ pr_err("%s: can't find I2O device tree node\n",
+ __func__);
+ ret = -ENODEV;
+ goto err_req2;
+ }
+ adev->i2o_reg = of_iomap(np, 0);
+ if (!adev->i2o_reg) {
+ pr_err("%s: failed to map I2O registers\n", __func__);
+ of_node_put(np);
+ ret = -EINVAL;
+ goto err_req2;
+ }
+ of_node_put(np);
+ /* Unmask 'CS FIFO Attention' interrupts and
+ * enable generating interrupts on errors
+ */
+ enable = (adev->id == PPC440SPE_DMA0_ID) ?
+ ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
+ ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
+ mask = ioread32(&adev->i2o_reg->iopim) & enable;
+ iowrite32(mask, &adev->i2o_reg->iopim);
+ }
+ return 0;
+
+err_req2:
+ free_irq(adev->irq, chan);
+err_req1:
+ irq_dispose_mapping(adev->irq);
+err_irq_map:
+ if (adev->err_irq > 0) {
+ if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
+ irq_dispose_mapping(adev->err_irq);
+ }
+ return ret;
+}
+
+static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
+ struct ppc440spe_adma_chan *chan)
+{
+ u32 mask, disable;
+
+ if (adev->id == PPC440SPE_XOR_ID) {
+ /* disable XOR engine interrupts */
+ mask = ioread32be(&adev->xor_reg->ier);
+ mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
+ XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
+ iowrite32be(mask, &adev->xor_reg->ier);
+ } else {
+ /* disable DMAx engine interrupts */
+ disable = (adev->id == PPC440SPE_DMA0_ID) ?
+ (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
+ (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
+ mask = ioread32(&adev->i2o_reg->iopim) | disable;
+ iowrite32(mask, &adev->i2o_reg->iopim);
+ }
+ free_irq(adev->irq, chan);
+ irq_dispose_mapping(adev->irq);
+ if (adev->err_irq > 0) {
+ free_irq(adev->err_irq, chan);
+ if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
+ irq_dispose_mapping(adev->err_irq);
+ iounmap(adev->i2o_reg);
+ }
+ }
+}
+
+/**
+ * ppc440spe_adma_probe - probe the asynch device
+ */
+static int __devinit ppc440spe_adma_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct device_node *np = ofdev->node;
+ struct resource res;
+ struct ppc440spe_adma_device *adev;
+ struct ppc440spe_adma_chan *chan;
+ struct ppc_dma_chan_ref *ref, *_ref;
+ int ret = 0, initcode = PPC_ADMA_INIT_OK;
+ const u32 *idx;
+ int len;
+ void *regs;
+ u32 id, pool_size;
+
+ if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
+ id = PPC440SPE_XOR_ID;
+ /* As far as the XOR engine is concerned, it does not
+ * use FIFOs but a linked list, so the pool size to allocate
+ * does not depend on the engine configuration.
+ */
+ pool_size = PAGE_SIZE << 1;
+ } else {
+ /* it is DMA0 or DMA1 */
+ idx = of_get_property(np, "cell-index", &len);
+ if (!idx || (len != sizeof(u32))) {
+ dev_err(&ofdev->dev, "Device node %s has missing "
+ "or invalid cell-index property\n",
+ np->full_name);
+ return -EINVAL;
+ }
+ id = *idx;
+ /* DMA0,1 engines use a FIFO to maintain CDBs, so we
+ * should size the pool according to this FIFO's depth:
+ * the pool must provide as many CDBs as the FIFO can
+ * hold CDB pointers.
+ * That is
+ * CDB size = 32B;
+ * CDBs number = (DMA0_FIFO_SIZE >> 3);
+ * Pool size = CDBs number * CDB size =
+ * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
+ */
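+ /* e.g. DMA0_FIFO_SIZE = 0x1000 gives 512 CDBs and a 16 KB pool */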
+ pool_size = (id == PPC440SPE_DMA0_ID) ?
+ DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
+ pool_size <<= 2;
+ }
+
+ if (of_address_to_resource(np, 0, &res)) {
+ dev_err(&ofdev->dev, "failed to get memory resource\n");
+ initcode = PPC_ADMA_INIT_MEMRES;
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ dev_driver_string(&ofdev->dev))) {
+ dev_err(&ofdev->dev, "failed to request memory region "
+ "(0x%016llx-0x%016llx)\n",
+ (u64)res.start, (u64)res.end);
+ initcode = PPC_ADMA_INIT_MEMREG;
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* create a device */
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev) {
+ dev_err(&ofdev->dev, "failed to allocate device\n");
+ initcode = PPC_ADMA_INIT_ALLOC;
+ ret = -ENOMEM;
+ goto err_adev_alloc;
+ }
+
+ adev->id = id;
+ adev->pool_size = pool_size;
+ /* allocate coherent memory for hardware descriptors */
+ adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
+ adev->pool_size, &adev->dma_desc_pool,
+ GFP_KERNEL);
+ if (adev->dma_desc_pool_virt == NULL) {
+ dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
+ "memory for hardware descriptors\n",
+ adev->pool_size);
+ initcode = PPC_ADMA_INIT_COHERENT;
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+ dev_dbg(&ofdev->dev, "allocted descriptor pool virt 0x%p phys 0x%llx\n",
+ adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);
+
+ regs = ioremap(res.start, resource_size(&res));
+ if (!regs) {
+ dev_err(&ofdev->dev, "failed to ioremap regs!\n");
+ goto err_regs_alloc;
+ }
+
+ if (adev->id == PPC440SPE_XOR_ID) {
+ adev->xor_reg = regs;
+ /* Reset XOR */
+ iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
+ iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
+ } else {
+ size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
+ DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
+ adev->dma_reg = regs;
+ /* DMAx_FIFO_SIZE is defined in bytes,
+ * <fsiz> is defined as a number of CDB pointers (8 bytes each).
+ * DMA FIFO Length = CSlength + CPlength, where
+ * CSlength = CPlength = (fsiz + 1) * 8.
+ */
+ iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
+ &adev->dma_reg->fsiz);
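+ /* e.g. fifo_size = 0x1000 programs <fsiz> = 510, i.e. 511 CDB
+ * pointers in each of the CP and CS FIFOs
+ */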
+ /* Configure DMA engine */
+ iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
+ &adev->dma_reg->cfg);
+ /* Clear Status */
+ iowrite32(~0, &adev->dma_reg->dsts);
+ }
+
+ adev->dev = &ofdev->dev;
+ adev->common.dev = &ofdev->dev;
+ INIT_LIST_HEAD(&adev->common.channels);
+ dev_set_drvdata(&ofdev->dev, adev);
+
+ /* create a channel */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(&ofdev->dev, "can't allocate channel structure\n");
+ initcode = PPC_ADMA_INIT_CHANNEL;
+ ret = -ENOMEM;
+ goto err_chan_alloc;
+ }
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->chain);
+ INIT_LIST_HEAD(&chan->all_slots);
+ chan->device = adev;
+ chan->common.device = &adev->common;
+ list_add_tail(&chan->common.device_node, &adev->common.channels);
+ tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
+ (unsigned long)chan);
+
+ /* allocate and map helper pages for async validation or
+ * async_mult/async_sum_product operations on DMA0/1.
+ */
+ if (adev->id != PPC440SPE_XOR_ID) {
+ chan->pdest_page = alloc_page(GFP_KERNEL);
+ chan->qdest_page = alloc_page(GFP_KERNEL);
+ if (!chan->pdest_page ||
+ !chan->qdest_page) {
+ if (chan->pdest_page)
+ __free_page(chan->pdest_page);
+ if (chan->qdest_page)
+ __free_page(chan->qdest_page);
+ ret = -ENOMEM;
+ goto err_page_alloc;
+ }
+ chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+ if (ref) {
+ ref->chan = &chan->common;
+ INIT_LIST_HEAD(&ref->node);
+ list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
+ } else {
+ dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
+ ret = -ENOMEM;
+ goto err_ref_alloc;
+ }
+
+ ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
+ if (ret)
+ goto err_irq;
+
+ ppc440spe_adma_init_capabilities(adev);
+
+ ret = dma_async_device_register(&adev->common);
+ if (ret) {
+ initcode = PPC_ADMA_INIT_REGISTER;
+ dev_err(&ofdev->dev, "failed to register dma device\n");
+ goto err_dev_reg;
+ }
+
+ goto out;
+
+err_dev_reg:
+ ppc440spe_adma_release_irqs(adev, chan);
+err_irq:
+ list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
+ if (chan == to_ppc440spe_adma_chan(ref->chan)) {
+ list_del(&ref->node);
+ kfree(ref);
+ }
+ }
+err_ref_alloc:
+ if (adev->id != PPC440SPE_XOR_ID) {
+ dma_unmap_page(&ofdev->dev, chan->pdest,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(&ofdev->dev, chan->qdest,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(chan->pdest_page);
+ __free_page(chan->qdest_page);
+ }
+err_page_alloc:
+ kfree(chan);
+err_chan_alloc:
+ if (adev->id == PPC440SPE_XOR_ID)
+ iounmap(adev->xor_reg);
+ else
+ iounmap(adev->dma_reg);
+err_regs_alloc:
+ dma_free_coherent(adev->dev, adev->pool_size,
+ adev->dma_desc_pool_virt,
+ adev->dma_desc_pool);
+err_dma_alloc:
+ kfree(adev);
+err_adev_alloc:
+ release_mem_region(res.start, resource_size(&res));
+out:
+ if (id < PPC440SPE_ADMA_ENGINES_NUM)
+ ppc440spe_adma_devices[id] = initcode;
+
+ return ret;
+}
+
+/**
+ * ppc440spe_adma_remove - remove the asynch device
+ */
+static int __devexit ppc440spe_adma_remove(struct of_device *ofdev)
+{
+ struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
+ struct device_node *np = ofdev->node;
+ struct resource res;
+ struct dma_chan *chan, *_chan;
+ struct ppc_dma_chan_ref *ref, *_ref;
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+ if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
+ ppc440spe_adma_devices[adev->id] = -1;
+
+ dma_async_device_unregister(&adev->common);
+
+ list_for_each_entry_safe(chan, _chan, &adev->common.channels,
+ device_node) {
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
+ tasklet_kill(&ppc440spe_chan->irq_tasklet);
+ if (adev->id != PPC440SPE_XOR_ID) {
+ dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(ppc440spe_chan->pdest_page);
+ __free_page(ppc440spe_chan->qdest_page);
+ }
+ list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
+ node) {
+ if (ppc440spe_chan ==
+ to_ppc440spe_adma_chan(ref->chan)) {
+ list_del(&ref->node);
+ kfree(ref);
+ }
+ }
+ list_del(&chan->device_node);
+ kfree(ppc440spe_chan);
+ }
+
+ dma_free_coherent(adev->dev, adev->pool_size,
+ adev->dma_desc_pool_virt, adev->dma_desc_pool);
+ if (adev->id == PPC440SPE_XOR_ID)
+ iounmap(adev->xor_reg);
+ else
+ iounmap(adev->dma_reg);
+ of_address_to_resource(np, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+ kfree(adev);
+ return 0;
+}
+
+/*
+ * /sys driver interface to enable h/w RAID-6 capabilities
+ * Files created in e.g. /sys/devices/plb.0/400100100.dma0/driver/
+ * directory are "devices", "enable" and "poly".
+ * "devices" shows available engines.
+ * "enable" is used to enable RAID-6 capabilities or to check
+ * whether they have been activated.
+ * "poly" allows setting/checking used polynomial (for PPC440SPe only).
+ */
+
+static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
+{
+ ssize_t size = 0;
+ int i;
+
+ for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
+ if (ppc440spe_adma_devices[i] == -1)
+ continue;
+ size += snprintf(buf + size, PAGE_SIZE - size,
+ "PPC440SP(E)-ADMA.%d: %s\n", i,
+ ppc_adma_errors[ppc440spe_adma_devices[i]]);
+ }
+ return size;
+}
+
+static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE,
+ "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
+ ppc440spe_r6_enabled ? "EN" : "DIS");
+}
+
+static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (!count || count > 11)
+ return -EINVAL;
+
+ if (!ppc440spe_r6_tchan)
+ return -EFAULT;
+
+ /* Write a key */
+ sscanf(buf, "%lx", &val);
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
+ isync();
+
+ /* Verify whether it really works now */
+ if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
+ pr_info("PPC440SP(e) RAID-6 has been activated "
+ "successfully\n");
+ ppc440spe_r6_enabled = 1;
+ } else {
+ pr_info("PPC440SP(e) RAID-6 hasn't been activated!"
+ " Error key ?\n");
+ ppc440spe_r6_enabled = 0;
+ }
+ return count;
+}
+
+static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
+{
+ ssize_t size = 0;
+ u32 reg;
+
+#ifdef CONFIG_440SP
+ /* 440SP has fixed polynomial */
+ reg = 0x4d;
+#else
+ reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
+ reg >>= MQ0_CFBHL_POLY;
+ reg &= 0xFF;
+#endif
+
+ size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
+ "uses 0x1%02x polynomial.\n", reg);
+ return size;
+}
+
+static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
+ const char *buf, size_t count)
+{
+ unsigned long reg, val;
+
+#ifdef CONFIG_440SP
+ /* 440SP uses default 0x14D polynomial only */
+ return -EINVAL;
+#endif
+
+ if (!count || count > 6)
+ return -EINVAL;
+
+ /* e.g., 0x14D or 0x11D */
+ sscanf(buf, "%lx", &val);
+
+ if (val & ~0x1FF)
+ return -EINVAL;
+
+ val &= 0xFF;
+ reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
+ reg &= ~(0xFF << MQ0_CFBHL_POLY);
+ reg |= val << MQ0_CFBHL_POLY;
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);
+
+ return count;
+}
+
+static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
+static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
+ store_ppc440spe_r6enable);
+static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
+ store_ppc440spe_r6poly);
+
+/*
+ * Common initialisation for RAID engines; allocate memory for
+ * DMAx FIFOs, perform configuration common for all DMA engines.
+ * Further DMA engine specific configuration is done at probe time.
+ */
+static int ppc440spe_configure_raid_devices(void)
+{
+ struct device_node *np;
+ struct resource i2o_res;
+ struct i2o_regs __iomem *i2o_reg;
+ dcr_host_t i2o_dcr_host;
+ unsigned int dcr_base, dcr_len;
+ int i, ret;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
+ if (!np) {
+ pr_err("%s: can't find I2O device tree node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(np, 0, &i2o_res)) {
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ i2o_reg = of_iomap(np, 0);
+ if (!i2o_reg) {
+ pr_err("%s: failed to map I2O registers\n", __func__);
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ /* Get I2O DCRs base */
+ dcr_base = dcr_resource_start(np, 0);
+ dcr_len = dcr_resource_len(np, 0);
+ if (!dcr_base && !dcr_len) {
+ pr_err("%s: can't get DCR registers base/len!\n",
+ np->full_name);
+ of_node_put(np);
+ iounmap(i2o_reg);
+ return -ENODEV;
+ }
+
+ i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
+ if (!DCR_MAP_OK(i2o_dcr_host)) {
+ pr_err("%s: failed to map DCRs!\n", np->full_name);
+ of_node_put(np);
+ iounmap(i2o_reg);
+ return -ENODEV;
+ }
+ of_node_put(np);
+
+ /* Provide memory regions for DMA's FIFOs: I2O, DMA0 and DMA1 share
+ * the base address of FIFO memory space.
+ * Actually we need twice as much physical memory as is programmed in the
+ * <fsiz> register (because there are two FIFOs for each DMA: CP and CS)
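+ * (with the default 0x1000 byte FIFOs this is (0x1000 + 0x1000) << 1 = 16 KB).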
+ */
+ ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
+ GFP_KERNEL);
+ if (!ppc440spe_dma_fifo_buf) {
+ pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
+ iounmap(i2o_reg);
+ dcr_unmap(i2o_dcr_host, dcr_len);
+ return -ENOMEM;
+ }
+
+ /*
+ * Configure h/w
+ */
+ /* Reset I2O/DMA */
+ mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
+ mtdcri(SDR0, DCRN_SDR0_SRST, 0);
+
+ /* Setup the base address of mmaped registers */
+ dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
+ dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
+ I2O_REG_ENABLE);
+ dcr_unmap(i2o_dcr_host, dcr_len);
+
+ /* Setup FIFO memory space base address */
+ iowrite32(0, &i2o_reg->ifbah);
+ iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);
+
+ /* set zero FIFO size for I2O, so the whole
+ * ppc440spe_dma_fifo_buf is used by DMAs.
+ * DMAx_FIFOs will be configured at probe time.
+ */
+ iowrite32(0, &i2o_reg->ifsiz);
+ iounmap(i2o_reg);
+
+ /* To prepare WXOR/RXOR functionality we need access to
+ * Memory Queue Module DCRs (it will eventually be enabled
+ * via the /sys interface of the ppc440spe ADMA driver).
+ */
+ np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
+ if (!np) {
+ pr_err("%s: can't find MQ device tree node\n",
+ __func__);
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ /* Get MQ DCRs base */
+ dcr_base = dcr_resource_start(np, 0);
+ dcr_len = dcr_resource_len(np, 0);
+ if (!dcr_base && !dcr_len) {
+ pr_err("%s: can't get DCR registers base/len!\n",
+ np->full_name);
+ ret = -ENODEV;
+ goto out_mq;
+ }
+
+ ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
+ if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
+ pr_err("%s: failed to map DCRs!\n", np->full_name);
+ ret = -ENODEV;
+ goto out_mq;
+ }
+ of_node_put(np);
+ ppc440spe_mq_dcr_len = dcr_len;
+
+ /* Set HB alias */
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
+
+ /* Set:
+ * - LL transaction passing limit to 1;
+ * - Memory controller cycle limit to 1;
+ * - Galois Polynomial to 0x14d (default)
+ */
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
+ (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
+ (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));
+
+ atomic_set(&ppc440spe_adma_err_irq_ref, 0);
+ for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
+ ppc440spe_adma_devices[i] = -1;
+
+ return 0;
+
+out_mq:
+ of_node_put(np);
+out_free:
+ kfree(ppc440spe_dma_fifo_buf);
+ return ret;
+}
+
+static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
+ { .compatible = "ibm,dma-440spe", },
+ { .compatible = "amcc,xor-accelerator", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
+
+static struct of_platform_driver ppc440spe_adma_driver = {
+ .match_table = ppc440spe_adma_of_match,
+ .probe = ppc440spe_adma_probe,
+ .remove = __devexit_p(ppc440spe_adma_remove),
+ .driver = {
+ .name = "PPC440SP(E)-ADMA",
+ .owner = THIS_MODULE,
+ },
+};
+
+static __init int ppc440spe_adma_init(void)
+{
+ int ret;
+
+ ret = ppc440spe_configure_raid_devices();
+ if (ret)
+ return ret;
+
+ ret = of_register_platform_driver(&ppc440spe_adma_driver);
+ if (ret) {
+ pr_err("%s: failed to register platform driver\n",
+ __func__);
+ goto out_reg;
+ }
+
+ /* Initialization status */
+ ret = driver_create_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_devices);
+ if (ret)
+ goto out_dev;
+
+ /* RAID-6 h/w enable entry */
+ ret = driver_create_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_enable);
+ if (ret)
+ goto out_en;
+
+ /* GF polynomial to use */
+ ret = driver_create_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_poly);
+ if (!ret)
+ return ret;
+
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_enable);
+out_en:
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_devices);
+out_dev:
+ /* User will not be able to enable h/w RAID-6 */
+ pr_err("%s: failed to create RAID-6 driver interface\n",
+ __func__);
+ of_unregister_platform_driver(&ppc440spe_adma_driver);
+out_reg:
+ dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
+ kfree(ppc440spe_dma_fifo_buf);
+ return ret;
+}
+
+static void __exit ppc440spe_adma_exit(void)
+{
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_poly);
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_enable);
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_devices);
+ of_unregister_platform_driver(&ppc440spe_adma_driver);
+ dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
+ kfree(ppc440spe_dma_fifo_buf);
+}
+
+arch_initcall(ppc440spe_adma_init);
+module_exit(ppc440spe_adma_exit);
+
+MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
+MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h
new file mode 100644
index 0000000..8ada5a81
--- /dev/null
+++ b/drivers/dma/ppc4xx/adma.h
@@ -0,0 +1,195 @@
+/*
+ * 2006-2009 (C) DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+
+#ifndef _PPC440SPE_ADMA_H
+#define _PPC440SPE_ADMA_H
+
+#include <linux/types.h>
+#include "dma.h"
+#include "xor.h"
+
+#define to_ppc440spe_adma_chan(chan) \
+ container_of(chan, struct ppc440spe_adma_chan, common)
+#define to_ppc440spe_adma_device(dev) \
+ container_of(dev, struct ppc440spe_adma_device, common)
+#define tx_to_ppc440spe_adma_slot(tx) \
+ container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
+
+/* Default polynomial (the only one available on 440SP) */
+#define PPC440SPE_DEFAULT_POLY 0x4d
+
+#define PPC440SPE_ADMA_ENGINES_NUM (XOR_ENGINES_NUM + DMA_ENGINES_NUM)
+
+#define PPC440SPE_ADMA_WATCHDOG_MSEC 3
+#define PPC440SPE_ADMA_THRESHOLD 1
+
+#define PPC440SPE_DMA0_ID 0
+#define PPC440SPE_DMA1_ID 1
+#define PPC440SPE_XOR_ID 2
+
+#define PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT 0xFFFFFFUL
+/* this is the XOR_CBBCR width */
+#define PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT (1 << 31)
+#define PPC440SPE_ADMA_ZERO_SUM_MAX_BYTE_COUNT PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT
+
+#define PPC440SPE_RXOR_RUN 0
+
+#define MQ0_CF2H_RXOR_BS_MASK 0x1FF
+
+#undef ADMA_LL_DEBUG
+
+/**
+ * struct ppc440spe_adma_device - internal representation of an ADMA device
+ * @dev: device
+ * @dma_reg: base for DMAx register access
+ * @xor_reg: base for XOR register access
+ * @i2o_reg: base for I2O register access
+ * @id: HW ADMA Device selector
+ * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
+ * @dma_desc_pool: base of DMA descriptor region (DMA address)
+ * @pool_size: size of the pool
+ * @irq: DMAx or XOR irq number
+ * @err_irq: DMAx error irq number
+ * @common: embedded struct dma_device
+ */
+struct ppc440spe_adma_device {
+ struct device *dev;
+ struct dma_regs __iomem *dma_reg;
+ struct xor_regs __iomem *xor_reg;
+ struct i2o_regs __iomem *i2o_reg;
+ int id;
+ void *dma_desc_pool_virt;
+ dma_addr_t dma_desc_pool;
+ size_t pool_size;
+ int irq;
+ int err_irq;
+ struct dma_device common;
+};
+
+/**
+ * struct ppc440spe_adma_chan - internal representation of an ADMA channel
+ * @lock: serializes enqueue/dequeue operations to the slot pool
+ * @device: parent device
+ * @chain: device chain view of the descriptors
+ * @common: common dmaengine channel object members
+ * @all_slots: complete domain of slots usable by the channel
+ * @pending: allows batching of hardware operations
+ * @completed_cookie: identifier for the most recently completed operation
+ * @slots_allocated: records the actual size of the descriptor slot pool
+ * @hw_chain_inited: h/w descriptor chain initialization flag
+ * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
+ * @needs_unmap: if buffers should not be unmapped upon final processing
+ * @pdest_page: P destination page for async validate operation
+ * @qdest_page: Q destination page for async validate operation
+ * @pdest: P dma addr for async validate operation
+ * @qdest: Q dma addr for async validate operation
+ */
+struct ppc440spe_adma_chan {
+ spinlock_t lock;
+ struct ppc440spe_adma_device *device;
+ struct list_head chain;
+ struct dma_chan common;
+ struct list_head all_slots;
+ struct ppc440spe_adma_desc_slot *last_used;
+ int pending;
+ dma_cookie_t completed_cookie;
+ int slots_allocated;
+ int hw_chain_inited;
+ struct tasklet_struct irq_tasklet;
+ u8 needs_unmap;
+ struct page *pdest_page;
+ struct page *qdest_page;
+ dma_addr_t pdest;
+ dma_addr_t qdest;
+};
+
+struct ppc440spe_rxor {
+ u32 addrl;
+ u32 addrh;
+ int len;
+ int xor_count;
+ int addr_count;
+ int desc_count;
+ int state;
+};
+
+/**
+ * struct ppc440spe_adma_desc_slot - PPC440SPE-ADMA software descriptor
+ * @phys: hardware address of the hardware descriptor chain
+ * @group_head: first operation in a transaction
+ * @hw_next: pointer to the next descriptor in chain
+ * @async_tx: support for the async_tx api
+ * @slot_node: node on the ppc440spe_adma_chan.all_slots list
+ * @chain_node: node on the ppc440spe_adma_chan.chain list
+ * @group_list: list of slots that make up a multi-descriptor transaction
+ * for example transfer lengths larger than the supported hw max
+ * @unmap_len: transaction bytecount
+ * @hw_desc: virtual address of the hardware descriptor chain
+ * @stride: currently chained or not
+ * @idx: pool index
+ * @slot_cnt: total slots used in a transaction (group of operations)
+ * @src_cnt: number of sources set in this descriptor
+ * @dst_cnt: number of destinations set in the descriptor
+ * @slots_per_op: number of slots per operation
+ * @descs_per_op: number of slots per P/Q operation; see the comment
+ * for the ppc440spe_prep_dma_pqxor function
+ * @flags: desc state/type
+ * @reverse_flags: 1 if a corresponding rxor address uses reversed address order
+ * @xor_check_result: result of zero sum
+ * @crc32_result: result of the CRC calculation
+ */
+struct ppc440spe_adma_desc_slot {
+ dma_addr_t phys;
+ struct ppc440spe_adma_desc_slot *group_head;
+ struct ppc440spe_adma_desc_slot *hw_next;
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head slot_node;
+ struct list_head chain_node; /* node in channel ops list */
+ struct list_head group_list; /* list of slots in this transaction */
+ unsigned int unmap_len;
+ void *hw_desc;
+ u16 stride;
+ u16 idx;
+ u16 slot_cnt;
+ u8 src_cnt;
+ u8 dst_cnt;
+ u8 slots_per_op;
+ u8 descs_per_op;
+ unsigned long flags;
+ unsigned long reverse_flags[8];
+
+#define PPC440SPE_DESC_INT 0 /* generate interrupt on complete */
+#define PPC440SPE_ZERO_P 1 /* clear P destination */
+#define PPC440SPE_ZERO_Q 2 /* clear Q destination */
+#define PPC440SPE_COHERENT 3 /* src/dst are coherent */
+
+#define PPC440SPE_DESC_WXOR 4 /* WXORs are in chain */
+#define PPC440SPE_DESC_RXOR 5 /* RXOR is in chain */
+
+#define PPC440SPE_DESC_RXOR123 8 /* CDB for RXOR123 operation */
+#define PPC440SPE_DESC_RXOR124 9 /* CDB for RXOR124 operation */
+#define PPC440SPE_DESC_RXOR125 10 /* CDB for RXOR125 operation */
+#define PPC440SPE_DESC_RXOR12 11 /* CDB for RXOR12 operation */
+#define PPC440SPE_DESC_RXOR_REV 12 /* CDB has srcs in reversed order */
+
+#define PPC440SPE_DESC_PCHECK 13
+#define PPC440SPE_DESC_QCHECK 14
+
+#define PPC440SPE_DESC_RXOR_MSK 0x3
+
+ struct ppc440spe_rxor rxor_cursor;
+
+ union {
+ u32 *xor_check_result;
+ u32 *crc32_result;
+ };
+};
+
+#endif /* _PPC440SPE_ADMA_H */
diff --git a/drivers/dma/ppc4xx/dma.h b/drivers/dma/ppc4xx/dma.h
new file mode 100644
index 0000000..bcde2df
--- /dev/null
+++ b/drivers/dma/ppc4xx/dma.h
@@ -0,0 +1,223 @@
+/*
+ * 440SPe's DMA engines support header file
+ *
+ * 2006-2009 (C) DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+
+#ifndef _PPC440SPE_DMA_H
+#define _PPC440SPE_DMA_H
+
+#include <linux/types.h>
+
+/* Number of elements in the array with static CDBs */
+#define MAX_STAT_DMA_CDBS 16
+/* Number of DMA engines available on the controller */
+#define DMA_ENGINES_NUM 2
+
+/* Maximum h/w supported number of destinations */
+#define DMA_DEST_MAX_NUM 2
+
+/* FIFO's params */
+#define DMA0_FIFO_SIZE 0x1000
+#define DMA1_FIFO_SIZE 0x1000
+#define DMA_FIFO_ENABLE (1<<12)
+
+/* DMA Configuration Register. Data Transfer Engine PLB Priority: */
+#define DMA_CFG_DXEPR_LP (0<<26)
+#define DMA_CFG_DXEPR_HP (3<<26)
+#define DMA_CFG_DXEPR_HHP (2<<26)
+#define DMA_CFG_DXEPR_HHHP (1<<26)
+
+/* DMA Configuration Register. DMA FIFO Manager PLB Priority: */
+#define DMA_CFG_DFMPP_LP (0<<23)
+#define DMA_CFG_DFMPP_HP (3<<23)
+#define DMA_CFG_DFMPP_HHP (2<<23)
+#define DMA_CFG_DFMPP_HHHP (1<<23)
+
+/* DMA Configuration Register. Force 64-byte Alignment */
+#define DMA_CFG_FALGN (1 << 19)
+
+/*UIC0:*/
+#define D0CPF_INT (1<<12)
+#define D0CSF_INT (1<<11)
+#define D1CPF_INT (1<<10)
+#define D1CSF_INT (1<<9)
+/*UIC1:*/
+#define DMAE_INT (1<<9)
+
+/* I2O IOP Interrupt Mask Register */
+#define I2O_IOPIM_P0SNE (1<<3)
+#define I2O_IOPIM_P0EM (1<<5)
+#define I2O_IOPIM_P1SNE (1<<6)
+#define I2O_IOPIM_P1EM (1<<8)
+
+/* DMA CDB fields */
+#define DMA_CDB_MSK (0xF)
+#define DMA_CDB_64B_ADDR (1<<2)
+#define DMA_CDB_NO_INT (1<<3)
+#define DMA_CDB_STATUS_MSK (0x3)
+#define DMA_CDB_ADDR_MSK (0xFFFFFFF0)
+
+/* DMA CDB OpCodes */
+#define DMA_CDB_OPC_NO_OP (0x00)
+#define DMA_CDB_OPC_MV_SG1_SG2 (0x01)
+#define DMA_CDB_OPC_MULTICAST (0x05)
+#define DMA_CDB_OPC_DFILL128 (0x24)
+#define DMA_CDB_OPC_DCHECK128 (0x23)
+
+#define DMA_CUED_XOR_BASE (0x10000000)
+#define DMA_CUED_XOR_HB (0x00000008)
+
+#ifdef CONFIG_440SP
+#define DMA_CUED_MULT1_OFF 0
+#define DMA_CUED_MULT2_OFF 8
+#define DMA_CUED_MULT3_OFF 16
+#define DMA_CUED_REGION_OFF 24
+#define DMA_CUED_XOR_WIN_MSK (0xFC000000)
+#else
+#define DMA_CUED_MULT1_OFF 2
+#define DMA_CUED_MULT2_OFF 10
+#define DMA_CUED_MULT3_OFF 18
+#define DMA_CUED_REGION_OFF 26
+#define DMA_CUED_XOR_WIN_MSK (0xF0000000)
+#endif
+
+#define DMA_CUED_REGION_MSK 0x3
+#define DMA_RXOR123 0x0
+#define DMA_RXOR124 0x1
+#define DMA_RXOR125 0x2
+#define DMA_RXOR12 0x3
+
+/* S/G addresses */
+#define DMA_CDB_SG_SRC 1
+#define DMA_CDB_SG_DST1 2
+#define DMA_CDB_SG_DST2 3
+
+/*
+ * DMAx engines Command Descriptor Block Type
+ */
+struct dma_cdb {
+ /*
+ * Basic CDB structure (Table 20-17, p.499, 440spe_um_1_22.pdf)
+ */
+ u8 pad0[2]; /* reserved */
+ u8 attr; /* attributes */
+ u8 opc; /* opcode */
+ u32 sg1u; /* upper SG1 address */
+ u32 sg1l; /* lower SG1 address */
+ u32 cnt; /* SG count, 3B used */
+ u32 sg2u; /* upper SG2 address */
+ u32 sg2l; /* lower SG2 address */
+ u32 sg3u; /* upper SG3 address */
+ u32 sg3l; /* lower SG3 address */
+};
+
+/*
+ * DMAx hardware registers (p.515 in 440SPe UM 1.22)
+ */
+struct dma_regs {
+ u32 cpfpl;
+ u32 cpfph;
+ u32 csfpl;
+ u32 csfph;
+ u32 dsts;
+ u32 cfg;
+ u8 pad0[0x8];
+ u16 cpfhp;
+ u16 cpftp;
+ u16 csfhp;
+ u16 csftp;
+ u8 pad1[0x8];
+ u32 acpl;
+ u32 acph;
+ u32 s1bpl;
+ u32 s1bph;
+ u32 s2bpl;
+ u32 s2bph;
+ u32 s3bpl;
+ u32 s3bph;
+ u8 pad2[0x10];
+ u32 earl;
+ u32 earh;
+ u8 pad3[0x8];
+ u32 seat;
+ u32 sead;
+ u32 op;
+ u32 fsiz;
+};
+
+/*
+ * I2O hardware registers (p.528 in 440SPe UM 1.22)
+ */
+struct i2o_regs {
+ u32 ists;
+ u32 iseat;
+ u32 isead;
+ u8 pad0[0x14];
+ u32 idbel;
+ u8 pad1[0xc];
+ u32 ihis;
+ u32 ihim;
+ u8 pad2[0x8];
+ u32 ihiq;
+ u32 ihoq;
+ u8 pad3[0x8];
+ u32 iopis;
+ u32 iopim;
+ u32 iopiq;
+ u8 iopoq;
+ u8 pad4[3];
+ u16 iiflh;
+ u16 iiflt;
+ u16 iiplh;
+ u16 iiplt;
+ u16 ioflh;
+ u16 ioflt;
+ u16 ioplh;
+ u16 ioplt;
+ u32 iidc;
+ u32 ictl;
+ u32 ifcpp;
+ u8 pad5[0x4];
+ u16 mfac0;
+ u16 mfac1;
+ u16 mfac2;
+ u16 mfac3;
+ u16 mfac4;
+ u16 mfac5;
+ u16 mfac6;
+ u16 mfac7;
+ u16 ifcfh;
+ u16 ifcht;
+ u8 pad6[0x4];
+ u32 iifmc;
+ u32 iodb;
+ u32 iodbc;
+ u32 ifbal;
+ u32 ifbah;
+ u32 ifsiz;
+ u32 ispd0;
+ u32 ispd1;
+ u32 ispd2;
+ u32 ispd3;
+ u32 ihipl;
+ u32 ihiph;
+ u32 ihopl;
+ u32 ihoph;
+ u32 iiipl;
+ u32 iiiph;
+ u32 iiopl;
+ u32 iioph;
+ u32 ifcpl;
+ u32 ifcph;
+ u8 pad7[0x8];
+ u32 iopt;
+};
+
+#endif /* _PPC440SPE_DMA_H */
diff --git a/drivers/dma/ppc4xx/xor.h b/drivers/dma/ppc4xx/xor.h
new file mode 100644
index 0000000..daed738
--- /dev/null
+++ b/drivers/dma/ppc4xx/xor.h
@@ -0,0 +1,110 @@
+/*
+ * 440SPe's XOR engines support header file
+ *
+ * 2006-2009 (C) DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+
+#ifndef _PPC440SPE_XOR_H
+#define _PPC440SPE_XOR_H
+
+#include <linux/types.h>
+
+/* Number of XOR engines available on the controller */
+#define XOR_ENGINES_NUM 1
+
+/* Number of operands supported in the h/w */
+#define XOR_MAX_OPS 16
+
+/*
+ * XOR Command Block Control Register bits
+ */
+#define XOR_CBCR_LNK_BIT (1<<31) /* link present */
+#define XOR_CBCR_TGT_BIT (1<<30) /* target present */
+#define XOR_CBCR_CBCE_BIT (1<<29) /* command block complete enable */
+#define XOR_CBCR_RNZE_BIT (1<<28) /* result not zero enable */
+#define XOR_CBCR_XNOR_BIT (1<<15) /* XOR/XNOR */
+#define XOR_CDCR_OAC_MSK (0x7F) /* operand address count */
+
+/*
+ * XORCore Status Register bits
+ */
+#define XOR_SR_XCP_BIT (1<<31) /* core processing */
+#define XOR_SR_ICB_BIT (1<<17) /* invalid CB */
+#define XOR_SR_IC_BIT (1<<16) /* invalid command */
+#define XOR_SR_IPE_BIT (1<<15) /* internal parity error */
+#define XOR_SR_RNZ_BIT (1<<2) /* result not Zero */
+#define XOR_SR_CBC_BIT (1<<1) /* CB complete */
+#define XOR_SR_CBLC_BIT (1<<0) /* CB list complete */
+
+/*
+ * XORCore Control Set and Reset Register bits
+ */
+#define XOR_CRSR_XASR_BIT (1<<31) /* soft reset */
+#define XOR_CRSR_XAE_BIT (1<<30) /* enable */
+#define XOR_CRSR_RCBE_BIT (1<<29) /* refetch CB enable */
+#define XOR_CRSR_PAUS_BIT (1<<28) /* pause */
+#define XOR_CRSR_64BA_BIT (1<<27) /* 64/32 CB format */
+#define XOR_CRSR_CLP_BIT (1<<25) /* continue list processing */
+
+/*
+ * XORCore Interrupt Enable Register
+ */
+#define XOR_IE_ICBIE_BIT (1<<17) /* Invalid Command Block IRQ Enable */
+#define XOR_IE_ICIE_BIT (1<<16) /* Invalid Command IRQ Enable */
+#define XOR_IE_RPTIE_BIT (1<<14) /* Read PLB Timeout Error IRQ Enable */
+#define XOR_IE_CBCIE_BIT (1<<1) /* CB complete interrupt enable */
+#define XOR_IE_CBLCI_BIT (1<<0) /* CB list complete interrupt enable */
+
+/*
+ * XOR Accelerator engine Command Block Type
+ */
+struct xor_cb {
+ /*
+ * Basic 64-bit format XOR CB (Table 19-1, p.463, 440spe_um_1_22.pdf)
+ */
+ u32 cbc; /* control */
+ u32 cbbc; /* byte count */
+ u32 cbs; /* status */
+ u8 pad0[4]; /* reserved */
+ u32 cbtah; /* target address high */
+ u32 cbtal; /* target address low */
+ u32 cblah; /* link address high */
+ u32 cblal; /* link address low */
+ struct {
+ u32 h;
+ u32 l;
+ } __attribute__ ((packed)) ops[16];
+} __attribute__ ((packed));
+
+/*
+ * XOR hardware registers Table 19-3, UM 1.22
+ */
+struct xor_regs {
+ u32 op_ar[16][2]; /* operand address[0]-high,[1]-low registers */
+ u8 pad0[352]; /* reserved */
+ u32 cbcr; /* CB control register */
+ u32 cbbcr; /* CB byte count register */
+ u32 cbsr; /* CB status register */
+ u8 pad1[4]; /* reserved */
+ u32 cbtahr; /* operand target address high register */
+ u32 cbtalr; /* operand target address low register */
+ u32 cblahr; /* CB link address high register */
+ u32 cblalr; /* CB link address low register */
+ u32 crsr; /* control set register */
+ u32 crrr; /* control reset register */
+ u32 ccbahr; /* current CB address high register */
+ u32 ccbalr; /* current CB address low register */
+ u32 plbr; /* PLB configuration register */
+ u32 ier; /* interrupt enable register */
+ u32 pecr; /* parity error count register */
+ u32 sr; /* status register */
+ u32 revidr; /* revision ID register */
+};
+
+#endif /* _PPC440SPE_XOR_H */
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 034ecf0..d10cc89 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,16 +23,19 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"
/* DMA descriptor control */
-#define DESC_LAST (-1)
-#define DESC_COMP (1)
-#define DESC_NCOMP (0)
+enum sh_dmae_desc_status {
+ DESC_IDLE,
+ DESC_PREPARED,
+ DESC_SUBMITTED,
+ DESC_COMPLETED, /* completed, have to call callback */
+ DESC_WAITING, /* callback called, waiting for ack / re-submit */
+};
#define NR_DESCS_PER_CHANNEL 32
/*
@@ -45,6 +48,8 @@
*/
#define RS_DEFAULT (RS_DUAL)
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+
#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
@@ -80,17 +85,17 @@ static int sh_dmae_rst(int id)
unsigned short dmaor;
sh_dmae_ctl_stop(id);
- dmaor = (dmaor_read_reg(id)|DMAOR_INIT);
+ dmaor = dmaor_read_reg(id) | DMAOR_INIT;
dmaor_write_reg(id, dmaor);
- if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) {
+ if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
return -EINVAL;
}
return 0;
}
-static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
+static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
if (chcr & CHCR_DE) {
@@ -106,19 +111,18 @@ static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}
-static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
- sh_dmae_writel(sh_chan, hw.sar, SAR);
- sh_dmae_writel(sh_chan, hw.dar, DAR);
- sh_dmae_writel(sh_chan,
- (hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
+ sh_dmae_writel(sh_chan, hw->sar, SAR);
+ sh_dmae_writel(sh_chan, hw->dar, DAR);
+ sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
- chcr |= (CHCR_DE|CHCR_IE);
+ chcr |= CHCR_DE | CHCR_IE;
sh_dmae_writel(sh_chan, chcr, CHCR);
}
@@ -132,7 +136,7 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
- int ret = dmae_is_idle(sh_chan);
+ int ret = dmae_is_busy(sh_chan);
/* When DMA was working, can not set data to CHCR */
if (ret)
return ret;
@@ -149,7 +153,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
u32 addr;
int shift = 0;
- int ret = dmae_is_idle(sh_chan);
+ int ret = dmae_is_busy(sh_chan);
if (ret)
return ret;
@@ -185,8 +189,9 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct sh_desc *desc = tx_to_sh_desc(tx);
+ struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+ dma_async_tx_callback callback = tx->callback;
dma_cookie_t cookie;
spin_lock_bh(&sh_chan->desc_lock);
@@ -196,45 +201,53 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
if (cookie < 0)
cookie = 1;
- /* If desc only in the case of 1 */
- if (desc->async_tx.cookie != -EBUSY)
- desc->async_tx.cookie = cookie;
- sh_chan->common.cookie = desc->async_tx.cookie;
+ sh_chan->common.cookie = cookie;
+ tx->cookie = cookie;
+
+ /* Mark all chunks of this descriptor as submitted, move to the queue */
+ list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
+ /*
+ * All chunks are on the global ld_free, so, we have to find
+ * the end of the chain ourselves
+ */
+ if (chunk != desc && (chunk->mark == DESC_IDLE ||
+ chunk->async_tx.cookie > 0 ||
+ chunk->async_tx.cookie == -EBUSY ||
+ &chunk->node == &sh_chan->ld_free))
+ break;
+ chunk->mark = DESC_SUBMITTED;
+ /* Callback goes to the last chunk */
+ chunk->async_tx.callback = NULL;
+ chunk->cookie = cookie;
+ list_move_tail(&chunk->node, &sh_chan->ld_queue);
+ last = chunk;
+ }
+
+ last->async_tx.callback = callback;
+ last->async_tx.callback_param = tx->callback_param;
- list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);
+ dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
+ tx->cookie, &last->async_tx, sh_chan->id,
+ desc->hw.sar, desc->hw.tcr, desc->hw.dar);
spin_unlock_bh(&sh_chan->desc_lock);
return cookie;
}
+/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
- struct sh_desc *desc, *_desc, *ret = NULL;
+ struct sh_desc *desc;
- spin_lock_bh(&sh_chan->desc_lock);
- list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
- if (async_tx_test_ack(&desc->async_tx)) {
+ list_for_each_entry(desc, &sh_chan->ld_free, node)
+ if (desc->mark != DESC_PREPARED) {
+ BUG_ON(desc->mark != DESC_IDLE);
list_del(&desc->node);
- ret = desc;
- break;
+ return desc;
}
- }
- spin_unlock_bh(&sh_chan->desc_lock);
-
- return ret;
-}
-
-static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
-{
- if (desc) {
- spin_lock_bh(&sh_chan->desc_lock);
-
- list_splice_init(&desc->tx_list, &sh_chan->ld_free);
- list_add(&desc->node, &sh_chan->ld_free);
- spin_unlock_bh(&sh_chan->desc_lock);
- }
+ return NULL;
}
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
@@ -253,11 +266,10 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&desc->async_tx,
&sh_chan->common);
desc->async_tx.tx_submit = sh_dmae_tx_submit;
- desc->async_tx.flags = DMA_CTRL_ACK;
- INIT_LIST_HEAD(&desc->tx_list);
- sh_dmae_put_desc(sh_chan, desc);
+ desc->mark = DESC_IDLE;
spin_lock_bh(&sh_chan->desc_lock);
+ list_add(&desc->node, &sh_chan->ld_free);
sh_chan->descs_allocated++;
}
spin_unlock_bh(&sh_chan->desc_lock);
@@ -274,7 +286,10 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
struct sh_desc *desc, *_desc;
LIST_HEAD(list);
- BUG_ON(!list_empty(&sh_chan->ld_queue));
+ /* Prepared and not submitted descriptors can still be on the queue */
+ if (!list_empty(&sh_chan->ld_queue))
+ sh_dmae_chan_ld_cleanup(sh_chan, true);
+
spin_lock_bh(&sh_chan->desc_lock);
list_splice_init(&sh_chan->ld_free, &list);
@@ -293,6 +308,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
struct sh_dmae_chan *sh_chan;
struct sh_desc *first = NULL, *prev = NULL, *new;
size_t copy_size;
+ LIST_HEAD(tx_list);
+ int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
if (!chan)
return NULL;
@@ -302,108 +319,189 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
sh_chan = to_sh_chan(chan);
+ /* Have to lock the whole loop to protect against concurrent release */
+ spin_lock_bh(&sh_chan->desc_lock);
+
+ /*
+ * Chaining:
+ * the first descriptor is what the user deals with in all API calls; its
+ * cookie is initially set to -EBUSY and at tx-submit to a positive
+ * number
+ * if more than one chunk is needed, further chunks have cookie = -EINVAL
+ * the last chunk, if not equal to the first, has cookie = -ENOSPC
+ * all chunks are linked onto the tx_list head with their .node heads
+ * only during this function, then they are immediately spliced
+ * back onto the free list in form of a chain
+ */
do {
- /* Allocate the link descriptor from DMA pool */
+ /* Allocate the link descriptor from the free list */
new = sh_dmae_get_desc(sh_chan);
if (!new) {
dev_err(sh_chan->dev,
- "No free memory for link descriptor\n");
- goto err_get_desc;
+ "No free memory for link descriptor\n");
+ list_for_each_entry(new, &tx_list, node)
+ new->mark = DESC_IDLE;
+ list_splice(&tx_list, &sh_chan->ld_free);
+ spin_unlock_bh(&sh_chan->desc_lock);
+ return NULL;
}
- copy_size = min(len, (size_t)SH_DMA_TCR_MAX);
+ copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);
new->hw.sar = dma_src;
new->hw.dar = dma_dest;
new->hw.tcr = copy_size;
- if (!first)
+ if (!first) {
+ /* First desc */
+ new->async_tx.cookie = -EBUSY;
first = new;
+ } else {
+ /* Other desc - invisible to the user */
+ new->async_tx.cookie = -EINVAL;
+ }
- new->mark = DESC_NCOMP;
- async_tx_ack(&new->async_tx);
+ dev_dbg(sh_chan->dev,
+ "chaining %u of %u with %p, dst %x, cookie %d\n",
+ copy_size, len, &new->async_tx, dma_dest,
+ new->async_tx.cookie);
+
+ new->mark = DESC_PREPARED;
+ new->async_tx.flags = flags;
+ new->chunks = chunks--;
prev = new;
len -= copy_size;
dma_src += copy_size;
dma_dest += copy_size;
/* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &first->tx_list);
+ list_add_tail(&new->node, &tx_list);
} while (len);
- new->async_tx.flags = flags; /* client is in control of this ack */
- new->async_tx.cookie = -EBUSY; /* Last desc */
+ if (new != first)
+ new->async_tx.cookie = -ENOSPC;
- return &first->async_tx;
+ /* Put them back on the free list, so, they don't get lost */
+ list_splice_tail(&tx_list, &sh_chan->ld_free);
-err_get_desc:
- sh_dmae_put_desc(sh_chan, first);
- return NULL;
+ spin_unlock_bh(&sh_chan->desc_lock);
+ return &first->async_tx;
}
-/*
- * sh_chan_ld_cleanup - Clean up link descriptors
- *
- * This function clean up the ld_queue of DMA channel.
- */
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
+static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
struct sh_desc *desc, *_desc;
+ /* Is the "exposed" head of a chain acked? */
+ bool head_acked = false;
+ dma_cookie_t cookie = 0;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
spin_lock_bh(&sh_chan->desc_lock);
list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
- dma_async_tx_callback callback;
- void *callback_param;
-
- /* non send data */
- if (desc->mark == DESC_NCOMP)
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+ BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
+ BUG_ON(desc->mark != DESC_SUBMITTED &&
+ desc->mark != DESC_COMPLETED &&
+ desc->mark != DESC_WAITING);
+
+ /*
+ * the queue is ordered, so we use this loop to (1) clean up all
+ * completed descriptors and (2) update the descriptor flags of
+ * any chunks in a (partially) completed chain
+ */
+ if (!all && desc->mark == DESC_SUBMITTED &&
+ desc->cookie != cookie)
break;
- /* send data sesc */
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
+ if (tx->cookie > 0)
+ cookie = tx->cookie;
- /* Remove from ld_queue list */
- list_splice_init(&desc->tx_list, &sh_chan->ld_free);
+ if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
+ BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
+ sh_chan->completed_cookie = desc->cookie;
+ }
- dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n",
- desc);
+ /* Call callback on the last chunk */
+ if (desc->mark == DESC_COMPLETED && tx->callback) {
+ desc->mark = DESC_WAITING;
+ callback = tx->callback;
+ param = tx->callback_param;
+ dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
+ tx->cookie, tx, sh_chan->id);
+ BUG_ON(desc->chunks != 1);
+ break;
+ }
- list_move(&desc->node, &sh_chan->ld_free);
- /* Run the link descriptor callback function */
- if (callback) {
- spin_unlock_bh(&sh_chan->desc_lock);
- dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
- desc);
- callback(callback_param);
- spin_lock_bh(&sh_chan->desc_lock);
+ if (tx->cookie > 0 || tx->cookie == -EBUSY) {
+ if (desc->mark == DESC_COMPLETED) {
+ BUG_ON(tx->cookie < 0);
+ desc->mark = DESC_WAITING;
+ }
+ head_acked = async_tx_test_ack(tx);
+ } else {
+ switch (desc->mark) {
+ case DESC_COMPLETED:
+ desc->mark = DESC_WAITING;
+ /* Fall through */
+ case DESC_WAITING:
+ if (head_acked)
+ async_tx_ack(&desc->async_tx);
+ }
+ }
+
+ dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
+ tx, tx->cookie);
+
+ if (((desc->mark == DESC_COMPLETED ||
+ desc->mark == DESC_WAITING) &&
+ async_tx_test_ack(&desc->async_tx)) || all) {
+ /* Remove from ld_queue list */
+ desc->mark = DESC_IDLE;
+ list_move(&desc->node, &sh_chan->ld_free);
}
}
spin_unlock_bh(&sh_chan->desc_lock);
+
+ if (callback)
+ callback(param);
+
+ return callback;
+}
+
+/*
+ * sh_chan_ld_cleanup - Clean up link descriptors
+ *
+ * This function cleans up the ld_queue of a DMA channel.
+ */
+static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
+{
+ while (__ld_cleanup(sh_chan, all))
+ ;
}
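The cleanup loop above leans on a small per-descriptor state machine; the following compilable summary restates it, with the transitions as comments (the state names match the driver, the ordering is an editorial reading of the patch).

    /* Descriptor states used by shdma after this patch (values illustrative). */
    enum sh_desc_state {
    	DESC_IDLE,	/* on ld_free, owned by the driver */
    	DESC_PREPARED,	/* returned from prep, not yet submitted */
    	DESC_SUBMITTED,	/* on ld_queue, queued for or undergoing DMA */
    	DESC_COMPLETED,	/* chunk finished, callback not yet run */
    	DESC_WAITING,	/* callback issued (or chunk done), waiting for ack */
    };

    /*
     * prep_memcpy:      IDLE      -> PREPARED
     * tx_submit:        PREPARED  -> SUBMITTED   (descriptor moves to ld_queue)
     * dmae_do_tasklet:  SUBMITTED -> COMPLETED   (hardware finished the chunk)
     * __ld_cleanup:     COMPLETED -> WAITING     (callback run once per chain)
     * __ld_cleanup:     WAITING   -> IDLE        (after async_tx_test_ack(),
     *                                             or unconditionally when all=true)
     */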
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
- struct list_head *ld_node;
- struct sh_dmae_regs hw;
+ struct sh_desc *sd;
+ spin_lock_bh(&sh_chan->desc_lock);
/* DMA work check */
- if (dmae_is_idle(sh_chan))
+ if (dmae_is_busy(sh_chan)) {
+ spin_unlock_bh(&sh_chan->desc_lock);
return;
+ }
/* Find the first not-yet-transferred descriptor */
- for (ld_node = sh_chan->ld_queue.next;
- (ld_node != &sh_chan->ld_queue)
- && (to_sh_desc(ld_node)->mark == DESC_COMP);
- ld_node = ld_node->next)
- cpu_relax();
-
- if (ld_node != &sh_chan->ld_queue) {
- /* Get the ld start address from ld_queue */
- hw = to_sh_desc(ld_node)->hw;
- dmae_set_reg(sh_chan, hw);
- dmae_start(sh_chan);
- }
+ list_for_each_entry(sd, &sh_chan->ld_queue, node)
+ if (sd->mark == DESC_SUBMITTED) {
+ /* Get the ld start address from ld_queue */
+ dmae_set_reg(sh_chan, &sd->hw);
+ dmae_start(sh_chan);
+ break;
+ }
+
+ spin_unlock_bh(&sh_chan->desc_lock);
}
static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
@@ -421,12 +519,11 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
dma_cookie_t last_used;
dma_cookie_t last_complete;
- sh_dmae_chan_ld_cleanup(sh_chan);
+ sh_dmae_chan_ld_cleanup(sh_chan, false);
last_used = chan->cookie;
last_complete = sh_chan->completed_cookie;
- if (last_complete == -EBUSY)
- last_complete = last_used;
+ BUG_ON(last_complete < 0);
if (done)
*done = last_complete;
@@ -481,11 +578,13 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
err = sh_dmae_rst(0);
if (err)
return err;
+#ifdef SH_DMAC_BASE1
if (shdev->pdata.mode & SHDMA_DMAOR1) {
err = sh_dmae_rst(1);
if (err)
return err;
}
+#endif
disable_irq(irq);
return IRQ_HANDLED;
}
@@ -495,34 +594,25 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
static void dmae_do_tasklet(unsigned long data)
{
struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
- struct sh_desc *desc, *_desc, *cur_desc = NULL;
+ struct sh_desc *desc;
u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
- list_for_each_entry_safe(desc, _desc,
- &sh_chan->ld_queue, node) {
- if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
- cur_desc = desc;
- break;
- }
- }
- if (cur_desc) {
- switch (cur_desc->async_tx.cookie) {
- case 0: /* other desc data */
- break;
- case -EBUSY: /* last desc */
- sh_chan->completed_cookie =
- cur_desc->async_tx.cookie;
- break;
- default: /* first desc ( 0 < )*/
- sh_chan->completed_cookie =
- cur_desc->async_tx.cookie - 1;
+ spin_lock(&sh_chan->desc_lock);
+ list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+ if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
+ desc->mark == DESC_SUBMITTED) {
+ dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
+ desc->async_tx.cookie, &desc->async_tx,
+ desc->hw.dar);
+ desc->mark = DESC_COMPLETED;
break;
}
- cur_desc->mark = DESC_COMP;
}
+ spin_unlock(&sh_chan->desc_lock);
+
/* Next desc */
sh_chan_xfer_ld_queue(sh_chan);
- sh_dmae_chan_ld_cleanup(sh_chan);
+ sh_dmae_chan_ld_cleanup(sh_chan, false);
}
static unsigned int get_dmae_irq(unsigned int id)
@@ -543,8 +633,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
/* alloc channel */
new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
if (!new_sh_chan) {
- dev_err(shdev->common.dev, "No free memory for allocating "
- "dma channels!\n");
+ dev_err(shdev->common.dev,
+ "No free memory for allocating dma channels!\n");
return -ENOMEM;
}
@@ -586,8 +676,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
"sh-dmae%d", new_sh_chan->id);
/* set up channel irq */
- err = request_irq(irq, &sh_dmae_interrupt,
- irqflags, new_sh_chan->dev_id, new_sh_chan);
+ err = request_irq(irq, &sh_dmae_interrupt, irqflags,
+ new_sh_chan->dev_id, new_sh_chan);
if (err) {
dev_err(shdev->common.dev, "DMA channel %d request_irq error "
"with return %d\n", id, err);
@@ -676,6 +766,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
shdev->common.device_is_tx_complete = sh_dmae_is_complete;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
shdev->common.dev = &pdev->dev;
+ /* Default transfer size of 32 bytes requires 32-byte alignment */
+ shdev->common.copy_align = 5;
#if defined(CONFIG_CPU_SH4)
/* Non Mix IRQ mode SH7722/SH7730 etc... */
@@ -688,8 +780,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
}
for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
- err = request_irq(eirq[ecnt], sh_dmae_err,
- irqflags, "DMAC Address Error", shdev);
+ err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
if (err) {
dev_err(&pdev->dev, "DMA device request_irq"
"error (irq %d) with return %d\n",
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 2b4bc15..108f1cf 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -13,9 +13,9 @@
#ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H
-#include <linux/device.h>
-#include <linux/dmapool.h>
#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
@@ -26,24 +26,27 @@ struct sh_dmae_regs {
};
struct sh_desc {
- struct list_head tx_list;
struct sh_dmae_regs hw;
struct list_head node;
struct dma_async_tx_descriptor async_tx;
+ dma_cookie_t cookie;
+ int chunks;
int mark;
};
+struct device;
+
struct sh_dmae_chan {
dma_cookie_t completed_cookie; /* The maximum cookie completed */
- spinlock_t desc_lock; /* Descriptor operation lock */
- struct list_head ld_queue; /* Link descriptors queue */
- struct list_head ld_free; /* Link descriptors free */
- struct dma_chan common; /* DMA common channel */
- struct device *dev; /* Channel device */
+ spinlock_t desc_lock; /* Descriptor operation lock */
+ struct list_head ld_queue; /* Link descriptors queue */
+ struct list_head ld_free; /* Link descriptors free */
+ struct dma_chan common; /* DMA common channel */
+ struct device *dev; /* Channel device */
struct tasklet_struct tasklet; /* Tasklet */
- int descs_allocated; /* desc count */
+ int descs_allocated; /* desc count */
int id; /* Raw id of this channel */
- char dev_id[16]; /* unique name per DMAC of channel */
+ char dev_id[16]; /* unique name per DMAC of channel */
/* Set chcr */
int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index df5b684..3391e67 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -197,7 +197,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
edac_printk(KERN_DEBUG, EDAC_MC,
"pci-read, sdram scrub control value: %d \n", scrubval);
- for (i = 0; ARRAY_SIZE(scrubrates); i++) {
+ for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
if (scrubrates[i].scrubval == scrubval) {
*bw = scrubrates[i].bandwidth;
status = 0;
@@ -1700,11 +1700,14 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
*/
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
{
- int dimm, size0, size1;
+ int dimm, size0, size1, factor = 0;
u32 dbam;
u32 *dcsb;
if (boot_cpu_data.x86 == 0xf) {
+ if (pvt->dclr0 & F10_WIDTH_128)
+ factor = 1;
+
/* K8 families < revF not supported yet */
if (pvt->ext_model < K8_REV_F)
return;
@@ -1732,7 +1735,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
- dimm * 2, size0, dimm * 2 + 1, size1);
+ dimm * 2, size0 << factor,
+ dimm * 2 + 1, size1 << factor);
}
}
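A small worked example of the factor fix above, with made-up sizes: when DCLR0 reports a 128-bit DCT on K8 (F10_WIDTH_128), the chip-select size returned by dbam_to_cs() is doubled before being printed.

    #include <stdio.h>

    int main(void)
    {
    	int size0 = 1024, size1 = 1024;	/* MB, as returned by dbam_to_cs() (illustrative) */
    	int factor = 1;			/* 128-bit width detected, otherwise 0 */

    	/* mirrors the edac_printk() above */
    	printf(" 0: %5dMB 1: %5dMB\n", size0 << factor, size1 << factor);
    	return 0;
    }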
@@ -2345,7 +2349,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
- if (!dct_ganging_enabled(pvt)) {
+ if (!dct_ganging_enabled(pvt) && boot_cpu_data.x86 >= 0x10) {
amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
}
@@ -2654,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
* the memory system completely. A command line option allows force-enabling
* hardware ECC later in amd64_enable_ecc_error_reporting().
*/
-static const char *ecc_warning =
- "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
- " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
- " Also, use of the override can cause unknown side effects.\n";
+static const char *ecc_msg =
+ "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
+ " Either enable ECC checking or force module loading by setting "
+ "'ecc_enable_override'.\n"
+ " (Note that use of the override may cause unknown side effects.)\n";
static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
@@ -2669,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
if (!ecc_enabled)
- amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
+ amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
"is currently disabled, set F3x%x[22] (%s).\n",
K8_NBCFG, pci_name(pvt->misc_f3_ctl));
else
@@ -2677,18 +2682,17 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
if (!nb_mce_en)
- amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
+ amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
"0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, pvt->mc_node_id);
if (!ecc_enabled || !nb_mce_en) {
if (!ecc_enable_override) {
- amd64_printk(KERN_WARNING, "%s", ecc_warning);
+ amd64_printk(KERN_NOTICE, "%s", ecc_msg);
return -ENODEV;
}
- } else
- /* CLEAR the override, since BIOS controlled it */
ecc_enable_override = 0;
+ }
return 0;
}
@@ -2925,16 +2929,15 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
amd64_free_mc_sibling_devices(pvt);
- kfree(pvt);
- mci->pvt_info = NULL;
-
- mci_lookup[pvt->mc_node_id] = NULL;
-
/* unregister from EDAC MCE */
amd_report_gart_errors(false);
amd_unregister_ecc_decoder(amd64_decode_bus_error);
/* Free the EDAC CORE resources */
+ mci->pvt_info = NULL;
+ mci_lookup[pvt->mc_node_id] = NULL;
+
+ kfree(pvt);
edac_mc_free(mci);
}
@@ -3011,25 +3014,29 @@ static void amd64_setup_pci_device(void)
static int __init amd64_edac_init(void)
{
int nb, err = -ENODEV;
+ bool load_ok = false;
edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
opstate_init();
if (cache_k8_northbridges() < 0)
- return err;
+ goto err_ret;
msrs = msrs_alloc();
+ if (!msrs)
+ goto err_ret;
err = pci_register_driver(&amd64_pci_driver);
if (err)
- return err;
+ goto err_pci;
/*
* At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
* amd64_pvt structs. These will be used in the 2nd stage init function
* to finish initialization of the MC instances.
*/
+ err = -ENODEV;
for (nb = 0; nb < num_k8_northbridges; nb++) {
if (!pvt_lookup[nb])
continue;
@@ -3037,16 +3044,21 @@ static int __init amd64_edac_init(void)
err = amd64_init_2nd_stage(pvt_lookup[nb]);
if (err)
goto err_2nd_stage;
- }
- amd64_setup_pci_device();
+ load_ok = true;
+ }
- return 0;
+ if (load_ok) {
+ amd64_setup_pci_device();
+ return 0;
+ }
err_2nd_stage:
- debugf0("2nd stage failed\n");
pci_unregister_driver(&amd64_pci_driver);
-
+err_pci:
+ msrs_free(msrs);
+ msrs = NULL;
+err_ret:
return err;
}
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 422728c..fb60a87 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -534,8 +534,6 @@ static void edac_pci_dev_parity_clear(struct pci_dev *dev)
{
u8 header_type;
- debugf0("%s()\n", __func__);
-
get_pci_parity_status(dev, 0);
/* read the device TYPE, looking for bridges */
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 77a9579..adc10a2 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -577,7 +577,13 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
- channel = branch;
+
+ /*
+ * According to the i5000 datasheet, bit 28 has no significance
+ * for errors M4Err-M12Err and M17Err-M21Err on FERR_NF_FBD
+ */
+ channel = branch & 2;
+
bank = NREC_BANK(info->nrecmema);
rank = NREC_RANK(info->nrecmema);
rdwr = NREC_RDWR(info->nrecmema);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index cf27402..ecd5928 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
end <<= (24 - PAGE_SHIFT);
end |= (1 << (24 - PAGE_SHIFT)) - 1;
- csrow->first_page = start >> PAGE_SHIFT;
- csrow->last_page = end >> PAGE_SHIFT;
+ csrow->first_page = start;
+ csrow->last_page = end;
csrow->nr_pages = end + 1 - start;
csrow->grain = 8;
csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
mpc85xx_init_csrows(mci);
-#ifdef CONFIG_EDAC_DEBUG
- edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
-#endif
-
/* store the original error disable bits */
orig_ddr_err_disable =
in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 13efcd3..a9371b3 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,5 +1,10 @@
+menu "IEEE 1394 (FireWire) support"
+ depends on PCI || BROKEN
+ # firewire-core does not depend on PCI but is
+ # not useful without a PCI controller driver
+
comment "You can enable one or both FireWire driver stacks."
-comment "See the help texts for more information."
+comment "The newer stack is recommended."
config FIREWIRE
tristate "FireWire driver stack"
@@ -15,16 +20,6 @@ config FIREWIRE
To compile this driver as a module, say M here: the module will be
called firewire-core.
- This module functionally replaces ieee1394, raw1394, and video1394.
- To access it from application programs, you generally need at least
- libraw1394 v2. IIDC/DCAM applications need libdc1394 v2.
- No libraries are required to access storage devices through the
- firewire-sbp2 driver.
-
- NOTE:
- FireWire audio devices currently require the old drivers (ieee1394,
- ohci1394, raw1394).
-
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
depends on PCI && FIREWIRE
@@ -34,22 +29,7 @@ config FIREWIRE_OHCI
is the only chipset in use, so say Y here.
To compile this driver as a module, say M here: The module will be
- called firewire-ohci. It replaces ohci1394 of the classic IEEE 1394
- stack.
-
- NOTE:
- If you want to install firewire-ohci and ohci1394 together, you
- should configure them only as modules and blacklist the driver(s)
- which you don't want to have auto-loaded. Add either
-
- blacklist firewire-ohci
- or
- blacklist ohci1394
- blacklist video1394
- blacklist dv1394
-
- to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution.
+ called firewire-ohci.
config FIREWIRE_OHCI_DEBUG
bool
@@ -66,8 +46,7 @@ config FIREWIRE_SBP2
like scanners.
To compile this driver as a module, say M here: The module will be
- called firewire-sbp2. It replaces sbp2 of the classic IEEE 1394
- stack.
+ called firewire-sbp2.
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
@@ -83,5 +62,8 @@ config FIREWIRE_NET
NOTE, this driver is not stable yet!
To compile this driver as a module, say M here: The module will be
- called firewire-net. It replaces eth1394 of the classic IEEE 1394
- stack.
+ called firewire-net.
+
+source "drivers/ieee1394/Kconfig"
+
+endmenu
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 7083bcc..5045156 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -57,6 +57,8 @@ static LIST_HEAD(descriptor_list);
static int descriptor_count;
static __be32 tmp_config_rom[256];
+/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
+static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_CRC(v) ((v) << 0)
#define BIB_CRC_LENGTH(v) ((v) << 16)
@@ -73,7 +75,7 @@ static __be32 tmp_config_rom[256];
#define BIB_CMC ((1) << 30)
#define BIB_IMC ((1) << 31)
-static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom)
+static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
struct fw_descriptor *desc;
int i, j, k, length;
@@ -130,23 +132,30 @@ static size_t generate_config_rom(struct fw_card *card, __be32 *config_rom)
for (i = 0; i < j; i += length + 1)
length = fw_compute_block_crc(config_rom + i);
- return j;
+ WARN_ON(j != config_rom_length);
}
static void update_config_roms(void)
{
struct fw_card *card;
- size_t length;
list_for_each_entry (card, &card_list, link) {
- length = generate_config_rom(card, tmp_config_rom);
- card->driver->set_config_rom(card, tmp_config_rom, length);
+ generate_config_rom(card, tmp_config_rom);
+ card->driver->set_config_rom(card, tmp_config_rom,
+ config_rom_length);
}
}
+static size_t required_space(struct fw_descriptor *desc)
+{
+ /* descriptor + entry into root dir + optional immediate entry */
+ return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
+}
+
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;
+ int ret;
/*
* Check descriptor is valid; the length of all blocks in the
@@ -162,15 +171,21 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
mutex_lock(&card_mutex);
- list_add_tail(&desc->link, &descriptor_list);
- descriptor_count++;
- if (desc->immediate > 0)
+ if (config_rom_length + required_space(desc) > 256) {
+ ret = -EBUSY;
+ } else {
+ list_add_tail(&desc->link, &descriptor_list);
+ config_rom_length += required_space(desc);
descriptor_count++;
- update_config_roms();
+ if (desc->immediate > 0)
+ descriptor_count++;
+ update_config_roms();
+ ret = 0;
+ }
mutex_unlock(&card_mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(fw_core_add_descriptor);
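For illustration, a standalone sketch of the space accounting introduced above: each descriptor costs its block length plus one root-directory entry, plus one more entry if it carries an immediate value, and the whole config ROM must stay within 256 quadlets (the fixed part takes 7). The struct and values below are hypothetical.

    #include <stdio.h>

    struct desc_sketch {
    	unsigned length;	/* descriptor block length in quadlets */
    	unsigned immediate;	/* non-zero if an immediate root-dir entry is added */
    };

    static unsigned required_space(const struct desc_sketch *d)
    {
    	return d->length + 1 + (d->immediate > 0 ? 1 : 0);
    }

    int main(void)
    {
    	unsigned rom_length = 1 + 4 + 1 + 1;	/* header, bus info block, root dir, caps */
    	struct desc_sketch d = { .length = 10, .immediate = 0x0c };

    	if (rom_length + required_space(&d) > 256)
    		printf("descriptor rejected: -EBUSY\n");
    	else
    		printf("descriptor accepted, ROM grows to %u quadlets\n",
    		       rom_length + required_space(&d));
    	return 0;
    }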
@@ -179,6 +194,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
mutex_lock(&card_mutex);
list_del(&desc->link);
+ config_rom_length -= required_space(desc);
descriptor_count--;
if (desc->immediate > 0)
descriptor_count--;
@@ -428,7 +444,6 @@ EXPORT_SYMBOL(fw_card_initialize);
int fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid)
{
- size_t length;
int ret;
card->max_receive = max_receive;
@@ -437,8 +452,8 @@ int fw_card_add(struct fw_card *card,
mutex_lock(&card_mutex);
- length = generate_config_rom(card, tmp_config_rom);
- ret = card->driver->enable(card, tmp_config_rom, length);
+ generate_config_rom(card, tmp_config_rom);
+ ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
if (ret == 0)
list_add_tail(&card->link, &card_list);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 231e6ee..4eeaed5 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -35,6 +35,7 @@
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
@@ -595,14 +596,22 @@ static int ioctl_send_request(struct client *client, void *buffer)
client->device->max_speed);
}
+static inline bool is_fcp_request(struct fw_request *request)
+{
+ return request == NULL;
+}
+
static void release_request(struct client *client,
struct client_resource *resource)
{
struct inbound_transaction_resource *r = container_of(resource,
struct inbound_transaction_resource, resource);
- fw_send_response(client->device->card, r->request,
- RCODE_CONFLICT_ERROR);
+ if (is_fcp_request(r->request))
+ kfree(r->data);
+ else
+ fw_send_response(client->device->card, r->request,
+ RCODE_CONFLICT_ERROR);
kfree(r);
}
@@ -615,6 +624,7 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
struct address_handler_resource *handler = callback_data;
struct inbound_transaction_resource *r;
struct inbound_transaction_event *e;
+ void *fcp_frame = NULL;
int ret;
r = kmalloc(sizeof(*r), GFP_ATOMIC);
@@ -626,6 +636,18 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
r->data = payload;
r->length = length;
+ if (is_fcp_request(request)) {
+ /*
+ * FIXME: Let core-transaction.c manage a
+ * single reference-counted copy?
+ */
+ fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
+ if (fcp_frame == NULL)
+ goto failed;
+
+ r->data = fcp_frame;
+ }
+
r->resource.release = release_request;
ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
if (ret < 0)
@@ -639,13 +661,16 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
e->request.closure = handler->closure;
queue_event(handler->client, &e->event,
- &e->request, sizeof(e->request), payload, length);
+ &e->request, sizeof(e->request), r->data, length);
return;
failed:
kfree(r);
kfree(e);
- fw_send_response(card, request, RCODE_CONFLICT_ERROR);
+ kfree(fcp_frame);
+
+ if (!is_fcp_request(request))
+ fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}
static void release_address_handler(struct client *client,
@@ -715,14 +740,16 @@ static int ioctl_send_response(struct client *client, void *buffer)
r = container_of(resource, struct inbound_transaction_resource,
resource);
+ if (is_fcp_request(r->request))
+ goto out;
+
if (request->length < r->length)
r->length = request->length;
-
if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
ret = -EFAULT;
+ kfree(r->request);
goto out;
}
-
fw_send_response(client->device->card, r->request, request->rcode);
out:
kfree(r);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 842739d..495849e 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -432,14 +432,20 @@ static struct fw_address_handler *lookup_overlapping_address_handler(
return NULL;
}
+static bool is_enclosing_handler(struct fw_address_handler *handler,
+ unsigned long long offset, size_t length)
+{
+ return handler->offset <= offset &&
+ offset + length <= handler->offset + handler->length;
+}
+
static struct fw_address_handler *lookup_enclosing_address_handler(
struct list_head *list, unsigned long long offset, size_t length)
{
struct fw_address_handler *handler;
list_for_each_entry(handler, list, link) {
- if (handler->offset <= offset &&
- offset + length <= handler->offset + handler->length)
+ if (is_enclosing_handler(handler, offset, length))
return handler;
}
@@ -465,6 +471,12 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
+static bool is_in_fcp_region(u64 offset, size_t length)
+{
+ return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
+ offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
+}
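A standalone illustration of the region test above; the CSR offsets are the standard IEEE 1394 values as assumed here (FCP command frames at base+0xB00, the region ending at base+0xF00), so a 512-byte FCP frame passes while an ordinary CSR write just below the region does not.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define CSR_REGISTER_BASE	0xfffff0000000ULL	/* assumed standard value */
    #define CSR_FCP_COMMAND	0xB00
    #define CSR_FCP_END	0xF00

    static int is_in_fcp_region(uint64_t offset, size_t length)
    {
    	return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
    	       offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
    }

    int main(void)
    {
    	printf("%d\n", is_in_fcp_region(CSR_REGISTER_BASE | CSR_FCP_COMMAND, 0x200)); /* 1 */
    	printf("%d\n", is_in_fcp_region(CSR_REGISTER_BASE | 0xA00, 4));               /* 0 */
    	return 0;
    }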
+
/**
* fw_core_add_address_handler - register for incoming requests
* @handler: callback
@@ -477,8 +489,11 @@ const struct fw_address_region fw_unit_space_region =
* give the details of the particular request.
*
* Return value: 0 on success, non-zero otherwise.
+ *
* The start offset of the handler's address region is determined by
* fw_core_add_address_handler() and is returned in handler->offset.
+ *
+ * Address allocations are exclusive, except for the FCP registers.
*/
int fw_core_add_address_handler(struct fw_address_handler *handler,
const struct fw_address_region *region)
@@ -498,10 +513,12 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
handler->offset = region->start;
while (handler->offset + handler->length <= region->end) {
- other =
- lookup_overlapping_address_handler(&address_handler_list,
- handler->offset,
- handler->length);
+ if (is_in_fcp_region(handler->offset, handler->length))
+ other = NULL;
+ else
+ other = lookup_overlapping_address_handler
+ (&address_handler_list,
+ handler->offset, handler->length);
if (other != NULL) {
handler->offset += other->length;
} else {
@@ -668,6 +685,9 @@ static struct fw_request *allocate_request(struct fw_packet *p)
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode)
{
+ if (WARN_ONCE(!request, "invalid for FCP address handlers"))
+ return;
+
/* unified transaction or broadcast transaction: don't respond */
if (request->ack != ACK_PENDING ||
HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
@@ -686,26 +706,15 @@ void fw_send_response(struct fw_card *card,
}
EXPORT_SYMBOL(fw_send_response);
-void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+static void handle_exclusive_region_request(struct fw_card *card,
+ struct fw_packet *p,
+ struct fw_request *request,
+ unsigned long long offset)
{
struct fw_address_handler *handler;
- struct fw_request *request;
- unsigned long long offset;
unsigned long flags;
int tcode, destination, source;
- if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
- return;
-
- request = allocate_request(p);
- if (request == NULL) {
- /* FIXME: send statically allocated busy packet. */
- return;
- }
-
- offset =
- ((unsigned long long)
- HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
tcode = HEADER_GET_TCODE(p->header[0]);
destination = HEADER_GET_DESTINATION(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
@@ -732,6 +741,73 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
request->data, request->length,
handler->callback_data);
}
+
+static void handle_fcp_region_request(struct fw_card *card,
+ struct fw_packet *p,
+ struct fw_request *request,
+ unsigned long long offset)
+{
+ struct fw_address_handler *handler;
+ unsigned long flags;
+ int tcode, destination, source;
+
+ if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
+ offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
+ request->length > 0x200) {
+ fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+
+ return;
+ }
+
+ tcode = HEADER_GET_TCODE(p->header[0]);
+ destination = HEADER_GET_DESTINATION(p->header[0]);
+ source = HEADER_GET_SOURCE(p->header[1]);
+
+ if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
+ tcode != TCODE_WRITE_BLOCK_REQUEST) {
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+
+ return;
+ }
+
+ spin_lock_irqsave(&address_handler_lock, flags);
+ list_for_each_entry(handler, &address_handler_list, link) {
+ if (is_enclosing_handler(handler, offset, request->length))
+ handler->address_callback(card, NULL, tcode,
+ destination, source,
+ p->generation, p->speed,
+ offset, request->data,
+ request->length,
+ handler->callback_data);
+ }
+ spin_unlock_irqrestore(&address_handler_lock, flags);
+
+ fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+{
+ struct fw_request *request;
+ unsigned long long offset;
+
+ if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
+ return;
+
+ request = allocate_request(p);
+ if (request == NULL) {
+ /* FIXME: send statically allocated busy packet. */
+ return;
+ }
+
+ offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
+ p->header[2];
+
+ if (!is_in_fcp_region(offset, request->length))
+ handle_exclusive_region_request(card, p, request, offset);
+ else
+ handle_fcp_region_request(card, p, request, offset);
+
+}
EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index cbaf420..2d3dc7d 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
static struct kmem_cache *fwnet_packet_task_cache;
+static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
+{
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+}
+
static int fwnet_send_packet(struct fwnet_packet_task *ptask);
static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
{
- struct fwnet_device *dev;
+ struct fwnet_device *dev = ptask->dev;
unsigned long flags;
-
- dev = ptask->dev;
+ bool free;
spin_lock_irqsave(&dev->lock, flags);
- list_del(&ptask->pt_link);
- spin_unlock_irqrestore(&dev->lock, flags);
- ptask->outstanding_pkts--; /* FIXME access inside lock */
+ ptask->outstanding_pkts--;
+
+ /* Check whether we or the networking TX soft-IRQ is the last user. */
+ free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link));
+
+ if (ptask->outstanding_pkts == 0)
+ list_del(&ptask->pt_link);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
if (ptask->outstanding_pkts > 0) {
u16 dg_size;
@@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
}
fwnet_send_packet(ptask);
- } else {
- dev_kfree_skb_any(ptask->skb);
- kmem_cache_free(fwnet_packet_task_cache, ptask);
}
+
+ if (free)
+ fwnet_free_ptask(ptask);
}
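The ownership handshake above can be summarized outside the driver: both the submit path and the AT-completion path decide, under the same lock, whether the other side is already done, and exactly one of them frees the packet task. The sketch below uses a pthread mutex and a boolean in place of the driver's spinlock and list membership; all names are stand-ins.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct ptask_sketch {
    	pthread_mutex_t lock;
    	int outstanding_pkts;
    	bool on_list;		/* stands in for !list_empty(&ptask->pt_link) */
    };

    static void completion_path(struct ptask_sketch *p)	/* AT completion tasklet */
    {
    	bool free_it;

    	pthread_mutex_lock(&p->lock);
    	p->outstanding_pkts--;
    	free_it = (p->outstanding_pkts == 0 && p->on_list);
    	if (p->outstanding_pkts == 0)
    		p->on_list = false;			/* list_del() */
    	pthread_mutex_unlock(&p->lock);

    	if (free_it)
    		free(p);				/* fwnet_free_ptask() */
    }

    static void submit_path(struct ptask_sketch *p)		/* networking TX soft-IRQ */
    {
    	bool free_it;

    	pthread_mutex_lock(&p->lock);
    	free_it = (p->outstanding_pkts == 0 && !p->on_list);
    	if (!free_it)
    		p->on_list = true;			/* list_add_tail() */
    	pthread_mutex_unlock(&p->lock);

    	if (free_it)
    		free(p);
    }

    int main(void)
    {
    	struct ptask_sketch *p = calloc(1, sizeof(*p));

    	pthread_mutex_init(&p->lock, NULL);
    	p->outstanding_pkts = 1;
    	submit_path(p);		/* completion not done yet: stays on the list */
    	completion_path(p);	/* last user: frees p */
    	return 0;
    }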
static void fwnet_write_complete(struct fw_card *card, int rcode,
@@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
unsigned tx_len;
struct rfc2734_header *bufhdr;
unsigned long flags;
+ bool free;
dev = ptask->dev;
tx_len = ptask->max_payload;
@@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
generation, SCODE_100, 0ULL, ptask->skb->data,
tx_len + 8, fwnet_write_complete, ptask);
- /* FIXME race? */
spin_lock_irqsave(&dev->lock, flags);
- list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
+ /* If the AT tasklet already ran, we may be the last user. */
+ free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+ if (!free)
+ list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
+ goto out;
}
fw_send_request(dev->card, &ptask->transaction,
@@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
ptask->generation, ptask->speed, ptask->fifo_addr,
ptask->skb->data, tx_len, fwnet_write_complete, ptask);
- /* FIXME race? */
spin_lock_irqsave(&dev->lock, flags);
- list_add_tail(&ptask->pt_link, &dev->sent_list);
+
+ /* If the AT tasklet already ran, we may be the last user. */
+ free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+ if (!free)
+ list_add_tail(&ptask->pt_link, &dev->sent_list);
+
spin_unlock_irqrestore(&dev->lock, flags);
dev->netdev->trans_start = jiffies;
+ out:
+ if (free)
+ fwnet_free_ptask(ptask);
return 0;
}
@@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
spin_unlock_irqrestore(&dev->lock, flags);
ptask->max_payload = max_payload;
+ INIT_LIST_HEAD(&ptask->pt_link);
+
fwnet_send_packet(ptask);
return NETDEV_TX_OK;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 96768e1..43ebf33 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
u32 payload_index, payload_end_index, next_page_index;
int page, end_page, i, length, offset;
- /*
- * FIXME: Cycle lost behavior should be configurable: lose
- * packet, retransmit or terminate..
- */
-
p = packet;
payload_index = payload;
@@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
if (!p->skip) {
d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
d[0].req_count = cpu_to_le16(8);
+ /*
+ * Link the skip address to this descriptor itself. This causes
+ * a context to skip a cycle whenever lost cycles or FIFO
+ * overruns occur, without dropping the data. The application
+ * should then decide whether this is an error condition or not.
+ * FIXME: Make the context's cycle-lost behaviour configurable?
+ */
+ d[0].branch_address = cpu_to_le32(d_bus | z);
header = (__le32 *) &d[1];
header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
@@ -2226,7 +2229,6 @@ static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
if (rest == 0)
return -EINVAL;
- /* FIXME: make packet-per-buffer/dual-buffer a context option */
while (rest > 0) {
d = context_get_descriptors(&ctx->context,
z + header_z, &d_bus);
@@ -2421,6 +2423,7 @@ static void ohci_pmac_off(struct pci_dev *dev)
#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
+#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
@@ -2470,7 +2473,10 @@ static int __devinit pci_probe(struct pci_dev *dev,
}
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+#if 0
+ /* FIXME: make it a context option or remove dual-buffer mode */
ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
+#endif
/* dual-buffer mode is broken if more than one IR context is active */
if (dev->vendor == PCI_VENDOR_ID_AGERE &&
@@ -2486,7 +2492,8 @@ static int __devinit pci_probe(struct pci_dev *dev,
#if !defined(CONFIG_X86_32)
/* dual-buffer mode is broken with descriptor addresses above 2G */
if (dev->vendor == PCI_VENDOR_ID_TI &&
- dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
+ (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
+ dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
ohci->use_dualbuffer = false;
#endif
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index a019b49..1f1d88a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -172,6 +172,15 @@ config GPIO_ADP5520
To compile this driver as a module, choose M here: the module will
be called adp5520-gpio.
+config GPIO_ADP5588
+ tristate "ADP5588 I2C GPIO expander"
+ depends on I2C
+ help
+ This option enables support for 18 GPIOs found
+ on Analog Devices ADP5588 GPIO Expanders.
+ To compile this driver as a module, choose M here: the module will be
+ called adp5588-gpio.
+
comment "PCI GPIO expanders:"
config GPIO_CS5535
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 52fe4cf..4868723 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -5,6 +5,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
+obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
obj-$(CONFIG_GPIO_MAX7301) += max7301.o
obj-$(CONFIG_GPIO_MAX732X) += max732x.o
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
new file mode 100644
index 0000000..afc097a
--- /dev/null
+++ b/drivers/gpio/adp5588-gpio.c
@@ -0,0 +1,266 @@
+/*
+ * GPIO Chip driver for Analog Devices
+ * ADP5588 I/O Expander and QWERTY Keypad Controller
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+
+#include <linux/i2c/adp5588.h>
+
+#define DRV_NAME "adp5588-gpio"
+#define MAXGPIO 18
+#define ADP_BANK(offs) ((offs) >> 3)
+#define ADP_BIT(offs) (1u << ((offs) & 0x7))
+
+struct adp5588_gpio {
+ struct i2c_client *client;
+ struct gpio_chip gpio_chip;
+ struct mutex lock; /* protect cached dir, dat_out */
+ unsigned gpio_start;
+ uint8_t dat_out[3];
+ uint8_t dir[3];
+};
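A quick standalone illustration of the ADP_BANK/ADP_BIT mapping above: each GPIO offset selects one of three 8-bit register banks and a bit mask within it, which is why the dat_out[] and dir[] caches hold three bytes each.

    #include <stdio.h>

    #define MAXGPIO 18
    #define ADP_BANK(offs) ((offs) >> 3)
    #define ADP_BIT(offs)  (1u << ((offs) & 0x7))

    int main(void)
    {
    	unsigned off;

    	for (off = 0; off < MAXGPIO; off++)
    		printf("gpio %2u -> bank %u, bit 0x%02x\n",
    		       off, ADP_BANK(off), ADP_BIT(off));
    	return 0;
    }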
+
+static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
+{
+ int ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "Read Error\n");
+
+ return ret;
+}
+
+static int adp5588_gpio_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ int ret = i2c_smbus_write_byte_data(client, reg, val);
+
+ if (ret < 0)
+ dev_err(&client->dev, "Write Error\n");
+
+ return ret;
+}
+
+static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
+{
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+
+ return !!(adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + ADP_BANK(off))
+ & ADP_BIT(off));
+}
+
+static void adp5588_gpio_set_value(struct gpio_chip *chip,
+ unsigned off, int val)
+{
+ unsigned bank, bit;
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+
+ bank = ADP_BANK(off);
+ bit = ADP_BIT(off);
+
+ mutex_lock(&dev->lock);
+ if (val)
+ dev->dat_out[bank] |= bit;
+ else
+ dev->dat_out[bank] &= ~bit;
+
+ adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
+ dev->dat_out[bank]);
+ mutex_unlock(&dev->lock);
+}
+
+static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
+{
+ int ret;
+ unsigned bank;
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+
+ bank = ADP_BANK(off);
+
+ mutex_lock(&dev->lock);
+ dev->dir[bank] &= ~ADP_BIT(off);
+ ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static int adp5588_gpio_direction_output(struct gpio_chip *chip,
+ unsigned off, int val)
+{
+ int ret;
+ unsigned bank, bit;
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+
+ bank = ADP_BANK(off);
+ bit = ADP_BIT(off);
+
+ mutex_lock(&dev->lock);
+ dev->dir[bank] |= bit;
+
+ if (val)
+ dev->dat_out[bank] |= bit;
+ else
+ dev->dat_out[bank] &= ~bit;
+
+ ret = adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
+ dev->dat_out[bank]);
+ ret |= adp5588_gpio_write(dev->client, GPIO_DIR1 + bank,
+ dev->dir[bank]);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static int __devinit adp5588_gpio_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
+ struct adp5588_gpio *dev;
+ struct gpio_chip *gc;
+ int ret, i, revid;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
+ return -EIO;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&client->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ dev->client = client;
+
+ gc = &dev->gpio_chip;
+ gc->direction_input = adp5588_gpio_direction_input;
+ gc->direction_output = adp5588_gpio_direction_output;
+ gc->get = adp5588_gpio_get_value;
+ gc->set = adp5588_gpio_set_value;
+ gc->can_sleep = 1;
+
+ gc->base = pdata->gpio_start;
+ gc->ngpio = MAXGPIO;
+ gc->label = client->name;
+ gc->owner = THIS_MODULE;
+
+ mutex_init(&dev->lock);
+
+
+ ret = adp5588_gpio_read(dev->client, DEV_ID);
+ if (ret < 0)
+ goto err;
+
+ revid = ret & ADP5588_DEVICE_ID_MASK;
+
+ for (i = 0, ret = 0; i <= ADP_BANK(MAXGPIO); i++) {
+ dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i);
+ dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i);
+ ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0);
+ ret |= adp5588_gpio_write(client, GPIO_PULL1 + i,
+ (pdata->pullup_dis_mask >> (8 * i)) & 0xFF);
+
+ if (ret)
+ goto err;
+ }
+
+ ret = gpiochip_add(&dev->gpio_chip);
+ if (ret)
+ goto err;
+
+ dev_info(&client->dev, "gpios %d..%d on a %s Rev. %d\n",
+ gc->base, gc->base + gc->ngpio - 1,
+ client->name, revid);
+
+ if (pdata->setup) {
+ ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
+ if (ret < 0)
+ dev_warn(&client->dev, "setup failed, %d\n", ret);
+ }
+
+ i2c_set_clientdata(client, dev);
+ return 0;
+
+err:
+ kfree(dev);
+ return ret;
+}
+
+static int __devexit adp5588_gpio_remove(struct i2c_client *client)
+{
+ struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
+ struct adp5588_gpio *dev = i2c_get_clientdata(client);
+ int ret;
+
+ if (pdata->teardown) {
+ ret = pdata->teardown(client,
+ dev->gpio_chip.base, dev->gpio_chip.ngpio,
+ pdata->context);
+ if (ret < 0) {
+ dev_err(&client->dev, "teardown failed %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = gpiochip_remove(&dev->gpio_chip);
+ if (ret) {
+ dev_err(&client->dev, "gpiochip_remove failed %d\n", ret);
+ return ret;
+ }
+
+ kfree(dev);
+ return 0;
+}
+
+static const struct i2c_device_id adp5588_gpio_id[] = {
+ {DRV_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id);
+
+static struct i2c_driver adp5588_gpio_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = adp5588_gpio_probe,
+ .remove = __devexit_p(adp5588_gpio_remove),
+ .id_table = adp5588_gpio_id,
+};
+
+static int __init adp5588_gpio_init(void)
+{
+ return i2c_add_driver(&adp5588_gpio_driver);
+}
+
+module_init(adp5588_gpio_init);
+
+static void __exit adp5588_gpio_exit(void)
+{
+ i2c_del_driver(&adp5588_gpio_driver);
+}
+
+module_exit(adp5588_gpio_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("GPIO ADP5588 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a25ad28..350842a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -858,8 +858,6 @@ int gpio_sysfs_set_active_low(unsigned gpio, int value)
desc = &gpio_desc[gpio];
if (test_bit(FLAG_EXPORT, &desc->flags)) {
- struct device *dev;
-
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev == NULL) {
status = -ENODEV;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 96eddd1..305c590 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -66,6 +66,8 @@ config DRM_RADEON
If M is selected, the module will be called radeon.
+source "drivers/gpu/drm/radeon/Kconfig"
+
config DRM_I810
tristate "Intel I810"
depends on DRM && AGP && AGP_INTEL
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 470ef67..39c5aa7 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
+obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 628eae3..17be051 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -39,8 +39,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
- PAGE_SIZE,
- gart_info->table_mask);
+ PAGE_SIZE);
if (gart_info->table_handle == NULL)
return -ENOMEM;
@@ -112,6 +111,13 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+ if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+ DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+ (unsigned long long)gart_info->table_mask);
+ ret = 1;
+ goto done;
+ }
+
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3d09e30..8417cc4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+ dmah = drm_pci_alloc(dev, map->size, map->size);
if (!dmah) {
kfree(map);
return -ENOMEM;
@@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
while (entry->buf_count < count) {
- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
if (!dmah) {
/* Set count correctly so we free the proper amount. */
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5124401..d91fb8c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -158,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
};
static struct drm_prop_enum_list drm_encoder_enum_list[] =
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4231d6d..7d0f00a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_helper_crtc_in_use);
/**
- * drm_disable_unused_functions - disable unused objects
+ * drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
*
* LOCKING:
@@ -702,7 +702,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
- DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
+ DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
mode->name, mode->base.id);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
@@ -1032,7 +1032,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
/*
* we shouldn't end up with no modes here.
*/
- WARN(!count, "No connectors reported connected with modes\n");
+ if (count == 0)
+ printk(KERN_INFO "No connectors reported connected with modes\n");
drm_setup_crtcs(dev);
@@ -1162,6 +1163,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
int drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_crtc_helper_funcs *crtc_funcs;
int ret;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1174,6 +1178,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+ /* Turn off outputs that were already powered off */
+ if (drm_helper_choose_crtc_dpms(crtc)) {
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if(encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
}
/* disable the unused connectors while restoring the modesetting */
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ff2f104..766c468 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -434,11 +434,11 @@ static int drm_version(struct drm_device *dev, void *data,
* Looks up the ioctl function in the ::ioctls table, checking for root
* previleges if so required, and dispatches to the respective function.
*/
-int drm_ioctl(struct inode *inode, struct file *filp,
+long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
+ struct drm_device *dev;
struct drm_ioctl_desc *ioctl;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -446,6 +446,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
char stack_kdata[128];
char *kdata = NULL;
+ dev = file_priv->minor->dev;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
@@ -501,7 +502,13 @@ int drm_ioctl(struct inode *inode, struct file *filp,
goto err_i1;
}
}
- retcode = func(dev, kdata, file_priv);
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
+ else {
+ lock_kernel();
+ retcode = func(dev, kdata, file_priv);
+ unlock_kernel();
+ }
if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c39b26f..ab6c973 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
return mode;
}
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+ struct detailed_pixel_timing *pt)
+{
+ int i;
+ static const struct {
+ int w, h;
+ } cea_interlaced[] = {
+ { 1920, 1080 },
+ { 720, 480 },
+ { 1440, 480 },
+ { 2880, 480 },
+ { 720, 576 },
+ { 1440, 576 },
+ { 2880, 576 },
+ };
+ static const int n_sizes =
+ sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+ if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+ return;
+
+ for (i = 0; i < n_sizes; i++) {
+ if ((mode->hdisplay == cea_interlaced[i].w) &&
+ (mode->vdisplay == cea_interlaced[i].h / 2)) {
+ mode->vdisplay *= 2;
+ mode->vsync_start *= 2;
+ mode->vsync_end *= 2;
+ mode->vtotal *= 2;
+ mode->vtotal |= 1;
+ }
+ }
+
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
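A worked example of the quirk above, with illustrative sync numbers rather than values from a real EDID: a CEA 1080i detailed timing encoded with field height (vdisplay 540) is restored to frame height, and vtotal is made odd as interlaced modes require.

    #include <stdio.h>

    int main(void)
    {
    	int vdisplay = 540, vsync_start = 542, vsync_end = 547, vtotal = 562;

    	vdisplay *= 2;		/* 1080 */
    	vsync_start *= 2;
    	vsync_end *= 2;
    	vtotal *= 2;
    	vtotal |= 1;		/* 1125 total lines */

    	printf("1920x%di, vsync %d-%d, vtotal %d\n",
    	       vdisplay, vsync_start, vsync_end, vtotal);
    	return 0;
    }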
+
/**
* drm_mode_detailed - create a new mode from an EDID detailed timing section
* @dev: DRM device (needed to create new mode)
@@ -633,8 +677,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return NULL;
}
if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- printk(KERN_WARNING "integrated sync not supported\n");
- return NULL;
+ printk(KERN_WARNING "composite sync not supported\n");
}
/* it is incorrect if hsync/vsync width is zero */
@@ -681,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_set_name(mode);
- if (pt->misc & DRM_EDID_PT_INTERLACED)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ drm_mode_do_interlace_quirk(mode, pt);
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
@@ -911,23 +953,27 @@ static int drm_cvt_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct cvt_timing *cvt;
const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
for (i = 0; i < 4; i++) {
- int width, height;
+ int uninitialized_var(width), height;
cvt = &(timing->data.other_data.data.cvt[i]);
- height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
- switch (cvt->code[1] & 0xc0) {
+ if (!memcmp(cvt->code, empty, 3))
+ continue;
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
case 0x00:
width = height * 4 / 3;
break;
- case 0x40:
+ case 0x04:
width = height * 16 / 9;
break;
- case 0x80:
+ case 0x08:
width = height * 16 / 10;
break;
- case 0xc0:
+ case 0x0c:
width = height * 15 / 9;
break;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1b49fa0..0f9e9055 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -156,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
force = DRM_FORCE_ON;
break;
case 'D':
- if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
(connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
force = DRM_FORCE_ON;
else
@@ -389,7 +389,7 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
+ drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
@@ -606,11 +606,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
/* Need to resize the fb object !!! */
- if (var->xres > fb->width || var->yres > fb->height) {
- DRM_ERROR("Requested width/height is greater than current fb "
- "object %dx%d > %dx%d\n", var->xres, var->yres,
- fb->width, fb->height);
- DRM_ERROR("Need resizing code.\n");
+ if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+ fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb48..8bf3770 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
if (IS_ERR(obj->filp))
goto free;
- /* Basically we want to disable the OOM killer and handle ENOMEM
- * ourselves by sacrificing pages from cached buffers.
- * XXX shmem_file_[gs]et_gfp_mask()
- */
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_FS |
- __GFP_RECLAIMABLE |
- __GFP_NORETRY |
- __GFP_NOWARN |
- __GFP_NOMEMALLOC);
-
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 282d9fd..d61d185 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -104,7 +104,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
&version->desc))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
+ err = drm_ioctl(file,
DRM_IOCTL_VERSION, (unsigned long)version);
if (err)
return err;
@@ -145,8 +145,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
&u->unique))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+ err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
if (err)
return err;
@@ -174,8 +173,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
&u->unique))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+ return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
}
typedef struct drm_map32 {
@@ -205,8 +203,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
if (__put_user(idx, &map->offset))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_MAP, (unsigned long)map);
+ err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
if (err)
return err;
@@ -246,8 +243,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
|| __put_user(m32.flags, &map->flags))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_ADD_MAP, (unsigned long)map);
+ err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
if (err)
return err;
@@ -284,8 +280,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
if (__put_user((void *)(unsigned long)handle, &map->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RM_MAP, (unsigned long)map);
+ return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
}
typedef struct drm_client32 {
@@ -314,8 +309,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (__put_user(idx, &client->idx))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+ err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
if (err)
return err;
@@ -351,8 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_STATS, (unsigned long)stats);
+ err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
if (err)
return err;
@@ -395,8 +388,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
|| __put_user(agp_start, &buf->agp_start))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+ err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
if (err)
return err;
@@ -427,8 +419,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
|| __put_user(b32.high_mark, &buf->high_mark))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+ return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
}
typedef struct drm_buf_info32 {
@@ -469,8 +460,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
|| __put_user(list, &request->list))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
if (err)
return err;
@@ -531,8 +521,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
|| __put_user(list, &request->list))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
if (err)
return err;
@@ -578,8 +567,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
&request->list))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
}
typedef struct drm_ctx_priv_map32 {
@@ -605,8 +593,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
&request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
}
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
@@ -628,8 +615,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
if (__put_user(ctx_id, &request->ctx_id))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
if (err)
return err;
@@ -664,8 +650,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
&res->contexts))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RES_CTX, (unsigned long)res);
+ err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
if (err)
return err;
@@ -718,8 +703,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
&d->request_sizes))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_DMA, (unsigned long)d);
+ err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
if (err)
return err;
@@ -751,8 +735,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
if (put_user(m32.mode, &mode->mode))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+ return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
}
typedef struct drm_agp_info32 {
@@ -781,8 +764,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_INFO, (unsigned long)info);
+ err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
if (err)
return err;
@@ -827,16 +809,14 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
|| __put_user(req32.type, &request->type))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
if (err)
return err;
if (__get_user(req32.handle, &request->handle)
|| __get_user(req32.physical, &request->physical)
|| copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
return -EFAULT;
}
@@ -856,8 +836,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
|| __put_user(handle, &request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
}
typedef struct drm_agp_binding32 {
@@ -881,8 +860,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
|| __put_user(req32.offset, &request->offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_BIND, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
}
static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
@@ -898,8 +876,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
|| __put_user(handle, &request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
}
#endif /* __OS_HAS_AGP */
@@ -923,8 +900,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
|| __put_user(x, &request->size))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
if (err)
return err;
@@ -950,8 +926,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
|| __put_user(x << PAGE_SHIFT, &request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SG_FREE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
}
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
@@ -981,8 +956,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
__put_user(update32.data, &request->data))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
return err;
}
#endif
@@ -1023,8 +997,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
|| __put_user(req32.request.signal, &request->request.signal))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
if (err)
return err;
@@ -1094,16 +1067,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* than always failing.
*/
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
- return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ return drm_ioctl(filp, cmd, arg);
fn = drm_compat_ioctls[nr];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 7998ee6..b98384d 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -115,6 +115,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
dev->num_crtcs = 0;
}
+EXPORT_SYMBOL(drm_vblank_cleanup);
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
@@ -163,7 +164,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
}
dev->vblank_disable_allowed = 0;
-
return 0;
err:
@@ -493,6 +493,9 @@ EXPORT_SYMBOL(drm_vblank_off);
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
+ /* vblank is not initialized (IRQ not installed ?) */
+ if (!dev->num_crtcs)
+ return;
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index d7d7eac..2ac074c8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -358,7 +358,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
- if (size < best_size) {
+ if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
@@ -405,10 +405,11 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
wasted += alignment - tmp;
}
- if (entry->size >= size + wasted) {
+ if (entry->size >= size + wasted &&
+ (entry->start + wasted + size) <= end) {
if (!best_match)
return entry;
- if (size < best_size) {
+ if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6d81a02..76d6339 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1,9 +1,4 @@
/*
- * The list_sort function is (presumably) licensed under the GPL (see the
- * top level "COPYING" file for details).
- *
- * The remainder of this file is:
- *
* Copyright © 1997-2003 by The XFree86 Project, Inc.
* Copyright © 2007 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
@@ -36,6 +31,7 @@
*/
#include <linux/list.h>
+#include <linux/list_sort.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
@@ -855,6 +851,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
/**
* drm_mode_compare - compare modes for favorability
+ * @priv: unused
* @lh_a: list_head for first mode
* @lh_b: list_head for second mode
*
@@ -868,7 +865,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
* Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
* positive if @lh_b is better than @lh_a.
*/
-static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
{
struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
@@ -885,85 +882,6 @@ static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
return diff;
}
-/* FIXME: what we don't have a list sort function? */
-/* list sort from Mark J Roberts (mjr@znex.org) */
-void list_sort(struct list_head *head,
- int (*cmp)(struct list_head *a, struct list_head *b))
-{
- struct list_head *p, *q, *e, *list, *tail, *oldhead;
- int insize, nmerges, psize, qsize, i;
-
- list = head->next;
- list_del(head);
- insize = 1;
- for (;;) {
- p = oldhead = list;
- list = tail = NULL;
- nmerges = 0;
-
- while (p) {
- nmerges++;
- q = p;
- psize = 0;
- for (i = 0; i < insize; i++) {
- psize++;
- q = q->next == oldhead ? NULL : q->next;
- if (!q)
- break;
- }
-
- qsize = insize;
- while (psize > 0 || (qsize > 0 && q)) {
- if (!psize) {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- } else if (!qsize || !q) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else if (cmp(p, q) <= 0) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- }
- if (tail)
- tail->next = e;
- else
- list = e;
- e->prev = tail;
- tail = e;
- }
- p = q;
- }
-
- tail->next = list;
- list->prev = tail;
-
- if (nmerges <= 1)
- break;
-
- insize *= 2;
- }
-
- head->next = list;
- head->prev = list->prev;
- list->prev->next = head;
- list->prev = head;
-}
-
/**
* drm_mode_sort - sort mode list
* @mode_list: list to sort
@@ -975,7 +893,7 @@ void list_sort(struct list_head *head,
*/
void drm_mode_sort(struct list_head *mode_list)
{
- list_sort(mode_list, drm_mode_compare);
+ list_sort(NULL, mode_list, drm_mode_compare);
}
EXPORT_SYMBOL(drm_mode_sort);
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 577094f..e68ebf9 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -47,8 +47,7 @@
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
- dma_addr_t maxaddr)
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
#if 1
@@ -63,11 +62,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
if (align > size)
return NULL;
- if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
- DRM_ERROR("Setting pci dma mask failed\n");
- return NULL;
- }
-
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah)
return NULL;
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 9422a74..81681a0 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -408,6 +408,11 @@ static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *i
ch7006_info(client, "Detected version ID: %x\n", val);
+ /* I don't know what this is for, but otherwise I get no
+ * signal.
+ */
+ ch7006_write(client, 0x3d, 0x0);
+
return 0;
fail:
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index 87f5445..e447dfb 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -427,11 +427,6 @@ void ch7006_state_load(struct i2c_client *client,
ch7006_load_reg(client, state, CH7006_SUBC_INC7);
ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
-
- /* I don't know what this is for, but otherwise I get no
- * signal.
- */
- ch7006_write(client, 0x3d, 0x0);
}
void ch7006_state_save(struct i2c_client *client,
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 7d1d88c..de32d22 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -115,7 +115,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
};
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index fabb9a8..c1e0275 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -59,7 +59,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 877bf6c..06bd732 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -117,7 +117,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i830_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i830_mmap_buffers,
.fasync = drm_fasync,
};
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 389597e..44f990b 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -70,7 +70,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf..a894ade 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
mem = kmap_atomic(pages[page], KM_USER0);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- kunmap_atomic(pages[page], KM_USER0);
+ kunmap_atomic(mem, KM_USER0);
}
}
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
obj = obj_priv->obj;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
DRM_ERROR("Failed to get pages: %d\n", ret);
spin_unlock(&dev_priv->mm.active_list_lock);
@@ -386,34 +386,6 @@ out:
return 0;
}
-static int i915_registers_info(struct seq_file *m, void *data) {
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t reg;
-
-#define DUMP_RANGE(start, end) \
- for (reg=start; reg < end; reg += 4) \
- seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-
- DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
- DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
- DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
- DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
- DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
- DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
- DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
- DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
- DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
- DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
- DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
- DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
- DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
- DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
-
- return 0;
-}
-
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
- {"i915_regs", i915_registers_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 701bfea..2307f98 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
@@ -134,6 +134,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ if (IS_I965G(dev))
+ dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
+ 0xf0;
+
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
@@ -731,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect), GFP_KERNEL);
- if (cliprects == NULL)
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto fail_batch_free;
+ }
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
@@ -813,9 +819,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PAGEFLIPPING:
value = 1;
break;
+ case I915_PARAM_HAS_EXECBUF2:
+ /* depends on GEM */
+ value = dev_priv->has_gem;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
- param->param);
+ param->param);
return -EINVAL;
}
@@ -1117,7 +1127,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *compressed_llb;
- unsigned long cfb_base, ll_base;
+ unsigned long cfb_base;
+ unsigned long ll_base = 0;
/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1211,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
0xff000000;
- if (IS_MOBILE(dev) || IS_I9XX(dev))
- dev_priv->cursor_needs_physical = true;
- else
- dev_priv->cursor_needs_physical = false;
-
- if (IS_I965G(dev) || IS_G33(dev))
- dev_priv->cursor_needs_physical = false;
-
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1260,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto destroy_ringbuffer;
+ intel_modeset_init(dev);
+
ret = drm_irq_install(dev);
if (ret)
goto destroy_ringbuffer;
@@ -1271,8 +1276,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- intel_modeset_init(dev);
-
drm_helper_initial_config(dev);
return 0;
@@ -1360,7 +1363,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
- int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
/* i915 has 4 more counters */
@@ -1376,8 +1379,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
+ dev_priv->info = (struct intel_device_info *) flags;
/* Add register map (needed for suspend/resume) */
+ mmio_bar = IS_I9XX(dev) ? 0 : 1;
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);
@@ -1652,6 +1657,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2fa2178..cf4cb3e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#include "drm_pciids.h"
#include <linux/console.h>
#include "drm_crtc_helper.h"
@@ -46,36 +45,149 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0400);
+unsigned int i915_lvds_downclock = 0;
+module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+
static struct drm_driver driver;
-static struct pci_device_id pciidlist[] = {
- i915_PCI_IDS
+#define INTEL_VGA_DEVICE(id, info) { \
+ .class = PCI_CLASS_DISPLAY_VGA << 8, \
+ .class_mask = 0xffff00, \
+ .vendor = 0x8086, \
+ .device = id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+const static struct intel_device_info intel_i830_info = {
+ .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_845g_info = {
+ .is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i85x_info = {
+ .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i865g_info = {
+ .is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i915g_info = {
+ .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i915gm_info = {
+ .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945g_info = {
+ .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945gm_info = {
+ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i965g_info = {
+ .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_i965gm_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g33_info = {
+ .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g45_info = {
+ .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_gm45_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_pineview_info = {
+ .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_d_info = {
+ .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_m_info = {
+ .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
+};
+
+const static struct pci_device_id pciidlist[] = {
+ INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+ INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+ INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+ {0, 0, 0}
};
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev || !dev_priv) {
- DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
- DRM_ERROR("DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
- if (state.event == PM_EVENT_PRETHAW)
- return 0;
-
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (i915_gem_idle(dev))
+ int error = i915_gem_idle(dev);
+ if (error) {
dev_err(&dev->pdev->dev,
- "GEM idle failed, resume may fail\n");
+ "GEM idle failed, resume might fail\n");
+ return error;
+ }
drm_irq_uninstall(dev);
}
@@ -83,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
intel_opregion_free(dev, 1);
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
+
+ return 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+ int error;
+
+ if (!dev || !dev->dev_private) {
+ DRM_ERROR("dev: %p\n", dev);
+ DRM_ERROR("DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (state.event == PM_EVENT_PRETHAW)
+ return 0;
+
+ error = i915_drm_freeze(dev);
+ if (error)
+ return error;
+
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
- /* Modeset on resume, not lid events */
- dev_priv->modeset_on_lid = 0;
-
return 0;
}
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
-
- if (pci_enable_device(dev->pdev))
- return -1;
- pci_set_master(dev->pdev);
+ int error = 0;
i915_restore_state(dev);
@@ -113,21 +241,28 @@ static int i915_resume(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- ret = i915_gem_init_ringbuffer(dev);
- if (ret != 0)
- ret = -1;
+ error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
drm_irq_install(dev);
- }
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
}
dev_priv->modeset_on_lid = 0;
- return ret;
+ return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ pci_set_master(dev->pdev);
+
+ return i915_drm_thaw(dev);
}
/**
@@ -268,22 +403,73 @@ i915_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int error;
- return i915_suspend(dev, state);
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ error = i915_drm_freeze(drm_dev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
}
-static int
-i915_pci_resume(struct pci_dev *pdev)
+static int i915_pm_resume(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return i915_resume(dev);
+ return i915_resume(drm_dev);
}
+static int i915_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ return i915_drm_freeze(drm_dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_thaw(drm_dev);
+}
+
+static int i915_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_freeze(drm_dev);
+}
+
+const struct dev_pm_ops i915_pm_ops = {
+ .suspend = i915_pm_suspend,
+ .resume = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_poweroff,
+ .restore = i915_pm_resume,
+};
+
static struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
@@ -303,8 +489,11 @@ static struct drm_driver driver = {
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
+
+ /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
.suspend = i915_suspend,
.resume = i915_resume,
+
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
@@ -329,7 +518,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
@@ -344,10 +533,7 @@ static struct drm_driver driver = {
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
-#ifdef CONFIG_PM
- .resume = i915_pci_resume,
- .suspend = i915_pci_suspend,
-#endif
+ .driver.pm = &i915_pm_ops,
},
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbecac7..b99b6a8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {
struct intel_overlay;
+struct intel_device_info {
+ u8 is_mobile : 1;
+ u8 is_i8xx : 1;
+ u8 is_i915g : 1;
+ u8 is_i9xx : 1;
+ u8 is_i945gm : 1;
+ u8 is_i965g : 1;
+ u8 is_i965gm : 1;
+ u8 is_g33 : 1;
+ u8 need_gfx_hws : 1;
+ u8 is_g4x : 1;
+ u8 is_pineview : 1;
+ u8 is_ironlake : 1;
+ u8 has_fbc : 1;
+ u8 has_rc6 : 1;
+ u8 has_pipe_cxsr : 1;
+ u8 has_hotplug : 1;
+ u8 cursor_needs_physical : 1;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
+ const struct intel_device_info *info;
+
int has_gem;
void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
int hangcheck_count;
uint32_t last_acthd;
- bool cursor_needs_physical;
-
struct drm_mm vram;
unsigned long cfb_size;
@@ -263,6 +283,7 @@ typedef struct drm_i915_private {
unsigned int lvds_use_ssc:1;
unsigned int edp_support:1;
int lvds_ssc_freq;
+ int edp_bpp;
struct notifier_block lid_notifier;
@@ -287,8 +308,6 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveRENDERSTANDBY;
- u32 savePWRCTXA;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
@@ -474,6 +493,15 @@ typedef struct drm_i915_private {
struct list_head flushing_list;
/**
+ * List of objects currently pending a GPU write flush.
+ *
+ * All elements on this list will belong to either the
+ * active_list or flushing_list, last_rendering_seqno can
+ * be used to differentiate between the two elements.
+ */
+ struct list_head gpu_write_list;
+
+ /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -561,6 +589,7 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
+ struct drm_connector *int_lvds_connector;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -572,6 +601,8 @@ struct drm_i915_gem_object {
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
+ /** This object's place on GPU write list */
+ struct list_head gpu_write_list;
/** This object's place on the fenced object LRU */
struct list_head fence_list;
@@ -703,6 +734,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
+extern unsigned int i915_lvds_downclock;
extern void i915_save_display(struct drm_device *dev);
extern void i915_restore_display(struct drm_device *dev);
@@ -794,6 +826,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -843,12 +877,13 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptib
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj, int id);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
@@ -860,6 +895,9 @@ void i915_gem_shrinker_exit(void);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
+ int tiling_mode);
+bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1020,33 @@ extern void g4x_disable_fbc(struct drm_device *dev);
extern int i915_wrap_ring(struct drm_device * dev);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
-
-#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
- (dev)->pci_device == 0x27AE)
-#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
- (dev)->pci_device == 0x0042 || \
- (dev)->pci_device == 0x0046)
-
-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12)
-
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-
-#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
- IS_GM45(dev))
-
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
-
-#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
- (dev)->pci_device == 0x29B2 || \
- (dev)->pci_device == 0x29D2 || \
- (IS_PINEVIEW(dev)))
-
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
+#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
-
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
- IS_IRONLAKE(dev))
+#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
+#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
- IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
- IS_IRONLAKE(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1054,17 +1058,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
- (IS_I9XX(dev) || IS_GM45(dev)) && \
- !IS_PINEVIEW(dev) && \
- !IS_IRONLAKE(dev))
-#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf..ec8a0d7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
goto fail_unlock;
@@ -321,40 +321,24 @@ fail_unlock:
return ret;
}
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
- return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
- mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
int ret;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
/* If we've insufficient memory to map in the pages, attempt
* to make some space by throwing out some old buffers.
*/
if (ret == -ENOMEM) {
struct drm_device *dev = obj->dev;
- gfp_t gfp;
ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
- gfp = i915_gem_object_get_page_gfp_mask(obj);
- i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
- ret = i915_gem_object_get_pages(obj);
- i915_gem_object_set_page_gfp_mask (obj, gfp);
+ ret = i915_gem_object_get_pages(obj, 0);
}
return ret;
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret != 0)
goto fail_unlock;
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
+ &dev_priv->mm.gpu_write_list,
+ gpu_write_list) {
struct drm_gem_object *obj = obj_priv->obj;
if ((obj->write_domain & flush_domains) ==
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -2021,9 +2009,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
/* blow away mappings if mapped through GTT */
i915_gem_release_mmap(obj);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
@@ -2039,6 +2024,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
BUG_ON(obj_priv->active);
+ /* release the fence reg _after_ flushing */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2099,8 +2088,8 @@ static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno;
int ret;
+ uint32_t seqno;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
@@ -2122,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret)
return ret;
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
ret = i915_gem_evict_from_inactive_list(dev);
if (ret)
return ret;
@@ -2229,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
}
int
-i915_gem_object_get_pages(struct drm_gem_object *obj)
+i915_gem_object_get_pages(struct drm_gem_object *obj,
+ gfp_t gfpmask)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page_count, i;
@@ -2255,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
inode = obj->filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
for (i = 0; i < page_count; i++) {
- page = read_mapping_page(mapping, i, NULL);
+ page = read_cache_page_gfp(mapping, i,
+ mapping_gfp_mask (mapping) |
+ __GFP_COLD |
+ gfpmask);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
i915_gem_object_put_pages(obj);
@@ -2578,12 +2573,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_mm_node *free_space;
- bool retry_alloc = false;
+ gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
int ret;
- if (dev_priv->mm.suspended)
- return -EBUSY;
-
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
@@ -2625,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
- if (retry_alloc) {
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
- }
- ret = i915_gem_object_get_pages(obj);
- if (retry_alloc) {
- i915_gem_object_set_page_gfp_mask (obj,
- i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
- }
+ ret = i915_gem_object_get_pages(obj, gfpmask);
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
@@ -2643,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
ret = i915_gem_evict_something(dev, obj->size);
if (ret) {
/* now try to shrink everyone else */
- if (! retry_alloc) {
- retry_alloc = true;
- goto search_free;
+ if (gfpmask) {
+ gfpmask = 0;
+ goto search_free;
}
return ret;
@@ -2723,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
- obj->write_domain = 0;
+ BUG_ON(obj->write_domain);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -2839,6 +2823,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
return 0;
}
+/*
+ * Prepare buffer for display plane. Use uninterruptible for possible flush
+ * wait, as in modesetting process we're not supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+
+ /* Wait on any GPU rendering and flushing to occur. */
+ if (obj_priv->active) {
+#if WATCH_BUF
+ DRM_INFO("%s: object %p wait for seqno %08x\n",
+ __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+ ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+ if (ret != 0)
+ return ret;
+ }
+
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
+ obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj_priv->dirty = 1;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
+ return 0;
+}
+
/**
* Moves a single object to the CPU read, and possibly write domain.
*
@@ -3198,7 +3233,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry,
+ struct drm_i915_gem_exec_object2 *entry,
struct drm_i915_gem_relocation_entry *relocs)
{
struct drm_device *dev = obj->dev;
@@ -3206,12 +3241,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
void __iomem *reloc_page;
+ bool need_fence;
+
+ need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj_priv->tiling_mode != I915_TILING_NONE;
+
+ /* Check fence reg constraints and rebind if necessary */
+ if (need_fence && !i915_obj_fenceable(dev, obj))
+ i915_gem_object_unbind(obj);
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
if (ret)
return ret;
+ /*
+ * Pre-965 chips need a fence register set up in order to
+ * properly handle blits to/from tiled surfaces.
+ */
+ if (need_fence) {
+ ret = i915_gem_object_get_fence_reg(obj);
+ if (ret != 0) {
+ if (ret != -EBUSY && ret != -ERESTARTSYS)
+ DRM_ERROR("Failure to install fence: %d\n",
+ ret);
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+ }
+
entry->offset = obj_priv->gtt_offset;
/* Apply the relocations, using the GTT aperture to avoid cache
@@ -3373,7 +3431,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
*/
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer *exec,
+ struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset)
{
@@ -3463,7 +3521,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
}
static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry **relocs)
{
@@ -3478,8 +3536,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
- if (*relocs == NULL)
+ if (*relocs == NULL) {
+ DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
return -ENOMEM;
+ }
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,13 +3563,16 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry *relocs)
{
uint32_t reloc_count = 0, i;
int ret = 0;
+ if (relocs == NULL)
+ return 0;
+
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
int unwritten;
@@ -3536,7 +3599,7 @@ err:
}
static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
uint64_t exec_offset)
{
uint32_t exec_start, exec_len;
@@ -3589,18 +3652,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
}
int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec_list)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_relocation_entry *relocs;
- int ret, ret2, i, pinned = 0;
+ struct drm_i915_gem_relocation_entry *relocs = NULL;
+ int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
int pin_tries, flips;
@@ -3614,31 +3677,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (exec_list == NULL || object_list == NULL) {
- DRM_ERROR("Failed to allocate exec or object list "
- "for %d buffers\n",
+ if (object_list == NULL) {
+ DRM_ERROR("Failed to allocate object list for %d buffers\n",
args->buffer_count);
ret = -ENOMEM;
goto pre_mutex_err;
}
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- goto pre_mutex_err;
- }
if (args->num_cliprects != 0) {
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
GFP_KERNEL);
- if (cliprects == NULL)
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto pre_mutex_err;
+ }
ret = copy_from_user(cliprects,
(struct drm_clip_rect __user *)
@@ -3680,6 +3733,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec_list[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3688,6 +3743,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
+ /* prevent error path from reading uninitialized data */
+ args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3801,16 +3858,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
(void)i915_add_request(dev, file_priv,
dev->flush_domains);
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ if (obj->write_domain)
+ list_move_tail(&obj_priv->gpu_write_list,
+ &dev_priv->mm.gpu_write_list);
+ else
+ list_del_init(&obj_priv->gpu_write_list);
+
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
@@ -3884,8 +3948,101 @@ err:
mutex_unlock(&dev->struct_mutex);
+pre_mutex_err:
+ /* Copy the updated relocations out regardless of current error
+ * state. Failure to update the relocs would mean that the next
+ * time userland calls execbuf, it would do so with presumed offset
+ * state that didn't match the actual object state.
+ */
+ ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+ relocs);
+ if (ret2 != 0) {
+ DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+
+ if (ret == 0)
+ ret = ret2;
+ }
+
+ drm_free_large(object_list);
+ kfree(cliprects);
+
+ return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (!IS_I965G(dev))
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = 0;
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
exec_list,
@@ -3898,25 +4055,62 @@ err:
}
}
- /* Copy the updated relocations out regardless of current error
- * state. Failure to update the relocs would mean that the next
- * time userland calls execbuf, it would do so with presumed offset
- * state that didn't match the actual object state.
- */
- ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
- relocs);
- if (ret2 != 0) {
- DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
- if (ret == 0)
- ret = ret2;
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
}
-pre_mutex_err:
- drm_free_large(object_list);
- drm_free_large(exec_list);
- kfree(cliprects);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+ drm_free_large(exec2_list);
return ret;
}
@@ -3933,19 +4127,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (ret)
return ret;
}
- /*
- * Pre-965 chips need a fence register set up in order to
- * properly handle tiled surfaces.
- */
- if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to install fence: %d\n",
- ret);
- return ret;
- }
- }
+
obj_priv->pin_count++;
/* If the object is not active and not pending a flush,
@@ -4203,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
+ INIT_LIST_HEAD(&obj_priv->gpu_write_list);
INIT_LIST_HEAD(&obj_priv->fence_list);
obj_priv->madv = I915_MADV_WILLNEED;
@@ -4654,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4708,7 +4892,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+ phys_obj->handle = drm_pci_alloc(dev, size, 0);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
@@ -4766,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
if (!obj_priv->phys_obj)
return;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret)
goto out;
@@ -4824,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj_priv->phys_obj->cur_obj = obj;
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
DRM_ERROR("failed to get page list\n");
goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 30d6af6c..df278b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
/**
- * Returns the size of the fence for a tiled object of the given size.
+ * Returns whether an object is currently fenceable. If not, it may need
+ * to be unbound and have its pitch adjusted.
*/
-static int
-i915_get_fence_size(struct drm_device *dev, int size)
+bool
+i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
{
- int i;
- int start;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
if (IS_I965G(dev)) {
/* The 965 can have fences at any page boundary. */
- return ALIGN(size, 4096);
+ if (obj->size & 4095)
+ return false;
+ return true;
+ } else if (IS_I9XX(dev)) {
+ if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ return false;
} else {
- /* Align the size to a power of two greater than the smallest
- * fence size.
- */
- if (IS_I9XX(dev))
- start = 1024 * 1024;
- else
- start = 512 * 1024;
+ if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ return false;
+ }
- for (i = start; i < size; i <<= 1)
- ;
+ /* Power of two sized... */
+ if (obj->size & (obj->size - 1))
+ return false;
- return i;
- }
+ /* Objects must be size aligned as well */
+ if (obj_priv->gtt_offset & (obj->size - 1))
+ return false;
+ return true;
}
/* Check pitch constriants for all chips & tiling formats */
-static bool
+bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (stride & (stride - 1))
return false;
- /* We don't handle the aperture area covered by the fence being bigger
- * than the object size.
- */
- if (i915_get_fence_size(dev, size) != size)
- return false;
-
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 1fe68a2..13b0289 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -66,8 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
&batchbuffer->cliprects))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_BATCHBUFFER,
+ return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
(unsigned long)batchbuffer);
}
@@ -102,8 +101,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
&cmdbuffer->cliprects))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
+ return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
+ (unsigned long)cmdbuffer);
}
typedef struct drm_i915_irq_emit32 {
@@ -125,8 +124,8 @@ static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
&request->irq_seq))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
+ (unsigned long)request);
}
typedef struct drm_i915_getparam32 {
int param;
@@ -149,8 +148,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
&request->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
+ (unsigned long)request);
}
typedef struct drm_i915_mem_alloc32 {
@@ -178,8 +177,8 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
&request->region_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_ALLOC, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
+ (unsigned long)request);
}
drm_ioctl_compat_t *i915_compat_ioctls[] = {
@@ -211,12 +210,10 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85f4c5d..a17d6bdf 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,7 +274,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir;
- u32 new_de_iir, new_gt_iir, new_pch_iir;
struct drm_i915_master_private *master_priv;
/* disable master interrupt before clearing iir */
@@ -286,49 +285,58 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
- for (;;) {
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
- break;
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ goto done;
- ret = IRQ_HANDLED;
+ ret = IRQ_HANDLED;
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- new_pch_iir = I915_READ(SDEIIR);
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
- I915_WRITE(DEIIR, de_iir);
- new_de_iir = I915_READ(DEIIR);
- I915_WRITE(GTIIR, gt_iir);
- new_gt_iir = I915_READ(GTIIR);
+ if (gt_iir & GT_USER_INTERRUPT) {
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ }
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
+ if (de_iir & DE_GSE)
+ ironlake_opregion_gse_intr(dev);
- if (gt_iir & GT_USER_INTERRUPT) {
- u32 seqno = i915_get_gem_seqno(dev);
- dev_priv->mm.irq_gem_seqno = seqno;
- trace_i915_gem_request_complete(dev, seqno);
- DRM_WAKEUP(&dev_priv->irq_queue);
- }
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip(dev, 0);
+ }
- if (de_iir & DE_GSE)
- ironlake_opregion_gse_intr(dev);
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip(dev, 1);
+ }
- /* check event from PCH */
- if ((de_iir & DE_PCH_EVENT) &&
- (pch_iir & SDE_HOTPLUG_MASK)) {
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- }
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
- de_iir = new_de_iir;
- gt_iir = new_gt_iir;
- pch_iir = new_pch_iir;
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
+ /* check event from PCH */
+ if ((de_iir & DE_PCH_EVENT) &&
+ (pch_iir & SDE_HOTPLUG_MASK)) {
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
}
+ /* should clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(DEIIR, de_iir);
+
+done:
I915_WRITE(DEIER, de_ier);
(void)I915_READ(DEIER);
@@ -852,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!(pipeconf & PIPEACONF_ENABLE))
return -EINVAL;
- if (IS_IRONLAKE(dev))
- return 0;
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_I965G(dev))
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ else if (IS_I965G(dev))
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
@@ -874,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- if (IS_IRONLAKE(dev))
- return;
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE |
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ if (IS_IRONLAKE(dev))
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ else
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE |
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
@@ -1023,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_mask = GT_USER_INTERRUPT;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask;
+ dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
/* should always can generate irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
@@ -1084,6 +1094,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
(void) I915_READ(IER);
}
+/*
+ * Must be called after intel_modeset_init or hotplug interrupts won't be
+ * enabled correctly.
+ */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1106,19 +1120,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- /* Leave other bits alone */
- hotplug_en |= HOTPLUG_EN_MASK;
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ /* Ignore TV since it's buggy */
+
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
- TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
- SDVOB_HOTPLUG_INT_STATUS;
- if (IS_G4X(dev)) {
- dev_priv->hotplug_supported_mask |=
- HDMIB_HOTPLUG_INT_STATUS |
- HDMIC_HOTPLUG_INT_STATUS |
- HDMID_HOTPLUG_INT_STATUS;
- }
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 974b3cf..ab1bd2d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO (1<<0)
#define FBC_COMMAND 0x0320c
@@ -879,13 +880,6 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
-#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
- HDMIC_HOTPLUG_INT_EN | \
- HDMID_HOTPLUG_INT_EN | \
- SDVOB_HOTPLUG_INT_EN | \
- SDVOC_HOTPLUG_INT_EN | \
- CRT_HOTPLUG_INT_EN)
-
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -982,6 +976,8 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER (1 << 25)
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
@@ -1751,6 +1747,8 @@
/* Display & cursor control */
+/* dithering flag on Ironlake */
+#define PIPE_ENABLE_DITHER (1 << 4)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
@@ -1818,7 +1816,7 @@
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW2 0x70038
#define DSPFW_CURSORA_MASK 0x00003f00
-#define DSPFW_CURSORA_SHIFT 16
+#define DSPFW_CURSORA_SHIFT 8
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d5ebb00..a3b90c9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
- /* Render Standby */
- if (I915_HAS_RC6(dev)) {
- dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
- dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
- }
-
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
- /* Render Standby */
- if (I915_HAS_RC6(dev)) {
- I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
- I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
- }
-
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f275677..15fbc1b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -33,6 +33,8 @@
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
+static int panel_type;
+
static void *
find_section(struct bdb_header *bdb, int section_id)
{
@@ -128,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
+ panel_type = lvds_options->panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
@@ -197,7 +200,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
memset(temp_mode, 0, sizeof(*temp_mode));
}
kfree(temp_mode);
- if (temp_downclock < panel_fixed_mode->clock) {
+ if (temp_downclock < panel_fixed_mode->clock &&
+ i915_lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
@@ -405,6 +409,34 @@ parse_driver_features(struct drm_i915_private *dev_priv,
}
static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_edp *edp;
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\
+ assume 18bpp panel color depth.\n");
+ dev_priv->edp_bpp = 18;
+ }
+ return;
+ }
+
+ switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+ case EDP_18BPP:
+ dev_priv->edp_bpp = 18;
+ break;
+ case EDP_24BPP:
+ dev_priv->edp_bpp = 24;
+ break;
+ case EDP_30BPP:
+ dev_priv->edp_bpp = 30;
+ break;
+ }
+}
+
+static void
parse_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
@@ -521,6 +553,7 @@ intel_init_bios(struct drm_device *dev)
parse_sdvo_device_mapping(dev_priv, bdb);
parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
+ parse_edp(dev_priv, bdb);
pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 425ac9d..4c18514 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -98,6 +98,7 @@ struct vbios_data {
#define BDB_SDVO_LVDS_PNP_IDS 24
#define BDB_SDVO_LVDS_POWER_SEQ 25
#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
#define BDB_LVDS_OPTIONS 40
#define BDB_LVDS_LFP_DATA_PTRS 41
#define BDB_LVDS_LFP_DATA 42
@@ -426,6 +427,45 @@ struct bdb_driver_features {
u8 custom_vbt_version;
} __attribute__((packed));
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+struct edp_power_seq {
+ u16 t3;
+ u16 t7;
+ u16 t9;
+ u16 t10;
+ u16 t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ u32 sdrrs_msa_timing_delay;
+ struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
bool intel_init_bios(struct drm_device *dev);
/*
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9f3d3e5..79dd402 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -548,4 +551,6 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
+
+ dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b0..b27202d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -70,8 +70,6 @@ struct intel_limit {
intel_p2_t p2;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
int, int, intel_clock_t *);
- bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *);
};
#define I8XX_DOT_MIN 25000
@@ -242,38 +240,93 @@ struct intel_limit {
#define IRONLAKE_DOT_MAX 350000
#define IRONLAKE_VCO_MIN 1760000
#define IRONLAKE_VCO_MAX 3510000
-#define IRONLAKE_N_MIN 1
-#define IRONLAKE_N_MAX 5
-#define IRONLAKE_M_MIN 79
-#define IRONLAKE_M_MAX 118
#define IRONLAKE_M1_MIN 12
-#define IRONLAKE_M1_MAX 23
+#define IRONLAKE_M1_MAX 22
#define IRONLAKE_M2_MIN 5
#define IRONLAKE_M2_MAX 9
-#define IRONLAKE_P_SDVO_DAC_MIN 5
-#define IRONLAKE_P_SDVO_DAC_MAX 80
-#define IRONLAKE_P_LVDS_MIN 28
-#define IRONLAKE_P_LVDS_MAX 112
-#define IRONLAKE_P1_MIN 1
-#define IRONLAKE_P1_MAX 8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN 1
+#define IRONLAKE_DAC_N_MAX 5
+#define IRONLAKE_DAC_M_MIN 79
+#define IRONLAKE_DAC_M_MAX 127
+#define IRONLAKE_DAC_P_MIN 5
+#define IRONLAKE_DAC_P_MAX 80
+#define IRONLAKE_DAC_P1_MIN 1
+#define IRONLAKE_DAC_P1_MAX 8
+#define IRONLAKE_DAC_P2_SLOW 10
+#define IRONLAKE_DAC_P2_FAST 5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN 1
+#define IRONLAKE_LVDS_S_N_MAX 3
+#define IRONLAKE_LVDS_S_M_MIN 79
+#define IRONLAKE_LVDS_S_M_MAX 118
+#define IRONLAKE_LVDS_S_P_MIN 28
+#define IRONLAKE_LVDS_S_P_MAX 112
+#define IRONLAKE_LVDS_S_P1_MIN 2
+#define IRONLAKE_LVDS_S_P1_MAX 8
+#define IRONLAKE_LVDS_S_P2_SLOW 14
+#define IRONLAKE_LVDS_S_P2_FAST 14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN 1
+#define IRONLAKE_LVDS_D_N_MAX 3
+#define IRONLAKE_LVDS_D_M_MIN 79
+#define IRONLAKE_LVDS_D_M_MAX 127
+#define IRONLAKE_LVDS_D_P_MIN 14
+#define IRONLAKE_LVDS_D_P_MAX 56
+#define IRONLAKE_LVDS_D_P1_MIN 2
+#define IRONLAKE_LVDS_D_P1_MAX 8
+#define IRONLAKE_LVDS_D_P2_SLOW 7
+#define IRONLAKE_LVDS_D_P2_FAST 7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN 1
+#define IRONLAKE_LVDS_S_SSC_N_MAX 2
+#define IRONLAKE_LVDS_S_SSC_M_MIN 79
+#define IRONLAKE_LVDS_S_SSC_M_MAX 126
+#define IRONLAKE_LVDS_S_SSC_P_MIN 28
+#define IRONLAKE_LVDS_S_SSC_P_MAX 112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN 1
+#define IRONLAKE_LVDS_D_SSC_N_MAX 3
+#define IRONLAKE_LVDS_D_SSC_M_MIN 79
+#define IRONLAKE_LVDS_D_SSC_M_MAX 126
+#define IRONLAKE_LVDS_D_SSC_P_MIN 14
+#define IRONLAKE_LVDS_D_SSC_P_MAX 42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN 1
+#define IRONLAKE_DP_N_MAX 2
+#define IRONLAKE_DP_M_MIN 81
+#define IRONLAKE_DP_M_MAX 90
+#define IRONLAKE_DP_P_MIN 10
+#define IRONLAKE_DP_P_MAX 20
+#define IRONLAKE_DP_P2_FAST 10
+#define IRONLAKE_DP_P2_SLOW 10
+#define IRONLAKE_DP_P2_LIMIT 0
+#define IRONLAKE_DP_P1_MIN 1
+#define IRONLAKE_DP_P1_MAX 2
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
-static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -294,7 +347,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -309,7 +361,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -324,7 +375,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -342,7 +392,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
/* below parameter and function is for G4X Chipset Family*/
@@ -360,7 +409,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
.p2_fast = G4X_P2_SDVO_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -377,7 +425,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
.p2_fast = G4X_P2_HDMI_DAC_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -402,7 +449,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -427,7 +473,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_display_port = {
@@ -465,7 +510,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_pineview_lvds = {
@@ -481,46 +525,135 @@ static const intel_limit_t intel_limits_pineview_lvds = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
- .find_reduced_pll = intel_find_best_reduced_PLL,
};
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
+ .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
+ .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_DAC_P2_SLOW,
+ .p2_fast = IRONLAKE_DAC_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
- .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
- .find_pll = intel_ironlake_find_best_PLL,
+ .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
};
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_LVDS_SLOW,
- .p2_fast = IRONLAKE_P2_LVDS_FAST },
- .find_pll = intel_ironlake_find_best_PLL,
+ .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+ .dot = { .min = IRONLAKE_DOT_MIN,
+ .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN,
+ .max = IRONLAKE_VCO_MAX},
+ .n = { .min = IRONLAKE_DP_N_MIN,
+ .max = IRONLAKE_DP_N_MAX },
+ .m = { .min = IRONLAKE_DP_M_MIN,
+ .max = IRONLAKE_DP_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN,
+ .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN,
+ .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_DP_P_MIN,
+ .max = IRONLAKE_DP_P_MAX },
+ .p1 = { .min = IRONLAKE_DP_P1_MIN,
+ .max = IRONLAKE_DP_P1_MAX},
+ .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+ .p2_slow = IRONLAKE_DP_P2_SLOW,
+ .p2_fast = IRONLAKE_DP_P2_FAST },
+ .find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_ironlake_lvds;
+ int refclk = 120;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+ refclk = 100;
+
+ if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP) {
+ /* LVDS dual channel */
+ if (refclk == 100)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ HAS_eDP)
+ limit = &intel_limits_ironlake_display_port;
else
- limit = &intel_limits_ironlake_sdvo;
+ limit = &intel_limits_ironlake_dac;
return limit;
}
@@ -737,46 +870,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
return (err != target);
}
-
-static bool
-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-
-{
- struct drm_device *dev = crtc->dev;
- intel_clock_t clock;
- int err = target;
- bool found = false;
-
- memcpy(&clock, best_clock, sizeof(intel_clock_t));
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in Pineview */
- if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
- break;
- for (clock.n = limit->n.min; clock.n <= limit->n.max;
- clock.n++) {
- int this_err;
-
- intel_clock(dev, refclk, &clock);
-
- if (!intel_PLL_is_valid(crtc, &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- found = true;
- }
- }
- }
- }
-
- return found;
-}
-
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
@@ -791,7 +884,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ int lvds_reg;
+
+ if (IS_IRONLAKE(dev))
+ lvds_reg = PCH_LVDS;
+ else
+ lvds_reg = LVDS;
+ if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
@@ -839,6 +938,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
+
+ /* return directly when it is eDP */
+ if (HAS_eDP)
+ return true;
+
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
@@ -857,68 +961,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- intel_clock_t clock;
- int err_most = 47;
- int err_min = 10000;
-
- /* eDP has only 2 clock choice, no n/m/p setting */
- if (HAS_eDP)
- return true;
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- return intel_find_pll_ironlake_dp(limit, crtc, target,
- refclk, best_clock);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
- memset(best_clock, 0, sizeof(*best_clock));
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- /* based on hardware requriment prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
- clock.m2 >= limit->m2.min; clock.m2--) {
- int this_err;
-
- intel_clock(dev, refclk, &clock);
- if (!intel_PLL_is_valid(crtc, &clock))
- continue;
- this_err = abs((10000 - (target*10000/clock.dot)));
- if (this_err < err_most) {
- *best_clock = clock;
- /* found on first matching */
- goto out;
- } else if (this_err < err_min) {
- *best_clock = clock;
- err_min = this_err;
- }
- }
- }
- }
- }
-out:
- return true;
-}
-
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -989,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1282,7 +1326,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_display_plane(obj);
if (ret != 0) {
i915_gem_object_unpin(obj);
mutex_unlock(&dev->struct_mutex);
@@ -1493,6 +1537,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
u32 temp;
int tries = 5, j, n;
+ u32 pipe_bpc;
+
+ temp = I915_READ(pipeconf_reg);
+ pipe_bpc = temp & PIPE_BPC_MASK;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1572,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
+ /*
+ * make the BPC in FDI Rx be consistent with that in
+ * pipeconf reg.
+ */
+ temp &= ~(0x7 << 16);
+ temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
FDI_SEL_PCDCLK |
FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
@@ -1666,6 +1720,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= pipe_bpc;
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
@@ -1697,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+ drm_vblank_off(dev, pipe);
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -1745,6 +1806,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
+ /* BPC in FDI rx is consistent with that in pipeconf */
+ temp &= ~(0x07 << 16);
+ temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
I915_READ(fdi_rx_reg);
@@ -1789,7 +1853,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}
-
+ temp = I915_READ(transconf_reg);
+ /* BPC in transcoder is consistent with that in pipeconf */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= pipe_bpc;
+ I915_WRITE(transconf_reg, temp);
+ I915_READ(transconf_reg);
udelay(100);
/* disable PCH DPLL */
@@ -2448,7 +2517,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
* A value of 5us seems to be a good balance; safe for very low end
* platforms but not overly aggressive on lower latency configs.
*/
-const static int latency_ns = 5000;
+static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
@@ -2559,7 +2628,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 12000;
+ static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2570,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
sr_entries = roundup(sr_entries / cacheline_size, 1);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2598,7 +2671,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 12000;
+ static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2613,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm = 1;
srwm &= 0x3f;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2667,7 +2744,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (HAS_FW_BLC(dev) && sr_hdisplay &&
(!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 6000;
+ static const int sr_latency_ns = 6000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2681,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (srwm < 0)
srwm = 1;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2906,10 +2987,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
- if (is_lvds && limit->find_reduced_pll &&
- dev_priv->lvds_downclock_avail) {
- memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
- has_reduced_clock = limit->find_reduced_pll(limit, crtc,
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&reduced_clock);
@@ -2969,6 +3048,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(pipeconf_reg);
+ temp &= ~PIPE_BPC_MASK;
+ if (is_lvds) {
+ int lvds_reg = I915_READ(PCH_LVDS);
+ /* the BPC will be 6 if it is 18-bit LVDS panel */
+ if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ temp |= PIPE_8BPC;
+ else
+ temp |= PIPE_6BPC;
+ } else if (is_edp) {
+ switch (dev_priv->edp_bpp/3) {
+ case 8:
+ temp |= PIPE_8BPC;
+ break;
+ case 10:
+ temp |= PIPE_10BPC;
+ break;
+ case 6:
+ temp |= PIPE_6BPC;
+ break;
+ case 12:
+ temp |= PIPE_12BPC;
+ break;
+ }
+ } else
+ temp |= PIPE_8BPC;
+ I915_WRITE(pipeconf_reg, temp);
+ I915_READ(pipeconf_reg);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
@@ -3195,7 +3301,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
-
+ /* set the dithering flag */
+ if (IS_I965G(dev)) {
+ if (dev_priv->lvds_dither) {
+ if (IS_IRONLAKE(dev))
+ pipeconf |= PIPE_ENABLE_DITHER;
+ else
+ lvds |= LVDS_ENABLE_DITHER;
+ } else {
+ if (IS_IRONLAKE(dev))
+ pipeconf &= ~PIPE_ENABLE_DITHER;
+ else
+ lvds &= ~LVDS_ENABLE_DITHER;
+ }
+ }
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
@@ -3385,7 +3504,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
- if (!dev_priv->cursor_needs_physical) {
+ if (!dev_priv->info->cursor_needs_physical) {
ret = i915_gem_object_pin(bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3539,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
I915_WRITE(base, addr);
if (intel_crtc->cursor_bo) {
- if (dev_priv->cursor_needs_physical) {
+ if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != bo)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
@@ -3779,125 +3898,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
-void intel_increase_renderclock(struct drm_device *dev, bool schedule)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE(dev))
- return;
-
- if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG_DRIVER("not reclocking render clock\n");
- return;
- }
-
- /* Restore render clock frequency to original value */
- if (IS_G4X(dev) || IS_I9XX(dev))
- pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
- else if (IS_I85X(dev))
- pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
- DRM_DEBUG_DRIVER("increasing render clock frequency\n");
-
- /* Schedule downclock */
- if (schedule)
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-}
-
-void intel_decrease_renderclock(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE(dev))
- return;
-
- if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG_DRIVER("not reclocking render clock\n");
- return;
- }
-
- if (IS_G4X(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
- gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I965G(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
- gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
- gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I915G(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
- gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I85X(dev)) {
- u16 hpllcc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
-
- /* Up to maximum... */
- hpllcc &= ~GC_CLOCK_CONTROL_MASK;
- hpllcc |= GC_CLOCK_133_200;
-
- pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
- }
- DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
-}
-
-/* Note that no increase function is needed for this - increase_renderclock()
- * will also rewrite these bits
- */
-void intel_decrease_displayclock(struct drm_device *dev)
-{
- if (IS_IRONLAKE(dev))
- return;
-
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
- IS_I915GM(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~0xf0;
- gcfgc |= 0x80;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- }
-}
-
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +4011,6 @@ static void intel_idle_update(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
- /* GPU isn't processing, downclock it. */
- if (!dev_priv->busy) {
- intel_decrease_renderclock(dev);
- intel_decrease_displayclock(dev);
- }
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4050,13 +4044,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy) {
+ if (!dev_priv->busy)
dev_priv->busy = true;
- intel_increase_renderclock(dev, true);
- } else {
+ else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
- }
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
@@ -4089,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
- struct drm_gem_object *obj;
+ struct drm_gem_object *old_fb_obj;
+ struct drm_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
};
@@ -4100,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
- i915_gem_object_unpin(work->obj);
- drm_gem_object_unreference(work->obj);
+ i915_gem_object_unpin(work->old_fb_obj);
+ drm_gem_object_unreference(work->pending_flip_obj);
+ drm_gem_object_unreference(work->old_fb_obj);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -4124,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
+ if (work && !work->pending) {
+ obj_priv = work->pending_flip_obj->driver_private;
+ DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+ obj_priv,
+ atomic_read(&obj_priv->pending_flip));
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@@ -4144,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = work->obj->driver_private;
- if (atomic_dec_and_test(&obj_priv->pending_flip))
+ obj_priv = work->pending_flip_obj->driver_private;
+
+ /* Initial scanout buffer will have a 0 pending flip count */
+ if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+ atomic_dec_and_test(&obj_priv->pending_flip))
DRM_WAKEUP(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
}
@@ -4158,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work)
+ if (intel_crtc->unpin_work) {
intel_crtc->unpin_work->pending = 1;
+ } else {
+ DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -4175,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
- int ret;
+ int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int ret, pipesrc;
RING_LOCALS;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4187,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
- work->obj = intel_fb->obj;
+ work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
mutex_unlock(&dev->struct_mutex);
@@ -4206,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
+ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+ obj->driver_private);
kfree(work);
+ intel_crtc->unpin_work = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
- /* Reference the old fb object for the scheduled work. */
- drm_gem_object_reference(work->obj);
+ /* Reference the objects for the scheduled work. */
+ drm_gem_object_reference(work->old_fb_obj);
+ drm_gem_object_reference(obj);
crtc->fb = fb;
i915_gem_object_flush_write_domain(obj);
drm_vblank_get(dev, intel_crtc->pipe);
obj_priv = obj->driver_private;
atomic_inc(&obj_priv->pending_flip);
+ work->pending_flip_obj = obj;
BEGIN_LP_RING(4);
OUT_RING(MI_DISPLAY_FLIP |
@@ -4226,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(fb->pitch);
if (IS_I965G(dev)) {
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
- OUT_RING((fb->width << 16) | fb->height);
+ pipesrc = I915_READ(pipesrc_reg);
+ OUT_RING(pipesrc & 0x0fff0fff);
} else {
OUT_RING(obj_priv->gtt_offset);
OUT_RING(MI_NOOP);
@@ -4400,29 +4414,43 @@ static void intel_setup_outputs(struct drm_device *dev)
bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, SDVOB);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
+ }
- if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_B\n");
intel_dp_init(dev, DP_B);
+ }
}
/* Before G4X SDVOC doesn't have its own detect register */
- if (I915_READ(SDVOB) & SDVO_DETECTED)
+ if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOC\n");
found = intel_sdvo_init(dev, SDVOC);
+ }
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
- if (SUPPORTS_INTEGRATED_HDMI(dev))
+ if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, SDVOC);
- if (SUPPORTS_INTEGRATED_DP(dev))
+ }
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_C\n");
intel_dp_init(dev, DP_C);
+ }
}
- if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+ if (SUPPORTS_INTEGRATED_DP(dev) &&
+ (I915_READ(DP_D) & DP_DETECTED)) {
+ DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D);
+ }
} else if (IS_I8XX(dev))
intel_dvo_init(dev);
@@ -4527,6 +4555,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_changed = intelfb_probe,
};
+static struct drm_gem_object *
+intel_alloc_power_context(struct drm_device *dev)
+{
+ struct drm_gem_object *pwrctx;
+ int ret;
+
+ pwrctx = drm_gem_object_alloc(dev, 4096);
+ if (!pwrctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_object_pin(pwrctx, 4096);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return pwrctx;
+
+err_unpin:
+ i915_gem_object_unpin(pwrctx);
+err_unref:
+ drm_gem_object_unreference(pwrctx);
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+}
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4643,27 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (I915_HAS_RC6(dev)) {
- struct drm_gem_object *pwrctx;
- struct drm_i915_gem_object *obj_priv;
- int ret;
+ if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_i915_gem_object *obj_priv = NULL;
if (dev_priv->pwrctx) {
obj_priv = dev_priv->pwrctx->driver_private;
} else {
- pwrctx = drm_gem_object_alloc(dev, 4096);
- if (!pwrctx) {
- DRM_DEBUG("failed to alloc power context, "
- "RC6 disabled\n");
- goto out;
- }
+ struct drm_gem_object *pwrctx;
- ret = i915_gem_object_pin(pwrctx, 4096);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n",
- ret);
- drm_gem_object_unreference(pwrctx);
- goto out;
+ pwrctx = intel_alloc_power_context(dev);
+ if (pwrctx) {
+ dev_priv->pwrctx = pwrctx;
+ obj_priv = pwrctx->driver_private;
}
-
- i915_gem_object_set_to_gtt_domain(pwrctx, 1);
-
- dev_priv->pwrctx = pwrctx;
- obj_priv = pwrctx->driver_private;
}
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
- I915_WRITE(MCHBAR_RENDER_STANDBY,
- I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ if (obj_priv) {
+ I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ }
}
-
-out:
- return;
}
/* Set up chip specific display functions */
@@ -4770,7 +4819,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
del_timer_sync(&intel_crtc->idle_timer);
}
- intel_increase_renderclock(dev, false);
del_timer_sync(&dev_priv->idle_timer);
if (dev_priv->display.disable_fbc)
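[Editorial note on the page-flip changes above: the unpin work now tracks two buffers, and the reference counting pairs up as in the minimal sketch below. This is an illustration only — it uses the same GEM helpers as the patch, but the struct and function names here are hypothetical.]

struct flip_refs_sketch {
	struct drm_gem_object *old_fb_obj;       /* buffer being flipped away from */
	struct drm_gem_object *pending_flip_obj; /* buffer queued for scanout */
};

/* References taken when the flip is queued; both are dropped in the deferred
 * unpin worker, so neither buffer can be freed while a flip may be in flight. */
static void flip_take_refs_sketch(struct flip_refs_sketch *work,
				  struct drm_gem_object *old_fb,
				  struct drm_gem_object *new_fb)
{
	drm_gem_object_reference(old_fb);
	drm_gem_object_reference(new_fb);
	work->old_fb_obj = old_fb;
	work->pending_flip_obj = new_fb;
}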
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4e7aa8b..439506c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -125,9 +125,15 @@ intel_dp_link_clock(uint8_t link_bw)
/* I think this is a fiction */
static int
-intel_dp_link_required(int pixel_clock)
+intel_dp_link_required(struct drm_device *dev,
+ struct intel_output *intel_output, int pixel_clock)
{
- return pixel_clock * 3;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_eDP(intel_output))
+ return (pixel_clock * dev_priv->edp_bpp) / 8;
+ else
+ return pixel_clock * 3;
}
static int
@@ -138,7 +144,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
int max_lanes = intel_dp_max_lane_count(intel_output);
- if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
+ if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
+ > max_link_clock * max_lanes)
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
@@ -492,7 +499,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
- if (intel_dp_link_required(mode->clock) <= link_avail) {
+ if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
+ <= link_avail) {
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
@@ -1289,53 +1297,7 @@ intel_dp_hot_plug(struct intel_output *intel_output)
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
intel_dp_check_link_status(intel_output);
}
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given DP is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it is assumed that the given
- * DP is present.
- */
-static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, dp_port, ret;
-
- if (!dev_priv->child_dev_num)
- return 1;
-
- dp_port = 0;
- if (dp_reg == DP_B || dp_reg == PCH_DP_B)
- dp_port = PORT_IDPB;
- else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
- dp_port = PORT_IDPC;
- else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
- dp_port = PORT_IDPD;
-
- ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not DP, continue.
- */
- if (p_child->device_type != DEVICE_TYPE_DP &&
- p_child->device_type != DEVICE_TYPE_eDP)
- continue;
- /* Find the eDP port */
- if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
- ret = 1;
- break;
- }
- /* Find the DP port */
- if (p_child->dvo_port == dp_port) {
- ret = 1;
- break;
- }
- }
- return ret;
-}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
@@ -1345,10 +1307,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
struct intel_dp_priv *dp_priv;
const char *name = NULL;
- if (!dp_is_present_in_vbt(dev, output_reg)) {
- DRM_DEBUG_KMS("DP is not present. Ignore it\n");
- return;
- }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
if (!intel_output)
@@ -1373,11 +1331,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_output)) {
- intel_output->crtc_mask = (1 << 1);
+ if (IS_eDP(intel_output))
intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
- } else
- intel_output->crtc_mask = (1 << 0) | (1 << 1);
+
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -1402,14 +1359,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
case DP_B:
case PCH_DP_B:
+ dev_priv->hotplug_supported_mask |=
+ HDMIB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
+ dev_priv->hotplug_supported_mask |=
+ HDMIC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
+ dev_priv->hotplug_supported_mask |=
+ HDMID_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
}
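[Editorial note: as a back-of-the-envelope check of the intel_dp_link_required() change above, a mode needs roughly pixel_clock x bits-per-pixel / 8 of link capacity, while the link supplies link_clock x lane_count. The sketch below only illustrates that rule; it is not the driver's code and glosses over the unit handling of the real functions.]

/* Returns non-zero if the mode fits, using the same rule as mode_valid above. */
static int dp_bandwidth_fits_sketch(int pixel_clock, int bpp,
				    int max_link_clock, int max_lanes)
{
	int required  = (pixel_clock * bpp) / 8;   /* eDP path; 24bpp reduces to *3 */
	int available = max_link_clock * max_lanes;

	return required <= available;
}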
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753..aaabbcb 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+ ret = i915_gem_object_pin(fbo, 64*1024);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f04dbbe..0e268de 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -225,52 +225,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
.destroy = intel_hdmi_enc_destroy,
};
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the given HDMI is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the given
- * HDMI is present.
- */
-static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i, hdmi_port, ret;
-
- if (!dev_priv->child_dev_num)
- return 1;
-
- if (hdmi_reg == SDVOB)
- hdmi_port = DVO_B;
- else if (hdmi_reg == SDVOC)
- hdmi_port = DVO_C;
- else if (hdmi_reg == HDMIB)
- hdmi_port = DVO_B;
- else if (hdmi_reg == HDMIC)
- hdmi_port = DVO_C;
- else if (hdmi_reg == HDMID)
- hdmi_port = DVO_D;
- else
- return 0;
-
- ret = 0;
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
- /*
- * If the device type is not HDMI, continue.
- */
- if (p_child->device_type != DEVICE_TYPE_HDMI)
- continue;
- /* Find the HDMI port */
- if (p_child->dvo_port == hdmi_port) {
- ret = 1;
- break;
- }
- }
- return ret;
-}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -278,10 +232,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_output *intel_output;
struct intel_hdmi_priv *hdmi_priv;
- if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
- DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
- return;
- }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_output)
@@ -303,21 +253,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (sdvox_reg == SDVOB) {
intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
"HDMIB");
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
"HDMIC");
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
"HDMID");
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
if (!intel_output->ddc_bus)
goto err_connector;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3118ce2..c2e8a45 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -602,12 +602,47 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
/* Some lid devices report incorrect lid status, assume they're connected */
static const struct dmi_system_id bad_lid_status[] = {
{
+ .ident = "Compaq nx9020",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "3084"),
+ },
+ },
+ {
+ .ident = "Samsung SX20S",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
+ DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
+ },
+ },
+ {
.ident = "Aspire One",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
},
},
+ {
+ .ident = "Aspire 1810T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
+ },
+ },
+ {
+ .ident = "PC-81005",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
+ },
+ },
+ {
+ .ident = "Clevo M5x0N",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+ DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+ },
+ },
{ }
};
@@ -622,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
{
enum drm_connector_status status = connector_status_connected;
- if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+ if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
status = connector_status_disconnected;
return status;
@@ -679,7 +714,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, lid_notifier);
struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector = dev_priv->int_lvds_connector;
+ /*
+ * Check and update the status of the LVDS connector after receiving
+ * the lid notification event.
+ */
+ if (connector)
+ connector->status = connector->funcs->detect(connector);
if (!acpi_lid_open()) {
dev_priv->modeset_on_lid = 1;
return NOTIFY_OK;
@@ -854,65 +896,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};
-#ifdef CONFIG_ACPI
-/*
- * check_lid_device -- check whether @handle is an ACPI LID device.
- * @handle: ACPI device handle
- * @level : depth in the ACPI namespace tree
- * @context: the number of LID device when we find the device
- * @rv: a return value to fill if desired (Not use)
- */
-static acpi_status
-check_lid_device(acpi_handle handle, u32 level, void *context,
- void **return_value)
-{
- struct acpi_device *acpi_dev;
- int *lid_present = context;
-
- acpi_dev = NULL;
- /* Get the acpi device for device handle */
- if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
- /* If there is no ACPI device for handle, return */
- return AE_OK;
- }
-
- if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
- *lid_present = 1;
-
- return AE_OK;
-}
-
-/**
- * check whether there exists the ACPI LID device by enumerating the ACPI
- * device tree.
- */
-static int intel_lid_present(void)
-{
- int lid_present = 0;
-
- if (acpi_disabled) {
- /* If ACPI is disabled, there is no ACPI device tree to
- * check, so assume the LID device would have been present.
- */
- return 1;
- }
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- check_lid_device, NULL, &lid_present, NULL);
-
- return lid_present;
-}
-#else
-static int intel_lid_present(void)
-{
- /* In the absence of ACPI built in, assume that the LID device would
- * have been present.
- */
- return 1;
-}
-#endif
-
/**
* intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
* @dev: drm device
@@ -957,7 +940,8 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
}
}
mutex_unlock(&dev->mode_config.mutex);
- if (temp_downclock < panel_fixed_mode->clock) {
+ if (temp_downclock < panel_fixed_mode->clock &&
+ i915_lvds_downclock) {
/* We found the downclock for LVDS. */
dev_priv->lvds_downclock_avail = 1;
dev_priv->lvds_downclock = temp_downclock;
@@ -1031,12 +1015,8 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- /*
- * Assume LVDS is present if there's an ACPI lid device or if the
- * device is present in the VBT.
- */
- if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
- DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
+ if (!lvds_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
@@ -1180,6 +1160,8 @@ out:
DRM_DEBUG_KMS("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
}
+ /* keep the LVDS connector */
+ dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 24a3dc9..82678d3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -462,14 +462,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
}
/**
- * Don't check status code from this as it switches the bus back to the
- * SDVO chips which defeats the purpose of doing a bus switch in the first
- * place.
+ * Read the response after issuing the DDC switch command. Note that the
+ * response read and the DDC switch command must happen within a single
+ * I2C transaction; otherwise any I2C transaction started after the DDC
+ * bus switch is routed back to the internal SDVO registers.
*/
static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
u8 target)
{
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = sdvo_priv->slave_addr >> 1,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ },
+ /* the following two are to read the response */
+ {
+ .addr = sdvo_priv->slave_addr >> 1,
+ .flags = 0,
+ .len = 1,
+ .buf = cmd_buf,
+ },
+ {
+ .addr = sdvo_priv->slave_addr >> 1,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = ret_value,
+ },
+ };
+
+ intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &target, 1);
+ /* write the DDC switch command argument */
+ intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
+
+ out_buf[0] = SDVO_I2C_OPCODE;
+ out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
+ cmd_buf[0] = SDVO_I2C_CMD_STATUS;
+ cmd_buf[1] = 0;
+ ret_value[0] = 0;
+ ret_value[1] = 0;
+
+ ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
+ if (ret != 3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ return;
+ }
+ if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("DDC switch command returns response %d\n",
+ ret_value[0]);
+ return;
+ }
+ return;
}
static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
@@ -1579,6 +1628,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
edid = drm_get_edid(&intel_output->base,
intel_output->ddc_bus);
+ /* This only applies to SDVO cards with multiple outputs */
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
+ uint8_t saved_ddc, temp_ddc;
+ saved_ddc = sdvo_priv->ddc_bus;
+ temp_ddc = sdvo_priv->ddc_bus >> 1;
+ /*
+ * Don't use 1 as the DDC bus switch argument when fetching the
+ * EDID; that bus is reserved for the SDVO SPD ROM.
+ */
+ while(temp_ddc > 1) {
+ sdvo_priv->ddc_bus = temp_ddc;
+ edid = drm_get_edid(&intel_output->base,
+ intel_output->ddc_bus);
+ if (edid) {
+ /*
+ * If an EDID can be read on this bus, assume it is the
+ * correct DDC bus and keep it.
+ */
+ sdvo_priv->ddc_bus = temp_ddc;
+ break;
+ }
+ temp_ddc >>= 1;
+ }
+ if (edid == NULL)
+ sdvo_priv->ddc_bus = saved_ddc;
+ }
/* when there is no edid and no monitor is connected with VGA
* port, try to use the CRT ddc to read the EDID for DVI-connector
*/
@@ -2270,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
+ } else if (flags & SDVO_OUTPUT_CVBS0) {
+
+ sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_priv->is_tv = true;
+ intel_output->needs_tv_clock = true;
+ intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
} else if (flags & SDVO_OUTPUT_LVDS0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
@@ -2662,6 +2745,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
@@ -2708,10 +2792,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOB/VGA DDC BUS");
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOC/VGA DDC BUS");
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
if (intel_output->ddc_bus == NULL)
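[Editorial note: the DDC bus switch comment earlier in this file boils down to batching the switch command and the status read into one i2c_transfer() call, so no new START/STOP cycle gives the encoder a chance to flip the bus back to its internal registers. A stripped-down sketch follows; the helper name and buffer contents are illustrative, not the driver's code.]

static int ddc_switch_and_check_sketch(struct i2c_adapter *adap, u8 addr,
				       u8 *switch_cmd, u8 *status_reg, u8 *status)
{
	struct i2c_msg msgs[] = {
		{ .addr = addr, .flags = 0,        .len = 2, .buf = switch_cmd },
		{ .addr = addr, .flags = 0,        .len = 1, .buf = status_reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = status     },
	};

	/* One i2c_transfer() call keeps all three messages in one transaction. */
	if (i2c_transfer(adap, msgs, 3) != 3)
		return -EIO;
	return 0;
}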
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 97ee566..ddfe161 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -68,7 +68,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 30d0047..c1f877b 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -100,8 +100,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
if (err)
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MGA_INIT, (unsigned long)init);
+ return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
}
typedef struct drm_mga_getparam32 {
@@ -125,8 +124,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
&getparam->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
+ return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
}
typedef struct drm_mga_drm_bootstrap32 {
@@ -166,8 +164,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
|| __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MGA_DMA_BOOTSTRAP,
+ err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
(unsigned long)dma_bootstrap);
if (err)
return err;
@@ -220,12 +217,10 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
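[Editorial note: the mga changes above follow the tree-wide move of drm_ioctl() to an unlocked_ioctl handler: it now takes (filp, cmd, arg), and the compat wrappers no longer grab the BKL before forwarding. A hedged sketch of what such a wrapper reduces to, with a hypothetical function name:]

static long example_compat_ioctl_sketch(struct file *filp, unsigned int cmd,
					unsigned long arg)
{
	/* 32-bit argument fixups, if any, would happen here before forwarding. */
	return drm_ioctl(filp, cmd, arg);
}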
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d823e63..1175429 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -30,7 +30,7 @@ config DRM_NOUVEAU_DEBUG
via debugfs.
menu "I2C encoder or helper chips"
- depends on DRM
+ depends on DRM && DRM_KMS_HELPER && I2C
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1d90d4d..48c290b 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -8,14 +8,15 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_sgdma.o nouveau_dma.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
- nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o \
+ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
+ nouveau_dp.o nouveau_grctx.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o \
+ nv40_grctx.o \
nv04_instmem.o nv50_instmem.o \
nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf4882..48227e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
{
int result;
- if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+ if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
&result))
return -ENODEV;
NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
- if (result & 0x1) { /* Stamina mode - disable the external GPU */
+ if (result) { /* Ensure that the external GPU is enabled */
+ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+ nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+ NULL);
+ } else { /* Stamina mode - disable the external GPU */
nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
NULL);
nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
NULL);
- } else { /* Ensure that the external GPU is enabled */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
- NULL);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5eec5ed..0e9cd1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -181,43 +181,42 @@ struct methods {
const char desc[8];
void (*loadbios)(struct drm_device *, uint8_t *);
const bool rw;
- int score;
};
static struct methods nv04_methods[] = {
{ "PROM", load_vbios_prom, false },
{ "PRAMIN", load_vbios_pramin, true },
{ "PCIROM", load_vbios_pci, true },
- { }
};
static struct methods nv50_methods[] = {
{ "PRAMIN", load_vbios_pramin, true },
{ "PROM", load_vbios_prom, false },
{ "PCIROM", load_vbios_pci, true },
- { }
};
+#define METHODCNT 3
+
static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct methods *methods, *method;
+ struct methods *methods;
+ int i;
int testscore = 3;
+ int scores[METHODCNT];
if (nouveau_vbios) {
- method = nv04_methods;
- while (method->loadbios) {
- if (!strcasecmp(nouveau_vbios, method->desc))
+ methods = nv04_methods;
+ for (i = 0; i < METHODCNT; i++)
+ if (!strcasecmp(nouveau_vbios, methods[i].desc))
break;
- method++;
- }
- if (method->loadbios) {
+ if (i < METHODCNT) {
NV_INFO(dev, "Attempting to use BIOS image from %s\n",
- method->desc);
+ methods[i].desc);
- method->loadbios(dev, data);
- if (score_vbios(dev, data, method->rw))
+ methods[i].loadbios(dev, data);
+ if (score_vbios(dev, data, methods[i].rw))
return true;
}
@@ -229,28 +228,24 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
else
methods = nv50_methods;
- method = methods;
- while (method->loadbios) {
+ for (i = 0; i < METHODCNT; i++) {
NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
- method->desc);
+ methods[i].desc);
data[0] = data[1] = 0; /* avoid reuse of previous image */
- method->loadbios(dev, data);
- method->score = score_vbios(dev, data, method->rw);
- if (method->score == testscore)
+ methods[i].loadbios(dev, data);
+ scores[i] = score_vbios(dev, data, methods[i].rw);
+ if (scores[i] == testscore)
return true;
- method++;
}
while (--testscore > 0) {
- method = methods;
- while (method->loadbios) {
- if (method->score == testscore) {
+ for (i = 0; i < METHODCNT; i++) {
+ if (scores[i] == testscore) {
NV_TRACE(dev, "Using BIOS image from %s\n",
- method->desc);
- method->loadbios(dev, data);
+ methods[i].desc);
+ methods[i].loadbios(dev, data);
return true;
}
- method++;
}
}
@@ -261,10 +256,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
struct init_tbl_entry {
char *name;
uint8_t id;
- int length;
- int length_offset;
- int length_multiplier;
- bool (*handler)(struct nvbios *, uint16_t, struct init_exec *);
+ int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
struct bit_entry {
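[Editorial note: the struct init_tbl_entry change above is the key to the rest of this file's diff. Handlers now return the number of bytes their opcode occupied (or 0 on failure / INIT_DONE) instead of a bool, so the table parser can advance by the returned length rather than carrying per-opcode length fields. A sketch of that loop follows; lookup_init_handler() is a hypothetical stand-in for the real opcode lookup, not the driver's code.]

static void parse_init_table_sketch(struct nvbios *bios, uint16_t offset,
				    struct init_exec *iexec)
{
	int len;

	do {
		int (*handler)(struct nvbios *, uint16_t, struct init_exec *) =
			lookup_init_handler(bios->data[offset]);  /* hypothetical */

		len = handler(bios, offset, iexec);
		offset += len;          /* 0 ends the loop: failure or INIT_DONE */
	} while (len > 0);
}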
@@ -318,63 +310,22 @@ valid_reg(struct nvbios *bios, uint32_t reg)
struct drm_device *dev = bios->dev;
/* C51 has misaligned regs on purpose. Marvellous */
- if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
- NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
- reg);
- return 0;
- }
- /*
- * Warn on C51 regs that have not been verified accessible in
- * mmiotracing
- */
+ if (reg & 0x2 ||
+ (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
+ NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
+
+ /* warn on C51 regs that haven't been verified accessible in tracing */
if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
reg);
- /* Trust the init scripts on G80 */
- if (dev_priv->card_type >= NV_50)
- return 1;
-
- #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
- if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
- return 1;
- if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
- return 1;
- if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
- (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
- WITHIN(reg, 0xc000, 0x48))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
- if (reg == 0x00011014 || reg == 0x00020328)
- return 1;
- if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
- return 1;
+ if (reg >= (8*1024*1024)) {
+ NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
+ return 0;
}
- if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
- return 1;
- if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
- return 1;
- if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
- return 1;
- if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
- return 1;
- if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
- return 1;
- if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
- WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
- return 1;
- #undef WITHIN
-
- NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
- return 0;
+ return 1;
}
static bool
@@ -820,7 +771,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
}
}
-static bool
+static int
init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -852,9 +803,10 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
uint32_t reg = ROM32(bios->data[offset + 7]);
uint8_t config;
uint32_t configval;
+ int len = 11 + count * 4;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
"Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
@@ -865,7 +817,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return false;
+ return 0;
}
configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -874,10 +826,10 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, reg, configval);
- return true;
+ return len;
}
-static bool
+static int
init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -912,10 +864,10 @@ init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
iexec->repeat = false;
- return true;
+ return 2;
}
-static bool
+static int
init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -951,9 +903,10 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
uint32_t reg = ROM32(bios->data[offset + 8]);
uint8_t config;
uint16_t freq;
+ int len = 12 + count * 2;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
"Shift: 0x%02X, IO Flag Condition: 0x%02X, "
@@ -966,7 +919,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return false;
+ return 0;
}
freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -986,10 +939,10 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
setPLL(bios, reg, freq * 10);
- return true;
+ return len;
}
-static bool
+static int
init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1007,12 +960,12 @@ init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* we're not in repeat mode
*/
if (iexec->repeat)
- return false;
+ return 0;
- return true;
+ return 1;
}
-static bool
+static int
init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1041,7 +994,7 @@ init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t crtcdata;
if (!iexec->execute)
- return true;
+ return 11;
BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
"Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
@@ -1060,10 +1013,10 @@ init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
crtcdata |= (uint8_t)data;
bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
- return true;
+ return 11;
}
-static bool
+static int
init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1079,10 +1032,10 @@ init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
iexec->execute = !iexec->execute;
- return true;
+ return 1;
}
-static bool
+static int
init_io_flag_condition(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1100,7 +1053,7 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
uint8_t cond = bios->data[offset + 1];
if (!iexec->execute)
- return true;
+ return 2;
if (io_flag_condition_met(bios, offset, cond))
BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
@@ -1109,10 +1062,10 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
iexec->execute = false;
}
- return true;
+ return 2;
}
-static bool
+static int
init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1140,11 +1093,12 @@ init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
uint32_t mask = ROM32(bios->data[offset + 9]);
uint32_t data = ROM32(bios->data[offset + 13]);
uint8_t count = bios->data[offset + 17];
+ int len = 18 + count * 2;
uint32_t value;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
"Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
@@ -1164,10 +1118,10 @@ init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, controlreg, value);
}
- return true;
+ return len;
}
-static bool
+static int
init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1196,25 +1150,26 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
uint8_t shift = bios->data[offset + 5];
uint8_t count = bios->data[offset + 6];
uint32_t reg = ROM32(bios->data[offset + 7]);
+ int len = 11 + count * 4;
uint8_t config;
uint32_t freq;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
"Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
offset, crtcport, crtcindex, mask, shift, count, reg);
if (!reg)
- return true;
+ return len;
config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
if (config > count) {
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return false;
+ return 0;
}
freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1224,10 +1179,10 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
setPLL(bios, reg, freq);
- return true;
+ return len;
}
-static bool
+static int
init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1244,16 +1199,16 @@ init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t freq = ROM32(bios->data[offset + 5]);
if (!iexec->execute)
- return true;
+ return 9;
BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
offset, reg, freq);
setPLL(bios, reg, freq);
- return true;
+ return 9;
}
-static bool
+static int
init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1277,12 +1232,13 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2];
uint8_t count = bios->data[offset + 3];
+ int len = 4 + count * 3;
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
"Count: 0x%02X\n",
@@ -1290,7 +1246,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return false;
+ return 0;
for (i = 0; i < count; i++) {
uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
@@ -1303,7 +1259,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = 1;
msg.buf = &value;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return false;
+ return 0;
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
@@ -1317,14 +1273,14 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = 1;
msg.buf = &value;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return false;
+ return 0;
}
}
- return true;
+ return len;
}
-static bool
+static int
init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1346,12 +1302,13 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2];
uint8_t count = bios->data[offset + 3];
+ int len = 4 + count * 2;
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
"Count: 0x%02X\n",
@@ -1359,7 +1316,7 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return false;
+ return 0;
for (i = 0; i < count; i++) {
uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
@@ -1374,14 +1331,14 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = 1;
msg.buf = &data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return false;
+ return 0;
}
}
- return true;
+ return len;
}
-static bool
+static int
init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1401,13 +1358,14 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2];
uint8_t count = bios->data[offset + 3];
+ int len = 4 + count;
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
uint8_t data[256];
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
"Count: 0x%02X\n",
@@ -1415,7 +1373,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return false;
+ return 0;
for (i = 0; i < count; i++) {
data[i] = bios->data[offset + 4 + i];
@@ -1429,13 +1387,13 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = count;
msg.buf = data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return false;
+ return 0;
}
- return true;
+ return len;
}
-static bool
+static int
init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1460,7 +1418,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t reg, value;
if (!iexec->execute)
- return true;
+ return 5;
BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
@@ -1468,7 +1426,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return false;
+ return 0;
bios_wr32(bios, reg,
tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1476,10 +1434,10 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
bios_wr32(bios, reg + 4, value);
bios_wr32(bios, reg, tmdsaddr);
- return true;
+ return 5;
}
-static bool
+static int
init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1500,18 +1458,19 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
uint8_t mlv = bios->data[offset + 1];
uint8_t count = bios->data[offset + 2];
+ int len = 3 + count * 2;
uint32_t reg;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
offset, mlv, count);
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return false;
+ return 0;
for (i = 0; i < count; i++) {
uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1521,10 +1480,10 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, reg, tmdsaddr);
}
- return true;
+ return len;
}
-static bool
+static int
init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1547,11 +1506,12 @@ init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
uint8_t crtcindex2 = bios->data[offset + 2];
uint8_t baseaddr = bios->data[offset + 3];
uint8_t count = bios->data[offset + 4];
+ int len = 5 + count;
uint8_t oldaddr, data;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
"BaseAddr: 0x%02X, Count: 0x%02X\n",
@@ -1568,10 +1528,10 @@ init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
- return true;
+ return len;
}
-static bool
+static int
init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1592,7 +1552,7 @@ init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t value;
if (!iexec->execute)
- return true;
+ return 4;
BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
offset, crtcindex, mask, data);
@@ -1601,10 +1561,10 @@ init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
value |= data;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
- return true;
+ return 4;
}
-static bool
+static int
init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1621,14 +1581,14 @@ init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t data = bios->data[offset + 2];
if (!iexec->execute)
- return true;
+ return 3;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
- return true;
+ return 3;
}
-static bool
+static int
init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1645,18 +1605,19 @@ init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t count = bios->data[offset + 1];
+ int len = 2 + count * 2;
int i;
if (!iexec->execute)
- return true;
+ return len;
for (i = 0; i < count; i++)
init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
- return true;
+ return len;
}
-static bool
+static int
init_condition_time(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1680,7 +1641,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
unsigned cnt;
if (!iexec->execute)
- return true;
+ return 3;
if (retries > 100)
retries = 100;
@@ -1711,10 +1672,10 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
iexec->execute = false;
}
- return true;
+ return 3;
}
-static bool
+static int
init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1734,10 +1695,11 @@ init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
uint32_t basereg = ROM32(bios->data[offset + 1]);
uint32_t count = bios->data[offset + 5];
+ int len = 6 + count * 4;
int i;
if (!iexec->execute)
- return true;
+ return len;
BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
offset, basereg, count);
@@ -1749,10 +1711,10 @@ init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, reg, data);
}
- return true;
+ return len;
}
-static bool
+static int
init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1768,7 +1730,7 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint16_t sub_offset = ROM16(bios->data[offset + 1]);
if (!iexec->execute)
- return true;
+ return 3;
BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
offset, sub_offset);
@@ -1777,10 +1739,10 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
- return true;
+ return 3;
}
-static bool
+static int
init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1808,7 +1770,7 @@ init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t srcvalue, dstvalue;
if (!iexec->execute)
- return true;
+ return 22;
BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
"Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
@@ -1827,10 +1789,10 @@ init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
bios_wr32(bios, dstreg, dstvalue | srcvalue);
- return true;
+ return 22;
}
-static bool
+static int
init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1848,14 +1810,14 @@ init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t data = bios->data[offset + 4];
if (!iexec->execute)
- return true;
+ return 5;
bios_idxprt_wr(bios, crtcport, crtcindex, data);
- return true;
+ return 5;
}
-static bool
+static int
init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1903,8 +1865,8 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
- if (dev_priv->card_type >= NV_50)
- return true;
+ if (dev_priv->card_type >= NV_40)
+ return 1;
/*
* On every card I've seen, this step gets done for us earlier in
@@ -1922,10 +1884,10 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
/* write back the saved configuration value */
bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
- return true;
+ return 1;
}
-static bool
+static int
init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -1959,10 +1921,10 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */
bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
- return true;
+ return 13;
}
-static bool
+static int
init_configure_mem(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1983,7 +1945,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
uint32_t reg, data;
if (bios->major_version > 2)
- return false;
+ return 0;
bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2015,10 +1977,10 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
bios_wr32(bios, reg, data);
}
- return true;
+ return 1;
}
-static bool
+static int
init_configure_clk(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2038,7 +2000,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
int clock;
if (bios->major_version > 2)
- return false;
+ return 0;
clock = ROM16(bios->data[meminitoffs + 4]) * 10;
setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2048,10 +2010,10 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
clock *= 2;
setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
- return true;
+ return 1;
}
-static bool
+static int
init_configure_preinit(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2071,15 +2033,15 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
if (bios->major_version > 2)
- return false;
+ return 0;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
- return true;
+ return 1;
}
-static bool
+static int
init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2099,7 +2061,7 @@ init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t data = bios->data[offset + 4];
if (!iexec->execute)
- return true;
+ return 5;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
offset, crtcport, mask, data);
@@ -2158,15 +2120,15 @@ init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
for (i = 0; i < 2; i++)
bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
bios, 0x614108 + (i*0x800)) & 0x0fffffff);
- return true;
+ return 5;
}
bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
data);
- return true;
+ return 5;
}
-static bool
+static int
init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2181,7 +2143,7 @@ init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t sub = bios->data[offset + 1];
if (!iexec->execute)
- return true;
+ return 2;
BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
@@ -2191,10 +2153,10 @@ init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
- return true;
+ return 2;
}
-static bool
+static int
init_ram_condition(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2215,7 +2177,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
uint8_t data;
if (!iexec->execute)
- return true;
+ return 3;
data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
@@ -2229,10 +2191,10 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
iexec->execute = false;
}
- return true;
+ return 3;
}
-static bool
+static int
init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2251,17 +2213,17 @@ init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t data = ROM32(bios->data[offset + 9]);
if (!iexec->execute)
- return true;
+ return 13;
BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
offset, reg, mask, data);
bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
- return true;
+ return 13;
}
-static bool
+static int
init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2285,7 +2247,7 @@ init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
int i;
if (!iexec->execute)
- return true;
+ return 2;
BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
"Count: 0x%02X\n",
@@ -2300,10 +2262,10 @@ init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
bios_wr32(bios, reg, data);
}
- return true;
+ return 2;
}
-static bool
+static int
init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2315,10 +2277,10 @@ init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
/* mild retval abuse to stop parsing this table */
- return false;
+ return 0;
}
-static bool
+static int
init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2330,15 +2292,15 @@ init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
if (iexec->execute)
- return true;
+ return 1;
iexec->execute = true;
BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
- return true;
+ return 1;
}
-static bool
+static int
init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2353,7 +2315,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
unsigned time = ROM16(bios->data[offset + 1]);
if (!iexec->execute)
- return true;
+ return 3;
BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
offset, time);
@@ -2363,10 +2325,10 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
else
msleep((time + 900) / 1000);
- return true;
+ return 3;
}
-static bool
+static int
init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2383,7 +2345,7 @@ init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t cond = bios->data[offset + 1];
if (!iexec->execute)
- return true;
+ return 2;
BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
@@ -2394,10 +2356,10 @@ init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
iexec->execute = false;
}
- return true;
+ return 2;
}
-static bool
+static int
init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2414,7 +2376,7 @@ init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t cond = bios->data[offset + 1];
if (!iexec->execute)
- return true;
+ return 2;
BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
@@ -2425,10 +2387,10 @@ init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
iexec->execute = false;
}
- return true;
+ return 2;
}
-static bool
+static int
init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2451,7 +2413,7 @@ init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint8_t value;
if (!iexec->execute)
- return true;
+ return 6;
BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
"Data: 0x%02X\n",
@@ -2460,10 +2422,10 @@ init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
bios_idxprt_wr(bios, crtcport, crtcindex, value);
- return true;
+ return 6;
}
-static bool
+static int
init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2481,16 +2443,16 @@ init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint16_t freq = ROM16(bios->data[offset + 5]);
if (!iexec->execute)
- return true;
+ return 7;
BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
setPLL(bios, reg, freq * 10);
- return true;
+ return 7;
}
-static bool
+static int
init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2507,17 +2469,17 @@ init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t value = ROM32(bios->data[offset + 5]);
if (!iexec->execute)
- return true;
+ return 9;
if (reg == 0x000200)
value |= 1;
bios_wr32(bios, reg, value);
- return true;
+ return 9;
}
-static bool
+static int
init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2543,14 +2505,15 @@ init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
uint8_t type = bios->data[offset + 1];
uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
+ int len = 2 + bios->ram_restrict_group_count * 4;
int i;
if (!iexec->execute)
- return true;
+ return len;
if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
NV_ERROR(dev, "PLL limits table not version 3.x\n");
- return true; /* deliberate, allow default clocks to remain */
+ return len; /* deliberate, allow default clocks to remain */
}
entry = pll_limits + pll_limits[1];
@@ -2563,15 +2526,15 @@ init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
offset, type, reg, freq);
setPLL(bios, reg, freq);
- return true;
+ return len;
}
}
NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
- return true;
+ return len;
}
-static bool
+static int
init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2581,10 +2544,10 @@ init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*
*/
- return true;
+ return 1;
}
-static bool
+static int
init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2594,10 +2557,10 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*
*/
- return true;
+ return 1;
}
-static bool
+static int
init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2615,14 +2578,17 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
const uint8_t *gpio_entry;
int i;
+ if (!iexec->execute)
+ return 1;
+
if (bios->bdcb.version != 0x40) {
NV_ERROR(bios->dev, "DCB table not version 4.0\n");
- return false;
+ return 0;
}
if (!bios->bdcb.gpio_table_ptr) {
NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
- return false;
+ return 0;
}
gpio_entry = gpio_table + gpio_table[1];
@@ -2660,13 +2626,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
bios_wr32(bios, r, v);
}
- return true;
+ return 1;
}
-/* hack to avoid moving the itbl_entry array before this function */
-int init_ram_restrict_zm_reg_group_blocklen;
-
-static bool
+static int
init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2692,21 +2655,21 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
uint8_t regincrement = bios->data[offset + 5];
uint8_t count = bios->data[offset + 6];
uint32_t strap_ramcfg, data;
- uint16_t blocklen;
+ /* previously set by 'M' BIT table */
+ uint16_t blocklen = bios->ram_restrict_group_count * 4;
+ int len = 7 + count * blocklen;
uint8_t index;
int i;
- /* previously set by 'M' BIT table */
- blocklen = init_ram_restrict_zm_reg_group_blocklen;
if (!iexec->execute)
- return true;
+ return len;
if (!blocklen) {
NV_ERROR(bios->dev,
"0x%04X: Zero block length - has the M table "
"been parsed?\n", offset);
- return false;
+ return 0;
}
strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2724,10 +2687,10 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
reg += regincrement;
}
- return true;
+ return len;
}
-static bool
+static int
init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2744,14 +2707,14 @@ init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
uint32_t dstreg = ROM32(bios->data[offset + 5]);
if (!iexec->execute)
- return true;
+ return 9;
bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
- return true;
+ return 9;
}
-static bool
+static int
init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -2769,20 +2732,21 @@ init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
uint32_t reg = ROM32(bios->data[offset + 1]);
uint8_t count = bios->data[offset + 5];
+ int len = 6 + count * 4;
int i;
if (!iexec->execute)
- return true;
+ return len;
for (i = 0; i < count; i++) {
uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
bios_wr32(bios, reg, data);
}
- return true;
+ return len;
}
-static bool
+static int
init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2793,10 +2757,10 @@ init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* Seemingly does nothing
*/
- return true;
+ return 1;
}
-static bool
+static int
init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2829,13 +2793,13 @@ init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
val <<= bios->data[offset + 16];
if (!iexec->execute)
- return true;
+ return 17;
bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
- return true;
+ return 17;
}
-static bool
+static int
init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2859,13 +2823,13 @@ init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
val = (val & mask) | ((val + add) & ~mask);
if (!iexec->execute)
- return true;
+ return 13;
bios_wr32(bios, reg, val);
- return true;
+ return 13;
}
-static bool
+static int
init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2883,32 +2847,33 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_device *dev = bios->dev;
struct nouveau_i2c_chan *auxch;
uint32_t addr = ROM32(bios->data[offset + 1]);
- uint8_t len = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 5];
+ int len = 6 + count * 2;
int ret, i;
if (!bios->display.output) {
NV_ERROR(dev, "INIT_AUXCH: no active output\n");
- return false;
+ return 0;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return false;
+ return 0;
}
if (!iexec->execute)
- return true;
+ return len;
offset += 6;
- for (i = 0; i < len; i++, offset += 2) {
+ for (i = 0; i < count; i++, offset += 2) {
uint8_t data;
ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
- return false;
+ return 0;
}
data &= bios->data[offset + 0];
@@ -2917,14 +2882,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
- return false;
+ return 0;
}
}
- return true;
+ return len;
}
-static bool
+static int
init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2941,106 +2906,99 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_device *dev = bios->dev;
struct nouveau_i2c_chan *auxch;
uint32_t addr = ROM32(bios->data[offset + 1]);
- uint8_t len = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 5];
+ int len = 6 + count;
int ret, i;
if (!bios->display.output) {
NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
- return false;
+ return 0;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return false;
+ return 0;
}
if (!iexec->execute)
- return true;
+ return len;
offset += 6;
- for (i = 0; i < len; i++, offset++) {
+ for (i = 0; i < count; i++, offset++) {
ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
if (ret) {
NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
- return false;
+ return 0;
}
}
- return true;
+ return len;
}
static struct init_tbl_entry itbl_entry[] = {
/* command name , id , length , offset , mult , command handler */
/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
- { "INIT_IO_RESTRICT_PROG" , 0x32, 11 , 6 , 4 , init_io_restrict_prog },
- { "INIT_REPEAT" , 0x33, 2 , 0 , 0 , init_repeat },
- { "INIT_IO_RESTRICT_PLL" , 0x34, 12 , 7 , 2 , init_io_restrict_pll },
- { "INIT_END_REPEAT" , 0x36, 1 , 0 , 0 , init_end_repeat },
- { "INIT_COPY" , 0x37, 11 , 0 , 0 , init_copy },
- { "INIT_NOT" , 0x38, 1 , 0 , 0 , init_not },
- { "INIT_IO_FLAG_CONDITION" , 0x39, 2 , 0 , 0 , init_io_flag_condition },
- { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, 18 , 17 , 2 , init_idx_addr_latched },
- { "INIT_IO_RESTRICT_PLL2" , 0x4A, 11 , 6 , 4 , init_io_restrict_pll2 },
- { "INIT_PLL2" , 0x4B, 9 , 0 , 0 , init_pll2 },
- { "INIT_I2C_BYTE" , 0x4C, 4 , 3 , 3 , init_i2c_byte },
- { "INIT_ZM_I2C_BYTE" , 0x4D, 4 , 3 , 2 , init_zm_i2c_byte },
- { "INIT_ZM_I2C" , 0x4E, 4 , 3 , 1 , init_zm_i2c },
- { "INIT_TMDS" , 0x4F, 5 , 0 , 0 , init_tmds },
- { "INIT_ZM_TMDS_GROUP" , 0x50, 3 , 2 , 2 , init_zm_tmds_group },
- { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, 5 , 4 , 1 , init_cr_idx_adr_latch },
- { "INIT_CR" , 0x52, 4 , 0 , 0 , init_cr },
- { "INIT_ZM_CR" , 0x53, 3 , 0 , 0 , init_zm_cr },
- { "INIT_ZM_CR_GROUP" , 0x54, 2 , 1 , 2 , init_zm_cr_group },
- { "INIT_CONDITION_TIME" , 0x56, 3 , 0 , 0 , init_condition_time },
- { "INIT_ZM_REG_SEQUENCE" , 0x58, 6 , 5 , 4 , init_zm_reg_sequence },
+ { "INIT_IO_RESTRICT_PROG" , 0x32, init_io_restrict_prog },
+ { "INIT_REPEAT" , 0x33, init_repeat },
+ { "INIT_IO_RESTRICT_PLL" , 0x34, init_io_restrict_pll },
+ { "INIT_END_REPEAT" , 0x36, init_end_repeat },
+ { "INIT_COPY" , 0x37, init_copy },
+ { "INIT_NOT" , 0x38, init_not },
+ { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
+ { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
+ { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
+ { "INIT_PLL2" , 0x4B, init_pll2 },
+ { "INIT_I2C_BYTE" , 0x4C, init_i2c_byte },
+ { "INIT_ZM_I2C_BYTE" , 0x4D, init_zm_i2c_byte },
+ { "INIT_ZM_I2C" , 0x4E, init_zm_i2c },
+ { "INIT_TMDS" , 0x4F, init_tmds },
+ { "INIT_ZM_TMDS_GROUP" , 0x50, init_zm_tmds_group },
+ { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, init_cr_idx_adr_latch },
+ { "INIT_CR" , 0x52, init_cr },
+ { "INIT_ZM_CR" , 0x53, init_zm_cr },
+ { "INIT_ZM_CR_GROUP" , 0x54, init_zm_cr_group },
+ { "INIT_CONDITION_TIME" , 0x56, init_condition_time },
+ { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
- { "INIT_SUB_DIRECT" , 0x5B, 3 , 0 , 0 , init_sub_direct },
- { "INIT_COPY_NV_REG" , 0x5F, 22 , 0 , 0 , init_copy_nv_reg },
- { "INIT_ZM_INDEX_IO" , 0x62, 5 , 0 , 0 , init_zm_index_io },
- { "INIT_COMPUTE_MEM" , 0x63, 1 , 0 , 0 , init_compute_mem },
- { "INIT_RESET" , 0x65, 13 , 0 , 0 , init_reset },
- { "INIT_CONFIGURE_MEM" , 0x66, 1 , 0 , 0 , init_configure_mem },
- { "INIT_CONFIGURE_CLK" , 0x67, 1 , 0 , 0 , init_configure_clk },
- { "INIT_CONFIGURE_PREINIT" , 0x68, 1 , 0 , 0 , init_configure_preinit },
- { "INIT_IO" , 0x69, 5 , 0 , 0 , init_io },
- { "INIT_SUB" , 0x6B, 2 , 0 , 0 , init_sub },
- { "INIT_RAM_CONDITION" , 0x6D, 3 , 0 , 0 , init_ram_condition },
- { "INIT_NV_REG" , 0x6E, 13 , 0 , 0 , init_nv_reg },
- { "INIT_MACRO" , 0x6F, 2 , 0 , 0 , init_macro },
- { "INIT_DONE" , 0x71, 1 , 0 , 0 , init_done },
- { "INIT_RESUME" , 0x72, 1 , 0 , 0 , init_resume },
+ { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
+ { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
+ { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
+ { "INIT_COMPUTE_MEM" , 0x63, init_compute_mem },
+ { "INIT_RESET" , 0x65, init_reset },
+ { "INIT_CONFIGURE_MEM" , 0x66, init_configure_mem },
+ { "INIT_CONFIGURE_CLK" , 0x67, init_configure_clk },
+ { "INIT_CONFIGURE_PREINIT" , 0x68, init_configure_preinit },
+ { "INIT_IO" , 0x69, init_io },
+ { "INIT_SUB" , 0x6B, init_sub },
+ { "INIT_RAM_CONDITION" , 0x6D, init_ram_condition },
+ { "INIT_NV_REG" , 0x6E, init_nv_reg },
+ { "INIT_MACRO" , 0x6F, init_macro },
+ { "INIT_DONE" , 0x71, init_done },
+ { "INIT_RESUME" , 0x72, init_resume },
/* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
- { "INIT_TIME" , 0x74, 3 , 0 , 0 , init_time },
- { "INIT_CONDITION" , 0x75, 2 , 0 , 0 , init_condition },
- { "INIT_IO_CONDITION" , 0x76, 2 , 0 , 0 , init_io_condition },
- { "INIT_INDEX_IO" , 0x78, 6 , 0 , 0 , init_index_io },
- { "INIT_PLL" , 0x79, 7 , 0 , 0 , init_pll },
- { "INIT_ZM_REG" , 0x7A, 9 , 0 , 0 , init_zm_reg },
- /* INIT_RAM_RESTRICT_PLL's length is adjusted by the BIT M table */
- { "INIT_RAM_RESTRICT_PLL" , 0x87, 2 , 0 , 0 , init_ram_restrict_pll },
- { "INIT_8C" , 0x8C, 1 , 0 , 0 , init_8c },
- { "INIT_8D" , 0x8D, 1 , 0 , 0 , init_8d },
- { "INIT_GPIO" , 0x8E, 1 , 0 , 0 , init_gpio },
- /* INIT_RAM_RESTRICT_ZM_REG_GROUP's mult is loaded by M table in BIT */
- { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, 7 , 6 , 0 , init_ram_restrict_zm_reg_group },
- { "INIT_COPY_ZM_REG" , 0x90, 9 , 0 , 0 , init_copy_zm_reg },
- { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, 6 , 5 , 4 , init_zm_reg_group_addr_latched },
- { "INIT_RESERVED" , 0x92, 1 , 0 , 0 , init_reserved },
- { "INIT_96" , 0x96, 17 , 0 , 0 , init_96 },
- { "INIT_97" , 0x97, 13 , 0 , 0 , init_97 },
- { "INIT_AUXCH" , 0x98, 6 , 5 , 2 , init_auxch },
- { "INIT_ZM_AUXCH" , 0x99, 6 , 5 , 1 , init_zm_auxch },
- { NULL , 0 , 0 , 0 , 0 , NULL }
+ { "INIT_TIME" , 0x74, init_time },
+ { "INIT_CONDITION" , 0x75, init_condition },
+ { "INIT_IO_CONDITION" , 0x76, init_io_condition },
+ { "INIT_INDEX_IO" , 0x78, init_index_io },
+ { "INIT_PLL" , 0x79, init_pll },
+ { "INIT_ZM_REG" , 0x7A, init_zm_reg },
+ { "INIT_RAM_RESTRICT_PLL" , 0x87, init_ram_restrict_pll },
+ { "INIT_8C" , 0x8C, init_8c },
+ { "INIT_8D" , 0x8D, init_8d },
+ { "INIT_GPIO" , 0x8E, init_gpio },
+ { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, init_ram_restrict_zm_reg_group },
+ { "INIT_COPY_ZM_REG" , 0x90, init_copy_zm_reg },
+ { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched },
+ { "INIT_RESERVED" , 0x92, init_reserved },
+ { "INIT_96" , 0x96, init_96 },
+ { "INIT_97" , 0x97, init_97 },
+ { "INIT_AUXCH" , 0x98, init_auxch },
+ { "INIT_ZM_AUXCH" , 0x99, init_zm_auxch },
+ { NULL , 0 , NULL }
};
-static unsigned int get_init_table_entry_length(struct nvbios *bios, unsigned int offset, int i)
-{
- /* Calculates the length of a given init table entry. */
- return itbl_entry[i].length + bios->data[offset + itbl_entry[i].length_offset]*itbl_entry[i].length_multiplier;
-}
-
#define MAX_TABLE_OPS 1000
static int
@@ -3056,7 +3014,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
* is changed back to EXECUTE.
*/
- int count = 0, i;
+ int count = 0, i, res;
uint8_t id;
/*
@@ -3076,22 +3034,21 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
offset, itbl_entry[i].id, itbl_entry[i].name);
/* execute eventual command handler */
- if (itbl_entry[i].handler)
- if (!(*itbl_entry[i].handler)(bios, offset, iexec))
- break;
+ res = (*itbl_entry[i].handler)(bios, offset, iexec);
+ if (!res)
+ break;
+ /*
+ * Add the offset of the current command including all data
+ * of that command. The offset will then be pointing on the
+ * next op code.
+ */
+ offset += res;
} else {
NV_ERROR(bios->dev,
"0x%04X: Init table command not found: "
"0x%02X\n", offset, id);
return -ENOENT;
}
-
- /*
- * Add the offset of the current command including all data
- * of that command. The offset will then be pointing on the
- * next op code.
- */
- offset += get_init_table_entry_length(bios, offset, i);
}
if (offset >= bios->length)
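The hunk above is the pay-off for the return-type change running through every init_* handler in this patch: a handler now returns the number of bytes its opcode occupies (0 to abort the table), so parse_init_table() advances the offset from the handler's return value instead of the static length/length_offset/length_multiplier columns that used to live in itbl_entry. A minimal sketch of the resulting dispatch loop (run_init_table is a hypothetical condensation for illustration; itbl_entry, MAX_TABLE_OPS and struct init_exec are as in the patch):

/*
 * Each handler returns the byte length of its opcode, or 0 to stop
 * parsing (INIT_DONE, or a handler hit a fatal condition).
 */
static int
run_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	int i, res, count = 0;

	while (offset < bios->length && count++ < MAX_TABLE_OPS) {
		uint8_t id = bios->data[offset];

		for (i = 0; itbl_entry[i].name; i++)
			if (itbl_entry[i].id == id)
				break;
		if (!itbl_entry[i].name)
			return -ENOENT;		/* unknown opcode */

		res = (*itbl_entry[i].handler)(bios, offset, iexec);
		if (!res)
			break;			/* stop parsing this table */
		offset += res;			/* opcode byte plus its data */
	}
	return 0;
}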
@@ -3198,16 +3155,25 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
}
#ifdef __powerpc__
/* Powerbook specific quirks */
- if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
- nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
- if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
- if (script == LVDS_PANEL_ON) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
- }
- if (script == LVDS_PANEL_OFF) {
- bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
- bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
+ if ((dev->pci_device & 0xffff) == 0x0179 ||
+ (dev->pci_device & 0xffff) == 0x0189 ||
+ (dev->pci_device & 0xffff) == 0x0329) {
+ if (script == LVDS_RESET) {
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+
+ } else if (script == LVDS_PANEL_ON) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+ | (1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+
+ } else if (script == LVDS_PANEL_OFF) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
+ bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
+ & ~(1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT,
+ bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
}
}
#endif
@@ -3799,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct init_exec iexec = {true, false};
struct nvbios *bios = &dev_priv->VBIOS;
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
@@ -3854,7 +3819,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
* script tables is a pointer to the script to execute.
*/
- NV_DEBUG(dev, "Searching for output entry for %d %d %d\n",
+ NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
dcbent->type, dcbent->location, dcbent->or);
otable = bios_output_config_match(dev, dcbent, table[1] +
bios->display.script_table_ptr,
@@ -3879,27 +3844,25 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
}
- bios->display.output = dcbent;
-
if (pxclk == 0) {
script = ROM16(otable[6]);
if (!script) {
- NV_DEBUG(dev, "output script 0 not found\n");
+ NV_DEBUG_KMS(dev, "output script 0 not found\n");
return 1;
}
NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -1) {
script = ROM16(otable[8]);
if (!script) {
- NV_DEBUG(dev, "output script 1 not found\n");
+ NV_DEBUG_KMS(dev, "output script 1 not found\n");
return 1;
}
NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -2) {
if (table[4] >= 12)
@@ -3907,12 +3870,12 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
else
script = 0;
if (!script) {
- NV_DEBUG(dev, "output script 2 not found\n");
+ NV_DEBUG_KMS(dev, "output script 2 not found\n");
return 1;
}
NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk > 0) {
script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3924,19 +3887,19 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk < 0) {
script = ROM16(otable[table[4] + i*6 + 4]);
if (script)
script = clkcmptable(bios, script, -pxclk);
if (!script) {
- NV_DEBUG(dev, "clock script 1 not found\n");
+ NV_DEBUG_KMS(dev, "clock script 1 not found\n");
return 1;
}
NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
}
return 0;
@@ -4606,10 +4569,6 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
* stuff that we don't use - their use currently unknown
*/
- uint16_t rr_strap_xlat;
- uint8_t rr_group_count;
- int i;
-
/*
* Older bios versions don't have a sufficiently long table for
* what we want
@@ -4618,24 +4577,13 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
return 0;
if (bitentry->id[1] < 2) {
- rr_group_count = bios->data[bitentry->offset + 2];
- rr_strap_xlat = ROM16(bios->data[bitentry->offset + 3]);
+ bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
+ bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
} else {
- rr_group_count = bios->data[bitentry->offset + 0];
- rr_strap_xlat = ROM16(bios->data[bitentry->offset + 1]);
+ bios->ram_restrict_group_count = bios->data[bitentry->offset + 0];
+ bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]);
}
- /* adjust length of INIT_87 */
- for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != 0x87); i++);
- itbl_entry[i].length += rr_group_count * 4;
-
- /* set up multiplier for INIT_RAM_RESTRICT_ZM_REG_GROUP */
- for (; itbl_entry[i].name && (itbl_entry[i].id != 0x8f); i++);
- itbl_entry[i].length_multiplier = rr_group_count * 4;
-
- init_ram_restrict_zm_reg_group_blocklen = itbl_entry[i].length_multiplier;
- bios->ram_restrict_tbl_ptr = rr_strap_xlat;
-
return 0;
}
@@ -5234,7 +5182,7 @@ parse_dcb_connector_table(struct nvbios *bios)
int i;
if (!bios->bdcb.connector_table_ptr) {
- NV_DEBUG(dev, "No DCB connector table present\n");
+ NV_DEBUG_KMS(dev, "No DCB connector table present\n");
return;
}
@@ -5451,52 +5399,49 @@ static bool
parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
- if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
- conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
- conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
- conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
- conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
- conn != 0xf2205004 && conn != 0xf2209004) {
- NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
-
- /* cause output setting to fail for !TV, so message is seen */
- if ((conn & 0xf) != 0x1)
- dcb->entries = 0;
-
- return false;
- }
- /* most of the below is a "best guess" atm */
- entry->type = conn & 0xf;
- if (entry->type == 2)
- /* another way of specifying straps based lvds... */
+ switch (conn & 0x0000000f) {
+ case 0:
+ entry->type = OUTPUT_ANALOG;
+ break;
+ case 1:
+ entry->type = OUTPUT_TV;
+ break;
+ case 2:
+ case 3:
entry->type = OUTPUT_LVDS;
- if (entry->type == 4) { /* digital */
- if (conn & 0x10)
- entry->type = OUTPUT_LVDS;
- else
+ break;
+ case 4:
+ switch ((conn & 0x000000f0) >> 4) {
+ case 0:
entry->type = OUTPUT_TMDS;
+ break;
+ case 1:
+ entry->type = OUTPUT_LVDS;
+ break;
+ default:
+ NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
+ (conn & 0x000000f0) >> 4);
+ return false;
+ }
+ break;
+ default:
+ NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
+ return false;
}
- /* what's in bits 5-13? could be some encoder maker thing, in tv case */
- entry->i2c_index = (conn >> 14) & 0xf;
- /* raw heads field is in range 0-1, so move to 1-2 */
- entry->heads = ((conn >> 18) & 0x7) + 1;
- entry->location = (conn >> 21) & 0xf;
- /* unused: entry->bus = (conn >> 25) & 0x7; */
- /* set or to be same as heads -- hopefully safe enough */
- entry->or = entry->heads;
+
+ entry->i2c_index = (conn & 0x0003c000) >> 14;
+ entry->heads = ((conn & 0x001c0000) >> 18) + 1;
+ entry->or = entry->heads; /* same as heads, hopefully safe enough */
+ entry->location = (conn & 0x01e00000) >> 21;
+ entry->bus = (conn & 0x0e000000) >> 25;
entry->duallink_possible = false;
switch (entry->type) {
case OUTPUT_ANALOG:
entry->crtconf.maxfreq = (conf & 0xffff) * 10;
break;
- case OUTPUT_LVDS:
- /*
- * This is probably buried in conn's unknown bits.
- * This will upset EDID-ful models, if they exist
- */
- entry->lvdsconf.use_straps_for_mode = true;
- entry->lvdsconf.use_power_scripts = true;
+ case OUTPUT_TV:
+ entry->tvconf.has_component_output = false;
break;
case OUTPUT_TMDS:
/*
@@ -5505,8 +5450,12 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
*/
fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
break;
- case OUTPUT_TV:
- entry->tvconf.has_component_output = false;
+ case OUTPUT_LVDS:
+ if ((conn & 0x00003f00) != 0x10)
+ entry->lvdsconf.use_straps_for_mode = true;
+ entry->lvdsconf.use_power_scripts = true;
+ break;
+ default:
break;
}
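For reference, the bit positions the rewritten parse_dcb15_entry() extracts from the DCB 1.5 connection dword, derived from the masks in the hunk above (bits not listed are left undecoded by the patch):

/*
 * conn[ 3: 0]  output type: 0 analog, 1 TV, 2/3 LVDS, 4 digital
 * conn[ 7: 4]  digital subtype when type == 4: 0 TMDS, 1 LVDS
 * conn[17:14]  i2c_index
 * conn[20:18]  heads - 1
 * conn[24:21]  location
 * conn[27:25]  bus
 */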
@@ -5581,11 +5530,13 @@ void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
dcb->entries = newentries;
}
-static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct bios_parsed_dcb *bdcb = &bios->bdcb;
struct parsed_dcb *dcb;
- uint16_t dcbptr, i2ctabptr = 0;
+ uint16_t dcbptr = 0, i2ctabptr = 0;
uint8_t *dcbtable;
uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
bool configblock = true;
@@ -5596,16 +5547,18 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool two
dcb->entries = 0;
/* get the offset from 0x36 */
- dcbptr = ROM16(bios->data[0x36]);
+ if (dev_priv->card_type > NV_04) {
+ dcbptr = ROM16(bios->data[0x36]);
+ if (dcbptr == 0x0000)
+ NV_WARN(dev, "No output data (DCB) found in BIOS\n");
+ }
+ /* this situation likely means a really old card, pre DCB */
if (dcbptr == 0x0) {
- NV_WARN(dev, "No output data (DCB) found in BIOS, "
- "assuming a CRT output exists\n");
- /* this situation likely means a really old card, pre DCB */
+ NV_INFO(dev, "Assuming a CRT output exists\n");
fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
- if (nv04_tv_identify(dev,
- bios->legacy.i2c_indices.tv) >= 0)
+ if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_tv_output(dcb, twoHeads);
return 0;
@@ -5909,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct nvbios *bios = &dev_priv->VBIOS;
struct init_exec iexec = { true, false };
+ mutex_lock(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
+ mutex_unlock(&bios->lock);
}
static bool NVInitVBIOS(struct drm_device *dev)
@@ -5920,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->VBIOS;
memset(bios, 0, sizeof(struct nvbios));
+ mutex_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))
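Taken together, the last two hunks of this file make nouveau_bios_run_init_table() the single, serialised entry point for executing init scripts: the new bios->lock (initialised in NVInitVBIOS() and declared in the header change below) protects bios->display.output, which handlers such as INIT_AUXCH read while a table is being parsed. Reassembled from the hunk, the function after this patch looks roughly like:

void
nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
			    struct dcb_entry *dcbent)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvbios *bios = &dev_priv->VBIOS;
	struct init_exec iexec = { true, false };

	mutex_lock(&bios->lock);
	bios->display.output = dcbent;	/* consumed by INIT_AUXCH and friends */
	parse_init_table(bios, table, &iexec);
	bios->display.output = NULL;
	mutex_unlock(&bios->lock);
}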
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 1d5f10b..fd94bd6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
struct drm_device *dev;
struct nouveau_bios_info pub;
+ struct mutex lock;
+
uint8_t data[NV_PROM_SIZE];
unsigned int length;
bool execute;
@@ -227,6 +229,7 @@ struct nvbios {
uint16_t pll_limit_tbl_ptr;
uint16_t ram_restrict_tbl_ptr;
+ uint8_t ram_restrict_group_count;
uint16_t some_script_ptr; /* BIT I + 14 */
uint16_t init96_tbl_ptr; /* BIT I + 16 */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 320a14b..028719f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -33,10 +33,13 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include <linux/log2.h>
+
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
ttm_bo_kunmap(&nvbo->kmap);
@@ -44,12 +47,87 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
+ if (nvbo->tile)
+ nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
+
spin_lock(&dev_priv->ttm.bo_list_lock);
list_del(&nvbo->head);
spin_unlock(&dev_priv->ttm.bo_list_lock);
kfree(nvbo);
}
+static void
+nouveau_bo_fixup_align(struct drm_device *dev,
+ uint32_t tile_mode, uint32_t tile_flags,
+ int *align, int *size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*
+ * Some of the tile_flags have a periodic structure of N*4096 bytes,
+ * align to that as well as the page size. Align the size to the
+ * appropriate boundaries. This does imply that sizes are rounded up by
+ * 3-7 pages, so be aware of this and do not waste memory by allocating
+ * many small buffers.
+ */
+ if (dev_priv->card_type == NV_50) {
+ uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
+ int i;
+
+ switch (tile_flags) {
+ case 0x1800:
+ case 0x2800:
+ case 0x4800:
+ case 0x7a00:
+ if (is_power_of_2(block_size)) {
+ for (i = 1; i < 10; i++) {
+ *align = 12 * i * block_size;
+ if (!(*align % 65536))
+ break;
+ }
+ } else {
+ for (i = 1; i < 10; i++) {
+ *align = 8 * i * block_size;
+ if (!(*align % 65536))
+ break;
+ }
+ }
+ *size = roundup(*size, *align);
+ break;
+ default:
+ break;
+ }
+
+ } else {
+ if (tile_mode) {
+ if (dev_priv->chipset >= 0x40) {
+ *align = 65536;
+ *size = roundup(*size, 64 * tile_mode);
+
+ } else if (dev_priv->chipset >= 0x30) {
+ *align = 32768;
+ *size = roundup(*size, 64 * tile_mode);
+
+ } else if (dev_priv->chipset >= 0x20) {
+ *align = 16384;
+ *size = roundup(*size, 64 * tile_mode);
+
+ } else if (dev_priv->chipset >= 0x10) {
+ *align = 16384;
+ *size = roundup(*size, 32 * tile_mode);
+ }
+ }
+ }
+
+ /* ALIGN works only on powers of two. */
+ *size = roundup(*size, PAGE_SIZE);
+
+ if (dev_priv->card_type == NV_50) {
+ *size = roundup(*size, 65536);
+ *align = max(65536, *align);
+ }
+}
+
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
@@ -58,7 +136,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
- int ret, n = 0;
+ int ret = 0;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
@@ -70,59 +148,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
- /*
- * Some of the tile_flags have a periodic structure of N*4096 bytes,
- * align to to that as well as the page size. Overallocate memory to
- * avoid corruption of other buffer objects.
- */
- switch (tile_flags) {
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7a00:
- if (dev_priv->chipset >= 0xA0) {
- /* This is based on high end cards with 448 bits
- * memory bus, could be different elsewhere.*/
- size += 6 * 28672;
- /* 8 * 28672 is the actual alignment requirement,
- * but we must also align to page size. */
- align = 2 * 8 * 28672;
- } else if (dev_priv->chipset >= 0x90) {
- size += 3 * 16384;
- align = 12 * 16834;
- } else {
- size += 3 * 8192;
- /* 12 * 8192 is the actual alignment requirement,
- * but we must also align to page size. */
- align = 2 * 12 * 8192;
- }
- break;
- default:
- break;
- }
-
+ nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
align >>= PAGE_SHIFT;
- size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- if (dev_priv->card_type == NV_50) {
- size = (size + 65535) & ~65535;
- if (align < (65536 / PAGE_SIZE))
- align = (65536 / PAGE_SIZE);
- }
-
- if (flags & TTM_PL_FLAG_VRAM)
- nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
- if (flags & TTM_PL_FLAG_TT)
- nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
- nvbo->placement.placement = nvbo->placements;
- nvbo->placement.busy_placement = nvbo->placements;
- nvbo->placement.num_placement = n;
- nvbo->placement.num_busy_placement = n;
+ nouveau_bo_placement_set(nvbo, flags);
nvbo->channel = chan;
- nouveau_bo_placement_set(nvbo, flags);
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement, align, 0,
false, NULL, size, nouveau_bo_del_ttm);
@@ -154,6 +187,11 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
nvbo->placement.busy_placement = nvbo->placements;
nvbo->placement.num_placement = n;
nvbo->placement.num_busy_placement = n;
+
+ if (nvbo->pin_refcnt) {
+ while (n--)
+ nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
+ }
}
int
@@ -311,8 +349,10 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
struct drm_device *dev = dev_priv->dev;
switch (dev_priv->gart_info.type) {
+#if __OS_HAS_AGP
case NOUVEAU_GART_AGP:
return ttm_agp_backend_init(bdev, dev->agp->bridge);
+#endif
case NOUVEAU_GART_SGDMA:
return nouveau_sgdma_init_ttm(dev);
default:
@@ -398,16 +438,23 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
struct nouveau_bo *nvbo = nouveau_bo(bo);
switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
+ TTM_PL_FLAG_SYSTEM);
+ break;
default:
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
break;
}
+
+ *pl = nvbo->placement;
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
* TTM_PL_{VRAM,TT} directly.
*/
+
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_bo *nvbo, bool evict, bool no_wait,
@@ -422,6 +469,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
evict, no_wait, new_mem);
+ if (nvbo->channel && nvbo->channel != chan)
+ ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
return ret;
}
@@ -442,22 +491,20 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
}
static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+ int no_wait, struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_channel *chan;
uint64_t src_offset, dst_offset;
uint32_t page_count;
int ret;
chan = nvbo->channel;
- if (!chan || nvbo->tile_flags || nvbo->no_vm) {
+ if (!chan || nvbo->tile_flags || nvbo->no_vm)
chan = dev_priv->channel;
- if (!chan)
- return -EINVAL;
- }
src_offset = old_mem->mm_node->start << PAGE_SHIFT;
dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
@@ -537,7 +584,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
placement.fpfn = placement.lpfn = 0;
placement.num_placement = placement.num_busy_placement = 1;
- placement.placement = &placement_memtype;
+ placement.placement = placement.busy_placement = &placement_memtype;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -549,7 +596,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
if (ret)
goto out;
@@ -575,7 +622,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
placement.fpfn = placement.lpfn = 0;
placement.num_placement = placement.num_busy_placement = 1;
- placement.placement = &placement_memtype;
+ placement.placement = placement.busy_placement = &placement_memtype;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -587,7 +634,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
if (ret)
goto out;
@@ -602,51 +649,106 @@ out:
}
static int
-nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+ struct nouveau_tile_reg **new_tile)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_device *dev = dev_priv->dev;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ uint64_t offset;
int ret;
- if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
- !nvbo->no_vm) {
- uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
+ if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
+ /* Nothing to do. */
+ *new_tile = NULL;
+ return 0;
+ }
+
+ offset = new_mem->mm_node->start << PAGE_SHIFT;
+ if (dev_priv->card_type == NV_50) {
ret = nv50_mem_vm_bind_linear(dev,
offset + dev_priv->vm_vram_base,
new_mem->size, nvbo->tile_flags,
offset);
if (ret)
return ret;
+
+ } else if (dev_priv->card_type >= NV_10) {
+ *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
+ nvbo->tile_mode);
}
- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ return 0;
+}
+
+static void
+nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
+ struct nouveau_tile_reg *new_tile,
+ struct nouveau_tile_reg **old_tile)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ if (dev_priv->card_type >= NV_10 &&
+ dev_priv->card_type < NV_50) {
+ if (*old_tile)
+ nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
+ *old_tile = new_tile;
+ }
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+ bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct nouveau_tile_reg *new_tile = NULL;
+ int ret = 0;
+
+ ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+ if (ret)
+ return ret;
+
+ /* Software copy if the card isn't up and running yet. */
+ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
+ !dev_priv->channel) {
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ goto out;
+ }
+
+ /* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
BUG_ON(bo->mem.mm_node != NULL);
bo->mem = *new_mem;
new_mem->mm_node = NULL;
- return 0;
+ goto out;
}
- if (new_mem->mem_type == TTM_PL_SYSTEM) {
- if (old_mem->mem_type == TTM_PL_SYSTEM)
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
- if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- } else {
- if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- }
+ /* Hardware assisted copy. */
+ if (new_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+ else if (old_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+ else
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
- return 0;
+ if (!ret)
+ goto out;
+
+ /* Fallback to software copy. */
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+
+out:
+ if (ret)
+ nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+ else
+ nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+
+ return ret;
}
static int
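With the open-coded overallocation removed, nouveau_bo_new() delegates all tiling-related rounding to nouveau_bo_fixup_align() and the placement bookkeeping to nouveau_bo_placement_set(). A condensed sketch of the resulting allocation path (a fragment, not a complete function; error handling omitted):

/* size and align arrive as nouveau_bo_new() parameters, in bytes */
nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
align >>= PAGE_SHIFT;		/* ttm_bo_init() expects pages, not bytes */

nouveau_bo_placement_set(nvbo, flags);
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
		  ttm_bo_type_device, &nvbo->placement, align, 0,
		  false, NULL, size, nouveau_bo_del_ttm);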
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 9aaa972..2281f99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return ret;
}
+ nouveau_dma_pre_init(chan);
+
/* Locate channel's user control regs */
if (dev_priv->card_type < NV_40)
user = NV03_USER(channel);
@@ -235,47 +237,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return 0;
}
-int
-nouveau_channel_idle(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- uint32_t caches;
- int idle;
-
- if (!chan) {
- NV_ERROR(dev, "no channel...\n");
- return 1;
- }
-
- caches = nv_rd32(dev, NV03_PFIFO_CACHES);
- nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
-
- if (engine->fifo.channel_id(dev) != chan->id) {
- struct nouveau_gpuobj *ramfc =
- chan->ramfc ? chan->ramfc->gpuobj : NULL;
-
- if (!ramfc) {
- NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
- return 1;
- }
-
- engine->instmem.prepare_access(dev, false);
- if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
- idle = 0;
- else
- idle = 1;
- engine->instmem.finish_access(dev);
- } else {
- idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- }
-
- nv_wr32(dev, NV03_PFIFO_CACHES, caches);
- return idle;
-}
-
/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
@@ -317,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
/* Ensure the channel is no longer active on the GPU */
pfifo->reassign(dev, false);
- if (pgraph->channel(dev) == chan) {
- pgraph->fifo_access(dev, false);
+ pgraph->fifo_access(dev, false);
+ if (pgraph->channel(dev) == chan)
pgraph->unload_context(dev);
- pgraph->fifo_access(dev, true);
- }
pgraph->destroy_context(chan);
+ pgraph->fifo_access(dev, true);
if (pfifo->channel_id(dev) == chan->id) {
pfifo->disable(dev);
@@ -414,7 +374,9 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
init->subchan[0].grclass = 0x0039;
else
init->subchan[0].grclass = 0x5039;
- init->nr_subchan = 1;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
/* Named memory object area */
ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 032cf09..d2f6335 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -24,9 +24,12 @@
*
*/
+#include <acpi/button.h>
+
#include "drmP.h"
#include "drm_edid.h"
#include "drm_crtc_helper.h"
+
#include "nouveau_reg.h"
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
@@ -83,14 +86,17 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
static void
nouveau_connector_destroy(struct drm_connector *drm_connector)
{
- struct nouveau_connector *connector = nouveau_connector(drm_connector);
- struct drm_device *dev = connector->base.dev;
+ struct nouveau_connector *nv_connector =
+ nouveau_connector(drm_connector);
+ struct drm_device *dev;
- NV_DEBUG(dev, "\n");
-
- if (!connector)
+ if (!nv_connector)
return;
+ dev = nv_connector->base.dev;
+ NV_DEBUG_KMS(dev, "\n");
+
+ kfree(nv_connector->edid);
drm_sysfs_connector_remove(drm_connector);
drm_connector_cleanup(drm_connector);
kfree(drm_connector);
@@ -233,10 +239,21 @@ nouveau_connector_detect(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
if (nv_encoder && nv_connector->native_mode) {
+#ifdef CONFIG_ACPI
+ if (!nouveau_ignorelid && !acpi_lid_open())
+ return connector_status_disconnected;
+#endif
nouveau_connector_set_encoder(connector, nv_encoder);
return connector_status_connected;
}
+ /* Cleanup the previous EDID block. */
+ if (nv_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, NULL);
+ kfree(nv_connector->edid);
+ nv_connector->edid = NULL;
+ }
+
i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
if (i2c) {
nouveau_connector_ddc_prepare(connector, &flags);
@@ -247,7 +264,7 @@ nouveau_connector_detect(struct drm_connector *connector)
if (!nv_connector->edid) {
NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
drm_get_connector_name(connector));
- return connector_status_disconnected;
+ goto detect_analog;
}
if (nv_encoder->dcb->type == OUTPUT_DP &&
@@ -281,6 +298,7 @@ nouveau_connector_detect(struct drm_connector *connector)
return connector_status_connected;
}
+detect_analog:
nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
if (!nv_encoder)
nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
@@ -420,7 +438,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector)
/* Use preferred mode if there is one.. */
list_for_each_entry(mode, &connector->base.probed_modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- NV_DEBUG(dev, "native mode from preferred\n");
+ NV_DEBUG_KMS(dev, "native mode from preferred\n");
return drm_mode_duplicate(dev, mode);
}
}
@@ -445,7 +463,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector)
largest = mode;
}
- NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n",
+ NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n",
high_w, high_h, high_v);
return largest ? drm_mode_duplicate(dev, largest) : NULL;
}
@@ -687,8 +705,12 @@ nouveau_connector_create_lvds(struct drm_device *dev,
*/
if (!nv_connector->edid && !nv_connector->native_mode &&
!dev_priv->VBIOS.pub.fp_no_ddc) {
- nv_connector->edid =
+ struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
+ if (edid) {
+ nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ *(nv_connector->edid) = *edid;
+ }
}
if (!nv_connector->edid)
@@ -725,7 +747,7 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
struct drm_encoder *encoder;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
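Because nouveau_connector_destroy() now kfree()s nv_connector->edid unconditionally, the LVDS path above can no longer point it straight at the ROM image returned by nouveau_bios_embedded_edid(); the patch copies the first EDID block (EDID_LENGTH bytes) into a heap allocation instead. An equivalent, slightly more compact way to express that copy — a sketch, not what the patch itself uses — is kmemdup():

struct edid *edid = (struct edid *)nouveau_bios_embedded_edid(dev);

if (edid) {
	/* heap copy, so the common destroy path may kfree() it */
	nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
}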
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 7035536..50d9e67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -29,12 +29,22 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+void
+nouveau_dma_pre_init(struct nouveau_channel *chan)
+{
+ chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+ chan->dma.put = 0;
+ chan->dma.cur = chan->dma.put;
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+}
+
int
nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *m2mf = NULL;
+ struct nouveau_gpuobj *nvsw = NULL;
int ret, i;
/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
@@ -47,6 +57,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
if (ret)
return ret;
+ /* Create an NV_SW object for various sync purposes */
+ ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
+ if (ret)
+ return ret;
+
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
if (ret)
@@ -64,12 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
}
- /* Initialise DMA vars */
- chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
- chan->dma.put = 0;
- chan->dma.cur = chan->dma.put;
- chan->dma.free = chan->dma.max - chan->dma.cur;
-
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret)
@@ -87,6 +100,13 @@ nouveau_dma_init(struct nouveau_channel *chan)
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
OUT_RING(chan, NvNotify0);
+ /* Initialise NV_SW */
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
+
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
@@ -106,47 +126,52 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
chan->dma.cur += nr_dwords;
}
-static inline bool
-READ_GET(struct nouveau_channel *chan, uint32_t *get)
+/* Fetch and adjust GPU GET pointer
+ *
+ * Returns:
+ * value >= 0, the adjusted GET pointer
+ * -EINVAL if GET pointer currently outside main push buffer
+ * -EBUSY if timeout exceeded
+ */
+static inline int
+READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
{
uint32_t val;
val = nvchan_rd32(chan, chan->user_get);
- if (val < chan->pushbuf_base ||
- val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
- /* meaningless to dma_wait() except to know whether the
- * GPU has stalled or not
- */
- *get = val;
- return false;
+
+ /* reset counter as long as GET is still advancing, this is
+ * to avoid misdetecting a GPU lockup if the GPU happens to
+ * just be processing an operation that takes a long time
+ */
+ if (val != *prev_get) {
+ *prev_get = val;
+ *timeout = 0;
+ }
+
+ if ((++*timeout & 0xff) == 0) {
+ DRM_UDELAY(1);
+ if (*timeout > 100000)
+ return -EBUSY;
}
- *get = (val - chan->pushbuf_base) >> 2;
- return true;
+ if (val < chan->pushbuf_base ||
+ val > chan->pushbuf_base + (chan->dma.max << 2))
+ return -EINVAL;
+
+ return (val - chan->pushbuf_base) >> 2;
}
int
nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
- uint32_t get, prev_get = 0, cnt = 0;
- bool get_valid;
+ uint32_t prev_get = 0, cnt = 0;
+ int get;
while (chan->dma.free < size) {
- /* reset counter as long as GET is still advancing, this is
- * to avoid misdetecting a GPU lockup if the GPU happens to
- * just be processing an operation that takes a long time
- */
- get_valid = READ_GET(chan, &get);
- if (get != prev_get) {
- prev_get = get;
- cnt = 0;
- }
-
- if ((++cnt & 0xff) == 0) {
- DRM_UDELAY(1);
- if (cnt > 100000)
- return -EBUSY;
- }
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get == -EBUSY))
+ return -EBUSY;
/* loop until we have a usable GET pointer. the value
* we read from the GPU may be outside the main ring if
@@ -157,7 +182,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
* from the SKIPS area, so the code below doesn't have to deal
* with some fun corner cases.
*/
- if (!get_valid || get < NOUVEAU_DMA_SKIPS)
+ if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
continue;
if (get <= chan->dma.cur) {
@@ -183,6 +208,19 @@ nouveau_dma_wait(struct nouveau_channel *chan, int size)
* after processing the currently pending commands.
*/
OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+
+ /* wait for GET to depart from the skips area.
+ * prevents writing GET==PUT and causing a race
+ * condition that causes us to think the GPU is
+ * idle when it's not.
+ */
+ do {
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get == -EBUSY))
+ return -EBUSY;
+ if (unlikely(get == -EINVAL))
+ continue;
+ } while (get <= NOUVEAU_DMA_SKIPS);
WRITE_PUT(NOUVEAU_DMA_SKIPS);
/* we're now submitting commands at the start of
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 04e85d8..dabfd65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -46,10 +46,11 @@
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
NvSubM2MF = 0,
- NvSub2D = 1,
- NvSubCtxSurf2D = 1,
- NvSubGdiRect = 2,
- NvSubImageBlit = 3
+ NvSubSw = 1,
+ NvSub2D = 2,
+ NvSubCtxSurf2D = 2,
+ NvSubGdiRect = 3,
+ NvSubImageBlit = 4
};
/* Object handles. */
@@ -67,6 +68,7 @@ enum {
NvClipRect = 0x8000000b,
NvGdiRect = 0x8000000c,
NvImageBlit = 0x8000000d,
+ NvSw = 0x8000000e,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
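The reworked READ_GET() in nouveau_dma.c folds the old get_valid flag and the caller-maintained stall counter into one return convention: a non-negative value is the adjusted GET pointer (in dwords, relative to the push buffer base), -EINVAL means GET currently points outside the main push buffer, and -EBUSY means the retry limit was exceeded, i.e. GET has stopped advancing. Callers therefore poll like this (a sketch of the pattern nouveau_dma_wait() now uses in both of its loops):

uint32_t prev_get = 0, cnt = 0;
int get;

do {
	get = READ_GET(chan, &prev_get, &cnt);
	if (unlikely(get == -EBUSY))
		return -EBUSY;	/* GPU no longer making progress */
	/*
	 * -EINVAL: GET is outside the ring (e.g. mid-jump through the
	 * SKIPS area); just read it again until it lands somewhere usable.
	 */
} while (unlikely(get == -EINVAL) || get <= NOUVEAU_DMA_SKIPS);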
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index de61f46..f954ad9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -187,7 +187,7 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
if (ret)
return false;
- NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
+ NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
/* Keep all lanes at the same level.. */
for (i = 0; i < nv_encoder->dp.link_nr; i++) {
@@ -228,7 +228,7 @@ nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
int dpe_headerlen, ret, i;
- NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
config[0], config[1], config[2], config[3]);
dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
@@ -276,12 +276,12 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
bool cr_done, cr_max_vs, eq_done;
int ret = 0, i, tries, voltage;
- NV_DEBUG(dev, "link training!!\n");
+ NV_DEBUG_KMS(dev, "link training!!\n");
train:
cr_done = eq_done = false;
/* set link configuration */
- NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n",
+ NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n",
nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
@@ -297,7 +297,7 @@ train:
return false;
/* clock recovery */
- NV_DEBUG(dev, "\tbegin cr\n");
+ NV_DEBUG_KMS(dev, "\tbegin cr\n");
ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
if (ret)
goto stop;
@@ -314,7 +314,7 @@ train:
ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
if (ret)
break;
- NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
status[0], status[1]);
cr_done = true;
@@ -346,7 +346,7 @@ train:
goto stop;
/* channel equalisation */
- NV_DEBUG(dev, "\tbegin eq\n");
+ NV_DEBUG_KMS(dev, "\tbegin eq\n");
ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
if (ret)
goto stop;
@@ -357,7 +357,7 @@ train:
ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
if (ret)
break;
- NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
status[0], status[1]);
eq_done = true;
@@ -395,9 +395,9 @@ stop:
/* retry at a lower setting, if possible */
if (!ret && !(eq_done && cr_done)) {
- NV_DEBUG(dev, "\twe failed\n");
+ NV_DEBUG_KMS(dev, "\twe failed\n");
if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
- NV_DEBUG(dev, "retry link training at low rate\n");
+ NV_DEBUG_KMS(dev, "retry link training at low rate\n");
nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
goto train;
}
@@ -418,7 +418,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
if (ret)
return false;
- NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n"
+ NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n"
"display: link_bw %d, link_nr %d version 0x%02x\n",
nv_encoder->dcb->dpconf.link_bw,
nv_encoder->dcb->dpconf.link_nr,
@@ -446,7 +446,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint32_t tmp, ctrl, stat = 0, data32[4] = {};
int ret = 0, i, index = auxch->rd;
- NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
+ NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
@@ -472,7 +472,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
if (!(cmd & 1)) {
memcpy(data32, data, data_nr);
for (i = 0; i < 4; i++) {
- NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]);
+ NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
}
}
@@ -490,7 +490,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
nv_rd32(dev, NV50_AUXCH_CTRL(index)));
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
udelay(400);
@@ -502,9 +503,14 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
}
if (cmd & 1) {
+ if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+ ret = -EREMOTEIO;
+ goto out;
+ }
+
for (i = 0; i < 4; i++) {
data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
- NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]);
+ NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
}
memcpy(data, data32, data_nr);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 35249c3..da3b93b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -35,6 +35,10 @@
#include "drm_pciids.h"
+MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)");
+int nouveau_ctxfw = 0;
+module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+
MODULE_PARM_DESC(noagp, "Disable AGP");
int nouveau_noagp;
module_param_named(noagp, nouveau_noagp, int, 0400);
@@ -52,7 +56,7 @@ int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify;
+int nouveau_vram_notify = 1;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -67,6 +71,18 @@ MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
int nouveau_uscript_tmds = -1;
module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
+MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
+int nouveau_ignorelid = 0;
+module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+
+MODULE_PARM_DESC(noagp, "Disable all acceleration");
+int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
+int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -273,7 +289,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
chan = dev_priv->fifos[i];
- if (!chan)
+ if (!chan || !chan->pushbuf_bo)
continue;
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
@@ -341,7 +357,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = nouveau_ttm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 88b4c7b..1c15ef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,15 +54,24 @@ struct nouveau_fpriv {
#include "nouveau_drm.h"
#include "nouveau_reg.h"
#include "nouveau_bios.h"
+struct nouveau_grctx;
#define MAX_NUM_DCB_ENTRIES 16
#define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_TILE_NR 15
#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
#define NV50_VM_BLOCK (512*1024*1024ULL)
#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+struct nouveau_tile_reg {
+ struct nouveau_fence *fence;
+ uint32_t addr;
+ uint32_t size;
+ bool used;
+};
+
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
@@ -82,6 +91,7 @@ struct nouveau_bo {
uint32_t tile_mode;
uint32_t tile_flags;
+ struct nouveau_tile_reg *tile;
struct drm_gem_object *gem;
struct drm_file *cpu_filp;
@@ -276,8 +286,13 @@ struct nouveau_timer_engine {
};
struct nouveau_fb_engine {
+ int num_tiles;
+
int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev);
+
+ void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch);
};
struct nouveau_fifo_engine {
@@ -291,6 +306,8 @@ struct nouveau_fifo_engine {
void (*disable)(struct drm_device *);
void (*enable)(struct drm_device *);
bool (*reassign)(struct drm_device *, bool enable);
+ bool (*cache_flush)(struct drm_device *dev);
+ bool (*cache_pull)(struct drm_device *dev, bool enable);
int (*channel_id)(struct drm_device *);
@@ -317,6 +334,7 @@ struct nouveau_pgraph_engine {
bool accel_blocked;
void *ctxprog;
void *ctxvals;
+ int grctx_size;
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
@@ -328,6 +346,9 @@ struct nouveau_pgraph_engine {
void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct nouveau_channel *);
int (*unload_context)(struct drm_device *);
+
+ void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch);
};
struct nouveau_engine {
@@ -488,6 +509,8 @@ struct drm_nouveau_private {
void __iomem *ramin;
uint32_t ramin_size;
+ struct nouveau_bo *vga_ram;
+
struct workqueue_struct *wq;
struct work_struct irq_work;
@@ -546,6 +569,12 @@ struct drm_nouveau_private {
unsigned long sg_handle;
} gart_info;
+ /* nv10-nv40 tiling regions */
+ struct {
+ struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+ spinlock_t lock;
+ } tile;
+
/* G8x/G9x virtual address space */
uint64_t vm_gart_base;
uint64_t vm_gart_size;
@@ -554,6 +583,7 @@ struct drm_nouveau_private {
uint64_t vm_end;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
+ uint64_t vram_sys_base;
/* the mtrr covering the FB */
int fb_mtrr;
@@ -647,6 +677,10 @@ extern int nouveau_fbpercrtc;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
+extern int nouveau_ctxfw;
+extern int nouveau_ignorelid;
+extern int nouveau_nofbaccel;
+extern int nouveau_noaccel;
/* nouveau_state.c */
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
@@ -682,6 +716,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
extern int nouveau_mem_init(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
+ uint32_t addr,
+ uint32_t size,
+ uint32_t pitch);
+extern void nv10_mem_expire_tiling(struct drm_device *dev,
+ struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence);
extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
uint32_t size, uint32_t flags,
uint64_t phys);
@@ -710,7 +751,6 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t fb_ctxdma, uint32_t tt_ctxdma);
extern void nouveau_channel_free(struct nouveau_channel *);
-extern int nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
extern int nouveau_gpuobj_early_init(struct drm_device *);
@@ -753,6 +793,8 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
uint32_t *o_ret);
extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
+ struct nouveau_gpuobj **);
extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -801,6 +843,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
#endif
/* nouveau_dma.c */
+extern void nouveau_dma_pre_init(struct nouveau_channel *);
extern int nouveau_dma_init(struct nouveau_channel *);
extern int nouveau_dma_wait(struct nouveau_channel *, int size);
@@ -876,16 +919,22 @@ extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
+extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
+extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);
+extern bool nv04_fifo_cache_flush(struct drm_device *);
+extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
extern int nv04_fifo_channel_id(struct drm_device *);
extern int nv04_fifo_create_context(struct nouveau_channel *);
extern void nv04_fifo_destroy_context(struct nouveau_channel *);
@@ -938,6 +987,8 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
extern int nv10_graph_load_context(struct nouveau_channel *);
extern int nv10_graph_unload_context(struct drm_device *);
extern void nv10_graph_context_switch(struct drm_device *);
+extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv20_graph.c */
extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
@@ -949,6 +1000,8 @@ extern int nv20_graph_unload_context(struct drm_device *);
extern int nv20_graph_init(struct drm_device *);
extern void nv20_graph_takedown(struct drm_device *);
extern int nv30_graph_init(struct drm_device *);
+extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv40_graph.c */
extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
@@ -959,9 +1012,9 @@ extern int nv40_graph_create_context(struct nouveau_channel *);
extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
-extern int nv40_grctx_init(struct drm_device *);
-extern void nv40_grctx_fini(struct drm_device *);
-extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *);
+extern void nv40_grctx_init(struct nouveau_grctx *);
+extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv50_graph.c */
extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
@@ -975,6 +1028,12 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
+/* nouveau_grctx.c */
+extern int nouveau_grctx_prog_load(struct drm_device *);
+extern void nouveau_grctx_vals_load(struct drm_device *,
+ struct nouveau_gpuobj *);
+extern void nouveau_grctx_fini(struct drm_device *);
+
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
@@ -1023,8 +1082,7 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
/* nv04_dac.c */
extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
- struct drm_connector *connector);
+extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
extern int nv04_dac_output_offset(struct drm_encoder *encoder);
extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
@@ -1042,9 +1100,6 @@ extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
/* nv17_tv.c */
extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
- struct drm_connector *connector,
- uint32_t pin_mask);
/* nv04_display.c */
extern int nv04_display_create(struct drm_device *);
@@ -1207,14 +1262,24 @@ static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
pci_name(d->pdev), ##arg)
#ifndef NV_DEBUG_NOTRACE
#define NV_DEBUG(d, fmt, arg...) do { \
- if (drm_debug) { \
+ if (drm_debug & DRM_UT_DRIVER) { \
+ NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
+ __LINE__, ##arg); \
+ } \
+} while (0)
+#define NV_DEBUG_KMS(d, fmt, arg...) do { \
+ if (drm_debug & DRM_UT_KMS) { \
NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
__LINE__, ##arg); \
} \
} while (0)
#else
#define NV_DEBUG(d, fmt, arg...) do { \
- if (drm_debug) \
+ if (drm_debug & DRM_UT_DRIVER) \
+ NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
+} while (0)
+#define NV_DEBUG_KMS(d, fmt, arg...) do { \
+ if (drm_debug & DRM_UT_KMS) \
NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
} while (0)
#endif
@@ -1273,14 +1338,14 @@ nv_two_reg_pll(struct drm_device *dev)
return false;
}
-#define NV50_NVSW 0x0000506e
-#define NV50_NVSW_DMA_SEMAPHORE 0x00000060
-#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064
-#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068
-#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c
-#define NV50_NVSW_DMA_VBLSEM 0x0000018c
-#define NV50_NVSW_VBLSEM_OFFSET 0x00000400
-#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404
-#define NV50_NVSW_VBLSEM_RELEASE 0x00000408
+#define NV_SW 0x0000506e
+#define NV_SW_DMA_SEMAPHORE 0x00000060
+#define NV_SW_SEMAPHORE_OFFSET 0x00000064
+#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
+#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
+#define NV_SW_DMA_VBLSEM 0x0000018c
+#define NV_SW_VBLSEM_OFFSET 0x00000400
+#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
+#define NV_SW_VBLSEM_RELEASE 0x00000408
#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 36e8c5e..ea879a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -58,14 +58,13 @@ nouveau_fbcon_sync(struct fb_info *info)
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
- if (!chan->accel_done ||
+ if (!chan || !chan->accel_done ||
info->state != FBINFO_STATE_RUNNING ||
info->flags & FBINFO_HWACCEL_DISABLED)
return 0;
if (RING_SPACE(chan, 4)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
return 0;
}
@@ -86,8 +85,7 @@ nouveau_fbcon_sync(struct fb_info *info)
}
if (ret) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
return 0;
}
@@ -109,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
+static struct fb_ops nv04_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = nv04_fbcon_fillrect,
+ .fb_copyarea = nv04_fbcon_copyarea,
+ .fb_imageblit = nv04_fbcon_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static struct fb_ops nv50_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = nv50_fbcon_fillrect,
+ .fb_copyarea = nv50_fbcon_copyarea,
+ .fb_imageblit = nv50_fbcon_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
@@ -212,11 +238,11 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
mode_cmd.bpp = surface_bpp;
mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
- mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
+ mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
mode_cmd.depth = surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
+ size = roundup(size, PAGE_SIZE);
ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
0, 0x0000, false, true, &nvbo);
@@ -269,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
dev_priv->fbdev_info = info;
strcpy(info->fix.id, "nouveaufb");
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+ if (nouveau_nofbaccel)
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
+ else
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_ops;
info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
dev_priv->vm_vram_base;
@@ -318,14 +348,18 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
par->nouveau_fb = nouveau_fb;
par->dev = dev;
- switch (dev_priv->card_type) {
- case NV_50:
- nv50_fbcon_accel_init(info);
- break;
- default:
- nv04_fbcon_accel_init(info);
- break;
- };
+ if (dev_priv->channel && !nouveau_nofbaccel) {
+ switch (dev_priv->card_type) {
+ case NV_50:
+ nv50_fbcon_accel_init(info);
+ info->fbops = &nv50_fbcon_ops;
+ break;
+ default:
+ nv04_fbcon_accel_init(info);
+ info->fbops = &nv04_fbcon_ops;
+ break;
+ };
+ }
nouveau_fbcon_zfill(dev);
@@ -347,7 +381,7 @@ out:
int
nouveau_fbcon_probe(struct drm_device *dev)
{
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
}
@@ -378,3 +412,12 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
return 0;
}
+
+void nouveau_fbcon_gpu_lockup(struct fb_info *info)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 8531140..f9c34e1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,8 +40,15 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
void nouveau_fbcon_zfill(struct drm_device *dev);
+void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_accel_init(struct fb_info *info);
+void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
+void nouveau_fbcon_gpu_lockup(struct fb_info *info);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 0cff7eb..faddf53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -142,7 +142,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock_irqrestore(&chan->fence.lock, flags);
- BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
+ BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
OUT_RING(chan, fence->sequence);
FIRE_RING(chan);
@@ -205,7 +205,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
schedule_timeout(1);
if (intr && signal_pending(current)) {
- ret = -ERESTART;
+ ret = -ERESTARTSYS;
break;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 11f831f..70cc308 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,6 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
}
struct validate_op {
- struct nouveau_fence *fence;
struct list_head vram_list;
struct list_head gart_list;
struct list_head both_list;
@@ -252,17 +251,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
}
static void
-validate_fini(struct validate_op *op, bool success)
+validate_fini(struct validate_op *op, struct nouveau_fence* fence)
{
- struct nouveau_fence *fence = op->fence;
-
- if (unlikely(!success))
- op->fence = NULL;
-
- validate_fini_list(&op->vram_list, op->fence);
- validate_fini_list(&op->gart_list, op->fence);
- validate_fini_list(&op->both_list, op->fence);
- nouveau_fence_unref((void *)&fence);
+ validate_fini_list(&op->vram_list, fence);
+ validate_fini_list(&op->gart_list, fence);
+ validate_fini_list(&op->both_list, fence);
}
static int
@@ -328,6 +321,7 @@ retry:
else {
NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
b->valid_domains);
+ list_add_tail(&nvbo->entry, &op->both_list);
validate_fini(op, NULL);
return -EINVAL;
}
@@ -342,8 +336,6 @@ retry:
}
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
- if (ret == -ERESTART)
- ret = -EAGAIN;
if (ret)
return ret;
goto retry;
@@ -422,10 +414,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
INIT_LIST_HEAD(&op->gart_list);
INIT_LIST_HEAD(&op->both_list);
- ret = nouveau_fence_new(chan, &op->fence, false);
- if (ret)
- return ret;
-
if (nr_buffers == 0)
return 0;
@@ -479,13 +467,14 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
struct drm_nouveau_gem_pushbuf_bo *bo,
- int nr_relocs, uint64_t ptr_relocs,
- int nr_dwords, int first_dword,
+ unsigned nr_relocs, uint64_t ptr_relocs,
+ unsigned nr_dwords, unsigned first_dword,
uint32_t *pushbuf, bool is_iomem)
{
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
struct drm_device *dev = chan->dev;
- int ret = 0, i;
+ int ret = 0;
+ unsigned i;
reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
if (IS_ERR(reloc))
@@ -543,6 +532,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
struct nouveau_channel *chan;
struct validate_op op;
+ struct nouveau_fence *fence = NULL;
uint32_t *pushbuf = NULL;
int ret = 0, do_reloc = 0, i;
@@ -599,7 +589,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
OUT_RINGp(chan, pushbuf, req->nr_dwords);
- ret = nouveau_fence_emit(op.fence);
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
@@ -607,7 +597,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
if (nouveau_gem_pushbuf_sync(chan)) {
- ret = nouveau_fence_wait(op.fence, NULL, false, false);
+ ret = nouveau_fence_wait(fence, NULL, false, false);
if (ret) {
for (i = 0; i < req->nr_dwords; i++)
NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
@@ -616,7 +606,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
out:
- validate_fini(&op, ret == 0);
+ validate_fini(&op, fence);
+ nouveau_fence_unref((void**)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(pushbuf);
kfree(bo);
@@ -636,6 +627,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
struct drm_gem_object *gem;
struct nouveau_bo *pbbo;
struct validate_op op;
+ struct nouveau_fence *fence = NULL;
int i, ret = 0, do_reloc = 0;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
@@ -677,6 +669,18 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
}
pbbo = nouveau_gem_object(gem);
+ if ((req->offset & 3) || req->nr_dwords < 2 ||
+ (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
+ (unsigned long)req->nr_dwords >
+ ((unsigned long)(pbbo->bo.mem.size - req->offset) >> 2)) {
+ NV_ERROR(dev, "pb call misaligned or out of bounds: "
+ "%d + %d * 4 > %ld\n",
+ req->offset, req->nr_dwords, pbbo->bo.mem.size);
+ ret = -EINVAL;
+ drm_gem_object_unreference(gem);
+ goto out;
+ }
+
ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
chan->fence.sequence);
if (ret) {
@@ -774,7 +778,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
OUT_RING(chan, 0);
}
- ret = nouveau_fence_emit(op.fence);
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
@@ -782,7 +786,8 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
}
out:
- validate_fini(&op, ret == 0);
+ validate_fini(&op, fence);
+ nouveau_fence_unref((void**)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(bo);
@@ -915,19 +920,16 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
goto out;
ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
- if (ret == -ERESTART)
- ret = -EAGAIN;
if (ret)
goto out;
}
if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+ spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+ spin_unlock(&nvbo->bo.lock);
} else {
ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
- if (ret == -ERESTART)
- ret = -EAGAIN;
- else
if (ret == 0)
nvbo->cpu_filp = file_priv;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
new file mode 100644
index 0000000..c7ebec6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+struct nouveau_ctxprog {
+ uint32_t signature;
+ uint8_t version;
+ uint16_t length;
+ uint32_t data[];
+} __attribute__ ((packed));
+
+struct nouveau_ctxvals {
+ uint32_t signature;
+ uint8_t version;
+ uint32_t length;
+ struct {
+ uint32_t offset;
+ uint32_t value;
+ } data[];
+} __attribute__ ((packed));
+
+int
+nouveau_grctx_prog_load(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ const int chipset = dev_priv->chipset;
+ const struct firmware *fw;
+ const struct nouveau_ctxprog *cp;
+ const struct nouveau_ctxvals *cv;
+ char name[32];
+ int ret, i;
+
+ if (pgraph->accel_blocked)
+ return -ENODEV;
+
+ if (!pgraph->ctxprog) {
+ sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
+ return ret;
+ }
+
+ pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+ if (!pgraph->ctxprog) {
+ NV_ERROR(dev, "OOM copying ctxprog\n");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+ memcpy(pgraph->ctxprog, fw->data, fw->size);
+
+ cp = pgraph->ctxprog;
+ if (le32_to_cpu(cp->signature) != 0x5043564e ||
+ cp->version != 0 ||
+ le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
+ NV_ERROR(dev, "ctxprog invalid\n");
+ release_firmware(fw);
+ nouveau_grctx_fini(dev);
+ return -EINVAL;
+ }
+ release_firmware(fw);
+ }
+
+ if (!pgraph->ctxvals) {
+ sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
+ nouveau_grctx_fini(dev);
+ return ret;
+ }
+
+ pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+ if (!pgraph->ctxvals) {
+ NV_ERROR(dev, "OOM copying ctxvals\n");
+ release_firmware(fw);
+ nouveau_grctx_fini(dev);
+ return -ENOMEM;
+ }
+ memcpy(pgraph->ctxvals, fw->data, fw->size);
+
+ cv = (void *)pgraph->ctxvals;
+ if (le32_to_cpu(cv->signature) != 0x5643564e ||
+ cv->version != 0 ||
+ le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
+ NV_ERROR(dev, "ctxvals invalid\n");
+ release_firmware(fw);
+ nouveau_grctx_fini(dev);
+ return -EINVAL;
+ }
+ release_firmware(fw);
+ }
+
+ cp = pgraph->ctxprog;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < le16_to_cpu(cp->length); i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
+ le32_to_cpu(cp->data[i]));
+
+ return 0;
+}
+
+void
+nouveau_grctx_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+
+ if (pgraph->ctxprog) {
+ kfree(pgraph->ctxprog);
+ pgraph->ctxprog = NULL;
+ }
+
+ if (pgraph->ctxvals) {
+ kfree(pgraph->ctxvals);
+ pgraph->ctxvals = NULL;
+ }
+}
+
+void
+nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_ctxvals *cv = pgraph->ctxvals;
+ int i;
+
+ if (!cv)
+ return;
+
+ for (i = 0; i < le32_to_cpu(cv->length); i++)
+ nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
+ le32_to_cpu(cv->data[i].value));
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
new file mode 100644
index 0000000..5d39c4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -0,0 +1,133 @@
+#ifndef __NOUVEAU_GRCTX_H__
+#define __NOUVEAU_GRCTX_H__
+
+struct nouveau_grctx {
+ struct drm_device *dev;
+
+ enum {
+ NOUVEAU_GRCTX_PROG,
+ NOUVEAU_GRCTX_VALS
+ } mode;
+ void *data;
+
+ uint32_t ctxprog_max;
+ uint32_t ctxprog_len;
+ uint32_t ctxprog_reg;
+ int ctxprog_label[32];
+ uint32_t ctxvals_pos;
+ uint32_t ctxvals_base;
+};
+
+#ifdef CP_CTX
+static inline void
+cp_out(struct nouveau_grctx *ctx, uint32_t inst)
+{
+ uint32_t *ctxprog = ctx->data;
+
+ if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ return;
+
+ BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
+ ctxprog[ctx->ctxprog_len++] = inst;
+}
+
+static inline void
+cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
+{
+ cp_out(ctx, CP_LOAD_SR | val);
+}
+
+static inline void
+cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
+{
+ ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
+
+ ctx->ctxvals_base = ctx->ctxvals_pos;
+ ctx->ctxvals_pos = ctx->ctxvals_base + length;
+
+ if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
+ cp_lsr(ctx, length);
+ length = 0;
+ }
+
+ cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
+}
+
+static inline void
+cp_name(struct nouveau_grctx *ctx, int name)
+{
+ uint32_t *ctxprog = ctx->data;
+ int i;
+
+ if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ return;
+
+ ctx->ctxprog_label[name] = ctx->ctxprog_len;
+ for (i = 0; i < ctx->ctxprog_len; i++) {
+ if ((ctxprog[i] & 0xfff00000) != 0xff400000)
+ continue;
+ if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
+ continue;
+ ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
+ (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
+ }
+}
+
+static inline void
+_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
+{
+ int ip = 0;
+
+ if (mod != 2) {
+ ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
+ if (ip == 0)
+ ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
+ }
+
+ cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
+ (state ? 0 : CP_BRA_IF_CLEAR));
+}
+#define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#ifdef CP_BRA_MOD
+#define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
+#endif
+
+static inline void
+_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
+{
+ cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
+}
+#define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+_cp_set(struct nouveau_grctx *ctx, int flag, int state)
+{
+ cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
+}
+#define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+cp_pos(struct nouveau_grctx *ctx, int offset)
+{
+ ctx->ctxvals_pos = offset;
+ ctx->ctxvals_base = ctx->ctxvals_pos;
+
+ cp_lsr(ctx, ctx->ctxvals_pos);
+ cp_out(ctx, CP_SET_CONTEXT_POINTER);
+}
+
+static inline void
+gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
+{
+ if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ return;
+
+ reg = (reg - 0x00400000) / 4;
+ reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
+
+ nv_wo32(ctx->dev, ctx->data, reg, val);
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index a2c30f4..475ba81 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -61,12 +61,10 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
#endif
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn)(filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 370c72c..447f9f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
get + 4);
}
+ if (status & NV_PFIFO_INTR_SEMAPHORE) {
+ uint32_t sem;
+
+ status &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
if (status) {
NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
status, chid);
@@ -483,6 +497,13 @@ nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
if (nouveau_pgraph_intr_swmthd(dev, &trap))
unhandled = 1;
+ } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+ uint32_t v = nv_rd32(dev, 0x402000);
+ nv_wr32(dev, 0x402000, v);
+
+ /* dump the error anyway for now: it's useful for
+ Gallium development */
+ unhandled = 1;
} else {
unhandled = 1;
}
@@ -559,85 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
- uint32_t status, nsource;
+ uint32_t status;
- status = nv_rd32(dev, NV03_PGRAPH_INTR);
- nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- if (status & 0x00000001) {
- nouveau_pgraph_intr_notify(dev, nsource);
- status &= ~0x00000001;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
- }
+ if (status & 0x00000001) {
+ nouveau_pgraph_intr_notify(dev, nsource);
+ status &= ~0x00000001;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+ }
- if (status & 0x00000010) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+ if (status & 0x00000010) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
- status &= ~0x00000010;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
- }
+ status &= ~0x00000010;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+ }
- if (status & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, 0x400500, 0x00010001);
+ if (status & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, NV03_PGRAPH_INTR,
+ NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+ NV40_PGRAPH_INTR_EN) &
+ ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, 0x400500, 0x00010001);
- nv50_graph_context_switch(dev);
+ nv50_graph_context_switch(dev);
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- }
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ }
- if (status & 0x00100000) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_DATA_ERROR);
+ if (status & 0x00100000) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_DATA_ERROR);
- status &= ~0x00100000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
- }
+ status &= ~0x00100000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+ }
- if (status & 0x00200000) {
- int r;
-
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
- NV_ERROR(dev, "magic set 1:\n");
- for (r = 0x408900; r <= 0x408910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
- for (r = 0x408e08; r <= 0x408e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
-
- NV_ERROR(dev, "magic set 2:\n");
- for (r = 0x409900; r <= 0x409910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
- for (r = 0x409e08; r <= 0x409e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
-
- status &= ~0x00200000;
- nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
- }
+ if (status & 0x00200000) {
+ int r;
+
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+ NV_ERROR(dev, "magic set 1:\n");
+ for (r = 0x408900; r <= 0x408910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x408900,
+ nv_rd32(dev, 0x408904) | 0xc0000000);
+ for (r = 0x408e08; r <= 0x408e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x408e08,
+ nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+ NV_ERROR(dev, "magic set 2:\n");
+ for (r = 0x409900; r <= 0x409910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x409900,
+ nv_rd32(dev, 0x409904) | 0xc0000000);
+ for (r = 0x409e08; r <= 0x409e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x409e08,
+ nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+ status &= ~0x00200000;
+ nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+ }
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
+ if (status) {
+ NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
+ status);
+ nv_wr32(dev, NV03_PGRAPH_INTR, status);
+ }
- {
- const int isb = (1 << 16) | (1 << 0);
+ {
+ const int isb = (1 << 16) | (1 << 0);
- if ((nv_rd32(dev, 0x400500) & isb) != isb)
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
+ if ((nv_rd32(dev, 0x400500) & isb) != isb)
+ nv_wr32(dev, 0x400500,
+ nv_rd32(dev, 0x400500) | isb);
+ }
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 0275571..2dc09db 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -192,6 +192,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
}
/*
+ * NV10-NV40 tiling helpers
+ */
+
+static void
+nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = addr;
+ tile->size = size;
+ tile->used = !!pitch;
+ nouveau_fence_unref((void **)&tile->fence);
+
+ if (!pfifo->cache_flush(dev))
+ return;
+
+ pfifo->reassign(dev, false);
+ pfifo->cache_flush(dev);
+ pfifo->cache_pull(dev, false);
+
+ nouveau_wait_for_idle(dev);
+
+ pgraph->set_region_tiling(dev, i, addr, size, pitch);
+ pfb->set_region_tiling(dev, i, addr, size, pitch);
+
+ pfifo->cache_pull(dev, true);
+ pfifo->reassign(dev, true);
+}
+
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
+ int i;
+
+ spin_lock(&dev_priv->tile.lock);
+
+ for (i = 0; i < pfb->num_tiles; i++) {
+ if (tile[i].used)
+ /* Tile region in use. */
+ continue;
+
+ if (tile[i].fence &&
+ !nouveau_fence_signalled(tile[i].fence, NULL))
+ /* Pending tile region. */
+ continue;
+
+ if (max(tile[i].addr, addr) <
+ min(tile[i].addr + tile[i].size, addr + size))
+ /* Kill an intersecting tile region. */
+ nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
+
+ if (pitch && !found) {
+ /* Free tile region. */
+ nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
+ found = &tile[i];
+ }
+ }
+
+ spin_unlock(&dev_priv->tile.lock);
+
+ return found;
+}
+
+void
+nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence)
+{
+ if (fence) {
+ /* Mark it as pending. */
+ tile->fence = fence;
+ nouveau_fence_ref(fence);
+ }
+
+ tile->used = false;
+}
+
+/*
* NV50 VM helpers
*/
int
@@ -199,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
uint32_t flags, uint64_t phys)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj **pgt;
- unsigned psz, pfl, pages;
-
- if (virt >= dev_priv->vm_gart_base &&
- (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
- psz = 12;
- pgt = &dev_priv->gart_info.sg_ctxdma;
- pfl = 0x21;
- virt -= dev_priv->vm_gart_base;
- } else
- if (virt >= dev_priv->vm_vram_base &&
- (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
- psz = 16;
- pgt = dev_priv->vm_vram_pt;
- pfl = 0x01;
- virt -= dev_priv->vm_vram_base;
- } else {
- NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
- virt, virt + size - 1);
- return -EINVAL;
+ struct nouveau_gpuobj *pgt;
+ unsigned block;
+ int i;
+
+ virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+ size = (size >> 16) << 1;
+
+ phys |= ((uint64_t)flags << 32);
+ phys |= 1;
+ if (dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ phys |= 0x30;
}
- pages = size >> psz;
-
dev_priv->engine.instmem.prepare_access(dev, true);
- if (flags & 0x80000000) {
- while (pages--) {
- struct nouveau_gpuobj *pt = pgt[virt >> 29];
- unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+ while (size) {
+ unsigned offset_h = upper_32_bits(phys);
+ unsigned offset_l = lower_32_bits(phys);
+ unsigned pte, end;
+
+ for (i = 7; i >= 0; i--) {
+ block = 1 << (i + 1);
+ if (size >= block && !(virt & (block - 1)))
+ break;
+ }
+ offset_l |= (i << 7);
- nv_wo32(dev, pt, pte++, 0x00000000);
- nv_wo32(dev, pt, pte++, 0x00000000);
+ phys += block << 15;
+ size -= block;
- virt += (1 << psz);
- }
- } else {
- while (pages--) {
- struct nouveau_gpuobj *pt = pgt[virt >> 29];
- unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
- unsigned offset_h = upper_32_bits(phys) & 0xff;
- unsigned offset_l = lower_32_bits(phys);
+ while (block) {
+ pgt = dev_priv->vm_vram_pt[virt >> 14];
+ pte = virt & 0x3ffe;
- nv_wo32(dev, pt, pte++, offset_l | pfl);
- nv_wo32(dev, pt, pte++, offset_h | flags);
+ end = pte + block;
+ if (end > 16384)
+ end = 16384;
+ block -= (end - pte);
+ virt += (end - pte);
- phys += (1 << psz);
- virt += (1 << psz);
+ while (pte < end) {
+ nv_wo32(dev, pgt, pte++, offset_l);
+ nv_wo32(dev, pgt, pte++, offset_h);
+ }
}
}
dev_priv->engine.instmem.finish_access(dev);
@@ -270,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
- nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *pgt;
+ unsigned pages, pte, end;
+
+ virt -= dev_priv->vm_vram_base;
+ pages = (size >> 16) << 1;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ while (pages) {
+ pgt = dev_priv->vm_vram_pt[virt >> 29];
+ pte = (virt & 0x1ffe0000ULL) >> 15;
+
+ end = pte + pages;
+ if (end > 16384)
+ end = 16384;
+ pages -= (end - pte);
+ virt += (end - pte) << 15;
+
+ while (pte < end)
+ nv_wo32(dev, pgt, pte++, 0);
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, 0x100c80, 0x00050001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00000001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ }
}
/*
@@ -297,9 +414,8 @@ void nouveau_mem_close(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+ nouveau_bo_unpin(dev_priv->vga_ram);
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -407,6 +523,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
return 0;
}
+#if __OS_HAS_AGP
static void nouveau_mem_reset_agp(struct drm_device *dev)
{
uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
@@ -432,10 +549,12 @@ static void nouveau_mem_reset_agp(struct drm_device *dev)
nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}
+#endif
int
nouveau_mem_init_agp(struct drm_device *dev)
{
+#if __OS_HAS_AGP
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -471,6 +590,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_AGP;
dev_priv->gart_info.aper_base = info.aperture_base;
dev_priv->gart_info.aper_size = info.aperture_size;
+#endif
return 0;
}
@@ -509,6 +629,7 @@ nouveau_mem_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
spin_lock_init(&dev_priv->ttm.bo_list_lock);
+ spin_lock_init(&dev_priv->tile.lock);
dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
@@ -531,6 +652,15 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, true, true, &dev_priv->vga_ram);
+ if (ret == 0)
+ ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_WARN(dev, "failed to reserve VGA memory\n");
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ }
+
/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
if (drm_device_is_agp(dev) && dev->agp) {
@@ -562,6 +692,7 @@ nouveau_mem_init(struct drm_device *dev)
dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
drm_get_resource_len(dev, 1),
DRM_MTRR_WC);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34..d99dc08 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_bo *ntfy = NULL;
+ uint32_t flags;
int ret;
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
- TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
+ if (nouveau_vram_notify)
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ flags = TTM_PL_FLAG_TT;
+
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
0, 0x0000, false, true, &ntfy);
if (ret)
return ret;
- ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(ntfy, flags);
if (ret)
goto out_err;
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
target = NV_DMA_TARGET_PCI;
} else {
target = NV_DMA_TARGET_AGP;
+ if (dev_priv->card_type >= NV_50)
+ offset += dev_priv->vm_gart_base;
}
} else {
NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 93379bb..e7c100b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -881,15 +881,16 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
return 0;
}
-static int
+int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
struct nouveau_gpuobj **gpuobj_ret)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_nouveau_private *dev_priv;
struct nouveau_gpuobj *gpuobj;
if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
return -EINVAL;
+ dev_priv = chan->dev->dev_private;
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index fa1b0e7..aa9b310 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
* the card will hang early on in the X init process.
*/
# define NV_PMC_ENABLE_UNK13 (1<<13)
+#define NV40_PMC_GRAPH_UNITS 0x00001540
#define NV40_PMC_BACKLIGHT 0x000015f0
# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
#define NV40_PMC_1700 0x00001700
@@ -349,19 +350,19 @@
#define NV04_PGRAPH_BLEND 0x00400824
#define NV04_PGRAPH_STORED_FMT 0x00400830
#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
-#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
-#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
-#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
-#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
+#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
+#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
+#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
+#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
#define NV04_PGRAPH_U_RAM 0x00400D00
-#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
-#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
-#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
-#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
+#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e4..ed15905 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
-
- NV_DEBUG(nvbe->dev, "\n");
+ struct drm_device *dev;
if (nvbe && nvbe->pages) {
+ dev = nvbe->dev;
+ NV_DEBUG(dev, "\n");
+
if (nvbe->bound)
be->func->unbind(be);
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 2ed41d3..a4851af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -76,6 +76,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv04_fifo_channel_id;
engine->fifo.create_context = nv04_fifo_create_context;
engine->fifo.destroy_context = nv04_fifo_destroy_context;
@@ -100,6 +102,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv10_graph_grclass;
engine->graph.init = nv10_graph_init;
engine->graph.takedown = nv10_graph_takedown;
@@ -109,12 +112,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv10_graph_load_context;
engine->graph.unload_context = nv10_graph_unload_context;
+ engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -139,6 +145,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv20_graph_grclass;
engine->graph.init = nv20_graph_init;
engine->graph.takedown = nv20_graph_takedown;
@@ -148,12 +155,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
+ engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -178,6 +188,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv30_graph_grclass;
engine->graph.init = nv30_graph_init;
engine->graph.takedown = nv20_graph_takedown;
@@ -187,12 +198,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv20_graph_destroy_context;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
+ engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -218,6 +232,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv40_fb_init;
engine->fb.takedown = nv40_fb_takedown;
+ engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
engine->graph.grclass = nv40_graph_grclass;
engine->graph.init = nv40_graph_init;
engine->graph.takedown = nv40_graph_takedown;
@@ -227,12 +242,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context;
engine->graph.unload_context = nv40_graph_unload_context;
+ engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv40_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv40_fifo_create_context;
engine->fifo.destroy_context = nv40_fifo_destroy_context;
@@ -292,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
+ struct drm_device *dev = priv;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset >= 0x40)
+ nv_wr32(dev, 0x88054, state);
+ else
+ nv_wr32(dev, 0x1854, state);
+
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -299,12 +325,57 @@ nouveau_vga_set_decode(void *priv, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+static int
+nouveau_card_init_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
+ ret = nouveau_channel_alloc(dev, &dev_priv->channel,
+ (struct drm_file *)-2,
+ NvDmaFB, NvDmaTT);
+ if (ret)
+ return ret;
+
+ gpuobj = NULL;
+ ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+ 0, nouveau_mem_fb_amount(dev),
+ NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+ &gpuobj);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
+ gpuobj, NULL);
+ if (ret)
+ goto out_err;
+
+ gpuobj = NULL;
+ ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RW, &gpuobj, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
+ gpuobj, NULL);
+ if (ret)
+ goto out_err;
+
+ return 0;
+out_err:
+ nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_channel_free(dev_priv->channel);
+ dev_priv->channel = NULL;
+ return ret;
+}
+
int
nouveau_card_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine;
- struct nouveau_gpuobj *gpuobj;
int ret;
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
@@ -317,7 +388,7 @@ nouveau_card_init(struct drm_device *dev)
/* Initialise internal driver API hooks */
ret = nouveau_init_engine_ptrs(dev);
if (ret)
- return ret;
+ goto out;
engine = &dev_priv->engine;
dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
@@ -325,12 +396,12 @@ nouveau_card_init(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = nouveau_bios_init(dev);
if (ret)
- return ret;
+ goto out;
}
ret = nouveau_gpuobj_early_init(dev);
if (ret)
- return ret;
+ goto out_bios;
/* Initialise instance memory, must happen before mem_init so we
* know exactly how much VRAM we're able to use for "normal"
@@ -338,100 +409,72 @@ nouveau_card_init(struct drm_device *dev)
*/
ret = engine->instmem.init(dev);
if (ret)
- return ret;
+ goto out_gpuobj_early;
/* Setup the memory manager */
ret = nouveau_mem_init(dev);
if (ret)
- return ret;
+ goto out_instmem;
ret = nouveau_gpuobj_init(dev);
if (ret)
- return ret;
+ goto out_mem;
/* PMC */
ret = engine->mc.init(dev);
if (ret)
- return ret;
+ goto out_gpuobj;
/* PTIMER */
ret = engine->timer.init(dev);
if (ret)
- return ret;
+ goto out_mc;
/* PFB */
ret = engine->fb.init(dev);
if (ret)
- return ret;
+ goto out_timer;
- /* PGRAPH */
- ret = engine->graph.init(dev);
- if (ret)
- return ret;
+ if (nouveau_noaccel)
+ engine->graph.accel_blocked = true;
+ else {
+ /* PGRAPH */
+ ret = engine->graph.init(dev);
+ if (ret)
+ goto out_fb;
- /* PFIFO */
- ret = engine->fifo.init(dev);
- if (ret)
- return ret;
+ /* PFIFO */
+ ret = engine->fifo.init(dev);
+ if (ret)
+ goto out_graph;
+ }
/* this call irq_preinstall, register irq handler and
* call irq_postinstall
*/
ret = drm_irq_install(dev);
if (ret)
- return ret;
+ goto out_fifo;
ret = drm_vblank_init(dev, 0);
if (ret)
- return ret;
+ goto out_irq;
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
- ret = nouveau_channel_alloc(dev, &dev_priv->channel,
- (struct drm_file *)-2,
- NvDmaFB, NvDmaTT);
- if (ret)
- return ret;
-
- gpuobj = NULL;
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, nouveau_mem_fb_amount(dev),
- NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
- &gpuobj);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
- gpuobj, NULL);
- if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return ret;
- }
-
- gpuobj = NULL;
- ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RW, &gpuobj, NULL);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
- gpuobj, NULL);
- if (ret) {
- nouveau_gpuobj_del(dev, &gpuobj);
- return ret;
+ if (!engine->graph.accel_blocked) {
+ ret = nouveau_card_init_channel(dev);
+ if (ret)
+ goto out_irq;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (dev_priv->card_type >= NV_50) {
+ if (dev_priv->card_type >= NV_50)
ret = nv50_display_create(dev);
- if (ret)
- return ret;
- } else {
+ else
ret = nv04_display_create(dev);
- if (ret)
- return ret;
- }
+ if (ret)
+ goto out_irq;
}
ret = nouveau_backlight_init(dev);
@@ -444,6 +487,34 @@ nouveau_card_init(struct drm_device *dev)
drm_helper_initial_config(dev);
return 0;
+
+out_irq:
+ drm_irq_uninstall(dev);
+out_fifo:
+ if (!nouveau_noaccel)
+ engine->fifo.takedown(dev);
+out_graph:
+ if (!nouveau_noaccel)
+ engine->graph.takedown(dev);
+out_fb:
+ engine->fb.takedown(dev);
+out_timer:
+ engine->timer.takedown(dev);
+out_mc:
+ engine->mc.takedown(dev);
+out_gpuobj:
+ nouveau_gpuobj_takedown(dev);
+out_mem:
+ nouveau_mem_close(dev);
+out_instmem:
+ engine->instmem.takedown(dev);
+out_gpuobj_early:
+ nouveau_gpuobj_late_takedown(dev);
+out_bios:
+ nouveau_bios_takedown(dev);
+out:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+ return ret;
}
static void nouveau_card_takedown(struct drm_device *dev)
@@ -461,13 +532,16 @@ static void nouveau_card_takedown(struct drm_device *dev)
dev_priv->channel = NULL;
}
- engine->fifo.takedown(dev);
- engine->graph.takedown(dev);
+ if (!nouveau_noaccel) {
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ }
engine->fb.takedown(dev);
engine->timer.takedown(dev);
engine->mc.takedown(dev);
mutex_lock(&dev->struct_mutex);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
mutex_unlock(&dev->struct_mutex);
nouveau_sgdma_takedown(dev);
@@ -585,7 +659,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->chipset = (reg0 & 0xff00000) >> 20;
/* NV04 or NV05 */
} else if ((reg0 & 0xff00fff0) == 0x20004000) {
- dev_priv->chipset = 0x04;
+ if (reg0 & 0x00f00000)
+ dev_priv->chipset = 0x05;
+ else
+ dev_priv->chipset = 0x04;
} else
dev_priv->chipset = 0xff;
@@ -665,8 +742,8 @@ static void nouveau_close(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- /* In the case of an error dev_priv may not be be allocated yet */
- if (dev_priv && dev_priv->card_type)
+ /* In the case of an error dev_priv may not be allocated yet */
+ if (dev_priv)
nouveau_card_takedown(dev);
}
@@ -756,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = dev_priv->vm_vram_base;
break;
+ case NOUVEAU_GETPARAM_GRAPH_UNITS:
+ /* NV40 and NV50 versions are quite different, but register
+ * address is the same. User is supposed to know the card
+ * family anyway... */
+ if (dev_priv->chipset >= 0x40) {
+ getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+ break;
+ }
+ /* FALLTHRU */
default:
NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
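
The nouveau_state.c hunks above replace the early "return ret" exits in nouveau_card_init() with a chain of goto labels, so each failure point unwinds only the subsystems that were already brought up. Below is a minimal standalone sketch of that cascading-cleanup idiom; the names alloc_a/alloc_b/alloc_c are purely illustrative and are not driver functions.

/*
 * Illustrative sketch only, not part of the patch: cascading goto-label
 * unwind, where later failures jump to labels that free earlier resources.
 */
#include <stdlib.h>

static int alloc_a(void **p) { *p = malloc(16); return *p ? 0 : -1; }
static int alloc_b(void **p) { *p = malloc(16); return *p ? 0 : -1; }
static int alloc_c(void **p) { *p = malloc(16); return *p ? 0 : -1; }

static int card_init_sketch(void)
{
	void *a, *b, *c;
	int ret;

	ret = alloc_a(&a);
	if (ret)
		goto out;	/* nothing to undo yet */

	ret = alloc_b(&b);
	if (ret)
		goto out_a;	/* undo step A only */

	ret = alloc_c(&c);
	if (ret)
		goto out_b;	/* undo B, then fall through to undo A */

	return 0;		/* success: everything stays initialised */

out_b:
	free(b);
out_a:
	free(a);
out:
	return ret;
}

int main(void)
{
	return card_init_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The labels are listed in reverse order of initialisation, so falling through them tears the stack of resources down in the opposite order to the way it was built, which is the same shape the new out_irq/out_fifo/.../out ladder in nouveau_card_init() has.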
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 187eb84..c385d50 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,45 +28,17 @@
#include "nouveau_drv.h"
-static struct vm_operations_struct nouveau_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static int
-nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo = vma->vm_private_data;
- int ret;
-
- if (unlikely(bo == NULL))
- return VM_FAULT_NOPAGE;
-
- ret = ttm_vm_ops->fault(vma, vmf);
- return ret;
-}
-
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct drm_nouveau_private *dev_priv =
file_priv->minor->dev->dev_private;
- int ret;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return drm_mmap(filp, vma);
- ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
- if (unlikely(ret != 0))
- return ret;
-
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- nouveau_ttm_vm_ops = *ttm_vm_ops;
- nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
- }
-
- vma->vm_ops = &nouveau_ttm_vm_ops;
- return 0;
+ return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index b913636..d2f143e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -143,10 +143,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
if (pv->NM2)
- NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
+ NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
else
- NV_TRACE(dev, "vpll: n %d m %d log2p %d\n",
+ NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n",
pv->N1, pv->M1, pv->log2P);
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
@@ -160,7 +160,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
unsigned char seq1 = 0, crtc17 = 0;
unsigned char crtc1A;
- NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode,
+ NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode,
nv_crtc->index);
if (nv_crtc->last_dpms == mode) /* Don't do unnecesary mode changes. */
@@ -603,7 +603,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- NV_DEBUG(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
drm_mode_debug_printmodeline(adjusted_mode);
/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
@@ -703,7 +703,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- NV_DEBUG(crtc->dev, "\n");
+ NV_DEBUG_KMS(crtc->dev, "\n");
if (!nv_crtc)
return;
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index a5fa517..1d73b15 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- uint8_t saved_seq1, saved_pi, saved_rpc1;
+ uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
uint8_t saved_palette0[3], saved_palette_mask;
uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
/* only implemented for head A for now */
NVSetOwner(dev, 0);
+ saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
@@ -203,25 +206,25 @@ out:
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
if (blue == 0x18) {
- NV_TRACE(dev, "Load detected on head A\n");
+ NV_INFO(dev, "Load detected on head A\n");
return connector_status_connected;
}
return connector_status_disconnected;
}
-enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
- uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+ uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
- int head, present = 0;
+ int head;
#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
if (dcb->type == OUTPUT_TV) {
@@ -287,13 +290,7 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
msleep(5);
- temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
-
- if (dcb->type == OUTPUT_TV)
- present = (nv17_tv_detect(encoder, connector, temp)
- == connector_status_connected);
- else
- present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
+ sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -310,15 +307,25 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
- if (present) {
- NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
+ return sample;
+}
+
+static enum drm_connector_status
+nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ uint32_t sample = nv17_dac_sample_load(encoder);
+
+ if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
+ NV_INFO(dev, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
return connector_status_connected;
+ } else {
+ return connector_status_disconnected;
}
-
- return connector_status_disconnected;
}
-
static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -350,14 +357,10 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int head = nouveau_crtc(encoder->crtc)->index;
- NV_TRACE(dev, "%s called for encoder %d\n", __func__,
- nv_encoder->dcb->index);
-
if (nv_gf4_disp_arch(dev)) {
struct drm_encoder *rebind;
uint32_t dac_offset = nv04_dac_output_offset(encoder);
@@ -466,7 +469,7 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index e5b3333..483f875 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -261,7 +261,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *output_mode = &nv_encoder->mode;
uint32_t mode_ratio, panel_ratio;
- NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
drm_mode_debug_printmodeline(output_mode);
/* Initialize the FP registers in this CRTC. */
@@ -413,7 +413,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
struct dcb_entry *dcbe = nv_encoder->dcb;
int head = nouveau_crtc(encoder->crtc)->index;
- NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index);
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
if (dcbe->type == OUTPUT_TMDS)
run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -550,7 +552,7 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index b47c757..ef77215 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -99,10 +99,11 @@ nv04_display_create(struct drm_device *dev)
uint16_t connector[16] = { 0 };
int i, ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
if (nv_two_heads(dev))
nv04_display_store_initial_head_owner(dev);
+ nouveau_hw_save_vga_fonts(dev, 1);
drm_mode_config_init(dev);
drm_mode_create_scaling_mode_property(dev);
@@ -203,8 +204,6 @@ nv04_display_create(struct drm_device *dev)
/* Save previous state */
NVLockVgaCrtcs(dev, false);
- nouveau_hw_save_vga_fonts(dev, 1);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->save(crtc);
@@ -223,7 +222,7 @@ nv04_display_destroy(struct drm_device *dev)
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -246,9 +245,9 @@ nv04_display_destroy(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->restore(crtc);
- nouveau_hw_save_vga_fonts(dev, 0);
-
drm_mode_config_cleanup(dev);
+
+ nouveau_hw_save_vga_fonts(dev, 0);
}
void
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 09a3107..fd01caa 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
-static void
+void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
@@ -39,8 +39,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
return;
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -55,21 +54,19 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
FIRE_RING(chan);
}
-static void
+void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];
if (info->state != FBINFO_STATE_RUNNING)
return;
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -80,14 +77,18 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
- OUT_RING(chan, color);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ else
+ OUT_RING(chan, rect->color);
BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
OUT_RING(chan, (rect->dx << 16) | rect->dy);
OUT_RING(chan, (rect->width << 16) | rect->height);
FIRE_RING(chan);
}
-static void
+void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
@@ -109,8 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -144,8 +144,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int iter_len = dsize > 128 ? 128 : dsize;
if (RING_SPACE(chan, iter_len + 1)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
cfb_imageblit(info, image);
return;
}
@@ -184,6 +183,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = par->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ const int sub = NvSubCtxSurf2D;
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
@@ -242,30 +242,29 @@ nv04_fbcon_accel_init(struct fb_info *info)
return ret;
if (RING_SPACE(chan, 49)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
return 0;
}
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, 1, 0x0184, 2);
+ BEGIN_RING(chan, sub, 0x0184, 2);
OUT_RING(chan, NvDmaFB);
OUT_RING(chan, NvDmaFB);
- BEGIN_RING(chan, 1, 0x0300, 4);
+ BEGIN_RING(chan, sub, 0x0300, 4);
OUT_RING(chan, surface_fmt);
OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvRop);
- BEGIN_RING(chan, 1, 0x0300, 1);
+ BEGIN_RING(chan, sub, 0x0300, 1);
OUT_RING(chan, 0x55);
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvImagePatt);
- BEGIN_RING(chan, 1, 0x0300, 8);
+ BEGIN_RING(chan, sub, 0x0300, 8);
OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
OUT_RING(chan, 2);
@@ -279,9 +278,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, ~0);
OUT_RING(chan, ~0);
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvClipRect);
- BEGIN_RING(chan, 1, 0x0300, 2);
+ BEGIN_RING(chan, sub, 0x0300, 2);
OUT_RING(chan, 0);
OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
@@ -308,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
FIRE_RING(chan);
- info->fbops->fb_fillrect = nv04_fbcon_fillrect;
- info->fbops->fb_copyarea = nv04_fbcon_copyarea;
- info->fbops->fb_imageblit = nv04_fbcon_imageblit;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 0c3cd53..f31347b 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -71,6 +71,40 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
return (reassign == 1);
}
+bool
+nv04_fifo_cache_flush(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ uint64_t start = ptimer->read(dev);
+
+ do {
+ if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
+ return true;
+
+ } while (ptimer->read(dev) - start < 100000000);
+
+ NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
+
+ return false;
+}
+
+bool
+nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
+{
+ uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
+
+ if (enable) {
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
+ } else {
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+ }
+
+ return !!(pull & 1);
+}
+
int
nv04_fifo_channel_id(struct drm_device *dev)
{
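
nv04_fifo_cache_flush() above waits for PFIFO's CACHE1 GET pointer to catch up with PUT, bounded by a budget of 100,000,000 PTIMER ticks (100 ms if PTIMER counts nanoseconds). The user-space sketch below shows the same poll-with-timeout pattern; read_get()/read_put() and the simulated pointers are assumptions of the sketch, not driver interfaces.

/* Illustrative sketch only, not part of the patch. */
#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t read_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Simulated CACHE1 GET/PUT pointers so the sketch runs standalone;
 * the driver reads these from PFIFO registers instead. */
static uint32_t sim_get, sim_put = 8;

static uint32_t read_get(void)
{
	if (sim_get < sim_put)
		sim_get++;	/* pretend the cache puller makes progress */
	return sim_get;
}

static uint32_t read_put(void)
{
	return sim_put;
}

static bool cache_flush_sketch(void)
{
	uint64_t start = read_ns();

	do {
		if (read_get() == read_put())
			return true;	/* cache drained */
	} while (read_ns() - start < 100000000ull);	/* ~100 ms budget */

	return false;	/* timed out; the driver logs an error here */
}

int main(void)
{
	printf("flush %s\n", cache_flush_sketch() ? "ok" : "timed out");
	return 0;
}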
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index 396ee92..e260986 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -28,6 +28,10 @@
#include "nouveau_drv.h"
static uint32_t nv04_graph_ctx_regs[] = {
+ 0x0040053c,
+ 0x00400544,
+ 0x00400540,
+ 0x00400548,
NV04_PGRAPH_CTX_SWITCH1,
NV04_PGRAPH_CTX_SWITCH2,
NV04_PGRAPH_CTX_SWITCH3,
@@ -102,69 +106,69 @@ static uint32_t nv04_graph_ctx_regs[] = {
NV04_PGRAPH_PATT_COLOR0,
NV04_PGRAPH_PATT_COLOR1,
NV04_PGRAPH_PATT_COLORRAM+0x00,
- NV04_PGRAPH_PATT_COLORRAM+0x01,
- NV04_PGRAPH_PATT_COLORRAM+0x02,
- NV04_PGRAPH_PATT_COLORRAM+0x03,
NV04_PGRAPH_PATT_COLORRAM+0x04,
- NV04_PGRAPH_PATT_COLORRAM+0x05,
- NV04_PGRAPH_PATT_COLORRAM+0x06,
- NV04_PGRAPH_PATT_COLORRAM+0x07,
NV04_PGRAPH_PATT_COLORRAM+0x08,
- NV04_PGRAPH_PATT_COLORRAM+0x09,
- NV04_PGRAPH_PATT_COLORRAM+0x0A,
- NV04_PGRAPH_PATT_COLORRAM+0x0B,
- NV04_PGRAPH_PATT_COLORRAM+0x0C,
- NV04_PGRAPH_PATT_COLORRAM+0x0D,
- NV04_PGRAPH_PATT_COLORRAM+0x0E,
- NV04_PGRAPH_PATT_COLORRAM+0x0F,
+ NV04_PGRAPH_PATT_COLORRAM+0x0c,
NV04_PGRAPH_PATT_COLORRAM+0x10,
- NV04_PGRAPH_PATT_COLORRAM+0x11,
- NV04_PGRAPH_PATT_COLORRAM+0x12,
- NV04_PGRAPH_PATT_COLORRAM+0x13,
NV04_PGRAPH_PATT_COLORRAM+0x14,
- NV04_PGRAPH_PATT_COLORRAM+0x15,
- NV04_PGRAPH_PATT_COLORRAM+0x16,
- NV04_PGRAPH_PATT_COLORRAM+0x17,
NV04_PGRAPH_PATT_COLORRAM+0x18,
- NV04_PGRAPH_PATT_COLORRAM+0x19,
- NV04_PGRAPH_PATT_COLORRAM+0x1A,
- NV04_PGRAPH_PATT_COLORRAM+0x1B,
- NV04_PGRAPH_PATT_COLORRAM+0x1C,
- NV04_PGRAPH_PATT_COLORRAM+0x1D,
- NV04_PGRAPH_PATT_COLORRAM+0x1E,
- NV04_PGRAPH_PATT_COLORRAM+0x1F,
+ NV04_PGRAPH_PATT_COLORRAM+0x1c,
NV04_PGRAPH_PATT_COLORRAM+0x20,
- NV04_PGRAPH_PATT_COLORRAM+0x21,
- NV04_PGRAPH_PATT_COLORRAM+0x22,
- NV04_PGRAPH_PATT_COLORRAM+0x23,
NV04_PGRAPH_PATT_COLORRAM+0x24,
- NV04_PGRAPH_PATT_COLORRAM+0x25,
- NV04_PGRAPH_PATT_COLORRAM+0x26,
- NV04_PGRAPH_PATT_COLORRAM+0x27,
NV04_PGRAPH_PATT_COLORRAM+0x28,
- NV04_PGRAPH_PATT_COLORRAM+0x29,
- NV04_PGRAPH_PATT_COLORRAM+0x2A,
- NV04_PGRAPH_PATT_COLORRAM+0x2B,
- NV04_PGRAPH_PATT_COLORRAM+0x2C,
- NV04_PGRAPH_PATT_COLORRAM+0x2D,
- NV04_PGRAPH_PATT_COLORRAM+0x2E,
- NV04_PGRAPH_PATT_COLORRAM+0x2F,
+ NV04_PGRAPH_PATT_COLORRAM+0x2c,
NV04_PGRAPH_PATT_COLORRAM+0x30,
- NV04_PGRAPH_PATT_COLORRAM+0x31,
- NV04_PGRAPH_PATT_COLORRAM+0x32,
- NV04_PGRAPH_PATT_COLORRAM+0x33,
NV04_PGRAPH_PATT_COLORRAM+0x34,
- NV04_PGRAPH_PATT_COLORRAM+0x35,
- NV04_PGRAPH_PATT_COLORRAM+0x36,
- NV04_PGRAPH_PATT_COLORRAM+0x37,
NV04_PGRAPH_PATT_COLORRAM+0x38,
- NV04_PGRAPH_PATT_COLORRAM+0x39,
- NV04_PGRAPH_PATT_COLORRAM+0x3A,
- NV04_PGRAPH_PATT_COLORRAM+0x3B,
- NV04_PGRAPH_PATT_COLORRAM+0x3C,
- NV04_PGRAPH_PATT_COLORRAM+0x3D,
- NV04_PGRAPH_PATT_COLORRAM+0x3E,
- NV04_PGRAPH_PATT_COLORRAM+0x3F,
+ NV04_PGRAPH_PATT_COLORRAM+0x3c,
+ NV04_PGRAPH_PATT_COLORRAM+0x40,
+ NV04_PGRAPH_PATT_COLORRAM+0x44,
+ NV04_PGRAPH_PATT_COLORRAM+0x48,
+ NV04_PGRAPH_PATT_COLORRAM+0x4c,
+ NV04_PGRAPH_PATT_COLORRAM+0x50,
+ NV04_PGRAPH_PATT_COLORRAM+0x54,
+ NV04_PGRAPH_PATT_COLORRAM+0x58,
+ NV04_PGRAPH_PATT_COLORRAM+0x5c,
+ NV04_PGRAPH_PATT_COLORRAM+0x60,
+ NV04_PGRAPH_PATT_COLORRAM+0x64,
+ NV04_PGRAPH_PATT_COLORRAM+0x68,
+ NV04_PGRAPH_PATT_COLORRAM+0x6c,
+ NV04_PGRAPH_PATT_COLORRAM+0x70,
+ NV04_PGRAPH_PATT_COLORRAM+0x74,
+ NV04_PGRAPH_PATT_COLORRAM+0x78,
+ NV04_PGRAPH_PATT_COLORRAM+0x7c,
+ NV04_PGRAPH_PATT_COLORRAM+0x80,
+ NV04_PGRAPH_PATT_COLORRAM+0x84,
+ NV04_PGRAPH_PATT_COLORRAM+0x88,
+ NV04_PGRAPH_PATT_COLORRAM+0x8c,
+ NV04_PGRAPH_PATT_COLORRAM+0x90,
+ NV04_PGRAPH_PATT_COLORRAM+0x94,
+ NV04_PGRAPH_PATT_COLORRAM+0x98,
+ NV04_PGRAPH_PATT_COLORRAM+0x9c,
+ NV04_PGRAPH_PATT_COLORRAM+0xa0,
+ NV04_PGRAPH_PATT_COLORRAM+0xa4,
+ NV04_PGRAPH_PATT_COLORRAM+0xa8,
+ NV04_PGRAPH_PATT_COLORRAM+0xac,
+ NV04_PGRAPH_PATT_COLORRAM+0xb0,
+ NV04_PGRAPH_PATT_COLORRAM+0xb4,
+ NV04_PGRAPH_PATT_COLORRAM+0xb8,
+ NV04_PGRAPH_PATT_COLORRAM+0xbc,
+ NV04_PGRAPH_PATT_COLORRAM+0xc0,
+ NV04_PGRAPH_PATT_COLORRAM+0xc4,
+ NV04_PGRAPH_PATT_COLORRAM+0xc8,
+ NV04_PGRAPH_PATT_COLORRAM+0xcc,
+ NV04_PGRAPH_PATT_COLORRAM+0xd0,
+ NV04_PGRAPH_PATT_COLORRAM+0xd4,
+ NV04_PGRAPH_PATT_COLORRAM+0xd8,
+ NV04_PGRAPH_PATT_COLORRAM+0xdc,
+ NV04_PGRAPH_PATT_COLORRAM+0xe0,
+ NV04_PGRAPH_PATT_COLORRAM+0xe4,
+ NV04_PGRAPH_PATT_COLORRAM+0xe8,
+ NV04_PGRAPH_PATT_COLORRAM+0xec,
+ NV04_PGRAPH_PATT_COLORRAM+0xf0,
+ NV04_PGRAPH_PATT_COLORRAM+0xf4,
+ NV04_PGRAPH_PATT_COLORRAM+0xf8,
+ NV04_PGRAPH_PATT_COLORRAM+0xfc,
NV04_PGRAPH_PATTERN,
0x0040080c,
NV04_PGRAPH_PATTERN_SHAPE,
@@ -247,14 +251,6 @@ static uint32_t nv04_graph_ctx_regs[] = {
0x004004f8,
0x0040047c,
0x004004fc,
- 0x0040053c,
- 0x00400544,
- 0x00400540,
- 0x00400548,
- 0x00400560,
- 0x00400568,
- 0x00400564,
- 0x0040056c,
0x00400534,
0x00400538,
0x00400514,
@@ -341,9 +337,8 @@ static uint32_t nv04_graph_ctx_regs[] = {
0x00400500,
0x00400504,
NV04_PGRAPH_VALID1,
- NV04_PGRAPH_VALID2
-
-
+ NV04_PGRAPH_VALID2,
+ NV04_PGRAPH_DEBUG_3
};
struct graph_state {
@@ -388,6 +383,18 @@ nv04_graph_context_switch(struct drm_device *dev)
pgraph->fifo_access(dev, true);
}
+static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
+ if (nv04_graph_ctx_regs[i] == reg)
+ return &ctx->nv04[i];
+ }
+
+ return NULL;
+}
+
int nv04_graph_create_context(struct nouveau_channel *chan)
{
struct graph_state *pgraph_ctx;
@@ -398,15 +405,8 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
if (pgraph_ctx == NULL)
return -ENOMEM;
- /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */
- pgraph_ctx->nv04[0] = 0x0001ffff;
- /* is it really needed ??? */
-#if 0
- dev_priv->fifos[channel].pgraph_ctx[1] =
- nv_rd32(dev, NV_PGRAPH_DEBUG_4);
- dev_priv->fifos[channel].pgraph_ctx[2] =
- nv_rd32(dev, 0x004006b0);
-#endif
+ *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
return 0;
}
@@ -429,9 +429,13 @@ int nv04_graph_load_context(struct nouveau_channel *chan)
nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
- nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24);
+
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
+
tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
+
return 0;
}
@@ -494,7 +498,7 @@ int nv04_graph_init(struct drm_device *dev)
nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= dev_priv->engine.fifo.channels << 24;
+ tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
/* These don't belong here, they're part of a per-channel context */
@@ -533,7 +537,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
struct drm_device *dev = chan->dev;
- uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff;
+ uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
uint32_t tmp;
@@ -543,11 +547,11 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
nv_wi32(dev, instance, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
- nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp);
+ nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
return 0;
}
-static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
{ 0x0150, nv04_graph_mthd_set_ref },
{}
};
@@ -558,7 +562,7 @@ static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
};
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0039, false, nv04_graph_mthds_m2mf },
+ { 0x0039, false, NULL },
{ 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
{ 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
{ 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
@@ -574,6 +578,7 @@ struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
{ 0x0053, false, NULL }, /* surf3d */
{ 0x0054, false, NULL }, /* tex_tri */
{ 0x0055, false, NULL }, /* multitex_tri */
+ { 0x506e, true, nv04_graph_mthds_sw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a20c206..a3b9563 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -30,7 +30,7 @@ nv04_instmem_determine_amount(struct drm_device *dev)
* of vram. For now, only reserve a small piece until we know
* more about what each chipset requires.
*/
- switch (dev_priv->chipset & 0xf0) {
+ switch (dev_priv->chipset) {
case 0x40:
case 0x47:
case 0x49:
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index 79e2d10..cc5cda4 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,17 +3,37 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+void
+nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch) {
+ if (dev_priv->card_type >= NV_20)
+ addr |= 1;
+ else
+ addr |= 1 << 31;
+ }
+
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), addr);
+}
+
int
nv10_fb_init(struct drm_device *dev)
{
- uint32_t fb_bar_size;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
int i;
- fb_bar_size = drm_get_resource_len(dev, 0) - 1;
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, NV10_PFB_TILE(i), 0);
- nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
- }
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_region_tiling(dev, i, 0, 0, 0);
return 0;
}
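
nv10_fb_set_region_tiling() above derives the TLIMIT value as max(1u, addr + size) - 1 and marks the TILE address as valid only when a non-zero pitch is given (bit 0 on NV20 and later, bit 31 before that), so a disabled region programs addr = pitch = 0 and limit = 0. A worked example with illustrative numbers, not taken from any real board:

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr  = 0x00100000;	/* start of the tiled region */
	uint32_t size  = 0x00040000;	/* 256 KiB */
	uint32_t pitch = 0x00000100;	/* non-zero pitch => region enabled */
	int nv20_or_later = 1;

	/* limit = max(1u, addr + size) - 1: last byte covered by the
	 * region; a disabled (addr = size = 0) region still yields 0. */
	uint32_t limit = (addr + size > 1 ? addr + size : 1) - 1;

	if (pitch)
		addr |= nv20_or_later ? 1 : 1u << 31;	/* "valid" flag */

	printf("TLIMIT = 0x%08x\n", (unsigned int)limit);	/* 0x0013ffff */
	printf("TSIZE  = 0x%08x\n", (unsigned int)pitch);
	printf("TILE   = 0x%08x\n", (unsigned int)addr);	/* 0x00100001 */
	return 0;
}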
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 6bf6804..fcf2cdd 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -389,49 +389,50 @@ struct graph_state {
int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
struct pipe_state pipe_state;
+ uint32_t lma_window[4];
};
+#define PIPE_SAVE(dev, state, addr) \
+ do { \
+ int __i; \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
+ state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
+ } while (0)
+
+#define PIPE_RESTORE(dev, state, addr) \
+ do { \
+ int __i; \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+ } while (0)
+
static void nv10_graph_save_pipe(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
- struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
- int i;
-#define PIPE_SAVE(addr) \
- do { \
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
- for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
- fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
- } while (0)
-
- PIPE_SAVE(0x4400);
- PIPE_SAVE(0x0200);
- PIPE_SAVE(0x6400);
- PIPE_SAVE(0x6800);
- PIPE_SAVE(0x6c00);
- PIPE_SAVE(0x7000);
- PIPE_SAVE(0x7400);
- PIPE_SAVE(0x7800);
- PIPE_SAVE(0x0040);
- PIPE_SAVE(0x0000);
-
-#undef PIPE_SAVE
+ struct pipe_state *pipe = &pgraph_ctx->pipe_state;
+
+ PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
+ PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
+ PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
+ PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
+ PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
+ PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
+ PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
+ PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
}
static void nv10_graph_load_pipe(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
- struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
- int i;
+ struct pipe_state *pipe = &pgraph_ctx->pipe_state;
uint32_t xfmode0, xfmode1;
-#define PIPE_RESTORE(addr) \
- do { \
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
- for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
- } while (0)
-
+ int i;
nouveau_wait_for_idle(dev);
/* XXX check haiku comments */
@@ -457,24 +458,22 @@ static void nv10_graph_load_pipe(struct nouveau_channel *chan)
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
- PIPE_RESTORE(0x0200);
+ PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
nouveau_wait_for_idle(dev);
/* restore XFMODE */
nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
- PIPE_RESTORE(0x6400);
- PIPE_RESTORE(0x6800);
- PIPE_RESTORE(0x6c00);
- PIPE_RESTORE(0x7000);
- PIPE_RESTORE(0x7400);
- PIPE_RESTORE(0x7800);
- PIPE_RESTORE(0x4400);
- PIPE_RESTORE(0x0000);
- PIPE_RESTORE(0x0040);
+ PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
+ PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
+ PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
+ PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
+ PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
+ PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
+ PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
nouveau_wait_for_idle(dev);
-
-#undef PIPE_RESTORE
}
static void nv10_graph_create_pipe(struct nouveau_channel *chan)
@@ -808,6 +807,20 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
chan->pgraph_ctx = NULL;
}
+void
+nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1 << 31;
+
+ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+}
+
int nv10_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -832,21 +845,16 @@ int nv10_graph_init(struct drm_device *dev)
(1<<31));
if (dev_priv->chipset >= 0x17) {
nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+ nv_wr32(dev, 0x400a10, 0x3ff3fb6);
+ nv_wr32(dev, 0x400838, 0x2f8684);
+ nv_wr32(dev, 0x40083c, 0x115f3f);
nv_wr32(dev, 0x004006b0, 0x40000020);
} else
nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
- /* copy tile info from PFB */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, NV10_PGRAPH_TILE(i),
- nv_rd32(dev, NV10_PFB_TILE(i)));
- nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
- nv_rd32(dev, NV10_PFB_TSTATUS(i)));
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
@@ -867,6 +875,115 @@ void nv10_graph_takedown(struct drm_device *dev)
{
}
+static int
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *ctx = chan->pgraph_ctx;
+ struct pipe_state *pipe = &ctx->pipe_state;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
+ uint32_t xfmode0, xfmode1;
+ int i;
+
+ ctx->lma_window[(mthd - 0x1638) / 4] = data;
+
+ if (mthd != 0x1644)
+ return 0;
+
+ nouveau_wait_for_idle(dev);
+
+ PIPE_SAVE(dev, pipe_0x0040, 0x0040);
+ PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
+
+ PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
+
+ nouveau_wait_for_idle(dev);
+
+ xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
+
+ PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
+ PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
+ PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+ PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
+
+ nouveau_wait_for_idle(dev);
+
+ PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
+
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
+
+ PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
+ PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
+ PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
+ PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nouveau_wait_for_idle(dev);
+
+ pgraph->fifo_access(dev, true);
+
+ return 0;
+}
+
+static int
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
+ nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
+ nv_wr32(dev, 0x004006b0,
+ nv_rd32(dev, 0x004006b0) | 0x8 << 24);
+
+ pgraph->fifo_access(dev, true);
+
+ return 0;
+}
+
+static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
+ { 0x1638, nv17_graph_mthd_lma_window },
+ { 0x163c, nv17_graph_mthd_lma_window },
+ { 0x1640, nv17_graph_mthd_lma_window },
+ { 0x1644, nv17_graph_mthd_lma_window },
+ { 0x1658, nv17_graph_mthd_lma_enable },
+ {}
+};
+
struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
{ 0x0030, false, NULL }, /* null */
{ 0x0039, false, NULL }, /* m2mf */
@@ -887,6 +1004,6 @@ struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
{ 0x0095, false, NULL }, /* multitex_tri */
{ 0x0056, false, NULL }, /* celcius (nv10) */
{ 0x0096, false, NULL }, /* celcius (nv11) */
- { 0x0099, false, NULL }, /* celcius (nv17) */
+ { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
{}
};
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 46cfd9c..21ac6e4 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -33,13 +33,103 @@
#include "nouveau_hw.h"
#include "nv17_tv.h"
-enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
- struct drm_connector *connector,
- uint32_t pin_mask)
+static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+ uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
+ fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
+ uint32_t sample = 0;
+ int head;
+
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
+ testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
+ if (dev_priv->vbios->tvdactestval)
+ testval = dev_priv->vbios->tvdactestval;
+
+ dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+ head = (dacclk & 0x100) >> 8;
+
+ /* Save the previous state. */
+ gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
+ gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+ fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
+ fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
+ fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
+ fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+ test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+ ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
+ ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
+ ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
+
+ /* Prepare the DAC for load detection. */
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+ NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+ NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
+ NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+ NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
+ NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+ (dacclk & ~0xff) | 0x22);
+ msleep(1);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+ (dacclk & ~0xff) | 0x21);
+
+ NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
+ NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);
+
+ /* Sample pin 0x4 (usually S-video luma). */
+ NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
+ msleep(20);
+ sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+ & 0x4 << 28;
+
+ /* Sample the remaining pins. */
+ NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
+ msleep(20);
+ sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+ & 0xa << 28;
+
+ /* Restore the previous state. */
+ NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
+ NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
+ NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);
+
+ return sample;
+}
+
+static enum drm_connector_status
+nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *conf = &dev->mode_config;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct dcb_entry *dcb = tv_enc->base.dcb;
- tv_enc->pin_mask = pin_mask >> 28 & 0xe;
+ if (dev_priv->chipset == 0x42 ||
+ dev_priv->chipset == 0x43)
+ tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
+ else
+ tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;
switch (tv_enc->pin_mask) {
case 0x2:
@@ -50,7 +140,7 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
break;
case 0xe:
- if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
+ if (dcb->tvconf.has_component_output)
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
else
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
@@ -61,11 +151,16 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
}
drm_connector_property_set_value(connector,
- encoder->dev->mode_config.tv_subconnector_property,
- tv_enc->subconnector);
+ conf->tv_subconnector_property,
+ tv_enc->subconnector);
- return tv_enc->subconnector ? connector_status_connected :
- connector_status_disconnected;
+ if (tv_enc->subconnector) {
+ NV_INFO(dev, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
+ return connector_status_connected;
+ } else {
+ return connector_status_disconnected;
+ }
}
static const struct {
@@ -219,7 +314,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
return;
nouveau_encoder(encoder)->last_dpms = mode;
- NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+ NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
mode, nouveau_encoder(encoder)->dcb->index);
regs->ptv_200 &= ~1;
@@ -484,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
nouveau_encoder(encoder)->restore.output);
nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+ nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
}
static int nv17_tv_create_resources(struct drm_encoder *encoder,
@@ -619,7 +716,7 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
{
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
kfree(tv_enc);
@@ -633,7 +730,7 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
.prepare = nv17_tv_prepare,
.commit = nv17_tv_commit,
.mode_set = nv17_tv_mode_set,
- .detect = nv17_dac_detect,
+ .detect = nv17_tv_detect,
};
static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 18ba74f..d6fc0a8 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -514,6 +514,27 @@ nv20_graph_rdi(struct drm_device *dev)
nouveau_wait_for_idle(dev);
}
+void
+nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1;
+
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+}
+
int
nv20_graph_init(struct drm_device *dev)
{
@@ -572,27 +593,10 @@ nv20_graph_init(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
}
- /* copy tile info from PFB */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, 0x00400904 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- nv_wr32(dev, 0x00400908 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- /* which is NV40_PGRAPH_TSIZE0(i) ?? */
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- nv_wr32(dev, 0x00400900 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TILE(i)));
- /* which is NV40_PGRAPH_TILE0(i) ?? */
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, NV10_PFB_TILE(i)));
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+
for (i = 0; i < 8; i++) {
nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
@@ -704,18 +708,9 @@ nv30_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x4000c0, 0x00000016);
- /* copy tile info from PFB */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, 0x00400904 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
- nv_wr32(dev, 0x00400908 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- /* which is NV40_PGRAPH_TSIZE0(i) ?? */
- nv_wr32(dev, 0x00400900 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TILE(i)));
- /* which is NV40_PGRAPH_TILE0(i) ?? */
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index ca1d271..3cd07d8 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -3,12 +3,37 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+void
+nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1;
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), addr);
+ break;
+
+ default:
+ nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
+ nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
+ nv_wr32(dev, NV40_PFB_TILE(i), addr);
+ break;
+ }
+}
+
int
nv40_fb_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fb_bar_size, tmp;
- int num_tiles;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ uint32_t tmp;
int i;
/* This is strictly a NV4x register (don't know about NV5x). */
@@ -23,35 +48,23 @@ nv40_fb_init(struct drm_device *dev)
case 0x45:
tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
- num_tiles = NV10_PFB_TILE__SIZE;
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
break;
case 0x46: /* G72 */
case 0x47: /* G70 */
case 0x49: /* G71 */
case 0x4b: /* G73 */
case 0x4c: /* C51 (G7X version) */
- num_tiles = NV40_PFB_TILE__SIZE_1;
+ pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
break;
default:
- num_tiles = NV40_PFB_TILE__SIZE_0;
+ pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
break;
}
- fb_bar_size = drm_get_resource_len(dev, 0) - 1;
- switch (dev_priv->chipset) {
- case 0x40:
- for (i = 0; i < num_tiles; i++) {
- nv_wr32(dev, NV10_PFB_TILE(i), 0);
- nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
- }
- break;
- default:
- for (i = 0; i < num_tiles; i++) {
- nv_wr32(dev, NV40_PFB_TILE(i), 0);
- nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
- }
- break;
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_region_tiling(dev, i, 0, 0, 0);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index d3e0a2a..53e8afe 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -24,36 +24,10 @@
*
*/
-#include <linux/firmware.h>
-
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-
-MODULE_FIRMWARE("nouveau/nv40.ctxprog");
-MODULE_FIRMWARE("nouveau/nv40.ctxvals");
-MODULE_FIRMWARE("nouveau/nv41.ctxprog");
-MODULE_FIRMWARE("nouveau/nv41.ctxvals");
-MODULE_FIRMWARE("nouveau/nv42.ctxprog");
-MODULE_FIRMWARE("nouveau/nv42.ctxvals");
-MODULE_FIRMWARE("nouveau/nv43.ctxprog");
-MODULE_FIRMWARE("nouveau/nv43.ctxvals");
-MODULE_FIRMWARE("nouveau/nv44.ctxprog");
-MODULE_FIRMWARE("nouveau/nv44.ctxvals");
-MODULE_FIRMWARE("nouveau/nv46.ctxprog");
-MODULE_FIRMWARE("nouveau/nv46.ctxvals");
-MODULE_FIRMWARE("nouveau/nv47.ctxprog");
-MODULE_FIRMWARE("nouveau/nv47.ctxvals");
-MODULE_FIRMWARE("nouveau/nv49.ctxprog");
-MODULE_FIRMWARE("nouveau/nv49.ctxvals");
-MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
-MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
-MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
-MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
-MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
-MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
-MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
-MODULE_FIRMWARE("nouveau/nv4e.ctxvals");
+#include "nouveau_grctx.h"
struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
@@ -83,27 +57,30 @@ nv40_graph_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ctx;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
int ret;
- /* Allocate a 175KiB block of PRAMIN to store the context. This
- * is massive overkill for a lot of chipsets, but it should be safe
- * until we're able to implement this properly (will happen at more
- * or less the same time we're able to write our own context programs.
- */
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
+ 16, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx);
if (ret)
return ret;
- ctx = chan->ramin_grctx->gpuobj;
/* Initialise default context values */
dev_priv->engine.instmem.prepare_access(dev, true);
- nv40_grctx_vals_load(dev, ctx);
- nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
- dev_priv->engine.instmem.finish_access(dev);
+ if (!pgraph->ctxprog) {
+ struct nouveau_grctx ctx = {};
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = chan->ramin_grctx->gpuobj;
+ nv40_grctx_init(&ctx);
+ } else {
+ nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
+ }
+ nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
+ chan->ramin_grctx->gpuobj->im_pramin->start);
+ dev_priv->engine.instmem.finish_access(dev);
return 0;
}
@@ -204,133 +181,46 @@ nv40_graph_unload_context(struct drm_device *dev)
return ret;
}
-struct nouveau_ctxprog {
- uint32_t signature;
- uint8_t version;
- uint16_t length;
- uint32_t data[];
-} __attribute__ ((packed));
-
-struct nouveau_ctxvals {
- uint32_t signature;
- uint8_t version;
- uint32_t length;
- struct {
- uint32_t offset;
- uint32_t value;
- } data[];
-} __attribute__ ((packed));
-
-int
-nv40_grctx_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- const int chipset = dev_priv->chipset;
- const struct firmware *fw;
- const struct nouveau_ctxprog *cp;
- const struct nouveau_ctxvals *cv;
- char name[32];
- int ret, i;
-
- pgraph->accel_blocked = true;
-
- if (!pgraph->ctxprog) {
- sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
- ret = request_firmware(&fw, name, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
- return ret;
- }
-
- pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
- if (!pgraph->ctxprog) {
- NV_ERROR(dev, "OOM copying ctxprog\n");
- release_firmware(fw);
- return -ENOMEM;
- }
- memcpy(pgraph->ctxprog, fw->data, fw->size);
-
- cp = pgraph->ctxprog;
- if (cp->signature != 0x5043564e || cp->version != 0 ||
- cp->length != ((fw->size - 7) / 4)) {
- NV_ERROR(dev, "ctxprog invalid\n");
- release_firmware(fw);
- nv40_grctx_fini(dev);
- return -EINVAL;
- }
- release_firmware(fw);
- }
-
- if (!pgraph->ctxvals) {
- sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
- ret = request_firmware(&fw, name, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
- nv40_grctx_fini(dev);
- return ret;
- }
-
- pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
- if (!pgraph->ctxprog) {
- NV_ERROR(dev, "OOM copying ctxprog\n");
- release_firmware(fw);
- nv40_grctx_fini(dev);
- return -ENOMEM;
- }
- memcpy(pgraph->ctxvals, fw->data, fw->size);
-
- cv = (void *)pgraph->ctxvals;
- if (cv->signature != 0x5643564e || cv->version != 0 ||
- cv->length != ((fw->size - 9) / 8)) {
- NV_ERROR(dev, "ctxvals invalid\n");
- release_firmware(fw);
- nv40_grctx_fini(dev);
- return -EINVAL;
- }
- release_firmware(fw);
- }
-
- cp = pgraph->ctxprog;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < cp->length; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]);
-
- pgraph->accel_blocked = false;
- return 0;
-}
-
void
-nv40_grctx_fini(struct drm_device *dev)
+nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-
- if (pgraph->ctxprog) {
- kfree(pgraph->ctxprog);
- pgraph->ctxprog = NULL;
- }
+ uint32_t limit = max(1u, addr + size) - 1;
- if (pgraph->ctxvals) {
- kfree(pgraph->ctxprog);
- pgraph->ctxvals = NULL;
- }
-}
+ if (pitch)
+ addr |= 1;
-void
-nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_ctxvals *cv = pgraph->ctxvals;
- int i;
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x4a:
+ case 0x4e:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ break;
- if (!cv)
- return;
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ break;
- for (i = 0; i < cv->length; i++)
- nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value);
+ default:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ break;
+ }
}
/*
@@ -347,7 +237,8 @@ nv40_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv =
(struct drm_nouveau_private *)dev->dev_private;
- uint32_t vramsz, tmp;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ uint32_t vramsz;
int i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -355,7 +246,26 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
- nv40_grctx_init(dev);
+ if (nouveau_ctxfw) {
+ nouveau_grctx_prog_load(dev);
+ dev_priv->engine.graph.grctx_size = 175 * 1024;
+ }
+
+ if (!dev_priv->engine.graph.ctxprog) {
+ struct nouveau_grctx ctx = {};
+ uint32_t cp[256];
+
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 256;
+ nv40_grctx_init(&ctx);
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+ }
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -425,74 +335,9 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x400b38, 0x2ffff800);
nv_wr32(dev, 0x400b3c, 0x00006000);
- /* copy tile info from PFB */
- switch (dev_priv->chipset) {
- case 0x40: /* vanilla NV40 */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- tmp = nv_rd32(dev, NV10_PFB_TILE(i));
- nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
- tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
- nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
- tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
- nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
- tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
- nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
- }
- break;
- case 0x44:
- case 0x4a:
- case 0x4e: /* NV44-based cores don't have 0x406900? */
- for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
- tmp = nv_rd32(dev, NV40_PFB_TILE(i));
- nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
- nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
- nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
- nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
- }
- break;
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b: /* G7X-based cores */
- for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
- tmp = nv_rd32(dev, NV40_PFB_TILE(i));
- nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
- nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
- nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
- nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
- }
- break;
- default: /* everything else */
- for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
- tmp = nv_rd32(dev, NV40_PFB_TILE(i));
- nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
- nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
- nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
- nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
- }
- break;
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
/* begin RAM config */
vramsz = drm_get_resource_len(dev, 0) - 1;
@@ -535,6 +380,7 @@ nv40_graph_init(struct drm_device *dev)
void nv40_graph_takedown(struct drm_device *dev)
{
+ nouveau_grctx_fini(dev);
}
struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
new file mode 100644
index 0000000..11b11c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -0,0 +1,678 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* NVIDIA context programs handle a number of other conditions which are
+ * not implemented in our versions. It's not clear why NVIDIA context
+ * programs have this code, nor whether it's strictly necessary for
+ * correct operation. We'll implement additional handling if/when we
+ * discover it's necessary.
+ *
+ * - On context save, NVIDIA sets 0x400314 bit 0 to 1 if the "3D state"
+ *   flag is set; this gets saved into the context.
+ * - On context save, the context programs for all cards load nsource
+ * into a flag register and check for ILLEGAL_MTHD. If it's set,
+ * opcode 0x60000d is called before resuming normal operation.
+ * - Some context programs check more conditions than the above. NV44
+ * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
+ * and calls 0x60000d before resuming normal operation.
+ * - At the very beginning of NVIDIA's context programs, flag 9 is checked
+ *   and, if true, 0x800001 is called with count=0, pos=0, the flag is cleared
+ *   and then the ctxprog is aborted.  It looks like a complicated NOP;
+ *   its purpose is unknown.
+ * - In the section of code that loads the per-vs state, NVIDIA checks
+ * flag 10. If it's set, they only transfer the small 0x300 byte block
+ * of state + the state for a single vs as opposed to the state for
+ * all vs units. It doesn't seem likely that it'll occur in normal
+ * operation, especially seeing as it appears NVIDIA may have screwed
+ * up the ctxprogs for some cards and have an invalid instruction
+ * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
+ * - There's a number of places where context offset 0 (where we place
+ * the PRAMIN offset of the context) is loaded into either 0x408000,
+ * 0x408004 or 0x408008. Not sure what's up there either.
+ * - The ctxprogs for some cards save 0x400a00 again during the cleanup
+ * path for auto-loadctx.
+ */
+
+#define CP_FLAG_CLEAR 0
+#define CP_FLAG_SET 1
+#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD 0
+#define CP_FLAG_SWAP_DIRECTION_SAVE 1
+#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING 1
+#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING 1
+#define CP_FLAG_STATUS ((3 * 32) + 0)
+#define CP_FLAG_STATUS_IDLE 0
+#define CP_FLAG_STATUS_BUSY 1
+#define CP_FLAG_AUTO_SAVE ((3 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING 1
+#define CP_FLAG_AUTO_LOAD ((3 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING 1
+#define CP_FLAG_UNK54 ((3 * 32) + 6)
+#define CP_FLAG_UNK54_CLEAR 0
+#define CP_FLAG_UNK54_SET 1
+#define CP_FLAG_ALWAYS ((3 * 32) + 8)
+#define CP_FLAG_ALWAYS_FALSE 0
+#define CP_FLAG_ALWAYS_TRUE 1
+#define CP_FLAG_UNK57 ((3 * 32) + 9)
+#define CP_FLAG_UNK57_CLEAR 0
+#define CP_FLAG_UNK57_SET 1
+
+#define CP_CTX 0x00100000
+#define CP_CTX_COUNT 0x000fc000
+#define CP_CTX_COUNT_SHIFT 14
+#define CP_CTX_REG 0x00003fff
+#define CP_LOAD_SR 0x00200000
+#define CP_LOAD_SR_VALUE 0x000fffff
+#define CP_BRA 0x00400000
+#define CP_BRA_IP 0x0000ff00
+#define CP_BRA_IP_SHIFT 8
+#define CP_BRA_IF_CLEAR 0x00000080
+#define CP_BRA_FLAG 0x0000007f
+#define CP_WAIT 0x00500000
+#define CP_WAIT_SET 0x00000080
+#define CP_WAIT_FLAG 0x0000007f
+#define CP_SET 0x00700000
+#define CP_SET_1 0x00000080
+#define CP_SET_FLAG 0x0000007f
+#define CP_NEXT_TO_SWAP 0x00600007
+#define CP_NEXT_TO_CURRENT 0x00600009
+#define CP_SET_CONTEXT_POINTER 0x0060000a
+#define CP_END 0x0060000e
+#define CP_LOAD_MAGIC_UNK01 0x00800001 /* unknown */
+#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
+#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_grctx.h"
+
+/* TODO:
+ * - get vs count from 0x1540
+ * - document unimplemented bits compared to nvidia
+ * - nsource handling
+ * - R0 & 0x0200 handling
+ * - single-vs handling
+ * - 400314 bit 0
+ */
+
+static int
+nv40_graph_4097(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if ((dev_priv->chipset & 0xf0) == 0x60)
+ return 0;
+
+ return !!(0x0baf & (1 << dev_priv->chipset));
+}
+
+static int
+nv40_graph_vs_count(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ return 8;
+ case 0x40:
+ return 6;
+ case 0x41:
+ case 0x42:
+ return 5;
+ case 0x43:
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ return 3;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ default:
+ return 1;
+ }
+}
+
+
+enum cp_label {
+ cp_check_load = 1,
+ cp_setup_auto_load,
+ cp_setup_load,
+ cp_setup_save,
+ cp_swap_state,
+ cp_swap_state3d_3_is_save,
+ cp_prepare_exit,
+ cp_exit,
+};
+
+static void
+nv40_graph_construct_general(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ cp_ctx(ctx, 0x4000a4, 1);
+ gr_def(ctx, 0x4000a4, 0x00000008);
+ cp_ctx(ctx, 0x400144, 58);
+ gr_def(ctx, 0x400144, 0x00000001);
+ cp_ctx(ctx, 0x400314, 1);
+ gr_def(ctx, 0x400314, 0x00000000);
+ cp_ctx(ctx, 0x400400, 10);
+ cp_ctx(ctx, 0x400480, 10);
+ cp_ctx(ctx, 0x400500, 19);
+ gr_def(ctx, 0x400514, 0x00040000);
+ gr_def(ctx, 0x400524, 0x55555555);
+ gr_def(ctx, 0x400528, 0x55555555);
+ gr_def(ctx, 0x40052c, 0x55555555);
+ gr_def(ctx, 0x400530, 0x55555555);
+ cp_ctx(ctx, 0x400560, 6);
+ gr_def(ctx, 0x400568, 0x0000ffff);
+ gr_def(ctx, 0x40056c, 0x0000ffff);
+ cp_ctx(ctx, 0x40057c, 5);
+ cp_ctx(ctx, 0x400710, 3);
+ gr_def(ctx, 0x400710, 0x20010001);
+ gr_def(ctx, 0x400714, 0x0f73ef00);
+ cp_ctx(ctx, 0x400724, 1);
+ gr_def(ctx, 0x400724, 0x02008821);
+ cp_ctx(ctx, 0x400770, 3);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x400814, 4);
+ cp_ctx(ctx, 0x400828, 5);
+ cp_ctx(ctx, 0x400840, 5);
+ gr_def(ctx, 0x400850, 0x00000040);
+ cp_ctx(ctx, 0x400858, 4);
+ gr_def(ctx, 0x400858, 0x00000040);
+ gr_def(ctx, 0x40085c, 0x00000040);
+ gr_def(ctx, 0x400864, 0x80000000);
+ cp_ctx(ctx, 0x40086c, 9);
+ gr_def(ctx, 0x40086c, 0x80000000);
+ gr_def(ctx, 0x400870, 0x80000000);
+ gr_def(ctx, 0x400874, 0x80000000);
+ gr_def(ctx, 0x400878, 0x80000000);
+ gr_def(ctx, 0x400888, 0x00000040);
+ gr_def(ctx, 0x40088c, 0x80000000);
+ cp_ctx(ctx, 0x4009c0, 8);
+ gr_def(ctx, 0x4009cc, 0x80000000);
+ gr_def(ctx, 0x4009dc, 0x80000000);
+ } else {
+ cp_ctx(ctx, 0x400840, 20);
+ if (!nv40_graph_4097(ctx->dev)) {
+ for (i = 0; i < 8; i++)
+ gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
+ }
+ gr_def(ctx, 0x400880, 0x00000040);
+ gr_def(ctx, 0x400884, 0x00000040);
+ gr_def(ctx, 0x400888, 0x00000040);
+ cp_ctx(ctx, 0x400894, 11);
+ gr_def(ctx, 0x400894, 0x00000040);
+ if (nv40_graph_4097(ctx->dev)) {
+ for (i = 0; i < 8; i++)
+ gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
+ }
+ cp_ctx(ctx, 0x4008e0, 2);
+ cp_ctx(ctx, 0x4008f8, 2);
+ if (dev_priv->chipset == 0x4c ||
+ (dev_priv->chipset & 0xf0) == 0x60)
+ cp_ctx(ctx, 0x4009f8, 1);
+ }
+ cp_ctx(ctx, 0x400a00, 73);
+ gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
+ cp_ctx(ctx, 0x401000, 4);
+ cp_ctx(ctx, 0x405004, 1);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x403448, 1);
+ gr_def(ctx, 0x403448, 0x00001010);
+ break;
+ default:
+ cp_ctx(ctx, 0x403440, 1);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x403440, 0x00000010);
+ break;
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ gr_def(ctx, 0x403440, 0x00003010);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ default:
+ gr_def(ctx, 0x403440, 0x00001010);
+ break;
+ }
+ break;
+ }
+}
+
+static void
+nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x401880, 51);
+ gr_def(ctx, 0x401940, 0x00000100);
+ } else
+ if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
+ dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+ cp_ctx(ctx, 0x401880, 32);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
+ if (dev_priv->chipset == 0x46)
+ cp_ctx(ctx, 0x401900, 16);
+ cp_ctx(ctx, 0x401940, 3);
+ }
+ cp_ctx(ctx, 0x40194c, 18);
+ gr_def(ctx, 0x401954, 0x00000111);
+ gr_def(ctx, 0x401958, 0x00080060);
+ gr_def(ctx, 0x401974, 0x00000080);
+ gr_def(ctx, 0x401978, 0xffff0000);
+ gr_def(ctx, 0x40197c, 0x00000001);
+ gr_def(ctx, 0x401990, 0x46400000);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x4019a0, 2);
+ cp_ctx(ctx, 0x4019ac, 5);
+ } else {
+ cp_ctx(ctx, 0x4019a0, 1);
+ cp_ctx(ctx, 0x4019b4, 3);
+ }
+ gr_def(ctx, 0x4019bc, 0xffff0000);
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x4019c0, 18);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
+ break;
+ }
+ cp_ctx(ctx, 0x401a08, 8);
+ gr_def(ctx, 0x401a10, 0x0fff0000);
+ gr_def(ctx, 0x401a14, 0x0fff0000);
+ gr_def(ctx, 0x401a1c, 0x00011100);
+ cp_ctx(ctx, 0x401a2c, 4);
+ cp_ctx(ctx, 0x401a44, 26);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
+ gr_def(ctx, 0x401a8c, 0x4b7fffff);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x401ab8, 3);
+ } else {
+ cp_ctx(ctx, 0x401ab8, 1);
+ cp_ctx(ctx, 0x401ac0, 1);
+ }
+ cp_ctx(ctx, 0x401ad0, 8);
+ gr_def(ctx, 0x401ad0, 0x30201000);
+ gr_def(ctx, 0x401ad4, 0x70605040);
+ gr_def(ctx, 0x401ad8, 0xb8a89888);
+ gr_def(ctx, 0x401adc, 0xf8e8d8c8);
+ cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
+ gr_def(ctx, 0x401b10, 0x40100000);
+ cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
+ gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
+ 0x00000004 : 0x00000000);
+ cp_ctx(ctx, 0x401b30, 25);
+ gr_def(ctx, 0x401b34, 0x0000ffff);
+ gr_def(ctx, 0x401b68, 0x435185d6);
+ gr_def(ctx, 0x401b6c, 0x2155b699);
+ gr_def(ctx, 0x401b70, 0xfedcba98);
+ gr_def(ctx, 0x401b74, 0x00000098);
+ gr_def(ctx, 0x401b84, 0xffffffff);
+ gr_def(ctx, 0x401b88, 0x00ff7000);
+ gr_def(ctx, 0x401b8c, 0x0000ffff);
+ if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
+ dev_priv->chipset != 0x4e)
+ cp_ctx(ctx, 0x401b94, 1);
+ cp_ctx(ctx, 0x401b98, 8);
+ gr_def(ctx, 0x401b9c, 0x00ff0000);
+ cp_ctx(ctx, 0x401bc0, 9);
+ gr_def(ctx, 0x401be0, 0x00ffff00);
+ cp_ctx(ctx, 0x401c00, 192);
+ for (i = 0; i < 16; i++) { /* fragment texture units */
+ gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
+ gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
+ gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
+ gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
+ gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
+ gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
+ }
+ for (i = 0; i < 4; i++) { /* vertex texture units */
+ gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
+ gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
+ gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
+ gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
+ }
+ cp_ctx(ctx, 0x400f5c, 3);
+ gr_def(ctx, 0x400f5c, 0x00000002);
+ cp_ctx(ctx, 0x400f84, 1);
+}
+
+static void
+nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ cp_ctx(ctx, 0x402000, 1);
+ cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x402404, 0x00000001);
+ break;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ gr_def(ctx, 0x402404, 0x00000020);
+ break;
+ case 0x46:
+ case 0x49:
+ case 0x4b:
+ gr_def(ctx, 0x402404, 0x00000421);
+ break;
+ default:
+ gr_def(ctx, 0x402404, 0x00000021);
+ }
+ if (dev_priv->chipset != 0x40)
+ gr_def(ctx, 0x402408, 0x030c30c3);
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ cp_ctx(ctx, 0x402440, 1);
+ gr_def(ctx, 0x402440, 0x00011001);
+ break;
+ default:
+ break;
+ }
+ cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
+ gr_def(ctx, 0x402488, 0x3e020200);
+ gr_def(ctx, 0x40248c, 0x00ffffff);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x402490, 0x60103f00);
+ break;
+ case 0x47:
+ gr_def(ctx, 0x402490, 0x40103f00);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x49:
+ case 0x4b:
+ gr_def(ctx, 0x402490, 0x20103f00);
+ break;
+ default:
+ gr_def(ctx, 0x402490, 0x0c103f00);
+ break;
+ }
+ gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
+ 0x00020000 : 0x00040000);
+ cp_ctx(ctx, 0x402500, 31);
+ gr_def(ctx, 0x402530, 0x00008100);
+ if (dev_priv->chipset == 0x40)
+ cp_ctx(ctx, 0x40257c, 6);
+ cp_ctx(ctx, 0x402594, 16);
+ cp_ctx(ctx, 0x402800, 17);
+ gr_def(ctx, 0x402800, 0x00000001);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x402864, 1);
+ gr_def(ctx, 0x402864, 0x00001001);
+ cp_ctx(ctx, 0x402870, 3);
+ gr_def(ctx, 0x402878, 0x00000003);
+ if (dev_priv->chipset != 0x47) { /* belong at end!! */
+ cp_ctx(ctx, 0x402900, 1);
+ cp_ctx(ctx, 0x402940, 1);
+ cp_ctx(ctx, 0x402980, 1);
+ cp_ctx(ctx, 0x4029c0, 1);
+ cp_ctx(ctx, 0x402a00, 1);
+ cp_ctx(ctx, 0x402a40, 1);
+ cp_ctx(ctx, 0x402a80, 1);
+ cp_ctx(ctx, 0x402ac0, 1);
+ }
+ break;
+ case 0x40:
+ cp_ctx(ctx, 0x402844, 1);
+ gr_def(ctx, 0x402844, 0x00000001);
+ cp_ctx(ctx, 0x402850, 1);
+ break;
+ default:
+ cp_ctx(ctx, 0x402844, 1);
+ gr_def(ctx, 0x402844, 0x00001001);
+ cp_ctx(ctx, 0x402850, 2);
+ gr_def(ctx, 0x402854, 0x00000003);
+ break;
+ }
+
+ cp_ctx(ctx, 0x402c00, 4);
+ gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
+ 0x80800001 : 0x00888001);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x402c20, 40);
+ for (i = 0; i < 32; i++)
+ gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
+ cp_ctx(ctx, 0x4030b8, 13);
+ gr_def(ctx, 0x4030dc, 0x00000005);
+ gr_def(ctx, 0x4030e8, 0x0000ffff);
+ break;
+ default:
+ cp_ctx(ctx, 0x402c10, 4);
+ if (dev_priv->chipset == 0x40)
+ cp_ctx(ctx, 0x402c20, 36);
+ else
+ if (dev_priv->chipset <= 0x42)
+ cp_ctx(ctx, 0x402c20, 24);
+ else
+ if (dev_priv->chipset <= 0x4a)
+ cp_ctx(ctx, 0x402c20, 16);
+ else
+ cp_ctx(ctx, 0x402c20, 8);
+ cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
+ gr_def(ctx, 0x402cd4, 0x00000005);
+ if (dev_priv->chipset != 0x40)
+ gr_def(ctx, 0x402ce0, 0x0000ffff);
+ break;
+ }
+
+ cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
+ for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
+ gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
+
+ if (dev_priv->chipset != 0x40) {
+ cp_ctx(ctx, 0x403600, 1);
+ gr_def(ctx, 0x403600, 0x00000001);
+ }
+ cp_ctx(ctx, 0x403800, 1);
+
+ cp_ctx(ctx, 0x403c18, 1);
+ gr_def(ctx, 0x403c18, 0x00000001);
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x405018, 1);
+ gr_def(ctx, 0x405018, 0x08e00001);
+ cp_ctx(ctx, 0x405c24, 1);
+ gr_def(ctx, 0x405c24, 0x000e3000);
+ break;
+ }
+ if (dev_priv->chipset != 0x4e)
+ cp_ctx(ctx, 0x405800, 11);
+ cp_ctx(ctx, 0x407000, 1);
+}
+
+static void
+nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
+{
+ int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+
+ cp_out (ctx, 0x300000);
+ cp_lsr (ctx, len - 4);
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
+ cp_lsr (ctx, len);
+ cp_name(ctx, cp_swap_state3d_3_is_save);
+ cp_out (ctx, 0x800001);
+
+ ctx->ctxvals_pos += len;
+}
+
+static void
+nv40_graph_construct_shader(struct nouveau_grctx *ctx)
+{
+ struct drm_device *dev = ctx->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = ctx->data;
+ int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
+ int offset, i;
+
+ vs_nr = nv40_graph_vs_count(ctx->dev);
+ vs_nr_b0 = 363;
+ vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
+ if (dev_priv->chipset == 0x40) {
+ b0_offset = 0x2200/4; /* 33a0 */
+ b1_offset = 0x55a0/4; /* 1500 */
+ vs_len = 0x6aa0/4;
+ } else
+ if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
+ b0_offset = 0x2200/4; /* 2200 */
+ b1_offset = 0x4400/4; /* 0b00 */
+ vs_len = 0x4f00/4;
+ } else {
+ b0_offset = 0x1d40/4; /* 2200 */
+ b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
+ vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+ }
+
+ cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
+ cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+
+ offset = ctx->ctxvals_pos;
+ ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
+
+ if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ return;
+
+ offset += 0x0280/4;
+ for (i = 0; i < 16; i++, offset += 2)
+ nv_wo32(dev, obj, offset, 0x3f800000);
+
+ for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
+ for (i = 0; i < vs_nr_b0 * 6; i += 6)
+ nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
+ for (i = 0; i < vs_nr_b1 * 4; i += 4)
+ nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
+ }
+}
+
+void
+nv40_grctx_init(struct nouveau_grctx *ctx)
+{
+ /* decide whether we're loading/unloading the context */
+ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+ cp_name(ctx, cp_check_load);
+ cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+ cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+ cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+
+ /* setup for context load */
+ cp_name(ctx, cp_setup_auto_load);
+ cp_wait(ctx, STATUS, IDLE);
+ cp_out (ctx, CP_NEXT_TO_SWAP);
+ cp_name(ctx, cp_setup_load);
+ cp_wait(ctx, STATUS, IDLE);
+ cp_set (ctx, SWAP_DIRECTION, LOAD);
+ cp_out (ctx, 0x00910880); /* ?? */
+ cp_out (ctx, 0x00901ffe); /* ?? */
+ cp_out (ctx, 0x01940000); /* ?? */
+ cp_lsr (ctx, 0x20);
+ cp_out (ctx, 0x0060000b); /* ?? */
+ cp_wait(ctx, UNK57, CLEAR);
+ cp_out (ctx, 0x0060000c); /* ?? */
+ cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+ /* setup for context save */
+ cp_name(ctx, cp_setup_save);
+ cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+ /* general PGRAPH state */
+ cp_name(ctx, cp_swap_state);
+ cp_pos (ctx, 0x00020/4);
+ nv40_graph_construct_general(ctx);
+ cp_wait(ctx, STATUS, IDLE);
+
+ /* 3D state, block 1 */
+ cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
+ nv40_graph_construct_state3d(ctx);
+ cp_wait(ctx, STATUS, IDLE);
+
+ /* 3D state, block 2 */
+ nv40_graph_construct_state3d_2(ctx);
+
+ /* Some other block of "random" state */
+ nv40_graph_construct_state3d_3(ctx);
+
+ /* Per-vertex shader state */
+ cp_pos (ctx, ctx->ctxvals_pos);
+ nv40_graph_construct_shader(ctx);
+
+ /* pre-exit state updates */
+ cp_name(ctx, cp_prepare_exit);
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+ cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+ cp_name(ctx, cp_exit);
+ cp_set (ctx, USER_SAVE, NOT_PENDING);
+ cp_set (ctx, USER_LOAD, NOT_PENDING);
+ cp_out (ctx, CP_END);
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index f8e28a1..d1a651e 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -45,7 +45,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
int i;
- NV_DEBUG(crtc->dev, "\n");
+ NV_DEBUG_KMS(crtc->dev, "\n");
for (i = 0; i < 256; i++) {
writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
@@ -68,8 +68,8 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
struct nouveau_channel *evo = dev_priv->evo;
int index = nv_crtc->index, ret;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
- NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked");
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");
if (blanked) {
nv_crtc->cursor.hide(nv_crtc, false);
@@ -139,7 +139,7 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
struct nouveau_channel *evo = dev_priv->evo;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
if (ret) {
@@ -193,7 +193,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
uint32_t outX, outY, horiz, vert;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
switch (scaling_mode) {
case DRM_MODE_SCALE_NONE:
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG(dev, "\n");
+ struct drm_device *dev;
+ struct nouveau_crtc *nv_crtc;
if (!crtc)
return;
+ dev = crtc->dev;
+ nv_crtc = nouveau_crtc(crtc);
+
+ NV_DEBUG_KMS(dev, "\n");
+
drm_crtc_cleanup(&nv_crtc->base);
nv50_cursor_fini(nv_crtc);
@@ -432,16 +435,36 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
+ uint32_t dac = 0, sor = 0;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
/* Disconnect all unused encoders. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- if (drm_helper_encoder_in_use(encoder))
+ if (!drm_helper_encoder_in_use(encoder))
continue;
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
+ nv_encoder->dcb->type == OUTPUT_TV)
+ dac |= (1 << nv_encoder->or);
+ else
+ sor |= (1 << nv_encoder->or);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
+ nv_encoder->dcb->type == OUTPUT_TV) {
+ if (dac & (1 << nv_encoder->or))
+ continue;
+ } else {
+ if (sor & (1 << nv_encoder->or))
+ continue;
+ }
+
nv_encoder->disconnect(nv_encoder);
}
@@ -458,7 +481,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
@@ -497,7 +520,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
int ret, format;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
switch (drm_fb->depth) {
case 8:
@@ -612,7 +635,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
*nv_crtc->mode = *adjusted_mode;
- NV_DEBUG(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
@@ -706,7 +729,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
struct nouveau_crtc *nv_crtc = NULL;
int ret, i;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
if (!nv_crtc)
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index e2e79a8..753e723 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -41,7 +41,7 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
struct drm_device *dev = nv_crtc->base.dev;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
if (update && nv_crtc->cursor.visible)
return;
@@ -76,7 +76,7 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
struct drm_device *dev = nv_crtc->base.dev;
int ret;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
if (update && !nv_crtc->cursor.visible)
return;
@@ -116,7 +116,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
static void
nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
{
- NV_DEBUG(nv_crtc->base.dev, "\n");
+ NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
if (offset == nv_crtc->cursor.offset)
return;
@@ -143,7 +143,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
struct drm_device *dev = nv_crtc->base.dev;
int idx = nv_crtc->index;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index fb5838e..f08f042 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -44,7 +44,7 @@ nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
struct nouveau_channel *evo = dev_priv->evo;
int ret;
- NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
ret = RING_SPACE(evo, 2);
if (ret) {
@@ -81,11 +81,11 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
/* Use bios provided value if possible. */
if (dev_priv->vbios->dactestval) {
load_pattern = dev_priv->vbios->dactestval;
- NV_DEBUG(dev, "Using bios provided load_pattern of %d\n",
+ NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
load_pattern);
} else {
load_pattern = 340;
- NV_DEBUG(dev, "Using default load_pattern of %d\n",
+ NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
load_pattern);
}
@@ -103,9 +103,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
status = connector_status_connected;
if (status == connector_status_connected)
- NV_DEBUG(dev, "Load was detected on output with or %d\n", or);
+ NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
else
- NV_DEBUG(dev, "Load was not detected on output with or %d\n", or);
+ NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);
return status;
}
@@ -118,7 +118,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
uint32_t val;
int or = nv_encoder->or;
- NV_DEBUG(dev, "or %d mode %d\n", or, mode);
+ NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
/* wait for it to be done */
if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
@@ -173,7 +173,7 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_connector *connector;
- NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
connector = nouveau_encoder_connector_get(nv_encoder);
if (!connector) {
@@ -213,7 +213,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
uint32_t mode_ctl = 0, mode_ctl2 = 0;
int ret;
- NV_DEBUG(dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -264,7 +264,7 @@ nv50_dac_destroy(struct drm_encoder *encoder)
if (!encoder)
return;
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
@@ -280,7 +280,7 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
NV_INFO(dev, "Detected a DAC output\n");
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 12c5ee6..90f0bf5 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -188,7 +188,7 @@ nv50_display_init(struct drm_device *dev)
uint64_t start;
int ret, i;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
/*
@@ -232,7 +232,7 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
/* RAM is clamped to 256 MiB. */
ram_amount = nouveau_mem_fb_amount(dev);
- NV_DEBUG(dev, "ram_amount %d\n", ram_amount);
+ NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
if (ram_amount > 256*1024*1024)
ram_amount = 256*1024*1024;
nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
@@ -398,7 +398,7 @@ static int nv50_display_disable(struct drm_device *dev)
struct drm_crtc *drm_crtc;
int ret, i;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
@@ -469,7 +469,7 @@ int nv50_display_create(struct drm_device *dev)
uint32_t connector[16] = {};
int ret, i;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
/* init basic kernel modesetting */
drm_mode_config_init(dev);
@@ -573,7 +573,7 @@ int nv50_display_destroy(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
@@ -617,7 +617,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
* CRTC separately, and submission will be blocked by the GPU
* until we handle each in turn.
*/
- NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30);
+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
head = ffs((unk30 >> 9) & 3) - 1;
if (head < 0)
return -EINVAL;
@@ -661,7 +661,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
or = i;
}
- NV_DEBUG(dev, "type %d, or %d\n", type, or);
+ NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
if (type == OUTPUT_ANY) {
NV_ERROR(dev, "unknown encoder!!\n");
return -1;
@@ -690,9 +690,21 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
int pxclk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_connector *nv_connector = NULL;
+ struct drm_encoder *encoder;
struct nvbios *bios = &dev_priv->VBIOS;
uint32_t mc, script = 0, or;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb != dcbent)
+ continue;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ break;
+ }
+
or = ffs(dcbent->or) - 1;
mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
switch (dcbent->type) {
@@ -711,6 +723,11 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
} else
if (bios->fp.strapless_is_24bit & 1)
script |= 0x0200;
+
+ if (nv_connector && nv_connector->edid &&
+ (nv_connector->edid->revision >= 4) &&
+ (nv_connector->edid->input & 0x70) >= 0x20)
+ script |= 0x0200;
}
if (nouveau_uscript_lvds >= 0) {
@@ -811,7 +828,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
script = nv50_display_script_select(dev, dcbent, pclk);
- NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk);
+ NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);
if (dcbent->type != OUTPUT_DP)
nouveau_bios_run_display_table(dev, dcbent, 0, -2);
@@ -870,7 +887,7 @@ nv50_display_irq_handler_bh(struct work_struct *work)
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
- NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+ NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
nv50_display_unk10_handler(dev);
@@ -974,7 +991,7 @@ nv50_display_irq_handler(struct drm_device *dev)
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
uint32_t clock;
- NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+ NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
if (!intr0 && !(intr1 & ~delayed))
break;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6bcc6d3..0f57cdf 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
-static void
+void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
@@ -16,9 +16,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -31,7 +29,11 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
OUT_RING(chan, 1);
}
BEGIN_RING(chan, NvSub2D, 0x0588, 1);
- OUT_RING(chan, rect->color);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ else
+ OUT_RING(chan, rect->color);
BEGIN_RING(chan, NvSub2D, 0x0600, 4);
OUT_RING(chan, rect->dx);
OUT_RING(chan, rect->dy);
@@ -44,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
FIRE_RING(chan);
}
-static void
+void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
@@ -56,9 +58,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
return;
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
FIRE_RING(chan);
}
-static void
+void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
@@ -101,8 +101,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -135,9 +134,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int push = dwords > 2047 ? 2047 : dwords;
if (RING_SPACE(chan, push + 1)) {
- NV_ERROR(dev,
- "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
cfb_imageblit(info, image);
return;
}
@@ -199,7 +196,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
ret = RING_SPACE(chan, 59);
if (ret) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ nouveau_fbcon_gpu_lockup(info);
return ret;
}
@@ -265,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
dev_priv->vm_vram_base);
- info->fbops->fb_fillrect = nv50_fbcon_fillrect;
- info->fbops->fb_copyarea = nv50_fbcon_copyarea;
- info->fbops->fb_imageblit = nv50_fbcon_imageblit;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 77ae1aa..204a79f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -272,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
return ret;
ramfc = chan->ramfc->gpuobj;
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
0, &chan->cache);
if (ret)
return ret;
@@ -317,17 +317,20 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
NV_DEBUG(dev, "ch%d\n", chan->id);
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
- nouveau_gpuobj_ref_del(dev, &chan->cache);
-
+ /* This will ensure the channel is seen as disabled. */
+ chan->ramfc = NULL;
nv50_fifo_channel_disable(dev, chan->id, false);
/* Dummy channel, also used on ch 127 */
if (chan->id == 0)
nv50_fifo_channel_disable(dev, 127, false);
+
+ nouveau_gpuobj_ref_del(dev, &ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->cache);
}
int
@@ -384,8 +387,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
nv_ro32(dev, cache, (ptr * 2) + 1));
}
- nv_wr32(dev, 0x3210, cnt << 2);
- nv_wr32(dev, 0x3270, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
/* guessing that all the 0x34xx regs aren't on NV50 */
if (!IS_G80) {
@@ -398,8 +401,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.finish_access(dev);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
return 0;
}
@@ -416,7 +417,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
NV_DEBUG(dev, "\n");
chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+ if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
return 0;
chan = dev_priv->fifos[chid];
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 177d822..6d50480 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -84,7 +84,7 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
nv_wr32(dev, 0x400804, 0xc0000000);
nv_wr32(dev, 0x406800, 0xc0000000);
nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x401804, 0xc0000000);
+ nv_wr32(dev, 0x401800, 0xc0000000);
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x402000, 0xc0000000);
@@ -107,9 +107,13 @@ nv50_graph_init_regs(struct drm_device *dev)
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
NV_DEBUG(dev, "\n");
- nv40_grctx_init(dev);
+ nouveau_grctx_prog_load(dev);
+ if (!dev_priv->engine.graph.ctxprog)
+ dev_priv->engine.graph.accel_blocked = true;
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -140,7 +144,7 @@ void
nv50_graph_takedown(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
- nv40_grctx_fini(dev);
+ nouveau_grctx_fini(dev);
}
void
@@ -161,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
uint32_t inst;
int i;
+ /* Be sure we're not in the middle of a context switch or bad things
+ * will happen, such as unloading the wrong pgraph context.
+ */
+ if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "Ctxprog is still running\n");
+
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
return NULL;
@@ -207,7 +217,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.finish_access(dev);
dev_priv->engine.instmem.prepare_access(dev, true);
- nv40_grctx_vals_load(dev, ctx);
+ nouveau_grctx_vals_load(dev, ctx);
nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
if ((dev_priv->chipset & 0xf0) == 0xa0)
nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
@@ -271,19 +281,18 @@ nv50_graph_load_context(struct nouveau_channel *chan)
int
nv50_graph_unload_context(struct drm_device *dev)
{
- uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+ uint32_t inst;
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
return 0;
inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
- nv_wr32(dev, 0x400500, fifo & ~1);
+ nouveau_wait_for_idle(dev);
nv_wr32(dev, 0x400784, inst);
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
nouveau_wait_for_idle(dev);
- nv_wr32(dev, 0x400500, fifo);
nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 94400f7..f0dc4e3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
for (i = 0x1700; i <= 0x1710; i += 4)
priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
+ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+ else
+ dev_priv->vram_sys_base = 0;
+
/* Reserve the last MiB of VRAM, we should probably try to avoid
* setting up the below tables over the top of the VBIOS image at
* some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
* We map the entire fake channel into the start of the PRAMIN BAR
*/
ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
- 0, &priv->pramin_pt);
+ 0, &priv->pramin_pt);
if (ret)
return ret;
- for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
- if (v < (c_offset + c_size))
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
- else
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+ v = c_offset | 1;
+ if (dev_priv->vram_sys_base) {
+ v += dev_priv->vram_sys_base;
+ v |= 0x30;
+ }
+
+ i = 0;
+ while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ v += 0x1000;
+ i += 8;
+ }
+
+ while (i < pt_size) {
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ i += 8;
}
BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- uint32_t pte, pte_end, vram;
+ struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+ uint32_t pte, pte_end;
+ uint64_t vram;
if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
gpuobj->im_pramin->start, gpuobj->im_pramin->size);
- pte = (gpuobj->im_pramin->start >> 12) << 3;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ pte = (gpuobj->im_pramin->start >> 12) << 1;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
vram = gpuobj->im_backing_start;
NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+ vram |= 1;
+ if (dev_priv->vram_sys_base) {
+ vram += dev_priv->vram_sys_base;
+ vram |= 0x30;
+ }
+
dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
- pte += 8;
+ nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+ nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
vram += NV50_INSTMEM_PAGE_SIZE;
}
dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (gpuobj->im_bound == 0)
return -EINVAL;
- pte = (gpuobj->im_pramin->start >> 12) << 3;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ pte = (gpuobj->im_pramin->start >> 12) << 1;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
- pte += 8;
+ nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+ nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
}
dev_priv->engine.instmem.finish_access(dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 8c28046..c2fff54 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -44,7 +44,7 @@ nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
struct nouveau_channel *evo = dev_priv->evo;
int ret;
- NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
ret = RING_SPACE(evo, 2);
if (ret) {
@@ -70,7 +70,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder)
}
if (dpe->script0) {
- NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
nv_encoder->dcb);
}
@@ -79,7 +79,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder)
NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
if (dpe->script1) {
- NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
nv_encoder->dcb);
}
@@ -90,10 +90,24 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_encoder *enc;
uint32_t val;
int or = nv_encoder->or;
- NV_DEBUG(dev, "or %d mode %d\n", or, mode);
+ NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
+
+ nv_encoder->last_dpms = mode;
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nvenc = nouveau_encoder(enc);
+
+ if (nvenc == nv_encoder ||
+ nvenc->disconnect != nv50_sor_disconnect ||
+ nvenc->dcb->or != nv_encoder->dcb->or)
+ continue;
+
+ if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
+ return;
+ }
/* wait for it to be done */
if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
@@ -142,7 +156,7 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_connector *connector;
- NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
connector = nouveau_encoder_connector_get(nv_encoder);
if (!connector) {
@@ -182,7 +196,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
uint32_t mode_ctl = 0;
int ret;
- NV_DEBUG(dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -246,7 +260,7 @@ nv50_sor_destroy(struct drm_encoder *encoder)
if (!encoder)
return;
- NV_DEBUG(encoder->dev, "\n");
+ NV_DEBUG_KMS(encoder->dev, "\n");
drm_encoder_cleanup(encoder);
@@ -265,7 +279,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
bool dum;
int type;
- NV_DEBUG(dev, "\n");
+ NV_DEBUG_KMS(dev, "\n");
switch (entry->type) {
case OUTPUT_TMDS:
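
The nv50_sor_dpms() hunk above records each encoder's last requested power state and bails out while any other encoder wired to the same output resource (OR) is still on, so a shared OR is only reprogrammed once its last user goes away. A simplified standalone sketch of that guard; the encoder struct and DPMS constants here are stand-ins, not the DRM types:

#include <stdbool.h>
#include <stdio.h>

enum { DPMS_ON = 0, DPMS_OFF = 3 };

struct encoder {
	int or;		/* output resource this encoder drives */
	int last_dpms;	/* last requested power state */
};

/* Returns true when it is safe to reprogram the OR for this request. */
static bool or_dpms_allowed(struct encoder *target, int mode,
			    struct encoder *all, int count)
{
	int i;

	target->last_dpms = mode;
	for (i = 0; i < count; i++) {
		struct encoder *other = &all[i];

		if (other == target || other->or != target->or)
			continue;
		if (other->last_dpms == DPMS_ON)
			return false;	/* another user still needs the OR */
	}
	return true;
}

int main(void)
{
	struct encoder encs[] = { { 1, DPMS_ON }, { 1, DPMS_ON } };

	printf("%d\n", or_dpms_allowed(&encs[0], DPMS_OFF, encs, 2)); /* 0 */
	encs[1].last_dpms = DPMS_OFF;
	printf("%d\n", or_dpms_allowed(&encs[0], DPMS_OFF, encs, 2)); /* 1 */
	return 0;
}
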
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 601f4c0..b806fdc 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -64,7 +64,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index d3cb676..51c99fc 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -95,8 +95,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
&init->agp_textures_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_INIT, (unsigned long)init);
+ return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
}
typedef struct drm_r128_depth32 {
@@ -129,8 +128,7 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
&depth->mask))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+ return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
}
@@ -153,8 +151,7 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
&stipple->mask))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+ return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
}
typedef struct drm_r128_getparam32 {
@@ -178,8 +175,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
&getparam->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
}
drm_ioctl_compat_t *r128_compat_ioctls[] = {
@@ -210,12 +206,10 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321..1c02d23 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
config DRM_RADEON_KMS
- bool "Enable modesetting on radeon by default"
+ bool "Enable modesetting on radeon by default - NEW DRIVER"
depends on DRM_RADEON
help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
- userspaces with this enabled will cause pain.
+ Choose this option if you want kernel modesetting enabled by default.
+
+ This is a completely new driver. It's only part of the existing drm
+ for compatibility reasons. It requires an entirely different graphics
+ stack above it and works very differently from the old drm stack.
+	  i.e. don't enable this unless you know what you are doing; it may
+	  cause issues or bugs compared to the previous userspace driver stack.
When kernel modesetting is enabled the IOCTL of radeon/drm
driver are considered as invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index feb52ee..1cc7b93 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -24,6 +24,9 @@ $(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(call if_changed,mkregtable)
@@ -35,6 +38,8 @@ $(obj)/rv515.o: $(obj)/rv515_reg_safe.h
$(obj)/r300.o: $(obj)/r300_reg_safe.h
+$(obj)/r420.o: $(obj)/r420_reg_safe.h
+
$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
@@ -49,7 +54,7 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index 6d0183c..c714179 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -1,5 +1,5 @@
/*
-* Copyright 2006-2007 Advanced Micro Devices, Inc.
+* Copyright 2006-2007 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -41,14 +41,14 @@
/****************************************************/
/* Encoder Object ID Definition */
/****************************************************/
-#define ENCODER_OBJECT_ID_NONE 0x00
+#define ENCODER_OBJECT_ID_NONE 0x00
/* Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01
#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02
#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03
#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04
-#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06
#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07
@@ -56,11 +56,11 @@
#define ENCODER_OBJECT_ID_SI170B 0x08
#define ENCODER_OBJECT_ID_CH7303 0x09
#define ENCODER_OBJECT_ID_CH7301 0x0A
-#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D
#define ENCODER_OBJECT_ID_TITFP513 0x0E
-#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
#define ENCODER_OBJECT_ID_VT1623 0x10
#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
@@ -68,9 +68,9 @@
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15
-#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
-#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
-#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19
#define ENCODER_OBJECT_ID_VT1625 0x1A
#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B
@@ -86,7 +86,7 @@
/****************************************************/
/* Connector Object ID Definition */
/****************************************************/
-#define CONNECTOR_OBJECT_ID_NONE 0x00
+#define CONNECTOR_OBJECT_ID_NONE 0x00
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01
#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03
@@ -96,7 +96,7 @@
#define CONNECTOR_OBJECT_ID_SVIDEO 0x07
#define CONNECTOR_OBJECT_ID_YPbPr 0x08
#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09
-#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
#define CONNECTOR_OBJECT_ID_SCART 0x0B
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D
@@ -106,6 +106,8 @@
#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
+#define CONNECTOR_OBJECT_ID_eDP 0x14
+#define CONNECTOR_OBJECT_ID_MXM 0x15
/* deleted */
@@ -116,6 +118,14 @@
#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
/****************************************************/
+/* Generic Object ID Definition */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE 0x00
+#define GENERIC_OBJECT_ID_GLSYNC 0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
+#define GENERIC_OBJECT_ID_MXM_OPM 0x03
+
+/****************************************************/
/* Graphics Object ENUM ID Definition */
/****************************************************/
#define GRAPH_OBJECT_ENUM_ID1 0x01
@@ -124,6 +134,7 @@
#define GRAPH_OBJECT_ENUM_ID4 0x04
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
+#define GRAPH_OBJECT_ENUM_ID7 0x07
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -133,35 +144,35 @@
#define RESERVED1_ID_MASK 0x0800
#define OBJECT_TYPE_MASK 0x7000
#define RESERVED2_ID_MASK 0x8000
-
+
#define OBJECT_ID_SHIFT 0x00
#define ENUM_ID_SHIFT 0x08
#define OBJECT_TYPE_SHIFT 0x0C
+
/****************************************************/
/* Graphics Object family definition */
/****************************************************/
-#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
- (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
- GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+ GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
/****************************************************/
/* GPU Object ID definition - Shared with BIOS */
/****************************************************/
-#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+#define GPU_ENUM_ID1 ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
/****************************************************/
/* Encoder Object ID definition - Shared with BIOS */
/****************************************************/
/*
-#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102
#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103
#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104
#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105
#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106
#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107
-#define ENCODER_SIL170B_ENUM_ID1 0x2108
+#define ENCODER_SIL170B_ENUM_ID1 0x2108
#define ENCODER_CH7303_ENUM_ID1 0x2109
#define ENCODER_CH7301_ENUM_ID1 0x210A
#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B
@@ -175,8 +186,8 @@
#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113
#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114
#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115
-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
-#define ENCODER_SI178_ENUM_ID1 0x2117
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
+#define ENCODER_SI178_ENUM_ID1 0x2117
#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118
#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119
#define ENCODER_VT1625_ENUM_ID1 0x211A
@@ -185,205 +196,169 @@
#define ENCODER_DP_DP501_ENUM_ID1 0x211D
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
*/
-#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
-
-#define ENCODER_SIL170B_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
-
-#define ENCODER_CH7303_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
-
-#define ENCODER_CH7301_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
-
-#define ENCODER_TITFP513_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_VT1623_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_SI1930_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
-
-#define ENCODER_SI178_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
-
-#define ENCODER_MVPU_FPGA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
-
-#define ENCODER_VT1625_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_SI1932_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
-
-#define ENCODER_DP_DP501_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
-
-#define ENCODER_DP_AN9801_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT
+
+#define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
@@ -406,167 +381,253 @@
#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
*/
-#define CONNECTOR_LVDS_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_VGA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_VGA_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_COMPOSITE_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SVIDEO_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_YPbPr_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SCART_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+#define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_MXM_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_A
+
+#define CONNECTOR_MXM_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_B
+
+#define CONNECTOR_MXM_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_C
+
+#define CONNECTOR_MXM_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_D
+
+#define CONNECTOR_MXM_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_TXxx
+
+#define CONNECTOR_MXM_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_UXxx
+
+#define CONNECTOR_MXM_ENUM_ID7 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
/****************************************************/
/* Router Object ID definition - Shared with BIOS */
/****************************************************/
-#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
/* deleted */
/****************************************************/
+/* Generic Object ID definition - Shared with BIOS */
+/****************************************************/
+#define GENERICOBJECT_GLSYNC_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_MXM_OPM_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+
+/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
+
#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
@@ -575,4 +636,8 @@
#pragma pack()
#endif
-#endif /*GRAPHICTYPE */
+#endif /*GRAPHICTYPE */
+
+
+
+
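
The reflowed ENUM_ID macros above all compose the same 16-bit identifier: object type in bits 12-14, enum id in bits 8-11, object id in bits 0-7. A small worked example of that composition; the encoder type value 0x02 is inferred from the commented-out legacy table (0x2101 for the internal LVDS encoder), not quoted from the header:

#include <stdint.h>
#include <stdio.h>

#define OBJECT_ID_SHIFT			0x00
#define ENUM_ID_SHIFT			0x08
#define OBJECT_TYPE_SHIFT		0x0C

/* Assumed values: type 2 = encoder, enum 1, LVDS object id 0x01. */
#define GRAPH_OBJECT_TYPE_ENCODER	0x02
#define GRAPH_OBJECT_ENUM_ID1		0x01
#define ENCODER_OBJECT_ID_INTERNAL_LVDS	0x01

int main(void)
{
	uint16_t id = GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |
		      GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |
		      ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT;

	printf("ENCODER_INTERNAL_LVDS_ENUM_ID1 = 0x%04X\n", id); /* 0x2101 */
	return 0;
}
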
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 6578d19..7f152f6 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/sched.h>
+#include <asm/unaligned.h>
#define ATOM_DEBUG
@@ -58,6 +59,7 @@ typedef struct {
} atom_exec_context;
int atom_debug = 0;
+static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static uint32_t atom_arg_mask[8] =
@@ -211,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_PS:
idx = U8(*ptr);
(*ptr)++;
- val = le32_to_cpu(ctx->ps[idx]);
+ /* get_unaligned_le32 avoids unaligned accesses from atombios
+ * tables, noticed on a DEC Alpha. */
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
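
The comment in the hunk above explains the switch to get_unaligned_le32(): AtomBIOS parameter blocks carry no alignment guarantee, and a direct 32-bit load through a misaligned pointer traps on strict-alignment hardware such as the Alpha it mentions. A portable userspace sketch of an equivalent accessor:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read_unaligned_le32(const void *p)
{
	uint8_t b[4];

	memcpy(b, p, sizeof(b));	/* never dereferences p as a u32 */
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	uint8_t table[] = { 0xAA, 0x78, 0x56, 0x34, 0x12, 0xBB };

	/* table + 1 is deliberately misaligned */
	printf("0x%08X\n", read_unaligned_le32(table + 1)); /* 0x12345678 */
	return 0;
}
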
@@ -245,6 +249,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_WS_ATTRIBUTES:
val = gctx->io_attr;
break;
+ case ATOM_WS_REGPTR:
+ val = gctx->reg_block;
+ break;
default:
val = ctx->ws[idx];
}
@@ -384,6 +391,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}
+static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
+{
+ uint32_t val = 0xCDCDCDCD;
+
+ switch (align) {
+ case ATOM_SRC_DWORD:
+ val = U32(*ptr);
+ (*ptr) += 4;
+ break;
+ case ATOM_SRC_WORD0:
+ case ATOM_SRC_WORD8:
+ case ATOM_SRC_WORD16:
+ val = U16(*ptr);
+ (*ptr) += 2;
+ break;
+ case ATOM_SRC_BYTE0:
+ case ATOM_SRC_BYTE8:
+ case ATOM_SRC_BYTE16:
+ case ATOM_SRC_BYTE24:
+ val = U8(*ptr);
+ (*ptr)++;
+ break;
+ }
+ return val;
+}
+
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
int *ptr, uint32_t *saved, int print)
{
@@ -481,6 +514,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
case ATOM_WS_ATTRIBUTES:
gctx->io_attr = val;
break;
+ case ATOM_WS_REGPTR:
+ gctx->reg_block = val;
+ break;
default:
ctx->ws[idx] = val;
}
@@ -573,7 +609,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
}
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -607,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
uint8_t count = U8((*ptr)++);
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
- schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+ udelay(count);
else
schedule_timeout_uninterruptible(msecs_to_jiffies(count));
}
@@ -676,7 +712,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
SDEBUG(" src1: ");
- src1 = atom_get_src(ctx, attr, ptr);
+ src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
SDEBUG(" src2: ");
src2 = atom_get_src(ctx, attr, ptr);
dst &= src1;
@@ -808,6 +844,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}
+static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst <<= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
+{
+ uint8_t attr = U8((*ptr)++), shift;
+ uint32_t saved, dst;
+ int dptr = *ptr;
+ attr &= 0x38;
+ attr |= atom_def_dst[attr >> 3] << 6;
+ SDEBUG(" dst: ");
+ dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+ SDEBUG(" shift: %d\n", shift);
+ dst >>= shift;
+ SDEBUG(" dst: ");
+ atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++), shift;
@@ -817,7 +885,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
- shift = U8((*ptr)++);
+ shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst <<= shift;
SDEBUG(" dst: ");
@@ -833,7 +901,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
- shift = U8((*ptr)++);
+ shift = atom_get_src(ctx, attr, ptr);
SDEBUG(" shift: %d\n", shift);
dst >>= shift;
SDEBUG(" dst: ");
@@ -936,18 +1004,18 @@ static struct {
atom_op_or, ATOM_ARG_FB}, {
atom_op_or, ATOM_ARG_PLL}, {
atom_op_or, ATOM_ARG_MC}, {
- atom_op_shl, ATOM_ARG_REG}, {
- atom_op_shl, ATOM_ARG_PS}, {
- atom_op_shl, ATOM_ARG_WS}, {
- atom_op_shl, ATOM_ARG_FB}, {
- atom_op_shl, ATOM_ARG_PLL}, {
- atom_op_shl, ATOM_ARG_MC}, {
- atom_op_shr, ATOM_ARG_REG}, {
- atom_op_shr, ATOM_ARG_PS}, {
- atom_op_shr, ATOM_ARG_WS}, {
- atom_op_shr, ATOM_ARG_FB}, {
- atom_op_shr, ATOM_ARG_PLL}, {
- atom_op_shr, ATOM_ARG_MC}, {
+ atom_op_shift_left, ATOM_ARG_REG}, {
+ atom_op_shift_left, ATOM_ARG_PS}, {
+ atom_op_shift_left, ATOM_ARG_WS}, {
+ atom_op_shift_left, ATOM_ARG_FB}, {
+ atom_op_shift_left, ATOM_ARG_PLL}, {
+ atom_op_shift_left, ATOM_ARG_MC}, {
+ atom_op_shift_right, ATOM_ARG_REG}, {
+ atom_op_shift_right, ATOM_ARG_PS}, {
+ atom_op_shift_right, ATOM_ARG_WS}, {
+ atom_op_shift_right, ATOM_ARG_FB}, {
+ atom_op_shift_right, ATOM_ARG_PLL}, {
+ atom_op_shift_right, ATOM_ARG_MC}, {
atom_op_mul, ATOM_ARG_REG}, {
atom_op_mul, ATOM_ARG_PS}, {
atom_op_mul, ATOM_ARG_WS}, {
@@ -1040,7 +1108,7 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1057,8 +1125,6 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
- /* reset reg block */
- ctx->reg_block = 0;
ectx.ctx = ctx;
ectx.ps_shift = ps / 4;
ectx.start = base;
@@ -1092,6 +1158,19 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
kfree(ectx.ws);
}
+void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+{
+ mutex_lock(&ctx->mutex);
+ /* reset reg block */
+ ctx->reg_block = 0;
+ /* reset fb window */
+ ctx->fb_base = 0;
+ /* reset io mode */
+ ctx->io_mode = ATOM_IO_MM;
+ atom_execute_table_locked(ctx, index, params);
+ mutex_unlock(&ctx->mutex);
+}
+
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
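
The last two atom.c hunks split table execution into a _locked worker, reused by nested CALL_TABLE operations, and a public atom_execute_table() that takes the context mutex and resets the per-run register block, framebuffer window and I/O mode before dispatching. A minimal sketch of that wrapper/worker locking pattern, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t mutex;
	int reg_block;		/* per-run state, must start from 0 */
};

static void execute_table_locked(struct ctx *c, int index)
{
	printf("running table %d (reg_block=%d)\n", index, c->reg_block);
	if (index > 0)
		execute_table_locked(c, index - 1);	/* nested call, lock held */
}

static void execute_table(struct ctx *c, int index)
{
	pthread_mutex_lock(&c->mutex);
	c->reg_block = 0;			/* reset state for this run */
	execute_table_locked(c, index);
	pthread_mutex_unlock(&c->mutex);
}

int main(void)
{
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER, 0 };

	execute_table(&c, 2);
	return 0;
}
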
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 6671848..bc73781 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -91,6 +91,7 @@
#define ATOM_WS_AND_MASK 0x45
#define ATOM_WS_FB_WINDOW 0x46
#define ATOM_WS_ATTRIBUTES 0x47
+#define ATOM_WS_REGPTR 0x48
#define ATOM_IIO_NOP 0
#define ATOM_IIO_START 1
@@ -120,6 +121,7 @@ struct card_info {
struct atom_context {
struct card_info *card;
+ struct mutex mutex;
void *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 5f48515..91ad0d1 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4690,6 +4690,205 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 {
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
} ATOM_POWERPLAY_INFO_V3;
+/* New PPlib */
+/**************************************************************************/
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+
+{
+ UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
+ UCHAR ucI2cLine; // as interpreted by DAL I2C
+ UCHAR ucI2cAddress;
+ UCHAR ucFanParameters; // Fan Control Parameters.
+ UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
+ UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
+ UCHAR ucReserved; // ----
+ UCHAR ucFlags; // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE 0
+#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64 5
+#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
+#define ATOM_PP_THERMALCONTROLLER_RV770 8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+
+typedef struct _ATOM_PPLIB_STATE
+{
+ UCHAR ucNonClockStateIndex;
+ UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ UCHAR ucDataRevision;
+
+ UCHAR ucNumStates;
+ UCHAR ucStateEntrySize;
+ UCHAR ucClockInfoSize;
+ UCHAR ucNonClockSize;
+
+ // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+ USHORT usStateArrayOffset;
+
+ // offset from start of this table to array of ASIC-specific structures,
+ // currently ATOM_PPLIB_CLOCK_INFO.
+ USHORT usClockInfoArrayOffset;
+
+ // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+ USHORT usNonClockInfoArrayOffset;
+
+ USHORT usBackbiasTime; // in microseconds
+ USHORT usVoltageTime; // in microseconds
+ USHORT usTableSize; //the size of this structure, or the extended structure
+
+ ULONG ulPlatformCaps; // See ATOM_PP_PLATFORM_CAP_*
+
+ ATOM_PPLIB_THERMALCONTROLLER sThermalController;
+
+ USHORT usBootClockInfoOffset;
+ USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
+// remaining 3 bits are reserved
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
+
+// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucNonClockStateIndex
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+ USHORT usClassification;
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ ULONG ulCapsAndSettings;
+ UCHAR ucRequiredPower;
+ UCHAR ucUnused1[3];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usUnused1;
+ USHORT usUnused2;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+
+{
+ USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
+ UCHAR ucLowEngineClockHigh;
+ USHORT usHighEngineClockLow; // High Engine clock in MHz.
+ UCHAR ucHighEngineClockHigh;
+ USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+ UCHAR ucMemoryClockHigh; // Currently unused.
+ UCHAR ucPadding; // For proper alignment and size.
+ USHORT usVDDC; // For the 780, use: None, Low, High, Variable
+ UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could be bigger depending on display BW requirement.
+ USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+ ULONG ulFlags;
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+
/**************************************************************************/
/* Following definitions are for compatibility issues in different SW components. */
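Editor's note: the new PPLib tables above are addressed by byte offsets from the start of ATOM_PPLIB_POWERPLAYTABLE, and both the state entries and the non-clock entries are variable sized (ucStateEntrySize / ucNonClockSize), so they cannot be stepped through with sizeof(). A minimal, hypothetical C sketch of walking the state array (the function name and the lack of bounds checking are assumptions, not part of this patch):

static void pplib_dump_states(const ATOM_PPLIB_POWERPLAYTABLE *table)
{
	const UCHAR *base = (const UCHAR *)table;
	int i;

	for (i = 0; i < table->ucNumStates; i++) {
		/* entries are ucStateEntrySize / ucNonClockSize bytes, not sizeof() */
		const ATOM_PPLIB_STATE *state = (const ATOM_PPLIB_STATE *)
			(base + le16_to_cpu(table->usStateArrayOffset) +
			 i * table->ucStateEntrySize);
		const ATOM_PPLIB_NONCLOCK_INFO *info = (const ATOM_PPLIB_NONCLOCK_INFO *)
			(base + le16_to_cpu(table->usNonClockInfoArrayOffset) +
			 state->ucNonClockStateIndex * table->ucNonClockSize);

		printk(KERN_INFO "state %d: classification 0x%04x\n",
		       i, le16_to_cpu(info->usClassification));
	}
}

Similarly, the PCIE link width in ulCapsAndSettings is stored as lanes - 1, so ((caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1 recovers the lane count.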
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 260fcf5..af464e35 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -307,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- printk("executing set crtc dtd timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -347,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
- printk("executing set crtc timing\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -409,59 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
}
}
-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+union adjust_pixel_clock {
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+};
+
+static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct radeon_pll *pll)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
- uint8_t frev, crev;
- int index;
- SET_PIXEL_CLOCK_PS_ALLOCATION args;
- PIXEL_CLOCK_PARAMETERS *spc1_ptr;
- PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
- PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
- uint32_t pll_clock = mode->clock;
- uint32_t adjusted_clock;
- uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
- struct radeon_pll *pll;
- int pll_flags = 0;
+ u32 adjusted_clock = mode->clock;
- memset(&args, 0, sizeof(args));
+ /* reset the pll flags */
+ pll->flags = 0;
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
- pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
- RADEON_PLL_PREFER_CLOSEST_LOWER);
+ pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
+ RADEON_PLL_PREFER_CLOSEST_LOWER);
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
- pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
} else {
- pll_flags |= RADEON_PLL_LEGACY;
+ pll->flags |= RADEON_PLL_LEGACY;
if (mode->clock > 200000) /* range limits??? */
- pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
- if (!ASIC_IS_AVIVO(rdev)) {
- if (encoder->encoder_type !=
- DRM_MODE_ENCODER_DAC)
- pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
- if (encoder->encoder_type ==
- DRM_MODE_ENCODER_LVDS)
- pll_flags |= RADEON_PLL_USE_REF_DIV;
- }
radeon_encoder = to_radeon_encoder(encoder);
+ if (ASIC_IS_AVIVO(rdev)) {
+ /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+ adjusted_clock = mode->clock * 2;
+ } else {
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+ pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ }
break;
}
}
@@ -471,46 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
* special hw requirements.
*/
if (ASIC_IS_DCE3(rdev)) {
- ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
+ union adjust_pixel_clock args;
+ struct radeon_encoder_atom_dig *dig;
+ u8 frev, crev;
+ int index;
- if (!encoder)
- return;
-
- memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
- adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
- adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
- adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ if (!radeon_encoder->enc_priv)
+ return adjusted_clock;
+ dig = radeon_encoder->enc_priv;
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
- atom_execute_table(rdev->mode_info.atom_context,
- index, (uint32_t *)&adjust_pll_args);
- adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
- } else {
- /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
- if (ASIC_IS_AVIVO(rdev) &&
- (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
- adjusted_clock = mode->clock * 2;
- else
- adjusted_clock = mode->clock;
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev);
+
+ memset(&args, 0, sizeof(args));
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 1:
+ case 2:
+ args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v1.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
+
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&args);
+ adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return adjusted_clock;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return adjusted_clock;
+ }
}
+ return adjusted_clock;
+}
+
+union set_pixel_clock {
+ SET_PIXEL_CLOCK_PS_ALLOCATION base;
+ PIXEL_CLOCK_PARAMETERS v1;
+ PIXEL_CLOCK_PARAMETERS_V2 v2;
+ PIXEL_CLOCK_PARAMETERS_V3 v3;
+};
+
+void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ struct radeon_encoder *radeon_encoder = NULL;
+ u8 frev, crev;
+ int index;
+ union set_pixel_clock args;
+ u32 pll_clock = mode->clock;
+ u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+ struct radeon_pll *pll;
+ u32 adjusted_clock;
+
+ memset(&args, 0, sizeof(args));
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ break;
+ }
+ }
+
+ if (!radeon_encoder)
+ return;
if (radeon_crtc->crtc_id == 0)
pll = &rdev->clock.p1pll;
else
pll = &rdev->clock.p2pll;
+ /* adjust pixel clock as needed */
+ adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
+
if (ASIC_IS_AVIVO(rdev)) {
if (radeon_new_pll)
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ &ref_div, &post_div);
else
radeon_compute_pll(pll, adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ &ref_div, &post_div);
} else
radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ &ref_div, &post_div);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -520,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
case 1:
switch (crev) {
case 1:
- spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
- spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
- spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
- spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
- spc1_ptr->ucFracFbDiv = frac_fb_div;
- spc1_ptr->ucPostDiv = post_div;
- spc1_ptr->ucPpll =
+ args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v1.usRefDiv = cpu_to_le16(ref_div);
+ args.v1.usFbDiv = cpu_to_le16(fb_div);
+ args.v1.ucFracFbDiv = frac_fb_div;
+ args.v1.ucPostDiv = post_div;
+ args.v1.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
- spc1_ptr->ucRefDivSrc = 1;
+ args.v1.ucCRTC = radeon_crtc->crtc_id;
+ args.v1.ucRefDivSrc = 1;
break;
case 2:
- spc2_ptr =
- (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
- spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
- spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
- spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
- spc2_ptr->ucFracFbDiv = frac_fb_div;
- spc2_ptr->ucPostDiv = post_div;
- spc2_ptr->ucPpll =
+ args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v2.usRefDiv = cpu_to_le16(ref_div);
+ args.v2.usFbDiv = cpu_to_le16(fb_div);
+ args.v2.ucFracFbDiv = frac_fb_div;
+ args.v2.ucPostDiv = post_div;
+ args.v2.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
- spc2_ptr->ucRefDivSrc = 1;
+ args.v2.ucCRTC = radeon_crtc->crtc_id;
+ args.v2.ucRefDivSrc = 1;
break;
case 3:
- if (!encoder)
- return;
- spc3_ptr =
- (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
- spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
- spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
- spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
- spc3_ptr->ucFracFbDiv = frac_fb_div;
- spc3_ptr->ucPostDiv = post_div;
- spc3_ptr->ucPpll =
+ args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.usRefDiv = cpu_to_le16(ref_div);
+ args.v3.usFbDiv = cpu_to_le16(fb_div);
+ args.v3.ucFracFbDiv = frac_fb_div;
+ args.v3.ucPostDiv = post_div;
+ args.v3.ucPpll =
radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
- spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
- spc3_ptr->ucEncoderMode =
+ args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
+ args.v3.ucTransmitterId = radeon_encoder->encoder_id;
+ args.v3.ucEncoderMode =
atombios_get_encoder_mode(encoder);
break;
default:
@@ -571,12 +615,11 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
return;
}
- printk("executing set pll\n");
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -706,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
+int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_AVIVO(rdev))
+ return avivo_crtc_set_base(crtc, x, y, old_fb);
+ else
+ return radeon_crtc_set_base(crtc, x, y, old_fb);
+}
+
+/* properly set additional regs when using atombios */
+static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ u32 disp_merge_cntl;
+
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+ break;
+ case 1:
+ disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+ disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+ WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+ WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+ WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+ break;
+ }
+}
+
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -727,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
else {
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- radeon_crtc_set_base(crtc, x, y, old_fb);
- radeon_legacy_atom_set_surface(crtc);
+ atombios_crtc_set_base(crtc, x, y, old_fb);
+ radeon_legacy_atom_fixup(crtc);
}
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
@@ -746,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
- atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
atombios_lock_crtc(crtc, 1);
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 0d63c44..99915a6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
+ int retry_count = 0;
memset(&args, 0, sizeof(args));
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+retry:
memcpy(base, req_bytes, num_bytes);
args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (args.ucReplyStatus) {
- DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+ if (args.ucReplyStatus && !args.ucDataOutLen) {
+ if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+ goto retry;
+ DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
- chan->rec.i2c_id, args.ucReplyStatus);
+ chan->rec.i2c_id, args.ucReplyStatus, retry_count);
return false;
}
@@ -468,7 +472,8 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
radeon_connector = to_radeon_connector(connector);
@@ -582,7 +587,8 @@ void dp_link_train(struct drm_encoder *encoder,
u8 train_set[4];
int i;
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
if (!radeon_encoder->enc_priv)
@@ -594,21 +600,14 @@ void dp_link_train(struct drm_encoder *encoder,
return;
dig_connector = radeon_connector->con_priv;
- if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_block)
- enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
- else
- enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
- if (dig_connector->linkb)
- enc_id |= ATOM_DP_CONFIG_LINK_B;
- else
- enc_id |= ATOM_DP_CONFIG_LINK_A;
- } else {
- if (dig_connector->linkb)
- enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
- else
- enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
- }
+ if (dig->dig_encoder)
+ enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+ else
+ enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+ if (dig_connector->linkb)
+ enc_id |= ATOM_DP_CONFIG_LINK_B;
+ else
+ enc_id |= ATOM_DP_CONFIG_LINK_A;
memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
if (dig_connector->dp_clock == 270000)
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 0d79577..607241c 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -661,8 +661,10 @@ static int parser_auth(struct table *t, const char *filename)
fseek(file, 0, SEEK_SET);
/* get header */
- if (fgets(buf, 1024, file) == NULL)
+ if (fgets(buf, 1024, file) == NULL) {
+ fclose(file);
return -1;
+ }
/* first line will contain the last register
* and gpu name */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 824cc64..c0d4650 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -131,7 +131,8 @@ void r100_hpd_init(struct radeon_device *rdev)
break;
}
}
- r100_irq_set(rdev);
+ if (rdev->irq.installed)
+ r100_irq_set(rdev);
}
void r100_hpd_fini(struct radeon_device *rdev)
@@ -243,6 +244,11 @@ int r100_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ return -EINVAL;
+ }
if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
@@ -348,14 +354,25 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
+/* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- /* Who ever call radeon_fence_emit should call ring_lock and ask
- * for enough space (today caller are ib schedule and buffer move) */
+ /* We have to make sure that caches are flushed before
+ * the CPU might read something from VRAM. */
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
@@ -1374,7 +1391,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_TXFORMAT_ARGB4444:
case RADEON_TXFORMAT_VYUY422:
case RADEON_TXFORMAT_YVYU422:
- case RADEON_TXFORMAT_DXT1:
case RADEON_TXFORMAT_SHADOW16:
case RADEON_TXFORMAT_LDUDV655:
case RADEON_TXFORMAT_DUDV88:
@@ -1382,12 +1398,19 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
break;
case RADEON_TXFORMAT_ARGB8888:
case RADEON_TXFORMAT_RGBA8888:
- case RADEON_TXFORMAT_DXT23:
- case RADEON_TXFORMAT_DXT45:
case RADEON_TXFORMAT_SHADOW32:
case RADEON_TXFORMAT_LDUDUV8888:
track->textures[i].cpp = 4;
break;
+ case RADEON_TXFORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ case RADEON_TXFORMAT_DXT23:
+ case RADEON_TXFORMAT_DXT45:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+ break;
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
@@ -1487,6 +1510,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
+ track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track);
@@ -1707,14 +1731,6 @@ void r100_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev);
}
-void r100_hdp_flush(struct radeon_device *rdev)
-{
- u32 tmp;
- tmp = RREG32(RADEON_HOST_PATH_CNTL);
- tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
-}
-
void r100_hdp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -2731,6 +2747,7 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+ DRM_ERROR("compress format %d\n", t->compress_format);
}
static int r100_cs_track_cube(struct radeon_device *rdev,
@@ -2760,6 +2777,36 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
return 0;
}
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+ int block_width, block_height, block_bytes;
+ int wblocks, hblocks;
+ int min_wblocks;
+ int sz;
+
+ block_width = 4;
+ block_height = 4;
+
+ switch (compress_format) {
+ case R100_TRACK_COMP_DXT1:
+ block_bytes = 8;
+ min_wblocks = 4;
+ break;
+ default:
+ case R100_TRACK_COMP_DXT35:
+ block_bytes = 16;
+ min_wblocks = 2;
+ break;
+ }
+
+ hblocks = (h + block_height - 1) / block_height;
+ wblocks = (w + block_width - 1) / block_width;
+ if (wblocks < min_wblocks)
+ wblocks = min_wblocks;
+ sz = wblocks * hblocks * block_bytes;
+ return sz;
+}
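Editor's note — a quick worked example of r100_track_compress_size(): a 64x64 DXT1 texture gives wblocks = hblocks = 16, so the size is 16 * 16 * 8 = 2048 bytes, while the same texture in DXT3/5 uses 16-byte blocks and comes to 4096 bytes. A 4x4 DXT1 mip level still pads to min_wblocks = 4, i.e. 4 * 1 * 8 = 32 bytes, which is why small mip levels can be larger than w * h * cpp.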
+
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
@@ -2797,9 +2844,15 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
- size += w * h;
+ if (track->textures[u].compress_format) {
+
+ size += r100_track_compress_size(track->textures[u].compress_format, w, h);
+ /* compressed textures are block based */
+ } else
+ size += w * h;
}
size *= track->textures[u].cpp;
+
switch (track->textures[u].tex_coord_type) {
case 0:
break;
@@ -2838,6 +2891,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_cb; i++) {
if (track->cb[i].robj == NULL) {
+ if (!(track->fastfill || track->color_channel_mask ||
+ track->blend_read_enable)) {
+ continue;
+ }
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
@@ -2967,6 +3024,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->arrays[i].esize = 0x7F;
}
for (i = 0; i < track->num_texture; i++) {
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
track->textures[i].pitch = 16536;
track->textures[i].width = 16536;
track->textures[i].height = 16536;
@@ -3265,6 +3323,7 @@ static int r100_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -3316,13 +3375,13 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
- r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
@@ -3346,9 +3405,7 @@ int r100_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
- printk(KERN_WARNING "[drm] Disabling AGP\n");
- rdev->flags &= ~RADEON_IS_AGP;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ radeon_agp_disable(rdev);
} else {
rdev->mc.gtt_location = rdev->mc.agp_base;
}
@@ -3399,6 +3456,8 @@ int r100_init(struct radeon_device *rdev)
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram information */
r100_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -3427,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index ca50903..b27a699 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -28,6 +28,10 @@ struct r100_cs_cube_info {
unsigned height;
};
+#define R100_TRACK_COMP_NONE 0
+#define R100_TRACK_COMP_DXT1 1
+#define R100_TRACK_COMP_DXT35 2
+
struct r100_cs_track_texture {
struct radeon_bo *robj;
struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
@@ -44,6 +48,7 @@ struct r100_cs_track_texture {
bool enabled;
bool roundup_w;
bool roundup_h;
+ unsigned compress_format;
};
struct r100_cs_track_limits {
@@ -62,13 +67,15 @@ struct r100_cs_track {
unsigned immd_dwords;
unsigned num_arrays;
unsigned max_indx;
+ unsigned color_channel_mask;
struct r100_cs_track_array arrays[11];
struct r100_cs_track_cb cb[R300_MAX_CB];
struct r100_cs_track_cb zb;
struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
bool z_enabled;
bool separate_cube;
-
+ bool fastfill;
+ bool blend_read_enable;
};
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index eb740fc..ff1e0cd 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case 5:
case 6:
case 7:
+ /* 1D/2D */
track->textures[i].tex_coord_type = 0;
break;
case 1:
- track->textures[i].tex_coord_type = 1;
+ /* CUBE */
+ track->textures[i].tex_coord_type = 2;
break;
case 2:
- track->textures[i].tex_coord_type = 2;
+ /* 3D */
+ track->textures[i].tex_coord_type = 1;
break;
}
break;
@@ -401,7 +404,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_Y8:
track->textures[i].cpp = 1;
break;
- case R200_TXFORMAT_DXT1:
case R200_TXFORMAT_AI88:
case R200_TXFORMAT_ARGB1555:
case R200_TXFORMAT_RGB565:
@@ -418,9 +420,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_ABGR8888:
case R200_TXFORMAT_BGR111110:
case R200_TXFORMAT_LDVDU8888:
+ track->textures[i].cpp = 4;
+ break;
+ case R200_TXFORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
case R200_TXFORMAT_DXT23:
case R200_TXFORMAT_DXT45:
- track->textures[i].cpp = 4;
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
break;
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 83378c3..43b55a0 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -36,7 +36,15 @@
#include "rv350d.h"
#include "r300_reg_safe.h"
-/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
+/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
+ *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
+ *   However, scheduling such a write to the ring seems harmless; I suspect
+ *   the CP read collides with the flush somehow, or maybe the MC, hard to
+ *   tell. (Jerome Glisse)
+ */
/*
* rv370,rv380 PCIE GART
@@ -178,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
@@ -493,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
+
tmp = RREG32(RADEON_MEM_CNTL);
- if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
- rdev->mc.vram_width = 128;
- } else {
- rdev->mc.vram_width = 64;
+ tmp &= R300_MEM_NUM_CHANNELS_MASK;
+ switch (tmp) {
+ case 0: rdev->mc.vram_width = 64; break;
+ case 1: rdev->mc.vram_width = 128; break;
+ case 2: rdev->mc.vram_width = 256; break;
+ default: rdev->mc.vram_width = 128; break;
}
r100_vram_init_sizes(rdev);
@@ -686,7 +702,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_TXO_MICRO_TILE;
+
+ tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
track->textures[i].robj = reloc->robj;
break;
/* Tracked registers */
@@ -852,7 +876,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_Z6Y5X5:
case R300_TX_FORMAT_W4Z4Y4X4:
case R300_TX_FORMAT_W1Z5Y5X5:
- case R300_TX_FORMAT_DXT1:
case R300_TX_FORMAT_D3DMFT_CxV8U8:
case R300_TX_FORMAT_B8G8_B8G8:
case R300_TX_FORMAT_G8R8_G8B8:
@@ -866,8 +889,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x17:
case R300_TX_FORMAT_FL_I32:
case 0x1e:
- case R300_TX_FORMAT_DXT3:
- case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 4;
break;
case R300_TX_FORMAT_W16Z16Y16X16:
@@ -878,6 +899,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_FL_R32G32B32A32:
track->textures[i].cpp = 16;
break;
+ case R300_TX_FORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ case R300_TX_FORMAT_ATI2N:
+ if (p->rdev->family < CHIP_R420) {
+ DRM_ERROR("Invalid texture format %u\n",
+ (idx_value & 0x1F));
+ return -EINVAL;
+ }
+ /* The same rules apply as for DXT3/5. */
+ /* Pass through. */
+ case R300_TX_FORMAT_DXT3:
+ case R300_TX_FORMAT_DXT5:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+ break;
default:
DRM_ERROR("Invalid texture format %u\n",
(idx_value & 0x1F));
@@ -937,6 +975,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].width_11 = tmp;
tmp = ((idx_value >> 16) & 1) << 11;
track->textures[i].height_11 = tmp;
+
+ /* ATI1N */
+ if (idx_value & (1 << 14)) {
+ /* The same rules apply as for DXT1. */
+ track->textures[i].compress_format =
+ R100_TRACK_COMP_DXT1;
+ }
+ } else if (idx_value & (1 << 14)) {
+ DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+ return -EINVAL;
}
break;
case 0x4480:
@@ -978,6 +1026,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
+ case 0x4e0c:
+ /* RB3D_COLOR_CHANNEL_MASK */
+ track->color_channel_mask = idx_value;
+ break;
+ case 0x4d1c:
+ /* ZB_BW_CNTL */
+ track->fastfill = !!(idx_value & (1 << 2));
+ break;
+ case 0x4e04:
+ /* RB3D_BLENDCNTL */
+ track->blend_read_enable = !!(idx_value & (1 << 2));
+ break;
case 0x4be8:
/* valid register only on RV530 */
if (p->rdev->family == CHIP_RV530)
@@ -1214,6 +1274,7 @@ static int r300_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -1269,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
- r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -1278,6 +1338,7 @@ void r300_fini(struct radeon_device *rdev)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
@@ -1324,6 +1385,8 @@ int r300_init(struct radeon_device *rdev)
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -1357,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
- radeon_irq_kms_fini(rdev);
+ radeon_agp_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index cb2e470..34bffa0 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -990,7 +990,7 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
int sz;
int addr;
int type;
- int clamp;
+ int isclamp;
int stride;
RING_LOCALS;
@@ -999,10 +999,10 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
- clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+ isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
addr |= (type << 16);
- addr |= (clamp << 17);
+ addr |= (isclamp << 17);
stride = type ? 4 : 6;
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 4b7afef..1735a2b 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -900,6 +900,7 @@
# define R300_TX_FORMAT_FL_I32 0x1B
# define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
+# define R300_TX_FORMAT_ATI2N 0x1F
/* alpha modes, convenience mostly */
/* if you have alpha, pick constant appropriate to the
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c05a727..d937324 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -30,7 +30,15 @@
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
+#include "r100d.h"
#include "r420d.h"
+#include "r420_reg_safe.h"
+
+static void r420_set_reg_safe(struct radeon_device *rdev)
+{
+ rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
+}
int r420_mc_init(struct radeon_device *rdev)
{
@@ -42,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r) {
- printk(KERN_WARNING "[drm] Disabling AGP\n");
- rdev->flags &= ~RADEON_IS_AGP;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ radeon_agp_disable(rdev);
} else {
rdev->mc.gtt_location = rdev->mc.agp_base;
}
@@ -165,6 +171,34 @@ static void r420_clock_resume(struct radeon_device *rdev)
WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
}
+static void r420_cp_errata_init(struct radeon_device *rdev)
+{
+ /* RV410 and R420 can lock up if CP DMA to host memory happens
+ * while the 2D engine is busy.
+ *
+ * The proper workaround is to queue a RESYNC at the beginning
+ * of the CP init, apparently.
+ */
+ radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+ radeon_ring_lock(rdev, 8);
+ radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
+ radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
+ radeon_ring_write(rdev, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev);
+}
+
+static void r420_cp_errata_fini(struct radeon_device *rdev)
+{
+ /* Catch the RESYNC we dispatched all the way back,
+ * at the very beginning of the CP init.
+ */
+ radeon_ring_lock(rdev, 8);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
+ radeon_ring_unlock_commit(rdev);
+ radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+}
+
static int r420_startup(struct radeon_device *rdev)
{
int r;
@@ -190,12 +224,14 @@ static int r420_startup(struct radeon_device *rdev)
r420_pipes_init(rdev);
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
+ r420_cp_errata_init(rdev);
r = r100_wb_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
@@ -238,6 +274,7 @@ int r420_resume(struct radeon_device *rdev)
int r420_suspend(struct radeon_device *rdev)
{
+ r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
r100_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -346,22 +383,21 @@ int r420_init(struct radeon_device *rdev)
if (r)
return r;
}
- r300_set_reg_safe(rdev);
+ r420_set_reg_safe(rdev);
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r420_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 0f3843b..ddf5731 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -186,6 +186,7 @@ static int r520_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -293,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 36656bd..2ffcf5a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -285,7 +285,8 @@ void r600_hpd_init(struct radeon_device *rdev)
}
}
}
- r600_irq_set(rdev);
+ if (rdev->irq.installed)
+ r600_irq_set(rdev);
}
void r600_hpd_fini(struct radeon_device *rdev)
@@ -623,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
fixed20_12 a;
u32 tmp;
int chansize, numchan;
- int r;
/* Get VRAM information */
rdev->mc.vram_is_ddr = true;
@@ -666,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r)
- return r;
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -726,6 +723,10 @@ int r600_mc_init(struct radeon_device *rdev)
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+
+ if (rdev->flags & RADEON_IS_IGP)
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
return 0;
}
@@ -1384,11 +1385,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_PORT_DATA);
}
-void r600_hdp_flush(struct radeon_device *rdev)
-{
- WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-}
-
/*
* CP & Ring
*/
@@ -1658,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.align_mask = 16 - 1;
}
+void r600_cp_fini(struct radeon_device *rdev)
+{
+ r600_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
+
/*
* GPU scratch registers helpers function.
@@ -1785,28 +1787,31 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ radeon_ring_write(rdev, 1);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
}
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence)
-{
- /* FIXME: implement */
- return 0;
-}
-
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
{
- r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ int r;
+
+ mutex_lock(&rdev->r600_blit.mutex);
+ rdev->r600_blit.vb_ib = NULL;
+ r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (rdev->r600_blit.vb_ib)
+ radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+ mutex_unlock(&rdev->r600_blit.mutex);
+ return r;
+ }
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
+ mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
@@ -1862,18 +1867,25 @@ int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ r = r600_blit_init(rdev);
if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
+ r600_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+ /* pin copy shader into vram */
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
}
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1938,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio resume failed\n");
+ return r;
+ }
+
return r;
}
@@ -1945,17 +1964,21 @@ int r600_suspend(struct radeon_device *rdev)
{
int r;
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
+ r600_irq_suspend(rdev);
r600_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+ }
return 0;
}
@@ -2016,6 +2039,11 @@ int r600_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
r = r600_mc_init(rdev);
if (r)
return r;
@@ -2038,52 +2066,50 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
- return r;
- }
-
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
- r600_suspend(rdev);
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r600_cp_fini(rdev);
r600_wb_fini(rdev);
- radeon_ring_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
}
}
+
+ r = r600_audio_init(rdev);
+ if (r)
+ return r; /* TODO error handling */
return 0;
}
void r600_fini(struct radeon_device *rdev)
{
- /* Suspend operations */
- r600_suspend(rdev);
-
+ r600_audio_fini(rdev);
r600_blit_fini(rdev);
+ r600_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
- if (rdev->flags & RADEON_IS_AGP)
- radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -2189,14 +2215,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
rb_bufsz = drm_order(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size;
- rdev->ih.align_mask = 4 - 1;
+ rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+ rdev->ih.rptr = 0;
}
-static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
int r;
- rdev->ih.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2226,9 +2252,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
return r;
}
}
- rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
- rdev->ih.rptr = 0;
-
return 0;
}
@@ -2378,7 +2401,7 @@ int r600_irq_init(struct radeon_device *rdev)
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
/* allocate ring */
- ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+ ret = r600_ih_ring_alloc(rdev);
if (ret)
return ret;
@@ -2441,10 +2464,15 @@ int r600_irq_init(struct radeon_device *rdev)
return ret;
}
-void r600_irq_fini(struct radeon_device *rdev)
+void r600_irq_suspend(struct radeon_device *rdev)
{
r600_disable_interrupts(rdev);
r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+ r600_irq_suspend(rdev);
r600_ih_ring_fini(rdev);
}
@@ -2454,9 +2482,17 @@ int r600_irq_set(struct radeon_device *rdev)
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ return -EINVAL;
+ }
/* don't enable anything if the ih is disabled */
- if (!rdev->ih.enabled)
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ r600_disable_interrupt_state(rdev);
return 0;
+ }
if (ASIC_IS_DCE3(rdev)) {
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2626,16 +2662,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
- WARN_ON(1);
- /* XXX deal with overflow */
- DRM_ERROR("IH RB overflow\n");
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 16).
+ * Hopefully this should allow us to catch up.
+ */
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
}
- wptr = wptr & WPTR_OFFSET_MASK;
-
- return wptr;
+ return (wptr & rdev->ih.ptr_mask);
}
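Editor's note — a worked example of the overflow handling above: with a 16 KiB IH ring, ptr_mask is 0x3FFF; if the hardware reports wptr = 0x20 with RB_OVERFLOW set, the oldest vectors have been overwritten, so rptr is resynchronized to (0x20 + 16) & 0x3FFF = 0x30 and parsing resumes from there, with each 16-byte vector advancing rptr modulo the ring size.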
/* r600 IV Ring
@@ -2671,12 +2709,13 @@ int r600_irq_process(struct radeon_device *rdev)
u32 wptr = r600_get_ih_wptr(rdev);
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
- u32 last_entry = rdev->ih.ring_size - 16;
u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
unsigned long flags;
bool queue_hotplug = false;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ if (!rdev->ih.enabled)
+ return IRQ_NONE;
spin_lock_irqsave(&rdev->ih.lock, flags);
@@ -2717,7 +2756,7 @@ restart_ih:
}
break;
default:
- DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
@@ -2737,7 +2776,7 @@ restart_ih:
}
break;
default:
- DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
@@ -2786,7 +2825,7 @@ restart_ih:
}
break;
default:
- DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
@@ -2800,15 +2839,13 @@ restart_ih:
DRM_DEBUG("IH: CP EOP\n");
break;
default:
- DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
/* wptr/rptr are in bytes! */
- if (rptr == last_entry)
- rptr = 0;
- else
- rptr += 16;
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
}
/* make sure wptr hasn't changed while processing */
wptr = r600_get_ih_wptr(rdev);
@@ -2876,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
return 0;
#endif
}
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * @rdev: radeon device structure
+ * @bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX don't seem to take into account HDP flushes performed
+ * through the ring buffer; this leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
+ * perform the HDP flush directly by writing the register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
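Editor's note: elsewhere in this series the hook above is expected to be plugged into the per-ASIC function table and called from the GEM wait-idle ioctl once the fence wait completes. A hypothetical call site (the callback name is an assumption, it is not shown in this diff):

	/* after waiting for the buffer object to go idle */
	if (rdev->asic->ioctl_wait_idle)
		rdev->asic->ioctl_wait_idle(rdev, bo);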
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
new file mode 100644
index 0000000..0dcb690
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_reg.h"
+#include "atom.h"
+
+#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
+
+/*
+ * check if the chipset is supported
+ */
+static int r600_audio_chipset_supported(struct radeon_device *rdev)
+{
+ return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
+ || rdev->family == CHIP_RS600
+ || rdev->family == CHIP_RS690
+ || rdev->family == CHIP_RS740;
+}
+
+/*
+ * current number of channels
+ */
+static int r600_audio_channels(struct radeon_device *rdev)
+{
+ return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
+}
+
+/*
+ * current bits per sample
+ */
+static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+{
+ uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
+ switch (value) {
+ case 0x0: return 8;
+ case 0x1: return 16;
+ case 0x2: return 20;
+ case 0x3: return 24;
+ case 0x4: return 32;
+ }
+
+ DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
+
+ return 16;
+}
+
+/*
+ * current sampling rate in HZ
+ */
+static int r600_audio_rate(struct radeon_device *rdev)
+{
+ uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
+ uint32_t result;
+
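+ /* bit 14 appears to select the 44.1 kHz family instead of 48 kHz; bits 11-13
+ * and 8-10 look like a rate multiplier and divider (reverse engineered) */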
+ if (value & 0x4000)
+ result = 44100;
+ else
+ result = 48000;
+
+ result *= ((value >> 11) & 0x7) + 1;
+ result /= ((value >> 8) & 0x7) + 1;
+
+ return result;
+}
+
+/*
+ * iec 60958 status bits
+ */
+static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+{
+ return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
+}
+
+/*
+ * iec 60958 category code
+ */
+static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+{
+ return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
+}
+
+/*
+ * update all hdmi interfaces with current audio parameters
+ */
+static void r600_audio_update_hdmi(unsigned long param)
+{
+ struct radeon_device *rdev = (struct radeon_device *)param;
+ struct drm_device *dev = rdev->ddev;
+
+ int channels = r600_audio_channels(rdev);
+ int rate = r600_audio_rate(rdev);
+ int bps = r600_audio_bits_per_sample(rdev);
+ uint8_t status_bits = r600_audio_status_bits(rdev);
+ uint8_t category_code = r600_audio_category_code(rdev);
+
+ struct drm_encoder *encoder;
+ int changes = 0;
+
+ changes |= channels != rdev->audio_channels;
+ changes |= rate != rdev->audio_rate;
+ changes |= bps != rdev->audio_bits_per_sample;
+ changes |= status_bits != rdev->audio_status_bits;
+ changes |= category_code != rdev->audio_category_code;
+
+ if (changes) {
+ rdev->audio_channels = channels;
+ rdev->audio_rate = rate;
+ rdev->audio_bits_per_sample = bps;
+ rdev->audio_status_bits = status_bits;
+ rdev->audio_category_code = category_code;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (changes || r600_hdmi_buffer_status_changed(encoder))
+ r600_hdmi_update_audio_settings(
+ encoder, channels,
+ rate, bps, status_bits,
+ category_code);
+ }
+
+ mod_timer(&rdev->audio_timer,
+ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+}
+
+/*
+ * initialize the audio vars and register the update timer
+ */
+int r600_audio_init(struct radeon_device *rdev)
+{
+ if (!r600_audio_chipset_supported(rdev))
+ return 0;
+
+ DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
+ WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
+
+ rdev->audio_channels = -1;
+ rdev->audio_rate = -1;
+ rdev->audio_bits_per_sample = -1;
+ rdev->audio_status_bits = 0;
+ rdev->audio_category_code = 0;
+
+ setup_timer(
+ &rdev->audio_timer,
+ r600_audio_update_hdmi,
+ (unsigned long)rdev);
+
+ mod_timer(&rdev->audio_timer, jiffies + 1);
+
+ return 0;
+}
+
+/*
+ * determine how the encoders and the audio interface are wired together
+ */
+int r600_audio_tmds_index(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *other;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ return 0;
+
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ /* special case: check if a TMDS1 is present */
+ list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
+ if (to_radeon_encoder(other)->encoder_id ==
+ ENCODER_OBJECT_ID_INTERNAL_TMDS1)
+ return 1;
+ }
+ return 0;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ return 1;
+
+ default:
+ DRM_ERROR("Unsupported encoder type 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return -1;
+ }
+}
+
+/*
+ * attach the audio codec to the clock source of the encoder
+ */
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int base_rate = 48000;
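+ /* the PLL mul/div pair presumably scales the pixel clock (in kHz) down to the
+ * 48 kHz audio base rate; the exact register semantics are reverse engineered */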
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
+ break;
+
+ default:
+ DRM_ERROR("Unsupported encoder type 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return;
+ }
+
+ switch (r600_audio_tmds_index(encoder)) {
+ case 0:
+ WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
+ WREG32(R600_AUDIO_PLL1_DIV, clock*100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+ break;
+
+ case 1:
+ WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
+ WREG32(R600_AUDIO_PLL2_DIV, clock*100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+ break;
+ }
+}
+
+/*
+ * release the audio timer
+ * TODO: How to do this correctly on SMP systems?
+ */
+void r600_audio_fini(struct radeon_device *rdev)
+{
+ if (!r600_audio_chipset_supported(rdev))
+ return;
+
+ del_timer(&rdev->audio_timer);
+ WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
+}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 9aecafb..446b765 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -512,14 +513,16 @@ void r600_blit_fini(struct radeon_device *rdev)
{
int r;
+ if (rdev->r600_blit.shader_obj == NULL)
+ return;
+ /* If we can't reserve the bo, unref should be enough to destroy
+ * it when it becomes idle.
+ */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0)) {
- dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
- goto out_unref;
+ if (!r) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-out_unref:
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
@@ -540,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
void r600_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
- mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
- mutex_unlock(&rdev->ib_pool.mutex);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
@@ -555,7 +555,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
int dwords_per_loop = 76, num_loops;
r = r600_vb_ib_get(rdev);
- WARN_ON(r);
+ if (r)
+ return r;
/* set_render_target emits 2 extra dwords on rv6xx */
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
@@ -577,11 +578,12 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 5; /* fence emit for VB IB */
+ ring_size += 7; /* fence emit for VB IB */
ring_size += 5; /* done copy */
- ring_size += 5; /* fence emit for done copy */
+ ring_size += 7; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
- WARN_ON(r);
+ if (r)
+ return r;
set_default_state(rdev); /* 14 */
set_shaders(rdev); /* 26 */
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 6d5a711..75bcf35 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
- backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
- dev_priv->r600_max_backends,
- (0xff << dev_priv->r600_max_backends) & 0xff);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+ dev_priv->r600_max_backends,
+ (0xff << dev_priv->r600_max_backends) & 0xff);
gb_tiling_config |= R600_BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0d82076..e4c45ec 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
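+/* minimal command-stream tracking state: remembers the last CB_COLOR0_BASE so
+ * that FRAG/TILE registers written without a relocation can be patched to a
+ * valid offset (see r600_packet3_check below) */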
+struct r600_cs_track {
+ u32 cb_color0_base_last;
+};
+
/**
* r600_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
@@ -170,13 +174,35 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
idx, relocs_chunk->length_dw);
return -EINVAL;
}
- *cs_reloc = &p->relocs[0];
+ *cs_reloc = p->relocs;
(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
return 0;
}
/**
+ * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser: parser structure holding parsing context.
+ *
+ * Check whether the next packet is a relocation packet3 (NOP). Returns 1 if
+ * it is, 0 otherwise, without consuming the packet.
+ **/
+static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return 0;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return 0;
+ }
+ return 1;
+}
+
+/**
* r600_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_cs_reloc *reloc;
+ struct r600_cs_track *track;
volatile u32 *ib;
unsigned idx;
unsigned i;
@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
int r;
u32 idx_value;
+ track = (struct r600_cs_track *)p->track;
ib = p->ib->ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
switch (reg) {
+ /* These registers were added late; there is userspace
+ * which does provide a relocation for them but sets a
+ * 0 offset. In order to avoid breaking old userspace
+ * we detect this and set the address to point to the last
+ * CB_COLOR0_BASE. Note that if userspace doesn't set
+ * CB_COLOR0_BASE before these registers we will report an
+ * error. Old userspace always set CB_COLOR0_BASE
+ * before any of these.
+ */
+ case R_0280E0_CB_COLOR0_FRAG:
+ case R_0280E4_CB_COLOR1_FRAG:
+ case R_0280E8_CB_COLOR2_FRAG:
+ case R_0280EC_CB_COLOR3_FRAG:
+ case R_0280F0_CB_COLOR4_FRAG:
+ case R_0280F4_CB_COLOR5_FRAG:
+ case R_0280F8_CB_COLOR6_FRAG:
+ case R_0280FC_CB_COLOR7_FRAG:
+ case R_0280C0_CB_COLOR0_TILE:
+ case R_0280C4_CB_COLOR1_TILE:
+ case R_0280C8_CB_COLOR2_TILE:
+ case R_0280CC_CB_COLOR3_TILE:
+ case R_0280D0_CB_COLOR4_TILE:
+ case R_0280D4_CB_COLOR5_TILE:
+ case R_0280D8_CB_COLOR6_TILE:
+ case R_0280DC_CB_COLOR7_TILE:
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color0_base_last) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] = track->cb_color0_base_last;
+ printk_once(KERN_WARNING "radeon: You have old & broken userspace "
+ "please consider updating mesa & xf86-video-ati\n");
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
case DB_DEPTH_BASE:
case DB_HTILE_DATA_BASE:
case CB_COLOR0_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color0_base_last = ib[idx+1+i];
+ break;
case CB_COLOR1_BASE:
case CB_COLOR2_BASE:
case CB_COLOR3_BASE:
@@ -678,8 +757,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
int r600_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
+ struct r600_cs_track *track;
int r;
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ p->track = track;
do {
r = r600_cs_packet_parse(p, &pkt, p->idx);
if (r) {
@@ -717,7 +799,7 @@ static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
if (p->chunk_relocs_idx == -1) {
return 0;
}
- p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
+ parser.dev = &dev->pdev->dev;
parser.rdev = NULL;
parser.family = family;
parser.ib = &fake_ib;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
new file mode 100644
index 0000000..fcc949d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+/*
+ * HDMI color format
+ */
+enum r600_hdmi_color_format {
+ RGB = 0,
+ YCC_422 = 1,
+ YCC_444 = 2
+};
+
+/*
+ * IEC60958 status bits
+ */
+enum r600_hdmi_iec_status_bits {
+ AUDIO_STATUS_DIG_ENABLE = 0x01,
+ AUDIO_STATUS_V = 0x02,
+ AUDIO_STATUS_VCFG = 0x04,
+ AUDIO_STATUS_EMPHASIS = 0x08,
+ AUDIO_STATUS_COPYRIGHT = 0x10,
+ AUDIO_STATUS_NONAUDIO = 0x20,
+ AUDIO_STATUS_PROFESSIONAL = 0x40,
+ AUDIO_STATUS_LEVEL = 0x80
+};
+
+struct {
+ uint32_t Clock;
+
+ int N_32kHz;
+ int CTS_32kHz;
+
+ int N_44_1kHz;
+ int CTS_44_1kHz;
+
+ int N_48kHz;
+ int CTS_48kHz;
+
+} r600_hdmi_ACR[] = {
+ /* 32kHz 44.1kHz 48kHz */
+ /* Clock N CTS N CTS N CTS */
+ { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
+ { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
+ { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
+ { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
+ { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
+ { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
+ { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
+ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
+ { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
+ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
+ { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
+};
+
+/*
+ * calculate CTS value if it's not found in the table
+ */
+static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
+{
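+ /* HDMI ACR relation: 128 * audio_freq * CTS = N * f_TMDS; the pixel clock is
+ * given in kHz here, hence the final multiply by 1000 */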
+ if (*CTS == 0)
+ *CTS = clock*N/(128*freq)*1000;
+ DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
+ N, *CTS, freq);
+}
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ int CTS;
+ int N;
+ int i;
+
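+ /* look up the table entry matching this pixel clock; the terminating entry
+ * (Clock == 0) carries the spec-default N values used for other clocks */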
+ for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
+
+ CTS = r600_hdmi_ACR[i].CTS_32kHz;
+ N = r600_hdmi_ACR[i].N_32kHz;
+ r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
+ WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
+ WREG32(offset+R600_HDMI_32kHz_N, N);
+
+ CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
+ N = r600_hdmi_ACR[i].N_44_1kHz;
+ r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
+ WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
+ WREG32(offset+R600_HDMI_44_1kHz_N, N);
+
+ CTS = r600_hdmi_ACR[i].CTS_48kHz;
+ N = r600_hdmi_ACR[i].N_48kHz;
+ r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
+ WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
+ WREG32(offset+R600_HDMI_48kHz_N, N);
+}
+
+/*
+ * calculate the crc for a given info frame
+ */
+static void r600_hdmi_infoframe_checksum(uint8_t packetType,
+ uint8_t versionNumber,
+ uint8_t length,
+ uint8_t *frame)
+{
+ int i;
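+ /* infoframe checksum: the header bytes plus all payload bytes plus the
+ * checksum itself must sum to zero modulo 256 */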
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
+}
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void r600_hdmi_videoinfoframe(
+ struct drm_encoder *encoder,
+ enum r600_hdmi_color_format color_format,
+ int active_information_present,
+ uint8_t active_format_aspect_ratio,
+ uint8_t scan_information,
+ uint8_t colorimetry,
+ uint8_t ex_colorimetry,
+ uint8_t quantization,
+ int ITC,
+ uint8_t picture_aspect_ratio,
+ uint8_t video_format_identification,
+ uint8_t pixel_repetition,
+ uint8_t non_uniform_picture_scaling,
+ uint8_t bar_info_data_valid,
+ uint16_t top_bar,
+ uint16_t bottom_bar,
+ uint16_t left_bar,
+ uint16_t right_bar
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ uint8_t frame[14];
+
+ frame[0x0] = 0;
+ frame[0x1] =
+ (scan_information & 0x3) |
+ ((bar_info_data_valid & 0x3) << 2) |
+ ((active_information_present & 0x1) << 4) |
+ ((color_format & 0x3) << 5);
+ frame[0x2] =
+ (active_format_aspect_ratio & 0xF) |
+ ((picture_aspect_ratio & 0x3) << 4) |
+ ((colorimetry & 0x3) << 6);
+ frame[0x3] =
+ (non_uniform_picture_scaling & 0x3) |
+ ((quantization & 0x3) << 2) |
+ ((ex_colorimetry & 0x7) << 4) |
+ ((ITC & 0x1) << 7);
+ frame[0x4] = (video_format_identification & 0x7F);
+ frame[0x5] = (pixel_repetition & 0xF);
+ frame[0x6] = (top_bar & 0xFF);
+ frame[0x7] = (top_bar >> 8);
+ frame[0x8] = (bottom_bar & 0xFF);
+ frame[0x9] = (bottom_bar >> 8);
+ frame[0xA] = (left_bar & 0xFF);
+ frame[0xB] = (left_bar >> 8);
+ frame[0xC] = (right_bar & 0xFF);
+ frame[0xD] = (right_bar >> 8);
+
+ r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
+ frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * build a Audio Info Frame
+ */
+static void r600_hdmi_audioinfoframe(
+ struct drm_encoder *encoder,
+ uint8_t channel_count,
+ uint8_t coding_type,
+ uint8_t sample_size,
+ uint8_t sample_frequency,
+ uint8_t format,
+ uint8_t channel_allocation,
+ uint8_t level_shift,
+ int downmix_inhibit
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ uint8_t frame[11];
+
+ frame[0x0] = 0;
+ frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
+ frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
+ frame[0x3] = format;
+ frame[0x4] = channel_allocation;
+ frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
+ frame[0x6] = 0;
+ frame[0x7] = 0;
+ frame[0x8] = 0;
+ frame[0x9] = 0;
+ frame[0xA] = 0;
+
+ r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
+
+ WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+}
+
+/*
+ * test if audio buffer is filled enough to start playing
+ */
+static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
+}
+
+/*
+ * have buffer status changed since last call?
+ */
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int status, result;
+
+ if (!radeon_encoder->hdmi_offset)
+ return 0;
+
+ status = r600_hdmi_is_audio_buffer_filled(encoder);
+ result = radeon_encoder->hdmi_buffer_status != status;
+ radeon_encoder->hdmi_buffer_status = status;
+
+ return result;
+}
+
+/*
+ * write the audio workaround status to the hardware
+ */
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset = radeon_encoder->hdmi_offset;
+
+ if (!offset)
+ return;
+
+ if (r600_hdmi_is_audio_buffer_filled(encoder)) {
+ /* disable audio workaround and start delivering of audio frames */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+
+ } else if (radeon_encoder->hdmi_audio_workaround) {
+ /* enable audio workaround and start delivering of audio frames */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+
+ } else {
+ /* disable audio workaround and stop delivering of audio frames */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+ }
+}
+
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ if (!offset)
+ return;
+
+ r600_audio_set_clock(encoder, mode->clock);
+
+ WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
+ WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
+ WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
+
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
+ WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
+
+ WREG32(offset+R600_HDMI_VERSION, 0x202);
+
+ r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+ /* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
+
+ r600_hdmi_audio_workaround(encoder);
+
+ /* audio packets per line, does anyone know how to calc this ? */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
+
+ /* update? reset? don't really know */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
+}
+
+/*
+ * update settings with current parameters from audio engine
+ */
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
+ int channels,
+ int rate,
+ int bps,
+ uint8_t status_bits,
+ uint8_t category_code)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ uint32_t iec;
+
+ if (!offset)
+ return;
+
+ DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
+ r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
+ channels, rate, bps);
+ DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
+ (int)status_bits, (int)category_code);
+
+ iec = 0;
+ if (status_bits & AUDIO_STATUS_PROFESSIONAL)
+ iec |= 1 << 0;
+ if (status_bits & AUDIO_STATUS_NONAUDIO)
+ iec |= 1 << 1;
+ if (status_bits & AUDIO_STATUS_COPYRIGHT)
+ iec |= 1 << 2;
+ if (status_bits & AUDIO_STATUS_EMPHASIS)
+ iec |= 1 << 3;
+
+ iec |= category_code << 8;
+
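+ /* IEC 60958 channel status: bits 24-27 carry the sampling frequency code */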
+ switch (rate) {
+ case 32000: iec |= 0x3 << 24; break;
+ case 44100: iec |= 0x0 << 24; break;
+ case 88200: iec |= 0x8 << 24; break;
+ case 176400: iec |= 0xc << 24; break;
+ case 48000: iec |= 0x2 << 24; break;
+ case 96000: iec |= 0xa << 24; break;
+ case 192000: iec |= 0xe << 24; break;
+ }
+
+ WREG32(offset+R600_HDMI_IEC60958_1, iec);
+
+ iec = 0;
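+ /* IEC 60958 word length code (channel status bits 32-35) */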
+ switch (bps) {
+ case 16: iec |= 0x2; break;
+ case 20: iec |= 0x3; break;
+ case 24: iec |= 0xb; break;
+ }
+ if (status_bits & AUDIO_STATUS_V)
+ iec |= 0x5 << 16;
+
+ WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
+
+ /* 0x021 or 0x031 sets the audio frame length */
+ WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
+ r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
+
+ r600_hdmi_audio_workaround(encoder);
+
+ /* update? reset? don't really know */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
+}
+
+/*
+ * enable/disable the HDMI engine
+ */
+void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ if (!offset)
+ return;
+
+ DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
+
+ /* some version of atombios ignore the enable HDMI flag
+ * so enabling/disabling HDMI was moved here for TMDS1+2 */
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
+ WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
+ WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* This part is doubtful in my opinion */
+ WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
+ break;
+
+ default:
+ DRM_ERROR("unknown HDMI output type\n");
+ break;
+ }
+}
+
+/*
+ * determine at which register offset the HDMI encoder is located
+ */
+void r600_hdmi_init(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ switch (r600_audio_tmds_index(encoder)) {
+ case 0:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+ break;
+ case 1:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+ break;
+ default:
+ radeon_encoder->hdmi_offset = 0;
+ break;
+ }
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ radeon_encoder->hdmi_offset = R600_HDMI_DIG;
+ break;
+
+ default:
+ radeon_encoder->hdmi_offset = 0;
+ break;
+ }
+
+ DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
+ radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+
+ /* TODO: make this configurable */
+ radeon_encoder->hdmi_audio_workaround = 0;
+}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index e2d1f5f..d0e28ff 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -110,5 +110,79 @@
#define R600_BIOS_6_SCRATCH 0x173c
#define R600_BIOS_7_SCRATCH 0x1740
+/* Audio, these regs were reverse engineered,
+ * so the chance is high that the naming is wrong
+ * R6xx+ ??? */
+
+/* Audio clocks */
+#define R600_AUDIO_PLL1_MUL 0x0514
+#define R600_AUDIO_PLL1_DIV 0x0518
+#define R600_AUDIO_PLL2_MUL 0x0524
+#define R600_AUDIO_PLL2_DIV 0x0528
+#define R600_AUDIO_CLK_SRCSEL 0x0534
+
+/* Audio general */
+#define R600_AUDIO_ENABLE 0x7300
+#define R600_AUDIO_TIMING 0x7344
+
+/* Audio params */
+#define R600_AUDIO_VENDOR_ID 0x7380
+#define R600_AUDIO_REVISION_ID 0x7384
+#define R600_AUDIO_ROOT_NODE_COUNT 0x7388
+#define R600_AUDIO_NID1_NODE_COUNT 0x738c
+#define R600_AUDIO_NID1_TYPE 0x7390
+#define R600_AUDIO_SUPPORTED_SIZE_RATE 0x7394
+#define R600_AUDIO_SUPPORTED_CODEC 0x7398
+#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
+#define R600_AUDIO_NID2_CAPS 0x73a0
+#define R600_AUDIO_NID3_CAPS 0x73a4
+#define R600_AUDIO_NID3_PIN_CAPS 0x73a8
+
+/* Audio conn list */
+#define R600_AUDIO_CONN_LIST_LEN 0x73ac
+#define R600_AUDIO_CONN_LIST 0x73b0
+
+/* Audio verbs */
+#define R600_AUDIO_RATE_BPS_CHANNEL 0x73c0
+#define R600_AUDIO_PLAYING 0x73c4
+#define R600_AUDIO_IMPLEMENTATION_ID 0x73c8
+#define R600_AUDIO_CONFIG_DEFAULT 0x73cc
+#define R600_AUDIO_PIN_SENSE 0x73d0
+#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
+#define R600_AUDIO_STATUS_BITS 0x73d8
+
+/* HDMI base register addresses */
+#define R600_HDMI_TMDS1 0x7400
+#define R600_HDMI_TMDS2 0x7700
+#define R600_HDMI_DIG 0x7800
+
+/* HDMI registers */
+#define R600_HDMI_ENABLE 0x00
+#define R600_HDMI_STATUS 0x04
+#define R600_HDMI_CNTL 0x08
+#define R600_HDMI_UNKNOWN_0 0x0C
+#define R600_HDMI_AUDIOCNTL 0x10
+#define R600_HDMI_VIDEOCNTL 0x14
+#define R600_HDMI_VERSION 0x18
+#define R600_HDMI_UNKNOWN_1 0x28
+#define R600_HDMI_VIDEOINFOFRAME_0 0x54
+#define R600_HDMI_VIDEOINFOFRAME_1 0x58
+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3 0x60
+#define R600_HDMI_32kHz_CTS 0xac
+#define R600_HDMI_32kHz_N 0xb0
+#define R600_HDMI_44_1kHz_CTS 0xb4
+#define R600_HDMI_44_1kHz_N 0xb8
+#define R600_HDMI_48kHz_CTS 0xbc
+#define R600_HDMI_48kHz_N 0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
+#define R600_HDMI_IEC60958_1 0xd4
+#define R600_HDMI_IEC60958_2 0xd8
+#define R600_HDMI_UNKNOWN_2 0xdc
+#define R600_HDMI_AUDIO_DEBUG_0 0xe0
+#define R600_HDMI_AUDIO_DEBUG_1 0xe4
+#define R600_HDMI_AUDIO_DEBUG_2 0xe8
+#define R600_HDMI_AUDIO_DEBUG_3 0xec
#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 05894ed..3048088 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -882,4 +882,29 @@
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+
+#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
+#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280E0_BASE_256B 0x00000000
+#define R_0280E4_CB_COLOR1_FRAG 0x0280E4
+#define R_0280E8_CB_COLOR2_FRAG 0x0280E8
+#define R_0280EC_CB_COLOR3_FRAG 0x0280EC
+#define R_0280F0_CB_COLOR4_FRAG 0x0280F0
+#define R_0280F4_CB_COLOR5_FRAG 0x0280F4
+#define R_0280F8_CB_COLOR6_FRAG 0x0280F8
+#define R_0280FC_CB_COLOR7_FRAG 0x0280FC
+#define R_0280C0_CB_COLOR0_TILE 0x0280C0
+#define S_0280C0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0280C0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0280C0_BASE_256B 0x00000000
+#define R_0280C4_CB_COLOR1_TILE 0x0280C4
+#define R_0280C8_CB_COLOR2_TILE 0x0280C8
+#define R_0280CC_CB_COLOR3_TILE 0x0280CC
+#define R_0280D0_CB_COLOR4_TILE 0x0280D0
+#define R_0280D4_CB_COLOR5_TILE 0x0280D4
+#define R_0280D8_CB_COLOR6_TILE 0x0280D8
+#define R_0280DC_CB_COLOR7_TILE 0x0280DC
+
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c938bb5..c0356bb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,12 +89,14 @@ extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
+extern int radeon_audio;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEONFB_CONN_LIMIT 4
@@ -161,6 +163,7 @@ struct radeon_fence_driver {
struct list_head created;
struct list_head emited;
struct list_head signaled;
+ bool initialized;
};
struct radeon_fence {
@@ -201,8 +204,9 @@ struct radeon_surface_reg {
struct radeon_mman {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_global_reference mem_global_ref;
- bool mem_global_referenced;
struct ttm_bo_device bdev;
+ bool mem_global_referenced;
+ bool initialized;
};
struct radeon_bo {
@@ -316,10 +320,12 @@ struct radeon_mc {
u64 real_vram_size;
int vram_mtrr;
bool vram_is_ddr;
+ bool igp_sideport_enabled;
};
int radeon_mc_setup(struct radeon_device *rdev);
-
+bool radeon_combios_sideport_present(struct radeon_device *rdev);
+bool radeon_atombios_sideport_present(struct radeon_device *rdev);
/*
* GPU scratch registers structures, functions & helpers
@@ -358,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
*/
struct radeon_ib {
struct list_head list;
- unsigned long idx;
+ unsigned idx;
uint64_t gpu_addr;
struct radeon_fence *fence;
- uint32_t *ptr;
+ uint32_t *ptr;
uint32_t length_dw;
+ bool free;
};
/*
@@ -372,10 +379,9 @@ struct radeon_ib {
struct radeon_ib_pool {
struct mutex mutex;
struct radeon_bo *robj;
- struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
- DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+ unsigned head_id;
};
struct radeon_cp {
@@ -405,13 +411,13 @@ struct r600_ih {
unsigned wptr_old;
unsigned ring_size;
uint64_t gpu_addr;
- uint32_t align_mask;
uint32_t ptr_mask;
spinlock_t lock;
bool enabled;
};
struct r600_blit {
+ struct mutex mutex;
struct radeon_bo *shader_obj;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
@@ -460,6 +466,7 @@ struct radeon_cs_chunk {
};
struct radeon_cs_parser {
+ struct device *dev;
struct radeon_device *rdev;
struct drm_file *filp;
/* chunks */
@@ -651,11 +658,17 @@ struct radeon_asic {
uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
- void (*hdp_flush)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ /* ioctl hw specific callback. Some hw might want to perform a special
+ * operation on a specific ioctl. For instance on wait idle some hw
+ * might want to perform an HDP flush through MMIO as it seems that
+ * some R6XX/R7XX hw doesn't take the HDP flush into account if programmed
+ * through the ring.
+ */
+ void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
};
/*
@@ -664,11 +677,14 @@ struct radeon_asic {
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
+ u32 hdp_cntl;
};
struct r300_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
+ u32 resync_scratch;
+ u32 hdp_cntl;
};
struct r600_asic {
@@ -814,6 +830,14 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
struct work_struct hotplug_work;
+
+ /* audio stuff */
+ struct timer_list audio_timer;
+ int audio_channels;
+ int audio_rate;
+ int audio_bits_per_sample;
+ uint8_t audio_status_bits;
+ uint8_t audio_category_code;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -832,7 +856,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
- if (reg < 0x10000)
+ if (reg < rdev->rmmio_size)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -842,7 +866,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- if (reg < 0x10000)
+ if (reg < rdev->rmmio_size)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
@@ -996,13 +1020,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
-#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
/* Common functions */
+/* AGP */
+extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1016,6 +1041,7 @@ extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
+extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
@@ -1125,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
+extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1145,6 +1172,22 @@ extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
+extern void r600_irq_suspend(struct radeon_device *rdev);
+/* r600 audio */
+extern int r600_audio_init(struct radeon_device *rdev);
+extern int r600_audio_tmds_index(struct drm_encoder *encoder);
+extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern void r600_audio_fini(struct radeon_device *rdev);
+extern void r600_hdmi_init(struct drm_encoder *encoder);
+extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
+extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
+ int channels,
+ int rate,
+ int bps,
+ uint8_t status_bits,
+ uint8_t category_code);
#include "radeon_object.h"
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 54bf49a..c0681a55 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev)
ret = drm_agp_info(rdev->ddev, &info);
if (ret) {
+ drm_agp_release(rdev->ddev);
DRM_ERROR("Unable to get AGP info: %d\n", ret);
return ret;
}
+
+ if (rdev->ddev->agp->agp_info.aper_size < 32) {
+ drm_agp_release(rdev->ddev);
+ dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
+ "need at least 32M, disabling AGP\n",
+ rdev->ddev->agp->agp_info.aper_size);
+ return -EINVAL;
+ }
+
mode.mode = info.mode;
agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
@@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev)
ret = drm_agp_enable(rdev->ddev, mode);
if (ret) {
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+ drm_agp_release(rdev->ddev);
return ret;
}
@@ -252,10 +263,8 @@ void radeon_agp_resume(struct radeon_device *rdev)
void radeon_agp_fini(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
- drm_agp_release(rdev->ddev);
- }
+ if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+ drm_agp_release(rdev->ddev);
}
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 636116b..05ee1ae 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -33,6 +33,7 @@
*/
uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
@@ -76,7 +77,6 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
-void r100_hdp_flush(struct radeon_device *rdev);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -106,18 +106,18 @@ static struct radeon_asic r100_asic = {
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = NULL,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -166,18 +166,18 @@ static struct radeon_asic r300_asic = {
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = NULL,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
/*
@@ -217,11 +217,11 @@ static struct radeon_asic r420_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -259,18 +259,18 @@ static struct radeon_asic rs400_asic = {
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = NULL,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -323,11 +323,11 @@ static struct radeon_asic rs600_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -371,11 +371,11 @@ static struct radeon_asic rs690_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -423,11 +423,11 @@ static struct radeon_asic rv515_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -466,11 +466,11 @@ static struct radeon_asic r520_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
/*
@@ -507,12 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
-void r600_hdp_flush(struct radeon_device *rdev);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
static struct radeon_asic r600_asic = {
.init = &r600_init,
@@ -543,11 +543,11 @@ static struct radeon_asic r600_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
};
/*
@@ -588,11 +588,11 @@ static struct radeon_asic rv770_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 12a0c76..4d88315 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
i2c.i2c_id = gpio->sucI2cId.ucAccess;
i2c.valid = true;
+ break;
}
}
@@ -205,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
+ /* Asrock RS600 board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x7941) &&
+ (dev->pdev->subsystem_vendor == 0x1849) &&
+ (dev->pdev->subsystem_device == 0x7941)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x147b) &&
@@ -286,6 +296,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
+ /* XFX Pine Group device rv730 reports no VGA DDC lines
+ * even though they are wired up to record 0x93
+ */
+ if ((dev->pdev->device == 0x9498) &&
+ (dev->pdev->subsystem_vendor == 0x1682) &&
+ (dev->pdev->subsystem_device == 0x2452)) {
+ struct radeon_device *rdev = dev->dev_private;
+ *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+ }
return true;
}
@@ -345,7 +364,9 @@ const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_Unknown,
- DRM_MODE_CONNECTOR_DisplayPort
+ DRM_MODE_CONNECTOR_DisplayPort,
+ DRM_MODE_CONNECTOR_eDP,
+ DRM_MODE_CONNECTOR_Unknown
};
bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
@@ -745,8 +766,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
else
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
- (1 <<
- i),
+ (1 << i),
dac),
(1 << i));
}
@@ -758,32 +778,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (bios_connectors[j].valid && (i != j)) {
if (bios_connectors[i].line_mux ==
bios_connectors[j].line_mux) {
- if (((bios_connectors[i].
- devices &
- (ATOM_DEVICE_DFP_SUPPORT))
- && (bios_connectors[j].
- devices &
- (ATOM_DEVICE_CRT_SUPPORT)))
- ||
- ((bios_connectors[j].
- devices &
- (ATOM_DEVICE_DFP_SUPPORT))
- && (bios_connectors[i].
- devices &
- (ATOM_DEVICE_CRT_SUPPORT)))) {
- bios_connectors[i].
- devices |=
- bios_connectors[j].
- devices;
- bios_connectors[i].
- connector_type =
- DRM_MODE_CONNECTOR_DVII;
- if (bios_connectors[j].devices &
- (ATOM_DEVICE_DFP_SUPPORT))
+ /* make sure not to combine LVDS */
+ if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ bios_connectors[i].line_mux = 53;
+ bios_connectors[i].ddc_bus.valid = false;
+ continue;
+ }
+ if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ bios_connectors[j].line_mux = 53;
+ bios_connectors[j].ddc_bus.valid = false;
+ continue;
+ }
+ /* combine analog and digital for DVI-I */
+ if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+ (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
+ ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+ (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
+ bios_connectors[i].devices |=
+ bios_connectors[j].devices;
+ bios_connectors[i].connector_type =
+ DRM_MODE_CONNECTOR_DVII;
+ if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
bios_connectors[i].hpd =
bios_connectors[j].hpd;
- bios_connectors[j].
- valid = false;
+ bios_connectors[j].valid = false;
}
}
}
@@ -938,6 +956,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
return false;
}
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+};
+
+bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+ &crev, &data_offset);
+
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ if (igp_info) {
+ switch (crev) {
+ case 1:
+ if (igp_info->info.ucMemoryType & 0xf0)
+ return true;
+ break;
+ case 2:
+ if (igp_info->info_2.ucMemoryType & 0x0f)
+ return true;
+ break;
+ default:
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ break;
+ }
+ }
+ return false;
+}
+
bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
struct radeon_encoder_int_tmds *tmds)
{
@@ -1029,6 +1084,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ break;
}
}
}
@@ -1234,6 +1290,61 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
return true;
}
+enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+ uint16_t data_offset;
+ uint8_t frev, crev;
+ struct _ATOM_ANALOG_TV_INFO *tv_info;
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+
+ tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
+
+ switch (tv_info->ucTV_BootUpDefaultStandard) {
+ case ATOM_TV_NTSC:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Default TV standard: NTSC\n");
+ break;
+ case ATOM_TV_NTSCJ:
+ tv_std = TV_STD_NTSC_J;
+ DRM_INFO("Default TV standard: NTSC-J\n");
+ break;
+ case ATOM_TV_PAL:
+ tv_std = TV_STD_PAL;
+ DRM_INFO("Default TV standard: PAL\n");
+ break;
+ case ATOM_TV_PALM:
+ tv_std = TV_STD_PAL_M;
+ DRM_INFO("Default TV standard: PAL-M\n");
+ break;
+ case ATOM_TV_PALN:
+ tv_std = TV_STD_PAL_N;
+ DRM_INFO("Default TV standard: PAL-N\n");
+ break;
+ case ATOM_TV_PALCN:
+ tv_std = TV_STD_PAL_CN;
+ DRM_INFO("Default TV standard: PAL-CN\n");
+ break;
+ case ATOM_TV_PAL60:
+ tv_std = TV_STD_PAL_60;
+ DRM_INFO("Default TV standard: PAL-60\n");
+ break;
+ case ATOM_TV_SECAM:
+ tv_std = TV_STD_SECAM;
+ DRM_INFO("Default TV standard: SECAM\n");
+ break;
+ default:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+ break;
+ }
+ return tv_std;
+}
+
struct radeon_encoder_tv_dac *
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
{
@@ -1269,6 +1380,7 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
}
return tv_dac;
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b..7932dc4 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- start_jiffies = jiffies;
- for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence);
- if (r) {
- goto out_cleanup;
+
+ /* r100 doesn't have dma engine so skip the test */
+ if (rdev->asic->copy_dma) {
+
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ goto out_cleanup;
+ }
+
+ r = radeon_copy_dma(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE, fence);
+
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ goto out_cleanup;
+ }
+ radeon_fence_unref(&fence);
}
- r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
- if (r) {
- goto out_cleanup;
+ end_jiffies = jiffies;
+ time = end_jiffies - start_jiffies;
+ time = jiffies_to_msecs(time);
+ if (time > 0) {
+ i = ((n * size) >> 10) / time;
+ printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
+ " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
+ n, size >> 10,
+ sdomain, ddomain, time,
+ i, i * 1000, (i * 1000) / 1024);
}
- r = radeon_fence_wait(fence, false);
- if (r) {
- goto out_cleanup;
- }
- radeon_fence_unref(&fence);
- }
- end_jiffies = jiffies;
- time = end_jiffies - start_jiffies;
- time = jiffies_to_msecs(time);
- if (time > 0) {
- i = ((n * size) >> 10) / time;
- printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
- " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
- sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
}
+
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
r = radeon_fence_create(rdev, &fence);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index b062109..73c4405 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -56,13 +56,13 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
else if (post_div == 3)
sclk >>= 2;
else if (post_div == 4)
- sclk >>= 4;
+ sclk >>= 3;
return sclk;
}
/* 10 khz */
-static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
{
struct radeon_pll *mpll = &rdev->clock.mpll;
uint32_t fb_div, ref_div, post_div, mclk;
@@ -86,7 +86,7 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
else if (post_div == 3)
mclk >>= 2;
else if (post_div == 4)
- mclk >>= 4;
+ mclk >>= 3;
return mclk;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c5021a3..e7b1944 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -595,6 +595,48 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
return false;
}
+bool radeon_combios_sideport_present(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ u16 igp_info;
+
+ igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
+
+ if (igp_info) {
+ if (RBIOS16(igp_info + 0x4))
+ return true;
+ }
+ return false;
+}
+
+static const uint32_t default_primarydac_adj[CHIP_LAST] = {
+ 0x00000808, /* r100 */
+ 0x00000808, /* rv100 */
+ 0x00000808, /* rs100 */
+ 0x00000808, /* rv200 */
+ 0x00000808, /* rs200 */
+ 0x00000808, /* r200 */
+ 0x00000808, /* rv250 */
+ 0x00000000, /* rs300 */
+ 0x00000808, /* rv280 */
+ 0x00000808, /* r300 */
+ 0x00000808, /* r350 */
+ 0x00000808, /* rv350 */
+ 0x00000808, /* rv380 */
+ 0x00000808, /* r420 */
+ 0x00000808, /* r423 */
+ 0x00000808, /* rv410 */
+ 0x00000000, /* rs400 */
+ 0x00000000, /* rs480 */
+};
+
+static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
+ struct radeon_encoder_primary_dac *p_dac)
+{
+ p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
+ return;
+}
+
struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
radeon_encoder
*encoder)
@@ -604,20 +646,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
uint16_t dac_info;
uint8_t rev, bg, dac;
struct radeon_encoder_primary_dac *p_dac = NULL;
+ int found = 0;
- if (rdev->bios == NULL)
+ p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
+ GFP_KERNEL);
+
+ if (!p_dac)
return NULL;
+ if (rdev->bios == NULL)
+ goto out;
+
/* check CRT table */
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
if (dac_info) {
- p_dac =
- kzalloc(sizeof(struct radeon_encoder_primary_dac),
- GFP_KERNEL);
-
- if (!p_dac)
- return NULL;
-
rev = RBIOS8(dac_info) & 0x3;
if (rev < 2) {
bg = RBIOS8(dac_info + 0x2) & 0xf;
@@ -628,20 +670,26 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
-
+ found = 1;
}
+out:
+ if (!found) /* fallback to defaults */
+ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
return p_dac;
}
-static enum radeon_tv_std
-radeon_combios_get_tv_info(struct radeon_encoder *encoder)
+enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev)
{
- struct drm_device *dev = encoder->base.dev;
- struct radeon_device *rdev = dev->dev_private;
+ struct drm_device *dev = rdev->ddev;
uint16_t tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
+ if (rdev->bios == NULL)
+ return tv_std;
+
tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
@@ -779,7 +827,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
found = 1;
}
- tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
+ tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
}
if (!found) {
/* then check CRT table */
@@ -923,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
lvds->native_mode.vdisplay);
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
- if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
- lvds->panel_vcc_delay = 2000;
+ lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5eece18..65f8194 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -49,8 +49,10 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_needs_link_train(radeon_connector)) {
if (connector->encoder)
dp_link_train(connector->encoder, connector);
@@ -208,6 +210,18 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
drm_mode_set_name(mode);
DRM_DEBUG("Adding native panel mode %s\n", mode->name);
+ } else if (native_mode->hdisplay != 0 &&
+ native_mode->vdisplay != 0) {
+ /* mac laptops without an edid */
+ /* Note that this is not necessarily the exact panel mode,
+ * but an approximation based on the cvt formula. For these
+ * systems we should ideally read the mode info out of the
+ * registers or add a mode table, but this works and is much
+ * simpler.
+ */
+ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+ DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
}
return mode;
}
@@ -566,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
- bool dret;
+ bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
encoder = radeon_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -603,7 +619,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
ret = connector_status_connected;
}
} else {
- if (radeon_connector->dac_load_detect) {
+ if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
@@ -726,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
struct drm_mode_object *obj;
int i;
enum drm_connector_status ret = connector_status_disconnected;
- bool dret;
+ bool dret = false;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -762,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
* connected and the DVI port disconnected. If the edid doesn't
* say HDMI, vice versa.
*/
- if (radeon_connector->shared_ddc && connector_status_connected) {
+ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_device *dev = connector->dev;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
@@ -886,10 +904,18 @@ static void radeon_dvi_force(struct drm_connector *connector)
static int radeon_dvi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
/* XXX check mode bandwidth */
+ /* clocks over 135 MHz have heat issues with DVI on RV100 */
+ if (radeon_connector->use_digital &&
+ (rdev->family == CHIP_RV100) &&
+ (mode->clock > 135000))
+ return MODE_CLOCK_HIGH;
+
if (radeon_connector->use_digital && (mode->clock > 165000)) {
if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
@@ -955,7 +981,8 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
}
sink_type = radeon_dp_getsinktype(radeon_connector);
- if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_getdpcd(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
@@ -980,7 +1007,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
/* XXX check mode bandwidth */
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return radeon_dp_mode_valid_helper(radeon_connector, mode);
else
return MODE_OK;
@@ -1032,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
return;
}
if (radeon_connector->ddc_bus && i2c_bus->valid) {
- if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
- sizeof(struct radeon_i2c_bus_rec)) == 0) {
+ if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
@@ -1133,6 +1160,7 @@ radeon_add_atom_connector(struct drm_device *dev,
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
@@ -1145,10 +1173,16 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
if (i2c_bus->valid) {
/* add DP i2c bus */
- radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+ else
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
goto failed;
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
+ else
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1171,7 +1205,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
- 1);
+ radeon_atombios_get_tv_info(rdev));
}
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -1312,10 +1346,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->dac_load_detect = false;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
- 1);
+ radeon_connector->dac_load_detect);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
- 1);
+ radeon_combios_get_tv_info(rdev));
}
break;
case DRM_MODE_CONNECTOR_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 0b2f9c2..06123ba 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2145,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
&master_priv->sarea);
if (ret) {
DRM_ERROR("SAREA setup failed\n");
+ kfree(master_priv);
return ret;
}
master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 65590a0..e9d0850 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
&p->validated);
}
}
- return radeon_bo_list_validate(&p->validated, p->ib->fence);
+ return radeon_bo_list_validate(&p->validated);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (error) {
- radeon_bo_list_unvalidate(&parser->validated,
- parser->ib->fence);
- } else {
- radeon_bo_list_unreserve(&parser->validated);
+ if (!error && parser->ib) {
+ radeon_bo_list_fence(&parser->validated, parser->ib->fence);
}
+ radeon_bo_list_unreserve(&parser->validated);
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj) {
mutex_lock(&parser->rdev->ddev->struct_mutex);
@@ -231,6 +229,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
parser.rdev = rdev;
+ parser.dev = rdev->dev;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 02bcdb1..768b150 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -391,6 +391,12 @@ int radeon_asic_init(struct radeon_device *rdev)
/* FIXME: not supported yet */
return -EINVAL;
}
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ rdev->asic->get_memory_clock = NULL;
+ rdev->asic->set_memory_clock = NULL;
+ }
+
return 0;
}
@@ -481,6 +487,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+ mutex_init(&rdev->mode_info.atom_context->mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
@@ -537,11 +544,75 @@ void radeon_agp_disable(struct radeon_device *rdev)
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
}
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+}
+
+void radeon_check_arguments(struct radeon_device *rdev)
+{
+ /* vramlimit must be a power of two */
+ switch (radeon_vram_limit) {
+ case 0:
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
+ radeon_vram_limit);
+ radeon_vram_limit = 0;
+ break;
+ }
+ radeon_vram_limit = radeon_vram_limit << 20;
+ /* gtt size must be a power of two and greater than or equal to 32M */
+ switch (radeon_gart_size) {
+ case 4:
+ case 8:
+ case 16:
+ dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
+ radeon_gart_size);
+ radeon_gart_size = 512;
+ break;
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
+ radeon_gart_size);
+ radeon_gart_size = 512;
+ break;
+ }
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ /* AGP mode can only be -1, 0, 1, 2, 4, 8 */
+ switch (radeon_agpmode) {
+ case -1:
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
+ "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
+ radeon_agpmode = 0;
+ break;
+ }
}
-/*
- * Radeon device.
- */
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
@@ -580,9 +651,9 @@ int radeon_device_init(struct radeon_device *rdev,
/* Set asic functions */
r = radeon_asic_init(rdev);
- if (r) {
+ if (r)
return r;
- }
+ radeon_check_arguments(rdev);
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
@@ -663,16 +734,18 @@ void radeon_device_fini(struct radeon_device *rdev)
*/
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
- struct radeon_device *rdev = dev->dev_private;
+ struct radeon_device *rdev;
struct drm_crtc *crtc;
int r;
- if (dev == NULL || rdev == NULL) {
+ if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
}
if (state.event == PM_EVENT_PRETHAW) {
return 0;
}
+ rdev = dev->dev_private;
+
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a133b83..7e17a36 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -234,7 +234,7 @@ static const char *encoder_names[34] = {
"INTERNAL_UNIPHY2",
};
-static const char *connector_names[13] = {
+static const char *connector_names[15] = {
"Unknown",
"VGA",
"DVI-I",
@@ -248,6 +248,8 @@ static const char *connector_names[13] = {
"DisplayPort",
"HDMI-A",
"HDMI-B",
+ "TV",
+ "eDP",
};
static const char *hpd_names[7] = {
@@ -276,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
radeon_connector->ddc_bus->rec.mask_clk_reg,
radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -286,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
+ } else {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+ }
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -329,8 +340,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
ret = radeon_get_atom_connector_info_from_object_table(dev);
else
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
- } else
+ } else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
+ if (ret == false)
+ ret = radeon_get_legacy_connector_info_from_table(dev);
+ }
} else {
if (!ASIC_IS_AVIVO(rdev))
ret = radeon_get_legacy_connector_info_from_table(dev);
@@ -349,9 +363,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
int ret = 0;
- if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
- if (dig->dp_i2c_bus)
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
}
if (!radeon_connector->ddc_bus)
@@ -404,11 +420,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags)
+ uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
+ uint32_t min_post_div = pll->min_post_div;
+ uint32_t max_post_div = pll->max_post_div;
uint32_t min_fractional_feed_div = 0;
uint32_t max_fractional_feed_div = 0;
uint32_t best_vco = pll->best_vco;
@@ -424,7 +441,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
- if (flags & RADEON_PLL_USE_REF_DIV)
+ if (pll->flags & RADEON_PLL_USE_REF_DIV)
min_ref_div = max_ref_div = pll->reference_div;
else {
while (min_ref_div < max_ref_div-1) {
@@ -439,19 +456,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
}
}
- if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ if (pll->flags & RADEON_PLL_USE_POST_DIV)
+ min_post_div = max_post_div = pll->post_div;
+
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
min_fractional_feed_div = pll->min_frac_feedback_div;
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
+ for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
uint32_t ref_div;
- if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+ if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
continue;
/* legacy radeons only have a few post_divs */
- if (flags & RADEON_PLL_LEGACY) {
+ if (pll->flags & RADEON_PLL_LEGACY) {
if ((post_div == 5) ||
(post_div == 7) ||
(post_div == 9) ||
@@ -498,7 +518,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
current_freq = radeon_div(tmp, ref_div * post_div);
- if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+ if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
error = freq - current_freq;
error = error < 0 ? 0xffffffff : error;
} else
@@ -525,12 +545,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
best_freq = current_freq;
best_error = error;
best_vco_diff = vco_diff;
- } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
- ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
- ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
- ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
- ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
- ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+ } else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+ ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
best_post_div = post_div;
best_ref_div = ref_div;
best_feedback_div = feedback_div;
@@ -566,8 +586,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags)
+ uint32_t *post_div_p)
{
fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
fixed20_12 pll_out_max, pll_out_min;
@@ -661,7 +680,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
radeonfb_remove(dev, fb);
if (radeon_fb->obj) {
- radeon_gem_object_unpin(radeon_fb->obj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(radeon_fb->obj);
mutex_unlock(&dev->struct_mutex);
@@ -709,7 +727,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
-
+ if (obj == NULL) {
+ dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+ "can't create framebuffer\n", mode_cmd->handle);
+ return NULL;
+ }
return radeon_framebuffer_create(dev, mode_cmd, obj);
}
@@ -739,7 +761,7 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{ TV_STD_SECAM, "secam" },
};
-int radeon_modeset_create_props(struct radeon_device *rdev)
+static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int i, sz;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c5c45e6..8ba3de7 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -87,6 +87,7 @@ int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = 1;
+int radeon_audio = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -124,6 +125,9 @@ module_param_named(tv, radeon_tv, int, 0444);
MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
module_param_named(new_pll, radeon_new_pll, int, 0444);
+MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
+module_param_named(audio, radeon_audio, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -192,7 +196,7 @@ static struct drm_driver driver_old = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
@@ -280,7 +284,7 @@ static struct drm_driver kms_driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = radeon_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e137852..c57ad60 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -106,9 +106,10 @@
* 1.29- R500 3D cmd buffer support
* 1.30- Add support for occlusion queries
* 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 31
+#define DRIVER_MINOR 32
#define DRIVER_PATCHLEVEL 0
enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b4f23ec..3c91724 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -156,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
return ret;
}
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ default:
+ return false;
+ }
+}
void
radeon_link_encoder_connector(struct drm_device *dev)
{
@@ -202,7 +222,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->devices & radeon_connector->devices)
+ if (radeon_encoder->active_device & radeon_connector->devices)
return connector;
}
return NULL;
@@ -233,6 +253,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
if (!ASIC_IS_AVIVO(rdev)) {
adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay;
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
}
adjusted_mode->base.id = mode_id;
}
@@ -438,6 +460,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
+ int hdmi_detected = 0;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
struct drm_connector *connector;
@@ -458,6 +481,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (!radeon_connector->con_priv)
return;
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ hdmi_detected = 1;
+
dig_connector = radeon_connector->con_priv;
memset(&args, 0, sizeof(args));
@@ -487,13 +513,13 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
case 1:
args.v1.ucMisc = 0;
args.v1.ucAction = action;
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ if (hdmi_detected)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & (1 << 0))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & (1 << 1))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1);
} else {
if (dig_connector->linkb)
@@ -512,7 +538,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
}
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ if (hdmi_detected)
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0;
@@ -520,18 +546,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal = 0;
args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & (1 << 0))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & (1 << 5)) {
+ if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
- if (dig->lvds_misc & (1 << 1))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
}
- if (dig->lvds_misc & (1 << 6)) {
+ if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
- if (dig->lvds_misc & (1 << 1))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
- if (((dig->lvds_misc >> 2) & 0x3) == 2)
+ if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
@@ -552,7 +578,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
+ r600_hdmi_enable(encoder, hdmi_detected);
}
int
@@ -590,21 +616,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = radeon_connector->con_priv;
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
break;
- case CONNECTOR_DVI_A:
- case CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT;
break;
- case CONNECTOR_STV:
- case CONNECTOR_CTV:
- case CONNECTOR_DIN:
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_9PinDIN:
/* fix me */
return ATOM_ENCODER_MODE_TV;
/*return ATOM_ENCODER_MODE_CV;*/
@@ -668,31 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_block)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
- num = dig->dig_block + 1;
- } else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
- num = 1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- num = 2;
- break;
- }
- }
+ if (dig->dig_encoder)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ num = dig->dig_encoder + 1;
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
@@ -814,7 +822,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_block)
+ if (dig->dig_encoder == 1)
args.v2.acConfig.ucEncoderSel = 1;
if (dig_connector->linkb)
args.v2.acConfig.ucLinkSel = 1;
@@ -841,17 +849,16 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v2.acConfig.fCoherentMode = 1;
}
} else {
+
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+ if (dig->dig_encoder)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
if (rdev->flags & RADEON_IS_IGP) {
if (radeon_encoder->pixel_clock > 165000) {
if (dig_connector->igp_lane_info & 0x3)
@@ -870,10 +877,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
}
break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- break;
}
if (radeon_encoder->pixel_clock > 165000)
@@ -893,7 +896,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
}
static void
@@ -1039,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
union crtc_sourc_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
+ struct radeon_encoder_atom_dig *dig;
memset(&args, 0, sizeof(args));
@@ -1102,40 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (ASIC_IS_DCE32(rdev)) {
- if (radeon_crtc->crtc_id)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- } else {
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
-
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return;
- radeon_connector = to_radeon_connector(connector);
- if (!radeon_connector->con_priv)
- return;
- dig_connector = radeon_connector->con_priv;
-
- /* XXX doesn't really matter which dig encoder we pick as long as it's
- * not already in use
- */
- if (dig_connector->linkb)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- }
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ dig = radeon_encoder->enc_priv;
+ if (dig->dig_encoder)
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* Only dig2 encoder can drive LVTMA */
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
@@ -1162,7 +1141,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
}
static void
@@ -1196,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
}
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *test_encoder;
+ struct radeon_encoder_atom_dig *dig;
+ uint32_t dig_enc_in_use = 0;
+ /* on DCE32 an encoder can drive any block, so just use the crtc id */
+ if (ASIC_IS_DCE32(rdev)) {
+ return radeon_crtc->crtc_id;
+ }
+
+ /* on DCE3 - LVTMA can only be driven by DIGB */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_test_encoder;
+
+ if (encoder == test_encoder)
+ continue;
+
+ if (!radeon_encoder_is_digital(test_encoder))
+ continue;
+
+ radeon_test_encoder = to_radeon_encoder(test_encoder);
+ dig = radeon_test_encoder->enc_priv;
+
+ if (dig->dig_encoder >= 0)
+ dig_enc_in_use |= (1 << dig->dig_encoder);
+ }
+
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+ if (dig_enc_in_use & 0x2)
+ DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+ return 1;
+ }
+ if (!(dig_enc_in_use & 1))
+ return 0;
+ return 1;
+}
+
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
@@ -1208,12 +1227,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
- if (radeon_encoder->enc_priv) {
- struct radeon_encoder_atom_dig *dig;
-
- dig = radeon_encoder->enc_priv;
- dig->dig_block = radeon_crtc->crtc_id;
- }
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (dig)
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
}
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -1265,6 +1281,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
break;
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+ r600_hdmi_setmode(encoder, adjusted_mode);
}
static bool
@@ -1371,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (radeon_encoder_is_digital(encoder)) {
+ dig = radeon_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
radeon_encoder->active_device = 0;
}
@@ -1428,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
/* coherent mode by default */
dig->coherent_mode = true;
+ dig->dig_encoder = -1;
return dig;
}
@@ -1510,4 +1535,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
}
+
+ r600_hdmi_init(encoder);
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d..d71e346 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
if (ret)
goto out_unref;
- memset_io(fbptr, 0xff, aligned_size);
+ memset_io(fbptr, 0x0, aligned_size);
strcpy(info->fix.id, "radeondrmfb");
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index cb4cd97..8495d4e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
bool radeon_fence_signaled(struct radeon_fence *fence)
{
- struct radeon_device *rdev = fence->rdev;
unsigned long irq_flags;
bool signaled = false;
- if (rdev->gpu_lockup) {
+ if (!fence)
return true;
- }
- if (fence == NULL) {
+
+ if (fence->rdev->gpu_lockup)
return true;
- }
+
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
signaled = fence->signaled;
/* if we are shuting down report all fence as signaled */
@@ -324,7 +323,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
if (r) {
- DRM_ERROR("Fence failed to get a scratch register.");
+ dev_err(rdev->dev, "fence failed to get scratch register\n");
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return r;
}
@@ -335,9 +334,10 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
+ rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for fence !\n");
+ dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
return 0;
}
@@ -346,11 +346,13 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
{
unsigned long irq_flags;
+ if (!rdev->fence_drv.initialized)
+ return;
wake_up_all(&rdev->fence_drv.queue);
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- DRM_INFO("radeon: fence finalized\n");
+ rdev->fence_drv.initialized = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 2944486..db8e9a3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -66,8 +66,9 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
}
r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
if (r) {
- DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
- size, initial_domain, alignment);
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ size, initial_domain, alignment, r);
mutex_lock(&rdev->ddev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
@@ -130,7 +131,6 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
printk(KERN_ERR "Failed to wait for object !\n");
return r;
}
- radeon_hdp_flush(robj->rdev);
}
return 0;
}
@@ -308,10 +308,12 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, NULL, false);
+ /* call hw specific callback functions, if any */
+ if (robj->rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
- radeon_hdp_flush(robj->rdev);
return r;
}
@@ -350,9 +352,10 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
rbo = gobj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
- return r;
+ goto out;
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
+out:
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index a1bf11d..48b7cea 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -92,8 +92,7 @@ static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
&init->gart_textures_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
}
typedef struct drm_radeon_clear32 {
@@ -125,8 +124,7 @@ static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
&clr->depth_boxes))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
}
typedef struct drm_radeon_stipple32 {
@@ -149,8 +147,7 @@ static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
&request->mask))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
}
typedef struct drm_radeon_tex_image32 {
@@ -204,8 +201,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
&image->data))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
}
typedef struct drm_radeon_vertex2_32 {
@@ -238,8 +234,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
&request->prim))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
}
typedef struct drm_radeon_cmd_buffer32 {
@@ -268,8 +263,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
&request->boxes))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
}
typedef struct drm_radeon_getparam32 {
@@ -293,8 +287,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
&request->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
}
typedef struct drm_radeon_mem_alloc32 {
@@ -322,8 +315,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
&request->region_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
}
typedef struct drm_radeon_irq_emit32 {
@@ -345,8 +337,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
&request->irq_seq))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
}
/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
@@ -372,8 +363,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
&request->value))
return -EFAULT;
- return drm_ioctl(file->f_dentry->d_inode, file,
- DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
}
#else
#define compat_radeon_cp_setparam NULL
@@ -413,12 +403,10 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
@@ -431,9 +419,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- lock_kernel(); /* XXX for now */
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index b79ecc4..2f349a3 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
drm_radeon_irq_emit_t *emit = data;
int result;
- if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
- return -EINVAL;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return -EINVAL;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
result = radeon_emit_irq(dev);
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9223296..3cfd60f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -97,6 +97,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
rdev->irq.sw_int = false;
for (i = 0; i < 2; i++) {
rdev->irq.crtc_vblank_int[i] = false;
+ rdev->irq.hpd[i] = false;
}
radeon_irq_set(rdev);
}
@@ -128,17 +129,22 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
DRM_INFO("radeon: using MSI.\n");
}
}
- drm_irq_install(rdev->ddev);
rdev->irq.installed = true;
+ r = drm_irq_install(rdev->ddev);
+ if (r) {
+ rdev->irq.installed = false;
+ return r;
+ }
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
+ drm_vblank_cleanup(rdev->ddev);
if (rdev->irq.installed) {
- rdev->irq.installed = false;
drm_irq_uninstall(rdev->ddev);
+ rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index b82ede9..b6d8081 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -43,8 +43,7 @@ static void radeon_overscan_setup(struct drm_crtc *crtc,
}
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -340,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-/* properly set crtc bpp when using atombios */
-void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int format;
- uint32_t crtc_gen_cntl;
- uint32_t disp_merge_cntl;
- uint32_t crtc_pitch;
-
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- format = 2;
- break;
- case 15: /* 555 */
- format = 3;
- break;
- case 16: /* 565 */
- format = 4;
- break;
- case 24: /* RGB */
- format = 5;
- break;
- case 32: /* xRGB */
- format = 6;
- break;
- default:
- return;
- }
-
- crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
- ((crtc->fb->bits_per_pixel * 8) - 1)) /
- (crtc->fb->bits_per_pixel * 8));
- crtc_pitch |= crtc_pitch << 16;
-
- WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
-
- switch (radeon_crtc->crtc_id) {
- case 0:
- disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
- disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
- WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
-
- crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
- crtc_gen_cntl |= (format << 8);
- crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
- WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
- break;
- case 1:
- disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
- disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
- WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
-
- crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
- crtc_gen_cntl |= (format << 8);
- WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
- WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
- WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
- break;
- }
-}
-
int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -756,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
uint32_t post_divider = 0;
uint32_t freq = 0;
uint8_t pll_gain;
- int pll_flags = RADEON_PLL_LEGACY;
bool use_bios_divs = false;
/* PLL registers */
uint32_t pll_ref_div = 0;
@@ -790,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
else
pll = &rdev->clock.p1pll;
+ pll->flags = RADEON_PLL_LEGACY;
+
if (mode->clock > 200000) /* range limits??? */
- pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
- pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
@@ -805,7 +742,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
- pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
if (!rdev->is_atom_bios) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -820,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
}
}
- pll_flags |= RADEON_PLL_USE_REF_DIV;
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
}
}
}
@@ -830,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (!use_bios_divs) {
radeon_compute_pll(pll, mode->clock,
&freq, &feedback_div, &frac_fb_div,
- &reference_div, &post_divider,
- pll_flags);
+ &reference_div, &post_divider);
for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
if (post_div->divider == post_divider)
@@ -1059,7 +995,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
radeon_set_pll(crtc, adjusted_mode);
radeon_overscan_setup(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0) {
- radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
+ radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
} else {
if (radeon_crtc->rmx_type != RMX_OFF) {
/* FIXME: only first crtc has rmx what should we
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index df00515..38e45e2 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -46,6 +46,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
int panel_pwr_delay = 2000;
+ bool is_mac = false;
DRM_DEBUG("\n");
if (radeon_encoder->enc_priv) {
@@ -58,6 +59,15 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
}
}
+ /* macs (and possibly some x86 oem systems?) wire up LVDS strangely
+ * Taken from radeonfb.
+ */
+ if ((rdev->mode_info.connector_table == CT_IBOOK) ||
+ (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
+ (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
+ (rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
+ is_mac = true;
+
switch (mode) {
case DRM_MODE_DPMS_ON:
disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
@@ -74,6 +84,8 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
+ if (is_mac)
+ lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
@@ -85,7 +97,14 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
- lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+ if (is_mac) {
+ lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
+ } else {
+ WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+ }
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
@@ -207,6 +226,8 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
*adjusted_mode = *native_mode;
adjusted_mode->hdisplay = mode->hdisplay;
adjusted_mode->vdisplay = mode->vdisplay;
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
adjusted_mode->base.id = mode_id;
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 3a12bb0..417684d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -77,7 +77,7 @@ struct radeon_tv_mode_constants {
unsigned pix_to_tv;
};
-static const uint16_t hor_timing_NTSC[] = {
+static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
0x0007,
0x003f,
0x0263,
@@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = {
0
};
-static const uint16_t vert_timing_NTSC[] = {
+static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
0x2001,
0x200d,
0x1006,
@@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = {
0
};
-static const uint16_t hor_timing_PAL[] = {
+static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
0x0007,
0x0058,
0x027c,
@@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = {
0
};
-static const uint16_t vert_timing_PAL[] = {
+static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
0x2001,
0x200c,
0x1005,
@@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
}
flicker_removal = (tmp + 500) / 1000;
- if (flicker_removal < 3)
- flicker_removal = 3;
- for (i = 0; i < 6; ++i) {
+ if (flicker_removal < 2)
+ flicker_removal = 2;
+ for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
if (flicker_removal == SLOPE_limit[i])
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 44d4b65..e81b2ae 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,32 +46,6 @@ struct radeon_device;
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
-enum radeon_connector_type {
- CONNECTOR_NONE,
- CONNECTOR_VGA,
- CONNECTOR_DVI_I,
- CONNECTOR_DVI_D,
- CONNECTOR_DVI_A,
- CONNECTOR_STV,
- CONNECTOR_CTV,
- CONNECTOR_LVDS,
- CONNECTOR_DIGITAL,
- CONNECTOR_SCART,
- CONNECTOR_HDMI_TYPE_A,
- CONNECTOR_HDMI_TYPE_B,
- CONNECTOR_0XC,
- CONNECTOR_0XD,
- CONNECTOR_DIN,
- CONNECTOR_DISPLAY_PORT,
- CONNECTOR_UNSUPPORTED
-};
-
-enum radeon_dvi_type {
- DVI_AUTO,
- DVI_DIGITAL,
- DVI_ANALOG
-};
-
enum radeon_rmx_type {
RMX_OFF,
RMX_FULL,
@@ -88,6 +62,7 @@ enum radeon_tv_std {
TV_STD_SCART_PAL,
TV_STD_SECAM,
TV_STD_PAL_CN,
+ TV_STD_PAL_N,
};
/* radeon gpio-based i2c
@@ -150,16 +125,24 @@ struct radeon_tmds_pll {
#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define RADEON_PLL_USE_POST_DIV (1 << 12)
struct radeon_pll {
- uint16_t reference_freq;
- uint16_t reference_div;
+ /* reference frequency */
+ uint32_t reference_freq;
+
+ /* fixed dividers */
+ uint32_t reference_div;
+ uint32_t post_div;
+
+ /* pll in/out limits */
uint32_t pll_in_min;
uint32_t pll_in_max;
uint32_t pll_out_min;
uint32_t pll_out_max;
- uint16_t xclk;
+ uint32_t best_vco;
+ /* divider limits */
uint32_t min_ref_div;
uint32_t max_ref_div;
uint32_t min_post_div;
@@ -168,7 +151,12 @@ struct radeon_pll {
uint32_t max_feedback_div;
uint32_t min_frac_feedback_div;
uint32_t max_frac_feedback_div;
- uint32_t best_vco;
+
+ /* flags for the current clock */
+ uint32_t flags;
+
+ /* pll id */
+ uint32_t id;
};
struct radeon_i2c_chan {
@@ -311,7 +299,7 @@ struct radeon_atom_ss {
struct radeon_encoder_atom_dig {
/* atom dig */
bool coherent_mode;
- int dig_block;
+ int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
@@ -334,6 +322,9 @@ struct radeon_encoder {
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
+ int hdmi_offset;
+ int hdmi_audio_workaround;
+ int hdmi_buffer_status;
};
struct radeon_connector_atom_dig {
@@ -392,6 +383,11 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
+extern enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev);
+extern enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev);
+
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
@@ -434,8 +430,7 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags);
+ uint32_t *post_div_p);
extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint64_t freq,
@@ -443,8 +438,7 @@ extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
uint32_t *fb_div_p,
uint32_t *frac_fb_div_p,
uint32_t *ref_div_p,
- uint32_t *post_div_p,
- int flags);
+ uint32_t *post_div_p);
extern void radeon_setup_encoder_clones(struct drm_device *dev);
@@ -470,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
-extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 544e18f..f1da370 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -56,6 +56,13 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
+bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &radeon_ttm_bo_destroy)
+ return true;
+ return false;
+}
+
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
u32 c = 0;
@@ -71,6 +78,8 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
if (domain & RADEON_GEM_DOMAIN_CPU)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
}
@@ -211,9 +220,11 @@ int radeon_bo_unpin(struct radeon_bo *bo)
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
- if (rdev->flags & RADEON_IS_IGP) {
- /* Useless to evict on IGP chips */
- return 0;
+ /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
+ if (0 && (rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->mc.igp_sideport_enabled == false)
+ /* Useless to evict on IGP chips */
+ return 0;
}
return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
@@ -295,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
}
}
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
- struct radeon_fence *old_fence = NULL;
int r;
r = radeon_bo_list_reserve(head);
@@ -323,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
}
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
- if (fence) {
- old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
- bo->tbo.sync_obj = radeon_fence_ref(fence);
- bo->tbo.sync_obj_arg = NULL;
- }
- if (old_fence) {
- radeon_fence_unref(&old_fence);
- }
}
return 0;
}
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
{
struct radeon_bo_list *lobj;
- struct radeon_fence *old_fence;
-
- if (fence)
- list_for_each_entry(lobj, head, list) {
- old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
- if (old_fence == fence) {
- lobj->bo->tbo.sync_obj = NULL;
- radeon_fence_unref(&old_fence);
- }
+ struct radeon_bo *bo;
+ struct radeon_fence *old_fence = NULL;
+
+ list_for_each_entry(lobj, head, list) {
+ bo = lobj->bo;
+ spin_lock(&bo->tbo.lock);
+ old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+ bo->tbo.sync_obj = radeon_fence_ref(fence);
+ bo->tbo.sync_obj_arg = NULL;
+ spin_unlock(&bo->tbo.lock);
+ if (old_fence) {
+ radeon_fence_unref(&old_fence);
}
- radeon_bo_list_unreserve(head);
+ }
}
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
@@ -481,14 +486,20 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem)
{
- struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_bo *rbo;
+ if (!radeon_ttm_bo_is_radeon_bo(bo))
+ return;
+ rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
}
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_bo *rbo;
+ if (!radeon_ttm_bo_is_radeon_bo(bo))
+ return;
+ rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f6b69c2..7ab43de 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -59,19 +59,17 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
*
* Returns:
* -EBUSY: buffer is busy and @no_wait is true
- * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
int r;
-retry:
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
- if (r == -ERESTART)
- goto retry;
- dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
return r;
}
return 0;
@@ -125,12 +123,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
{
int r;
-retry:
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0)) {
- if (r == -ERESTART)
- goto retry;
- dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
return r;
}
spin_lock(&bo->tbo.lock);
@@ -140,8 +136,6 @@ retry:
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
spin_unlock(&bo->tbo.lock);
ttm_bo_unreserve(&bo->tbo);
- if (unlikely(r == -ERESTART))
- goto retry;
return r;
}
@@ -162,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
extern int radeon_bo_list_reserve(struct list_head *head);
extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 34b08d3..8bce64c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -44,8 +44,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
+ if (rdev->asic->get_memory_clock)
+ seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d..6579eb4 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
struct radeon_fence *fence;
struct radeon_ib *nib;
- unsigned long i;
- int r = 0;
+ int r = 0, i, c;
*ib = NULL;
r = radeon_fence_create(rdev, &fence);
if (r) {
- DRM_ERROR("failed to create fence for new IB\n");
+ dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
mutex_lock(&rdev->ib_pool.mutex);
- i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
- if (i < RADEON_IB_POOL_SIZE) {
- set_bit(i, rdev->ib_pool.alloc_bm);
- rdev->ib_pool.ibs[i].length_dw = 0;
- *ib = &rdev->ib_pool.ibs[i];
- mutex_unlock(&rdev->ib_pool.mutex);
- goto out;
+ for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+ i &= (RADEON_IB_POOL_SIZE - 1);
+ if (rdev->ib_pool.ibs[i].free) {
+ nib = &rdev->ib_pool.ibs[i];
+ break;
+ }
}
- if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
- /* we go do nothings here */
+ if (nib == NULL) {
+ /* This should never happen, it means we allocated all
+ * IB and haven't scheduled one yet, return EBUSY to
+ * userspace hoping that on ioctl recall we get better
+ * luck
+ */
+ dev_err(rdev->dev, "no free indirect buffer !\n");
mutex_unlock(&rdev->ib_pool.mutex);
- DRM_ERROR("all IB allocated none scheduled.\n");
- r = -EINVAL;
- goto out;
+ radeon_fence_unref(&fence);
+ return -EBUSY;
}
- /* get the first ib on the scheduled list */
- nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
- struct radeon_ib, list);
- if (nib->fence == NULL) {
- /* we go do nothings here */
+ rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ nib->free = false;
+ if (nib->fence) {
mutex_unlock(&rdev->ib_pool.mutex);
- DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
- r = -EINVAL;
- goto out;
- }
- mutex_unlock(&rdev->ib_pool.mutex);
-
- r = radeon_fence_wait(nib->fence, false);
- if (r) {
- DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
- (unsigned long)nib->gpu_addr, nib->length_dw);
- DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
- goto out;
+ r = radeon_fence_wait(nib->fence, false);
+ if (r) {
+ dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+ nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+ mutex_lock(&rdev->ib_pool.mutex);
+ nib->free = true;
+ mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_fence_unref(&fence);
+ return r;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
}
radeon_fence_unref(&nib->fence);
-
+ nib->fence = fence;
nib->length_dw = 0;
-
- /* scheduled list is accessed here */
- mutex_lock(&rdev->ib_pool.mutex);
- list_del(&nib->list);
- INIT_LIST_HEAD(&nib->list);
mutex_unlock(&rdev->ib_pool.mutex);
-
*ib = nib;
-out:
- if (r) {
- radeon_fence_unref(&fence);
- } else {
- (*ib)->fence = fence;
- }
- return r;
+ return 0;
}
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
- mutex_lock(&rdev->ib_pool.mutex);
- if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
- /* IB is scheduled & not signaled don't do anythings */
- mutex_unlock(&rdev->ib_pool.mutex);
- return;
- }
- list_del(&tmp->list);
- INIT_LIST_HEAD(&tmp->list);
- if (tmp->fence)
+ if (!tmp->fence->emited)
radeon_fence_unref(&tmp->fence);
-
- tmp->length_dw = 0;
- clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+ mutex_lock(&rdev->ib_pool.mutex);
+ tmp->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
}
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !rdev->cp.ready) {
/* TODO: Nothings in the ib we should report. */
- DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+ DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+ /* once scheduled IB is considered free and protected by the fence */
+ ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev);
return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->ib_pool.robj)
return 0;
/* Allocate 1M object buffer */
- INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
rdev->ib_pool.ibs[i].ptr = ptr + offset;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
- INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+ rdev->ib_pool.ibs[i].free = true;
}
- bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
return;
}
mutex_lock(&rdev->ib_pool.mutex);
- bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
if (ib == NULL) {
return 0;
}
- seq_printf(m, "IB %04lu\n", ib->idx);
+ seq_printf(m, "IB %04u\n", ib->idx);
seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 391c973..9f5e2f9 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffer) / test size
*/
- n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
- rdev->cp.ring_size) / size;
+ n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
+ rdev->cp.ring_size)) / size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
if (!gtt_obj) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5a19d52..58b5adf 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -200,10 +200,25 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void radeon_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_bo *rbo;
+ static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+ if (!radeon_ttm_bo_is_radeon_bo(bo)) {
+ placement->fpfn = 0;
+ placement->lpfn = 0;
+ placement->placement = &placements;
+ placement->busy_placement = &placements;
+ placement->num_placement = 1;
+ placement->num_busy_placement = 1;
+ return;
+ }
+ rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ if (rbo->rdev->cp.ready == false)
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+ else
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
break;
case TTM_PL_TT:
default:
@@ -482,6 +497,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
+ rdev->mman.initialized = true;
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
rdev->mc.real_vram_size >> PAGE_SHIFT);
if (r) {
@@ -529,6 +545,8 @@ void radeon_ttm_fini(struct radeon_device *rdev)
{
int r;
+ if (!rdev->mman.initialized)
+ return;
if (rdev->stollen_vga_memory) {
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
if (r == 0) {
@@ -542,6 +560,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
ttm_bo_device_release(&rdev->mman.bdev);
radeon_gart_fini(rdev);
radeon_ttm_global_fini(rdev);
+ rdev->mman.initialized = false;
DRM_INFO("radeon: ttm finalized\n");
}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
index 6021c88..c29ac43 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r200
+++ b/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -91,6 +91,8 @@ r200 0x3294
0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
0x22c4 SE_TCL_POINT_SPRITE_CNTL
+0x22d0 SE_PVS_CNTL
+0x22d4 SE_PVS_CONST_CNTL
0x2648 RE_POINTSIZE
0x26c0 RE_TOP_LEFT
0x26c4 RE_MISC
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
new file mode 100644
index 0000000..989f7a0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -0,0 +1,795 @@
+r420 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A4 SC_HYPERZ_EN
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E04 RB3D_BLENDCNTL_R3
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E0C RB3D_COLOR_CHANNEL_MASK
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4E80 RB3D_AARESOLVE_OFFSET
+0x4E84 RB3D_AARESOLVE_PITCH
+0x4E88 RB3D_AARESOLVE_CTL
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F1C ZB_BW_CNTL
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F30 ZB_ZMASK_OFFSET
+0x4F34 ZB_ZMASK_PITCH
+0x4F38 ZB_ZMASK_WRINDEX
+0x4F3C ZB_ZMASK_DWORD
+0x4F40 ZB_ZMASK_RDINDEX
+0x4F44 ZB_HIZ_OFFSET
+0x4F48 ZB_HIZ_WRINDEX
+0x4F4C ZB_HIZ_DWORD
+0x4F50 ZB_HIZ_RDINDEX
+0x4F54 ZB_HIZ_PITCH
+0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 8e3c0b8..6801b86 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -153,7 +153,7 @@ rs600 0x6d40
0x42A4 SU_POLY_OFFSET_FRONT_SCALE
0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
0x42AC SU_POLY_OFFSET_BACK_SCALE
-0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
0x42B4 SU_POLY_OFFSET_ENABLE
0x42B8 SU_CULL_MODE
0x42C0 SU_DEPTH_SCALE
@@ -291,6 +291,8 @@ rs600 0x6d40
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
0x46C0 US_ALU_RGB_ADDR_0
0x46C4 US_ALU_RGB_ADDR_1
0x46C8 US_ALU_RGB_ADDR_2
@@ -547,6 +549,70 @@ rs600 0x6d40
0x4AB4 US_ALU_ALPHA_INST_61
0x4AB8 US_ALU_ALPHA_INST_62
0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 0102a0d..38abf63 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -161,7 +161,12 @@ rv515 0x6d40
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
+0x4028 GB_Z_PEQ_CONFIG
0x4100 TX_INVALTAGS
+0x4114 SU_TEX_WRAP_PS3
+0x4118 PS3_ENABLE
+0x411c PS3_VTX_FMT
+0x4120 PS3_TEX_SOURCE
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
0x4208 GA_POINT_S1
@@ -171,6 +176,7 @@ rv515 0x6d40
0x4230 GA_POINT_MINMAX
0x4234 GA_LINE_CNTL
0x4238 GA_LINE_STIPPLE_CONFIG
+0x4258 GA_COLOR_CONTROL_PS3
0x4260 GA_LINE_STIPPLE_VALUE
0x4264 GA_LINE_S0
0x4268 GA_LINE_S1
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c1fcddd..287fceb 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return 0;
}
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(0x0150);
+ if (tmp & (1 << 2)) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs400 ? */
r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
- if (r300_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
+ if (rs400_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "rs400: Failed to wait MC idle while "
+ "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
}
}
@@ -356,6 +372,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -369,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
r100_mc_stop(rdev, &save);
/* Wait for mc idle */
- if (r300_mc_wait_for_idle(rdev))
- dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+ if (rs400_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
WREG32(R_000148_MC_FB_LOCATION,
S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -395,6 +412,7 @@ static int rs400_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -446,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
- rs400_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -497,6 +514,8 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
rs400_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -523,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs400_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4f8ea42..c381856 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -56,6 +56,7 @@ int rs600_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xffffffffUL;
r = radeon_mc_setup(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -134,7 +135,8 @@ void rs600_hpd_init(struct radeon_device *rdev)
break;
}
}
- rs600_irq_set(rdev);
+ if (rdev->irq.installed)
+ rs600_irq_set(rdev);
}
void rs600_hpd_fini(struct radeon_device *rdev)
@@ -315,6 +317,11 @@ int rs600_irq_set(struct radeon_device *rdev)
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ return -EINVAL;
+ }
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
@@ -396,7 +403,7 @@ int rs600_irq_process(struct radeon_device *rdev)
}
while (status || r500_disp_int) {
/* SW interrupt */
- if (G_000040_SW_INT_EN(status))
+ if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
@@ -553,6 +560,7 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -602,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
- rs600_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -681,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs600_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1e22f52..06e2771 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -172,6 +172,7 @@ static int rs690_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -625,6 +626,7 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -674,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
- rs690_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -754,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs690_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 59632a5..0e1e6b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -479,6 +479,7 @@ static int rv515_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -536,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -614,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index fbb0357..0302167 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
gb_tiling_config |= BANK_SWAPS(1);
- backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
- rdev->config.rv770.max_backends,
- (0xff << rdev->config.rv770.max_backends) & 0xff);
+ if (rdev->family == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+ rdev->config.rv770.max_backends,
+ (0xff << rdev->config.rv770.max_backends) & 0xff);
gb_tiling_config |= BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config =
@@ -779,7 +782,6 @@ int rv770_mc_init(struct radeon_device *rdev)
fixed20_12 a;
u32 tmp;
int chansize, numchan;
- int r;
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
@@ -822,9 +824,6 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r)
- return r;
/* gtt_size is setup by radeon_agp_init */
rdev->mc.gtt_location = rdev->mc.agp_base;
tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -891,18 +890,25 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
rv770_gpu_init(rdev);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ r = r600_blit_init(rdev);
if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
- return r;
+ r600_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+ /* pin copy shader into vram */
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("failed to pin blit object %d\n", r);
+ return r;
+ }
}
-
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -964,13 +970,16 @@ int rv770_suspend(struct radeon_device *rdev)
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
+ r600_irq_suspend(rdev);
r600_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (likely(r == 0)) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (rdev->r600_blit.shader_obj) {
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
}
return 0;
}
@@ -1029,6 +1038,11 @@ int rv770_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
r = rv770_mc_init(rdev);
if (r)
return r;
@@ -1051,31 +1065,28 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed blitter (%d).\n", r);
- return r;
- }
-
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
- rv770_suspend(rdev);
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r600_cp_fini(rdev);
r600_wb_fini(rdev);
- radeon_ring_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
+ } else {
+ r = r600_ib_test(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
}
}
return 0;
@@ -1083,19 +1094,16 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- rv770_suspend(rdev);
-
r600_blit_fini(rdev);
+ r600_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
rv770_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
- if (rdev->flags & RADEON_IS_AGP)
- radeon_agp_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index eee52aa..021de44 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -50,7 +50,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index e725cc0..4fd1f06 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -80,7 +80,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 012ff2e..ec5a43e 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1fbb2ee..c7320ce 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -71,34 +71,34 @@ static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
return -EINVAL;
}
-static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
- struct ttm_mem_type_manager *man)
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
- printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
+ printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
man->available_caching);
printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
man->default_caching);
- spin_lock(&glob->lru_lock);
- drm_mm_debug_table(&man->manager, TTM_PFX);
- spin_unlock(&glob->lru_lock);
+ if (mem_type != TTM_PL_SYSTEM) {
+ spin_lock(&bdev->glob->lru_lock);
+ drm_mm_debug_table(&man->manager, TTM_PFX);
+ spin_unlock(&bdev->glob->lru_lock);
+ }
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
- struct ttm_mem_type_manager *man;
int i, ret, mem_type;
- printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
+ printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
@@ -106,10 +106,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
&mem_type);
if (ret)
return;
- man = &bdev->man[mem_type];
printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
i, placement->placement[i], mem_type);
- ttm_mem_type_manager_debug(glob, man);
+ ttm_mem_type_debug(bo->bdev, mem_type);
}
}
@@ -427,7 +426,8 @@ moved:
bdev->man[bo->mem.mem_type].gpu_offset;
bo->cur_placement = bo->mem.placement;
spin_unlock(&bo->lock);
- }
+ } else
+ bo->offset = 0;
return 0;
@@ -465,6 +465,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
spin_unlock(&bo->lock);
spin_lock(&glob->lru_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+
ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
BUG_ON(ret);
if (bo->ttm)
@@ -472,20 +474,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
if (!list_empty(&bo->ddestroy)) {
list_del_init(&bo->ddestroy);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ++put_count;
}
if (bo->mem.mm_node) {
bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
- put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
atomic_set(&bo->reserved, 0);
while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
return 0;
}
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry, *nentry;
- struct list_head *list, *next;
- int ret;
+ struct ttm_buffer_object *entry = NULL;
+ int ret = 0;
spin_lock(&glob->lru_lock);
- list_for_each_safe(list, next, &bdev->ddestroy) {
- entry = list_entry(list, struct ttm_buffer_object, ddestroy);
- nentry = NULL;
+ if (list_empty(&bdev->ddestroy))
+ goto out_unlock;
- /*
- * Protect the next list entry from destruction while we
- * unlock the lru_lock.
- */
+ entry = list_first_entry(&bdev->ddestroy,
+ struct ttm_buffer_object, ddestroy);
+ kref_get(&entry->list_kref);
+
+ for (;;) {
+ struct ttm_buffer_object *nentry = NULL;
- if (next != &bdev->ddestroy) {
- nentry = list_entry(next, struct ttm_buffer_object,
- ddestroy);
+ if (entry->ddestroy.next != &bdev->ddestroy) {
+ nentry = list_first_entry(&entry->ddestroy,
+ struct ttm_buffer_object, ddestroy);
kref_get(&nentry->list_kref);
}
- kref_get(&entry->list_kref);
spin_unlock(&glob->lru_lock);
ret = ttm_bo_cleanup_refs(entry, remove_all);
kref_put(&entry->list_kref, ttm_bo_release_list);
+ entry = nentry;
+
+ if (ret || !entry)
+ goto out;
spin_lock(&glob->lru_lock);
- if (nentry) {
- bool next_onlist = !list_empty(next);
- spin_unlock(&glob->lru_lock);
- kref_put(&nentry->list_kref, ttm_bo_release_list);
- spin_lock(&glob->lru_lock);
- /*
- * Someone might have raced us and removed the
- * next entry from the list. We don't bother restarting
- * list traversal.
- */
-
- if (!next_onlist)
- break;
- }
- if (ret)
+ if (list_empty(&entry->ddestroy))
break;
}
- ret = !list_empty(&bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+out_unlock:
+ spin_unlock(&glob->lru_lock);
+out:
+ if (entry)
+ kref_put(&entry->list_kref, ttm_bo_release_list);
return ret;
}
@@ -684,19 +677,45 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo;
int ret, put_count = 0;
+retry:
spin_lock(&glob->lru_lock);
+ if (list_empty(&man->lru)) {
+ spin_unlock(&glob->lru_lock);
+ return -EBUSY;
+ }
+
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(bo);
+
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
+ if (unlikely(ret == -EBUSY)) {
+ spin_unlock(&glob->lru_lock);
+ if (likely(!no_wait))
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+
+ /**
+ * We *need* to retry after releasing the lru lock.
+ */
+
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ }
+
+ put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- if (unlikely(ret != 0))
- return ret;
+
+ BUG_ON(ret != 0);
+
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
ret = ttm_bo_evict(bo, interruptible, no_wait);
ttm_bo_unreserve(bo);
+
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -849,7 +868,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
int i, ret;
mem->mm_node = NULL;
- for (i = 0; i <= placement->num_placement; ++i) {
+ for (i = 0; i < placement->num_placement; ++i) {
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
@@ -900,8 +919,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!type_found)
return -EINVAL;
- for (i = 0; i <= placement->num_busy_placement; ++i) {
- ret = ttm_mem_type_from_flags(placement->placement[i],
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->busy_placement[i],
&mem_type);
if (ret)
return ret;
@@ -911,7 +930,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!ttm_bo_mt_compatible(man,
bo->type == ttm_bo_type_user,
mem_type,
- placement->placement[i],
+ placement->busy_placement[i],
&cur_flags))
continue;
@@ -921,9 +940,17 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
- ttm_flag_masked(&cur_flags, placement->placement[i],
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
~TTM_PL_MASK_MEMTYPE);
+
+ if (mem_type == TTM_PL_SYSTEM) {
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+ mem->mm_node = NULL;
+ return 0;
+ }
+
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
interruptible, no_wait);
if (ret == 0 && mem->mm_node) {
@@ -993,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
int i;
+ struct drm_mm_node *node = mem->mm_node;
+
+ if (node && placement->lpfn != 0 &&
+ (node->start < placement->fpfn ||
+ node->start + node->size > placement->lpfn))
+ return -1;
for (i = 0; i < placement->num_placement; i++) {
if ((placement->placement[i] & mem->placement &
@@ -1115,6 +1148,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->glob = bdev->glob;
bo->type = type;
bo->num_pages = num_pages;
+ bo->mem.size = num_pages << PAGE_SHIFT;
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
@@ -1817,6 +1851,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
* anyone tries to access a ttm page.
*/
+ if (bo->bdev->driver->swap_notify)
+ bo->bdev->driver->swap_notify(bo);
+
ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:
@@ -1837,3 +1874,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
;
}
+EXPORT_SYMBOL(ttm_bo_swapout_all);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0..5ca37a5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->placement;
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
TTM_PL_MASK_MEM);
old_mem->mem_type = TTM_PL_SYSTEM;
- save_flags = old_mem->placement;
}
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
void *old_iomap;
void *new_iomap;
int ret;
- uint32_t save_flags = old_mem->placement;
unsigned long i;
unsigned long page;
unsigned long add = 0;
@@ -270,7 +267,6 @@ out2:
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
ttm_tt_unbind(ttm);
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
- uint32_t save_flags = old_mem->placement;
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 609a85a..668dbe8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -320,7 +320,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
return -EFAULT;
driver = bo->bdev->driver;
- if (unlikely(driver->verify_access)) {
+ if (unlikely(!driver->verify_access)) {
ret = -EPERM;
goto out_unref;
}
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebc..3d172ef 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
+EXPORT_SYMBOL(ttm_suspend_unlock);
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
{
wait_event(lock->queue, __ttm_suspend_lock(lock));
}
+EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1099aba..75e9d6f 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -109,8 +109,8 @@ struct ttm_ref_object {
struct drm_hash_item hash;
struct list_head head;
struct kref kref;
- struct ttm_base_object *obj;
enum ttm_ref_type ref_type;
+ struct ttm_base_object *obj;
struct ttm_object_file *tfile;
};
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9c2b1cc..3d47a2c 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,23 +196,34 @@ EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_state)
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
+ int ret = 0;
+
if (PageHighMem(p))
return 0;
- switch (c_state) {
- case tt_cached:
- return set_pages_wb(p, 1);
- case tt_wc:
- return set_memory_wc((unsigned long) page_address(p), 1);
- default:
- return set_pages_uc(p, 1);
+ if (c_old != tt_cached) {
+ /* p isn't in the default caching state, set it to
+ * writeback first to free its current memtype. */
+
+ ret = set_pages_wb(p, 1);
+ if (ret)
+ return ret;
}
+
+ if (c_new == tt_wc)
+ ret = set_memory_wc((unsigned long) page_address(p), 1);
+ else if (c_new == tt_uncached)
+ ret = set_pages_uc(p, 1);
+
+ return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_state)
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
return 0;
}
@@ -245,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
if (likely(cur_page != NULL)) {
- ret = ttm_tt_set_page_caching(cur_page, c_state);
+ ret = ttm_tt_set_page_caching(cur_page,
+ ttm->caching_state,
+ c_state);
if (unlikely(ret != 0))
goto out_err;
}
@@ -259,7 +272,7 @@ out_err:
for (j = 0; j < i; ++j) {
cur_page = ttm->pages[j];
if (likely(cur_page != NULL)) {
- (void)ttm_tt_set_page_caching(cur_page,
+ (void)ttm_tt_set_page_caching(cur_page, c_state,
ttm->caching_state);
}
}
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index bc2f518..7a1b210 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -58,7 +58,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
new file mode 100644
index 0000000..f20b8bc
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,13 @@
+config DRM_VMWGFX
+ tristate "DRM driver for VMware Virtual GPU"
+ depends on DRM && PCI
+ select FB_DEFERRED_IO
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select DRM_TTM
+ help
+ KMS-enabled DRM driver for SVGA2 virtual hardware.
+
+ If unsure, say N. The compiled module will be
+ called vmwgfx.ko
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
new file mode 100644
index 0000000..1a3cb68
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,9 @@
+
+ccflags-y := -Iinclude/drm
+
+vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+ vmwgfx_overlay.o
+
+obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
new file mode 100644
index 0000000..77cb453
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -0,0 +1,1793 @@
+/**********************************************************
+ * Copyright 1998-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ * SVGA 3D hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#include "svga_reg.h"
+
+
+/*
+ * 3D Hardware Version
+ *
+ * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ * register. It is set by the host and read by the guest. This lets
+ * us write new guest drivers that are backwards-compatible with old
+ * SVGA hardware revisions. It does not let us support old guest
+ * drivers. Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
+
+typedef enum {
+ SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
+ SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
+ SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
+ SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
+ SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+ SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
+ SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1,
+} SVGA3dHardwareVersion;
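A minimal usage sketch for the version macros above (assuming a hypothetical read_fifo_3d_hwversion() helper that returns the SVGA_FIFO_3D_HWVERSION register value):

static int svga3d_host_supported(void)
{
	uint32 hwversion = read_fifo_3d_hwversion();	/* hypothetical helper */

	if (hwversion == 0)
		return 0;	/* host offers no 3D support */
	/* Require at least the major revision this driver targets. */
	return SVGA3D_MAJOR_HWVERSION(hwversion) >=
	       SVGA3D_MAJOR_HWVERSION(SVGA3D_HWVERSION_CURRENT);
}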
+
+/*
+ * Generic Types
+ */
+
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+#define SVGA3D_NUM_CLIPPLANES 6
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
+
+
+/*
+ * Surface formats.
+ *
+ * If you modify this list, be sure to keep GLUtil.c in sync. It
+ * includes the internal format definition of each surface in
+ * GLUtil_ConvertSurfaceFormat, and it contains a table of
+ * human-readable names in GLUtil_GetFormatName.
+ */
+
+typedef enum SVGA3dSurfaceFormat {
+ SVGA3D_FORMAT_INVALID = 0,
+
+ SVGA3D_X8R8G8B8 = 1,
+ SVGA3D_A8R8G8B8 = 2,
+
+ SVGA3D_R5G6B5 = 3,
+ SVGA3D_X1R5G5B5 = 4,
+ SVGA3D_A1R5G5B5 = 5,
+ SVGA3D_A4R4G4B4 = 6,
+
+ SVGA3D_Z_D32 = 7,
+ SVGA3D_Z_D16 = 8,
+ SVGA3D_Z_D24S8 = 9,
+ SVGA3D_Z_D15S1 = 10,
+
+ SVGA3D_LUMINANCE8 = 11,
+ SVGA3D_LUMINANCE4_ALPHA4 = 12,
+ SVGA3D_LUMINANCE16 = 13,
+ SVGA3D_LUMINANCE8_ALPHA8 = 14,
+
+ SVGA3D_DXT1 = 15,
+ SVGA3D_DXT2 = 16,
+ SVGA3D_DXT3 = 17,
+ SVGA3D_DXT4 = 18,
+ SVGA3D_DXT5 = 19,
+
+ SVGA3D_BUMPU8V8 = 20,
+ SVGA3D_BUMPL6V5U5 = 21,
+ SVGA3D_BUMPX8L8V8U8 = 22,
+ SVGA3D_BUMPL8V8U8 = 23,
+
+ SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
+ SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
+
+ SVGA3D_A2R10G10B10 = 26,
+
+ /* signed formats */
+ SVGA3D_V8U8 = 27,
+ SVGA3D_Q8W8V8U8 = 28,
+ SVGA3D_CxV8U8 = 29,
+
+ /* mixed formats */
+ SVGA3D_X8L8V8U8 = 30,
+ SVGA3D_A2W10V10U10 = 31,
+
+ SVGA3D_ALPHA8 = 32,
+
+ /* Single- and dual-component floating point formats */
+ SVGA3D_R_S10E5 = 33,
+ SVGA3D_R_S23E8 = 34,
+ SVGA3D_RG_S10E5 = 35,
+ SVGA3D_RG_S23E8 = 36,
+
+ /*
+ * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
+ * the most efficient format to use when creating new surfaces
+ * expressly for index or vertex data.
+ */
+ SVGA3D_BUFFER = 37,
+
+ SVGA3D_Z_D24X8 = 38,
+
+ SVGA3D_V16U16 = 39,
+
+ SVGA3D_G16R16 = 40,
+ SVGA3D_A16B16G16R16 = 41,
+
+ /* Packed Video formats */
+ SVGA3D_UYVY = 42,
+ SVGA3D_YUY2 = 43,
+
+ SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+/*
+ * These match the D3DFORMAT_OP definitions used by Direct3D. We need
+ * them so that we can query the host for what the supported surface
+ * operations are (when we're using the D3D backend, in particular),
+ * and so we can send those operations to the guest.
+ */
+typedef enum {
+ SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
+ SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
+ SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
+ SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
+ SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
+ SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
+ SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * is the same depth once the alpha channel is ignored, e.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+ SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip). This flag
+ * should not be set on alpha formats.
+ */
+ SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
+
+/*
+ * The rasterizer can provide some level of Direct3D support in this format,
+ * which implies that the driver can create a Context in this mode (for some
+ * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
+ * flag must also be set.
+ */
+ SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+ SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+ SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+ SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked-up data)
+ */
+ SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+ SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+ SVGA3DFORMAT_OP_DMAP = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+ SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+ SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target (meaning that the
+ * pixel pipe will de-linearize data on output to the format)
+ */
+ SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+ SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format
+ */
+ SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler
+ */
+ SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate wrap
+ * modes, nor mipmapping
+ */
+ SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
+} SVGA3dFormatOp;
+
+/*
+ * This structure is a conversion of SVGA3DFORMAT_OP_*.
+ * Entries must be located at the same position.
+ */
+typedef union {
+ uint32 value;
+ struct {
+ uint32 texture : 1;
+ uint32 volumeTexture : 1;
+ uint32 cubeTexture : 1;
+ uint32 offscreenRenderTarget : 1;
+ uint32 sameFormatRenderTarget : 1;
+ uint32 unknown1 : 1;
+ uint32 zStencil : 1;
+ uint32 zStencilArbitraryDepth : 1;
+ uint32 sameFormatUpToAlpha : 1;
+ uint32 unknown2 : 1;
+ uint32 displayMode : 1;
+ uint32 acceleration3d : 1;
+ uint32 pixelSize : 1;
+ uint32 convertToARGB : 1;
+ uint32 offscreenPlain : 1;
+ uint32 sRGBRead : 1;
+ uint32 bumpMap : 1;
+ uint32 dmap : 1;
+ uint32 noFilter : 1;
+ uint32 memberOfGroupARGB : 1;
+ uint32 sRGBWrite : 1;
+ uint32 noAlphaBlend : 1;
+ uint32 autoGenMipMap : 1;
+ uint32 vertexTexture : 1;
+ uint32 noTexCoordWrapNorMip : 1;
+ };
+} SVGA3dSurfaceFormatCaps;
+
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_RS_INVALID = 0,
+ SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
+ SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
+ SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
+ SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
+ SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
+ SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
+ SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
+ SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
+ SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
+ SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
+ SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
+ SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
+ SVGA3D_RS_STENCILREF = 13, /* uint32 */
+ SVGA3D_RS_STENCILMASK = 14, /* uint32 */
+ SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
+ SVGA3D_RS_FOGSTART = 16, /* float */
+ SVGA3D_RS_FOGEND = 17, /* float */
+ SVGA3D_RS_FOGDENSITY = 18, /* float */
+ SVGA3D_RS_POINTSIZE = 19, /* float */
+ SVGA3D_RS_POINTSIZEMIN = 20, /* float */
+ SVGA3D_RS_POINTSIZEMAX = 21, /* float */
+ SVGA3D_RS_POINTSCALE_A = 22, /* float */
+ SVGA3D_RS_POINTSCALE_B = 23, /* float */
+ SVGA3D_RS_POINTSCALE_C = 24, /* float */
+ SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
+ SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
+ SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
+ SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
+ SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
+ SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
+ SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
+ SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
+ SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
+ SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
+ SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
+ SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
+ SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
+ SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
+ SVGA3D_RS_ZBIAS = 45, /* float */
+ SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
+ SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
+ SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
+ SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
+ SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
+ SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
+ SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
+ SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
+ SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
+ SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
+ SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
+ SVGA3D_RS_DEPTHBIAS = 64, /* float */
+
+
+ /*
+ * Output Gamma Level
+ *
+ * Output gamma affects the gamma curve of colors that are output from the
+ * rendering pipeline. A value of 1.0 specifies a linear color space. If the
+ * value is <= 0.0, gamma correction is ignored and linear color space is
+ * used.
+ */
+
+ SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
+ SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
+ SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
+ SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
+ SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
+ SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
+ SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
+ SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
+ SVGA3D_RS_TWEENFACTOR = 88, /* float */
+ SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
+ SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
+ SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
+ SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+ SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
+ SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
+ SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
+} SVGA3dVertexMaterial;
+
+typedef enum {
+ SVGA3D_FILLMODE_INVALID = 0,
+ SVGA3D_FILLMODE_POINT = 1,
+ SVGA3D_FILLMODE_LINE = 2,
+ SVGA3D_FILLMODE_FILL = 3,
+ SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+union {
+ struct {
+ uint16 mode; /* SVGA3dFillModeType */
+ uint16 face; /* SVGA3dFace */
+ };
+ uint32 uintValue;
+} SVGA3dFillMode;
+
+typedef enum {
+ SVGA3D_SHADEMODE_INVALID = 0,
+ SVGA3D_SHADEMODE_FLAT = 1,
+ SVGA3D_SHADEMODE_SMOOTH = 2,
+ SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
+ SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+union {
+ struct {
+ uint16 repeat;
+ uint16 pattern;
+ };
+ uint32 uintValue;
+} SVGA3dLinePattern;
+
+typedef enum {
+ SVGA3D_BLENDOP_INVALID = 0,
+ SVGA3D_BLENDOP_ZERO = 1,
+ SVGA3D_BLENDOP_ONE = 2,
+ SVGA3D_BLENDOP_SRCCOLOR = 3,
+ SVGA3D_BLENDOP_INVSRCCOLOR = 4,
+ SVGA3D_BLENDOP_SRCALPHA = 5,
+ SVGA3D_BLENDOP_INVSRCALPHA = 6,
+ SVGA3D_BLENDOP_DESTALPHA = 7,
+ SVGA3D_BLENDOP_INVDESTALPHA = 8,
+ SVGA3D_BLENDOP_DESTCOLOR = 9,
+ SVGA3D_BLENDOP_INVDESTCOLOR = 10,
+ SVGA3D_BLENDOP_SRCALPHASAT = 11,
+ SVGA3D_BLENDOP_BLENDFACTOR = 12,
+ SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
+ SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+ SVGA3D_BLENDEQ_INVALID = 0,
+ SVGA3D_BLENDEQ_ADD = 1,
+ SVGA3D_BLENDEQ_SUBTRACT = 2,
+ SVGA3D_BLENDEQ_REVSUBTRACT = 3,
+ SVGA3D_BLENDEQ_MINIMUM = 4,
+ SVGA3D_BLENDEQ_MAXIMUM = 5,
+ SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+ SVGA3D_FRONTWINDING_INVALID = 0,
+ SVGA3D_FRONTWINDING_CW = 1,
+ SVGA3D_FRONTWINDING_CCW = 2,
+ SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+ SVGA3D_FACE_INVALID = 0,
+ SVGA3D_FACE_NONE = 1,
+ SVGA3D_FACE_FRONT = 2,
+ SVGA3D_FACE_BACK = 3,
+ SVGA3D_FACE_FRONT_BACK = 4,
+ SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+ SVGA3D_CMP_INVALID = 0,
+ SVGA3D_CMP_NEVER = 1,
+ SVGA3D_CMP_LESS = 2,
+ SVGA3D_CMP_EQUAL = 3,
+ SVGA3D_CMP_LESSEQUAL = 4,
+ SVGA3D_CMP_GREATER = 5,
+ SVGA3D_CMP_NOTEQUAL = 6,
+ SVGA3D_CMP_GREATEREQUAL = 7,
+ SVGA3D_CMP_ALWAYS = 8,
+ SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+ SVGA3D_FOGFUNC_INVALID = 0,
+ SVGA3D_FOGFUNC_EXP = 1,
+ SVGA3D_FOGFUNC_EXP2 = 2,
+ SVGA3D_FOGFUNC_LINEAR = 3,
+ SVGA3D_FOGFUNC_PER_VERTEX = 4
+} SVGA3dFogFunction;
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+ SVGA3D_FOGTYPE_INVALID = 0,
+ SVGA3D_FOGTYPE_VERTEX = 1,
+ SVGA3D_FOGTYPE_PIXEL = 2,
+ SVGA3D_FOGTYPE_MAX = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+ SVGA3D_FOGBASE_INVALID = 0,
+ SVGA3D_FOGBASE_DEPTHBASED = 1,
+ SVGA3D_FOGBASE_RANGEBASED = 2,
+ SVGA3D_FOGBASE_MAX = 3
+} SVGA3dFogBase;
+
+typedef enum {
+ SVGA3D_STENCILOP_INVALID = 0,
+ SVGA3D_STENCILOP_KEEP = 1,
+ SVGA3D_STENCILOP_ZERO = 2,
+ SVGA3D_STENCILOP_REPLACE = 3,
+ SVGA3D_STENCILOP_INCRSAT = 4,
+ SVGA3D_STENCILOP_DECRSAT = 5,
+ SVGA3D_STENCILOP_INVERT = 6,
+ SVGA3D_STENCILOP_INCR = 7,
+ SVGA3D_STENCILOP_DECR = 8,
+ SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+ SVGA3D_CLIPPLANE_0 = (1 << 0),
+ SVGA3D_CLIPPLANE_1 = (1 << 1),
+ SVGA3D_CLIPPLANE_2 = (1 << 2),
+ SVGA3D_CLIPPLANE_3 = (1 << 3),
+ SVGA3D_CLIPPLANE_4 = (1 << 4),
+ SVGA3D_CLIPPLANE_5 = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+ SVGA3D_CLEAR_COLOR = 0x1,
+ SVGA3D_CLEAR_DEPTH = 0x2,
+ SVGA3D_CLEAR_STENCIL = 0x4
+} SVGA3dClearFlag;
+
+typedef enum {
+ SVGA3D_RT_DEPTH = 0,
+ SVGA3D_RT_STENCIL = 1,
+ SVGA3D_RT_COLOR0 = 2,
+ SVGA3D_RT_COLOR1 = 3,
+ SVGA3D_RT_COLOR2 = 4,
+ SVGA3D_RT_COLOR3 = 5,
+ SVGA3D_RT_COLOR4 = 6,
+ SVGA3D_RT_COLOR5 = 7,
+ SVGA3D_RT_COLOR6 = 8,
+ SVGA3D_RT_COLOR7 = 9,
+ SVGA3D_RT_MAX,
+ SVGA3D_RT_INVALID = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+union {
+ struct {
+ uint32 red : 1;
+ uint32 green : 1;
+ uint32 blue : 1;
+ uint32 alpha : 1;
+ };
+ uint32 uintValue;
+} SVGA3dColorMask;
+
+typedef enum {
+ SVGA3D_VBLEND_DISABLE = 0,
+ SVGA3D_VBLEND_1WEIGHT = 1,
+ SVGA3D_VBLEND_2WEIGHT = 2,
+ SVGA3D_VBLEND_3WEIGHT = 3,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+ SVGA3D_WRAPCOORD_0 = 1 << 0,
+ SVGA3D_WRAPCOORD_1 = 1 << 1,
+ SVGA3D_WRAPCOORD_2 = 1 << 2,
+ SVGA3D_WRAPCOORD_3 = 1 << 3,
+ SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_TS_INVALID = 0,
+ SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
+ SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
+ SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
+ SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
+ SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
+ SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
+ SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
+ SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
+ SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
+ SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
+ SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
+ SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
+ SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
+ SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
+ SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
+ SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
+ SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
+
+
+ /*
+ * Sampler Gamma Level
+ *
+ * Sampler gamma affects the color of samples taken from the sampler. A
+ * value of 1.0 will produce linear samples. If the value is <= 0.0 the
+ * gamma value is ignored and a linear space is used.
+ */
+
+ SVGA3D_TS_GAMMA = 25, /* float */
+ SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
+ SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
+ SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
+ SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+ SVGA3D_TC_INVALID = 0,
+ SVGA3D_TC_DISABLE = 1,
+ SVGA3D_TC_SELECTARG1 = 2,
+ SVGA3D_TC_SELECTARG2 = 3,
+ SVGA3D_TC_MODULATE = 4,
+ SVGA3D_TC_ADD = 5,
+ SVGA3D_TC_ADDSIGNED = 6,
+ SVGA3D_TC_SUBTRACT = 7,
+ SVGA3D_TC_BLENDTEXTUREALPHA = 8,
+ SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
+ SVGA3D_TC_BLENDCURRENTALPHA = 10,
+ SVGA3D_TC_BLENDFACTORALPHA = 11,
+ SVGA3D_TC_MODULATE2X = 12,
+ SVGA3D_TC_MODULATE4X = 13,
+ SVGA3D_TC_DSDT = 14,
+ SVGA3D_TC_DOTPRODUCT3 = 15,
+ SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
+ SVGA3D_TC_ADDSIGNED2X = 17,
+ SVGA3D_TC_ADDSMOOTH = 18,
+ SVGA3D_TC_PREMODULATE = 19,
+ SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
+ SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
+ SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+ SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+ SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
+ SVGA3D_TC_MULTIPLYADD = 25,
+ SVGA3D_TC_LERP = 26,
+ SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
+
+typedef enum {
+ SVGA3D_TEX_ADDRESS_INVALID = 0,
+ SVGA3D_TEX_ADDRESS_WRAP = 1,
+ SVGA3D_TEX_ADDRESS_MIRROR = 2,
+ SVGA3D_TEX_ADDRESS_CLAMP = 3,
+ SVGA3D_TEX_ADDRESS_BORDER = 4,
+ SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+ SVGA3D_TEX_ADDRESS_EDGE = 6,
+ SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+ SVGA3D_TEX_FILTER_NONE = 0,
+ SVGA3D_TEX_FILTER_NEAREST = 1,
+ SVGA3D_TEX_FILTER_LINEAR = 2,
+ SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
+ SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented
+ SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
+ SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
+ SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented
+ SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+ SVGA3D_TEX_TRANSFORM_OFF = 0,
+ SVGA3D_TEX_TRANSFORM_S = (1 << 0),
+ SVGA3D_TEX_TRANSFORM_T = (1 << 1),
+ SVGA3D_TEX_TRANSFORM_R = (1 << 2),
+ SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
+ SVGA3D_TEX_PROJECTED = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+ SVGA3D_TEXCOORD_GEN_OFF = 0,
+ SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
+ SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
+ SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+ SVGA3D_TEXCOORD_GEN_SPHERE = 4,
+ SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+ SVGA3D_TA_INVALID = 0,
+ SVGA3D_TA_CONSTANT = 1,
+ SVGA3D_TA_PREVIOUS = 2,
+ SVGA3D_TA_DIFFUSE = 3,
+ SVGA3D_TA_TEXTURE = 4,
+ SVGA3D_TA_SPECULAR = 5,
+ SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+ SVGA3D_TM_NONE = 0,
+ SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
+ SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
+
+#define SVGA3D_INVALID_ID ((uint32)-1)
+#define SVGA3D_MAX_CLIP_PLANES 6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+ SVGA3D_DECLUSAGE_POSITION = 0,
+ SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1
+ SVGA3D_DECLUSAGE_BLENDINDICES, // 2
+ SVGA3D_DECLUSAGE_NORMAL, // 3
+ SVGA3D_DECLUSAGE_PSIZE, // 4
+ SVGA3D_DECLUSAGE_TEXCOORD, // 5
+ SVGA3D_DECLUSAGE_TANGENT, // 6
+ SVGA3D_DECLUSAGE_BINORMAL, // 7
+ SVGA3D_DECLUSAGE_TESSFACTOR, // 8
+ SVGA3D_DECLUSAGE_POSITIONT, // 9
+ SVGA3D_DECLUSAGE_COLOR, // 10
+ SVGA3D_DECLUSAGE_FOG, // 11
+ SVGA3D_DECLUSAGE_DEPTH, // 12
+ SVGA3D_DECLUSAGE_SAMPLE, // 13
+ SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+ SVGA3D_DECLMETHOD_DEFAULT = 0,
+ SVGA3D_DECLMETHOD_PARTIALU,
+ SVGA3D_DECLMETHOD_PARTIALV,
+ SVGA3D_DECLMETHOD_CROSSUV, // Normal
+ SVGA3D_DECLMETHOD_UV,
+ SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map
+ SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
+} SVGA3dDeclMethod;
+
+typedef enum {
+ SVGA3D_DECLTYPE_FLOAT1 = 0,
+ SVGA3D_DECLTYPE_FLOAT2 = 1,
+ SVGA3D_DECLTYPE_FLOAT3 = 2,
+ SVGA3D_DECLTYPE_FLOAT4 = 3,
+ SVGA3D_DECLTYPE_D3DCOLOR = 4,
+ SVGA3D_DECLTYPE_UBYTE4 = 5,
+ SVGA3D_DECLTYPE_SHORT2 = 6,
+ SVGA3D_DECLTYPE_SHORT4 = 7,
+ SVGA3D_DECLTYPE_UBYTE4N = 8,
+ SVGA3D_DECLTYPE_SHORT2N = 9,
+ SVGA3D_DECLTYPE_SHORT4N = 10,
+ SVGA3D_DECLTYPE_USHORT2N = 11,
+ SVGA3D_DECLTYPE_USHORT4N = 12,
+ SVGA3D_DECLTYPE_UDEC3 = 13,
+ SVGA3D_DECLTYPE_DEC3N = 14,
+ SVGA3D_DECLTYPE_FLOAT16_2 = 15,
+ SVGA3D_DECLTYPE_FLOAT16_4 = 16,
+ SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+ struct {
+ /*
+ * For index data, this number represents the number of instances to draw.
+ * For instance data, this number represents the number of
+ * instances/vertex in this stream
+ */
+ uint32 count : 30;
+
+ /*
+ * This is 1 if this is supposed to be the data that is repeated for
+ * every instance.
+ */
+ uint32 indexedData : 1;
+
+ /*
+ * This is 1 if this is supposed to be the per-instance data.
+ */
+ uint32 instanceData : 1;
+ };
+
+ uint32 value;
+} SVGA3dVertexDivisor;
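A small worked example of the divisor bitfields, assuming the C11 anonymous-struct access used elsewhere in this header: index data drawn for four instances sets count to the instance count and flags the stream as indexedData.

SVGA3dVertexDivisor divisor = { .value = 0 };
divisor.count = 4;		/* number of instances to draw */
divisor.indexedData = 1;	/* this stream is repeated for every instance */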
+
+typedef enum {
+ SVGA3D_PRIMITIVE_INVALID = 0,
+ SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
+ SVGA3D_PRIMITIVE_POINTLIST = 2,
+ SVGA3D_PRIMITIVE_LINELIST = 3,
+ SVGA3D_PRIMITIVE_LINESTRIP = 4,
+ SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
+ SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
+ SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+ SVGA3D_COORDINATE_INVALID = 0,
+ SVGA3D_COORDINATE_LEFTHANDED = 1,
+ SVGA3D_COORDINATE_RIGHTHANDED = 2,
+ SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+ SVGA3D_TRANSFORM_INVALID = 0,
+ SVGA3D_TRANSFORM_WORLD = 1,
+ SVGA3D_TRANSFORM_VIEW = 2,
+ SVGA3D_TRANSFORM_PROJECTION = 3,
+ SVGA3D_TRANSFORM_TEXTURE0 = 4,
+ SVGA3D_TRANSFORM_TEXTURE1 = 5,
+ SVGA3D_TRANSFORM_TEXTURE2 = 6,
+ SVGA3D_TRANSFORM_TEXTURE3 = 7,
+ SVGA3D_TRANSFORM_TEXTURE4 = 8,
+ SVGA3D_TRANSFORM_TEXTURE5 = 9,
+ SVGA3D_TRANSFORM_TEXTURE6 = 10,
+ SVGA3D_TRANSFORM_TEXTURE7 = 11,
+ SVGA3D_TRANSFORM_WORLD1 = 12,
+ SVGA3D_TRANSFORM_WORLD2 = 13,
+ SVGA3D_TRANSFORM_WORLD3 = 14,
+ SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+ SVGA3D_LIGHTTYPE_INVALID = 0,
+ SVGA3D_LIGHTTYPE_POINT = 1,
+ SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
+ SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
+ SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
+ SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+ SVGA3D_CUBEFACE_POSX = 0,
+ SVGA3D_CUBEFACE_NEGX = 1,
+ SVGA3D_CUBEFACE_POSY = 2,
+ SVGA3D_CUBEFACE_NEGY = 3,
+ SVGA3D_CUBEFACE_POSZ = 4,
+ SVGA3D_CUBEFACE_NEGZ = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+ SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
+ SVGA3D_SHADERTYPE_VS = 1,
+ SVGA3D_SHADERTYPE_PS = 2,
+ SVGA3D_SHADERTYPE_MAX
+} SVGA3dShaderType;
+
+typedef enum {
+ SVGA3D_CONST_TYPE_FLOAT = 0,
+ SVGA3D_CONST_TYPE_INT = 1,
+ SVGA3D_CONST_TYPE_BOOL = 2,
+} SVGA3dShaderConstType;
+
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+typedef enum {
+ SVGA3D_STRETCH_BLT_POINT = 0,
+ SVGA3D_STRETCH_BLT_LINEAR = 1,
+ SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+ SVGA3D_QUERYTYPE_OCCLUSION = 0,
+ SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef enum {
+ SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
+ SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
+ SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
+ SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
+} SVGA3dQueryState;
+
+typedef enum {
+ SVGA3D_WRITE_HOST_VRAM = 1,
+ SVGA3D_READ_HOST_VRAM = 2,
+} SVGA3dTransferType;
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS 32
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * and up.
+ */
+
+#define SVGA_3D_CMD_LEGACY_BASE 1000
+#define SVGA_3D_CMD_BASE 1040
+
+#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0
+#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
+#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
+#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
+#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
+#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
+#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
+#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
+#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
+#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
+#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
+#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
+#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
+#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
+#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
+#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
+#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
+#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
+#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated
+#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
+#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
+#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
+#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
+#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
+#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
+#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
+#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
+#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
+#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated
+#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
+#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30
+
+#define SVGA_3D_CMD_FUTURE_MAX 2000
+
+/*
+ * Common substructures used in multiple FIFO commands:
+ */
+
+typedef struct {
+ union {
+ struct {
+ uint16 function; // SVGA3dFogFunction
+ uint8 type; // SVGA3dFogType
+ uint8 base; // SVGA3dFogBase
+ };
+ uint32 uintValue;
+ };
+} SVGA3dFogMode;
+
+/*
+ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
+ * is a surface ID as well as face/mipmap indices.
+ */
+
+typedef
+struct SVGA3dSurfaceImageId {
+ uint32 sid;
+ uint32 face;
+ uint32 mipmap;
+} SVGA3dSurfaceImageId;
+
+typedef
+struct SVGA3dGuestImage {
+ SVGAGuestPtr ptr;
+
+ /*
+ * A note on interpretation of pitch: This value of pitch is the
+ * number of bytes between vertically adjacent image
+ * blocks. Normally this is the number of bytes between the first
+ * pixel of two adjacent scanlines. With compressed textures,
+ * however, this may represent the number of bytes between
+ * compression blocks rather than between rows of pixels.
+ *
+ * XXX: Compressed textures currently must be tightly packed in guest memory.
+ *
+ * If the image is 1-dimensional, pitch is ignored.
+ *
+ * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+ * assuming each row of blocks is tightly packed.
+ */
+ uint32 pitch;
+} SVGA3dGuestImage;
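A worked example of the pitch rule above: a tightly packed 640-pixel-wide SVGA3D_X8R8G8B8 image (4 bytes per pixel) has vertically adjacent rows 640 * 4 = 2560 bytes apart; passing pitch == 0 instead lets the device compute that value itself.

SVGA3dGuestImage image = {
	.pitch = 640 * 4,	/* 2560 bytes between vertically adjacent rows */
};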
+
+
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The data size header following cmdNum for every 3d command
+ */
+typedef
+struct {
+ uint32 id;
+ uint32 size;
+} SVGA3dCmdHeader;
+
+/*
+ * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
+ * optional mipmaps and cube faces.
+ */
+
+typedef
+struct {
+ uint32 width;
+ uint32 height;
+ uint32 depth;
+} SVGA3dSize;
+
+typedef enum {
+ SVGA3D_SURFACE_CUBEMAP = (1 << 0),
+ SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
+ SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
+ SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
+ SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
+ SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
+ SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
+} SVGA3dSurfaceFlags;
+
+typedef
+struct {
+ uint32 numMipLevels;
+} SVGA3dSurfaceFace;
+
+typedef
+struct {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+ /*
+ * Followed by an SVGA3dSize structure for each mip level in each face.
+ *
+ * A note on surface sizes: Sizes are always specified in pixels,
+ * even if the true surface size is not a multiple of the minimum
+ * block size of the surface's format. For example, a 3x3x1 DXT1
+ * compressed texture would actually be stored as a 4x4x1 image in
+ * memory.
+ */
+} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
+
+typedef
+struct {
+ uint32 sid;
+} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
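A sketch of the FIFO layout implied by SVGA3dCmdHeader: each command is the header followed immediately by its body (plus any variable-length tail). fifo_reserve() and fifo_commit() below are hypothetical stand-ins for the driver's real FIFO helpers.

static void sketch_destroy_surface(uint32 sid)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = fifo_reserve(sizeof(*cmd));	/* hypothetical helper */

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);	/* body size, excluding the header */
	cmd->body.sid = sid;			/* surface to destroy */
	fifo_commit(sizeof(*cmd));		/* hypothetical helper */
}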
+
+typedef
+struct {
+ uint32 cid;
+} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+struct {
+ uint32 cid;
+} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dClearFlag clearFlag;
+ uint32 color;
+ float depth;
+ uint32 stencil;
+ /* Followed by variable number of SVGA3dRect structures */
+} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
+
+typedef
+struct SVGA3dCopyRect {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+ uint32 srcx;
+ uint32 srcy;
+} SVGA3dCopyRect;
+
+typedef
+struct SVGA3dCopyBox {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+ uint32 srcx;
+ uint32 srcy;
+ uint32 srcz;
+} SVGA3dCopyBox;
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+} SVGA3dRect;
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+} SVGA3dBox;
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+} SVGA3dPoint;
+
+typedef
+struct {
+ SVGA3dLightType type;
+ SVGA3dBool inWorldSpace;
+ float diffuse[4];
+ float specular[4];
+ float ambient[4];
+ float position[4];
+ float direction[4];
+ float range;
+ float falloff;
+ float attenuation0;
+ float attenuation1;
+ float attenuation2;
+ float theta;
+ float phi;
+} SVGA3dLightData;
+
+typedef
+struct {
+ uint32 sid;
+ /* Followed by variable number of SVGA3dCopyRect structures */
+} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
+
+typedef
+struct {
+ SVGA3dRenderStateName state;
+ union {
+ uint32 uintValue;
+ float floatValue;
+ };
+} SVGA3dRenderState;
+
+typedef
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dRenderState structures */
+} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dRenderTargetType type;
+ SVGA3dSurfaceImageId target;
+} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ /* Followed by variable number of SVGA3dCopyBox structures */
+} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
+
+typedef
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ SVGA3dBox boxSrc;
+ SVGA3dBox boxDest;
+ SVGA3dStretchBltMode mode;
+} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+struct {
+ /*
+ * If the discard flag is present in a surface DMA operation, the host may
+ * discard the contents of the current mipmap level and face of the target
+ * surface before applying the surface DMA contents.
+ */
+ uint32 discard : 1;
+
+ /*
+ * If the unsynchronized flag is present, the host may perform this upload
+ * without syncing to pending reads on this surface.
+ */
+ uint32 unsynchronized : 1;
+
+ /*
+ * Guests *MUST* set the reserved bits to 0 before submitting the command
+ * suffix as future flags may occupy these bits.
+ */
+ uint32 reserved : 30;
+} SVGA3dSurfaceDMAFlags;
+
+typedef
+struct {
+ SVGA3dGuestImage guest;
+ SVGA3dSurfaceImageId host;
+ SVGA3dTransferType transfer;
+ /*
+ * Followed by variable number of SVGA3dCopyBox structures. For consistency
+ * in all clipping logic and coordinate translation, we define the
+ * "source" in each copyBox as the guest image and the
+ * "destination" as the host image, regardless of transfer
+ * direction.
+ *
+ * For efficiency, the SVGA3D device is free to copy more data than
+ * specified. For example, it may round copy boxes outwards such
+ * that they lie on particular alignment boundaries.
+ */
+} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ * This is a command suffix that will appear after a SurfaceDMA command in
+ * the FIFO. It contains some extra information that hosts may use to
+ * optimize performance or protect the guest. This suffix exists to preserve
+ * backwards compatibility while also allowing for new functionality to be
+ * implemented.
+ */
+
+typedef
+struct {
+ uint32 suffixSize;
+
+ /*
+ * The maximum offset is used to determine the maximum offset from the
+ * guestPtr base address that will be accessed or written to during this
+ * surfaceDMA. If the suffix is supported, the host will respect this
+ * boundary while performing surface DMAs.
+ *
+ * Defaults to MAX_UINT32
+ */
+ uint32 maximumOffset;
+
+ /*
+ * A set of flags that describes optimizations that the host may perform
+    * while performing this surface DMA operation. For correctness, the guest
+    * should never rely on behaviour that differs when these flags are set.
+ *
+ * Defaults to 0
+ */
+ SVGA3dSurfaceDMAFlags flags;
+} SVGA3dCmdSurfaceDMASuffix;
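
Editorial sketch (not part of the patch): one way a guest might fill in this suffix with the documented defaults; the helper name is hypothetical.

static void init_dma_suffix(SVGA3dCmdSurfaceDMASuffix *suffix)
{
   suffix->suffixSize           = sizeof *suffix;
   suffix->maximumOffset        = 0xFFFFFFFF;  /* "Defaults to MAX_UINT32" */
   suffix->flags.discard        = 0;
   suffix->flags.unsynchronized = 0;
   suffix->flags.reserved       = 0;           /* reserved bits must be zero */
}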
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ * This command is the SVGA3D device's generic drawing entry point.
+ * It can draw multiple ranges of primitives, optionally using an
+ * index buffer, using an arbitrary collection of vertex buffers.
+ *
+ * Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ * during this draw call. The declarations specify which surface
+ * the vertex data lives in, what that vertex data is used for,
+ * and how to interpret it.
+ *
+ * Each SVGA3dPrimitiveRange defines a collection of primitives
+ * to render using the same vertex arrays. An index buffer is
+ * optional.
+ */
+
+typedef
+struct {
+ /*
+ * A range hint is an optional specification for the range of indices
+ * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+ * that the entire array will be used.
+ *
+ * These are only hints. The SVGA3D device may use them for
+ * performance optimization if possible, but it's also allowed to
+ * ignore these values.
+ */
+ uint32 first;
+ uint32 last;
+} SVGA3dArrayRangeHint;
+
+typedef
+struct {
+ /*
+ * Define the origin and shape of a vertex or index array. Both
+ * 'offset' and 'stride' are in bytes. The provided surface will be
+ * reinterpreted as a flat array of bytes in the same format used
+ * by surface DMA operations. To avoid unnecessary conversions, the
+ * surface should be created with the SVGA3D_BUFFER format.
+ *
+ * Index 0 in the array starts 'offset' bytes into the surface.
+ * Index 1 begins at byte 'offset + stride', etc. Array indices may
+ * not be negative.
+ */
+ uint32 surfaceId;
+ uint32 offset;
+ uint32 stride;
+} SVGA3dArray;
+
+typedef
+struct {
+ /*
+ * Describe a vertex array's data type, and define how it is to be
+ * used by the fixed function pipeline or the vertex shader. It
+ * isn't useful to have two VertexDecls with the same
+ * VertexArrayIdentity in one draw call.
+ */
+ SVGA3dDeclType type;
+ SVGA3dDeclMethod method;
+ SVGA3dDeclUsage usage;
+ uint32 usageIndex;
+} SVGA3dVertexArrayIdentity;
+
+typedef
+struct {
+ SVGA3dVertexArrayIdentity identity;
+ SVGA3dArray array;
+ SVGA3dArrayRangeHint rangeHint;
+} SVGA3dVertexDecl;
+
+typedef
+struct {
+ /*
+ * Define a group of primitives to render, from sequential indices.
+ *
+    * The values of 'primType' and 'primitiveCount' imply the
+ * total number of vertices that will be rendered.
+ */
+ SVGA3dPrimitiveType primType;
+ uint32 primitiveCount;
+
+ /*
+ * Optional index buffer. If indexArray.surfaceId is
+ * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+ * without an index buffer is identical to rendering with an index
+ * buffer containing the sequence [0, 1, 2, 3, ...].
+ *
+ * If an index buffer is in use, indexWidth specifies the width in
+ * bytes of each index value. It must be less than or equal to
+ * indexArray.stride.
+ *
+ * (Currently, the SVGA3D device requires index buffers to be tightly
+ * packed. In other words, indexWidth == indexArray.stride)
+ */
+ SVGA3dArray indexArray;
+ uint32 indexWidth;
+
+ /*
+ * Optional index bias. This number is added to all indices from
+ * indexArray before they are used as vertex array indices. This
+ * can be used in multiple ways:
+ *
+ * - When not using an indexArray, this bias can be used to
+ * specify where in the vertex arrays to begin rendering.
+ *
+ * - A positive number here is equivalent to increasing the
+ * offset in each vertex array.
+ *
+ * - A negative number can be used to render using a small
+ * vertex array and an index buffer that contains large
+ * values. This may be used by some applications that
+ * crop a vertex buffer without modifying their index
+ * buffer.
+ *
+ * Note that rendering with a negative bias value may be slower and
+ * use more memory than rendering with a positive or zero bias.
+ */
+ int32 indexBias;
+} SVGA3dPrimitiveRange;
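
Editorial sketch (not part of the patch) of the non-indexed case described above: SVGA3D_INVALID_ID disables the index buffer and indexBias selects the starting vertex. Setting indexWidth to zero is an assumption; the header does not specify its value for non-indexed draws.

static void fill_nonindexed_range(SVGA3dPrimitiveRange *range,
                                  SVGA3dPrimitiveType primType,
                                  uint32 primitiveCount,
                                  int32 firstVertex)
{
   range->primType             = primType;
   range->primitiveCount       = primitiveCount;
   range->indexArray.surfaceId = SVGA3D_INVALID_ID;  /* implicit [0, 1, 2, ...] */
   range->indexArray.offset    = 0;
   range->indexArray.stride    = 0;
   range->indexWidth           = 0;                  /* assumed ignored here */
   range->indexBias            = firstVertex;        /* where the vertex arrays start */
}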
+
+typedef
+struct {
+ uint32 cid;
+ uint32 numVertexDecls;
+ uint32 numRanges;
+
+ /*
+ * There are two variable size arrays after the
+ * SVGA3dCmdDrawPrimitives structure. In order,
+ * they are:
+ *
+ * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
+ * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
+ * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+ *       the frequency divisor for the corresponding vertex decl)
+ */
+} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
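
Editorial sketch (not part of the patch): the body size implied by the layout above, omitting the optional SVGA3dVertexDivisor array and any enclosing FIFO command header.

static uint32 draw_primitives_body_size(uint32 numVertexDecls, uint32 numRanges)
{
   return sizeof(SVGA3dCmdDrawPrimitives) +
          numVertexDecls * sizeof(SVGA3dVertexDecl) +
          numRanges * sizeof(SVGA3dPrimitiveRange);
}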
+
+typedef
+struct {
+ uint32 stage;
+ SVGA3dTextureStateName name;
+ union {
+ uint32 value;
+ float floatValue;
+ };
+} SVGA3dTextureState;
+
+typedef
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dTextureState structures */
+} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dTransformType type;
+ float matrix[16];
+} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+struct {
+ float min;
+ float max;
+} SVGA3dZRange;
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dZRange zRange;
+} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+struct {
+ float diffuse[4];
+ float ambient[4];
+ float specular[4];
+ float emissive[4];
+ float shininess;
+} SVGA3dMaterial;
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dFace face;
+ SVGA3dMaterial material;
+} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 index;
+ SVGA3dLightData data;
+} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 index;
+ uint32 enabled;
+} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 index;
+ float plane[4];
+} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+ /* Followed by variable number of DWORDs for shader bytecode */
+} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 reg; /* register number */
+ SVGA3dShaderType type;
+ SVGA3dShaderConstType ctype;
+ uint32 values[4];
+} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dShaderType type;
+ uint32 shid;
+} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
+} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
+
+typedef
+struct {
+ uint32 cid; /* Same parameters passed to END_QUERY */
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult;
+} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
+
+typedef
+struct {
+ uint32 totalSize; /* Set by guest before query is ended. */
+ SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
+ union { /* Set by host on exit from PENDING state */
+ uint32 result32;
+ };
+} SVGA3dQueryResult;
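
Editorial sketch (not part of the patch) of the guest side of the query protocol described above. SVGA3D_QUERYSTATE_PENDING is assumed to be one of the SVGA3dQueryState values defined earlier in this header.

static void query_result_init(SVGA3dQueryResult *qr)
{
   qr->totalSize = sizeof *qr;               /* set by the guest before END_QUERY */
   qr->state     = SVGA3D_QUERYSTATE_PENDING;
}

static int query_result_ready(const SVGA3dQueryResult *qr)
{
   return qr->state != SVGA3D_QUERYSTATE_PENDING;  /* host updates state on completion */
}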
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ * This is a blit from an SVGA3D surface to a Screen Object. Just
+ * like GMR-to-screen blits, this blit may be directed at a
+ * specific screen or to the virtual coordinate space.
+ *
+ * The blit copies from a rectangular region of an SVGA3D surface
+ * image to a rectangular region of a screen or screens.
+ *
+ * This command takes an optional variable-length list of clipping
+ * rectangles after the body of the command. If no rectangles are
+ * specified, there is no clipping region. The entire destRect is
+ * drawn to. If one or more rectangles are included, they describe
+ * a clipping region. The clip rectangle coordinates are measured
+ * relative to the top-left corner of destRect.
+ *
+ * This clipping region serves multiple purposes:
+ *
+ * - It can be used to perform an irregularly shaped blit more
+ * efficiently than by issuing many separate blit commands.
+ *
+ * - It is equivalent to allowing blits with non-integer
+ * source coordinates. You could blit just one half-pixel
+ * of a source, for example, by specifying a larger
+ * destination rectangle than you need, then removing
+ * part of it using a clip rectangle.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ *
+ * Limitations:
+ *
+ * - Currently, no backend supports blits from a mipmap or face
+ * other than the first one.
+ */
+
+typedef
+struct {
+ SVGA3dSurfaceImageId srcImage;
+ SVGASignedRect srcRect;
+ uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
+ SVGASignedRect destRect; /* Supports scaling if src/dest are different sizes */
+ /* Clipping: zero or more SVGASignedRects follow */
+} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
+
+
+/*
+ * Capability query index.
+ *
+ * Notes:
+ *
+ * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+ * fixed-function texture units available. Each of these units
+ *    works in both FFP and Shader modes, and each supports texture
+ * transforms and texture coordinates. The host may have additional
+ * texture image units that are only usable with shaders.
+ *
+ * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
+ * return TRUE. Even on physical hardware that does not support
+ * these formats natively, the SVGA3D device will provide an emulation
+ * which should be invisible to the guest OS.
+ *
+ * In general, the SVGA3D device should support any operation on
+ * any surface format, it just may perform some of these
+ * operations in software depending on the capabilities of the
+ * available physical hardware.
+ *
+ * XXX: In the future, we will add capabilities that describe in
+ * detail what formats are supported in hardware for what kinds
+ * of operations.
+ */
+
+typedef enum {
+ SVGA3D_DEVCAP_3D = 0,
+ SVGA3D_DEVCAP_MAX_LIGHTS = 1,
+ SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
+ SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
+ SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
+ SVGA3D_DEVCAP_VERTEX_SHADER = 5,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
+ SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
+ SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
+ SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
+ SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
+ SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
+ SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
+ SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
+ SVGA3D_DEVCAP_QUERY_TYPES = 15,
+ SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
+ SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
+ SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
+ SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
+ SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
+ SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
+ SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
+ SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
+ SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
+ SVGA3D_DEVCAP_TEXTURE_OPS = 31,
+ SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
+ SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
+ SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
+ SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
+ SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
+ SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
+ SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
+ SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
+ SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
+ SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
+ SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
+
+ /*
+ * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+   * render targets. This does not include the depth or stencil targets.
+ */
+ SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
+
+ SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
+ SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
+ SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
+ SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
+ SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
+
+ /*
+ * Don't add new caps into the previous section; the values in this
+ * enumeration must not change. You can put new values right before
+ * SVGA3D_DEVCAP_MAX.
+ */
+ SVGA3D_DEVCAP_MAX /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+typedef union {
+ Bool b;
+ uint32 u;
+ int32 i;
+ float f;
+} SVGA3dDevCapResult;
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
new file mode 100644
index 0000000..7b85e9b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -0,0 +1,89 @@
+/**********************************************************
+ * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_escape.h --
+ *
+ * Definitions for our own (vendor-specific) SVGA Escape commands.
+ */
+
+#ifndef _SVGA_ESCAPE_H_
+#define _SVGA_ESCAPE_H_
+
+
+/*
+ * Namespace IDs for the escape command
+ */
+
+#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
+#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF
+
+
+/*
+ * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
+ * the first DWORD of escape data (after the nsID and size). As a
+ * guideline we're using the high word and low word as a major and
+ * minor command number, respectively.
+ *
+ * Major command number allocation:
+ *
+ * 0000: Reserved
+ * 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
+ * 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
+ * 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
+ */
+
+#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000
+
+
+/*
+ * SVGA Hint commands.
+ *
+ * These escapes let the SVGA driver provide optional information to
+ * the host about the state of the guest or guest applications. The
+ * host can use these hints to make user interface or performance
+ * decisions.
+ *
+ * Notes:
+ *
+ * - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
+ * that use the SVGA Screen Object extension. Instead of sending
+ * this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
+ * Screen Object.
+ */
+
+#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
+
+typedef
+struct {
+ uint32 command;
+ uint32 fullscreen;
+ struct {
+ int32 x, y;
+ } monitorPosition;
+} SVGAEscapeHintFullscreen;
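
Editorial sketch (not part of the patch): filling in the hint payload. Sending it as escape data under SVGA_ESCAPE_NSID_VMWARE via SVGA_CMD_ESCAPE is not shown here.

static void fill_fullscreen_hint(SVGAEscapeHintFullscreen *hint,
                                 uint32 fullscreen, int32 x, int32 y)
{
   hint->command           = SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN;
   hint->fullscreen        = fullscreen;
   hint->monitorPosition.x = x;
   hint->monitorPosition.y = y;
}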
+
+#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
new file mode 100644
index 0000000..f753d73
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -0,0 +1,201 @@
+/**********************************************************
+ * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_overlay.h --
+ *
+ * Definitions for video-overlay support.
+ */
+
+#ifndef _SVGA_OVERLAY_H_
+#define _SVGA_OVERLAY_H_
+
+#include "svga_reg.h"
+
+/*
+ * Video formats we support
+ */
+
+#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
+#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
+#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
+
+typedef enum {
+ SVGA_OVERLAY_FORMAT_INVALID = 0,
+ SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
+ SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
+ SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
+} SVGAOverlayFormat;
+
+#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff
+
+#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001
+ /* FIFO escape layout:
+ * Type, Stream Id, (Register Id, Value) pairs */
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002
+ /* FIFO escape layout:
+ * Type, Stream Id */
+
+typedef
+struct SVGAEscapeVideoSetRegs {
+ struct {
+ uint32 cmdType;
+ uint32 streamId;
+ } header;
+
+ // May include zero or more items.
+ struct {
+ uint32 registerId;
+ uint32 value;
+ } items[1];
+} SVGAEscapeVideoSetRegs;
+
+typedef
+struct SVGAEscapeVideoFlush {
+ uint32 cmdType;
+ uint32 streamId;
+} SVGAEscapeVideoFlush;
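
Editorial sketch (not part of the patch): filling the flush payload per the "Type, Stream Id" escape layout noted above; the escape emission itself is not shown.

static void fill_video_flush(SVGAEscapeVideoFlush *flush, uint32 streamId)
{
   flush->cmdType  = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
   flush->streamId = streamId;
}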
+
+
+/*
+ * Struct definitions for the video overlay commands built on
+ * SVGAFifoCmdEscape.
+ */
+typedef
+struct {
+ uint32 command;
+ uint32 overlay;
+} SVGAFifoEscapeCmdVideoBase;
+
+typedef
+struct {
+ SVGAFifoEscapeCmdVideoBase videoCmd;
+} SVGAFifoEscapeCmdVideoFlush;
+
+typedef
+struct {
+ SVGAFifoEscapeCmdVideoBase videoCmd;
+ struct {
+ uint32 regId;
+ uint32 value;
+ } items[1];
+} SVGAFifoEscapeCmdVideoSetRegs;
+
+typedef
+struct {
+ SVGAFifoEscapeCmdVideoBase videoCmd;
+ struct {
+ uint32 regId;
+ uint32 value;
+ } items[SVGA_VIDEO_NUM_REGS];
+} SVGAFifoEscapeCmdVideoSetAllRegs;
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * VMwareVideoGetAttributes --
+ *
+ * Computes the size, pitches and offsets for YUV frames.
+ *
+ * Results:
+ *      TRUE on success, FALSE on failure.
+ *
+ * Side effects:
+ * Pitches and offsets for the given YUV frame are put in 'pitches'
+ * and 'offsets' respectively. They are both optional though.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline bool
+VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
+ uint32 *width, // IN / OUT
+ uint32 *height, // IN / OUT
+ uint32 *size, // OUT
+ uint32 *pitches, // OUT (optional)
+ uint32 *offsets) // OUT (optional)
+{
+ int tmp;
+
+ *width = (*width + 1) & ~1;
+
+ if (offsets) {
+ offsets[0] = 0;
+ }
+
+ switch (format) {
+ case VMWARE_FOURCC_YV12:
+ *height = (*height + 1) & ~1;
+ *size = (*width + 3) & ~3;
+
+ if (pitches) {
+ pitches[0] = *size;
+ }
+
+ *size *= *height;
+
+ if (offsets) {
+ offsets[1] = *size;
+ }
+
+ tmp = ((*width >> 1) + 3) & ~3;
+
+ if (pitches) {
+ pitches[1] = pitches[2] = tmp;
+ }
+
+ tmp *= (*height >> 1);
+ *size += tmp;
+
+ if (offsets) {
+ offsets[2] = *size;
+ }
+
+ *size += tmp;
+ break;
+
+ case VMWARE_FOURCC_YUY2:
+ case VMWARE_FOURCC_UYVY:
+ *size = *width * 2;
+
+ if (pitches) {
+ pitches[0] = *size;
+ }
+
+ *size *= *height;
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
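
Editorial usage sketch (not part of the patch): querying the layout of a YV12 frame; the helper may round the width and height up.

static bool example_yv12_layout(void)
{
   uint32 width = 640, height = 480, size;
   uint32 pitches[3], offsets[3];

   if (!VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12, &width, &height,
                                 &size, pitches, offsets))
      return false;

   /* 'size' is the total frame size in bytes; pitches[] and offsets[]
      describe the three planes of the (possibly rounded-up) frame. */
   return true;
}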
+
+#endif // _SVGA_OVERLAY_H_
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
new file mode 100644
index 0000000..1b96c2e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -0,0 +1,1346 @@
+/**********************************************************
+ * Copyright 1998-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_reg.h --
+ *
+ * Virtual hardware definitions for the VMware SVGA II device.
+ */
+
+#ifndef _SVGA_REG_H_
+#define _SVGA_REG_H_
+
+/*
+ * PCI device IDs.
+ */
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
+
+/*
+ * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
+ * cursor bypass mode. This is still supported, but no new guest
+ * drivers should use it.
+ */
+#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
+
+/*
+ * The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
+ * The changeMap in the monitor is proportional to this number. Therefore, we'd
+ * like to keep it as small as possible to reduce monitor overhead (using
+ * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
+ * 4k!).
+ *
+ * NB: For compatibility reasons, this value must be greater than 0xff0000.
+ * See bug 335072.
+ */
+#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
+
+#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8
+#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
+#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS)
+
+#define SVGA_MAGIC 0x900000UL
+#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
+
+/* Version 2 let the address of the frame buffer be unsigned on Win32 */
+#define SVGA_VERSION_2 2
+#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
+
+/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
+ PALETTE_BASE has moved */
+#define SVGA_VERSION_1 1
+#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1)
+
+/* Version 0 is the initial version */
+#define SVGA_VERSION_0 0
+#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
+
+/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+#define SVGA_ID_INVALID 0xFFFFFFFF
+
+/* Port offsets, relative to BAR0 */
+#define SVGA_INDEX_PORT 0x0
+#define SVGA_VALUE_PORT 0x1
+#define SVGA_BIOS_PORT 0x2
+#define SVGA_IRQSTATUS_PORT 0x8
+
+/*
+ * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
+ *
+ * Interrupts are only supported when the
+ * SVGA_CAP_IRQMASK capability is present.
+ */
+#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
+
+/*
+ * Registers
+ */
+
+enum {
+ SVGA_REG_ID = 0,
+ SVGA_REG_ENABLE = 1,
+ SVGA_REG_WIDTH = 2,
+ SVGA_REG_HEIGHT = 3,
+ SVGA_REG_MAX_WIDTH = 4,
+ SVGA_REG_MAX_HEIGHT = 5,
+ SVGA_REG_DEPTH = 6,
+ SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */
+ SVGA_REG_PSEUDOCOLOR = 8,
+ SVGA_REG_RED_MASK = 9,
+ SVGA_REG_GREEN_MASK = 10,
+ SVGA_REG_BLUE_MASK = 11,
+ SVGA_REG_BYTES_PER_LINE = 12,
+ SVGA_REG_FB_START = 13, /* (Deprecated) */
+ SVGA_REG_FB_OFFSET = 14,
+ SVGA_REG_VRAM_SIZE = 15,
+ SVGA_REG_FB_SIZE = 16,
+
+ /* ID 0 implementation only had the above registers, then the palette */
+
+ SVGA_REG_CAPABILITIES = 17,
+ SVGA_REG_MEM_START = 18, /* (Deprecated) */
+ SVGA_REG_MEM_SIZE = 19,
+ SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
+ SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
+ SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
+ SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
+ SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
+ SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
+ SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
+ SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
+ SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
+ SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */
+ SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */
+ SVGA_REG_NUM_DISPLAYS = 31, /* (Deprecated) */
+ SVGA_REG_PITCHLOCK = 32, /* Fixed pitch for all modes */
+ SVGA_REG_IRQMASK = 33, /* Interrupt mask */
+
+ /* Legacy multi-monitor support */
+ SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
+ SVGA_REG_DISPLAY_ID = 35, /* Display ID for the following display attributes */
+ SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
+ SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
+ SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
+ SVGA_REG_DISPLAY_WIDTH = 39, /* The display's width */
+ SVGA_REG_DISPLAY_HEIGHT = 40, /* The display's height */
+
+ /* See "Guest memory regions" below. */
+ SVGA_REG_GMR_ID = 41,
+ SVGA_REG_GMR_DESCRIPTOR = 42,
+ SVGA_REG_GMR_MAX_IDS = 43,
+ SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
+
+ SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
+ SVGA_REG_TOP = 46, /* Must be 1 more than the last register */
+
+ SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
+ /* Next 768 (== 256*3) registers exist for colormap */
+
+ SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
+ /* Base of scratch registers */
+ /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
+ First 4 are reserved for VESA BIOS Extension; any remaining are for
+ the use of the current SVGA driver. */
+};
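
Editorial sketch (not part of the patch) of indexed register access through the BAR0 ports defined above. The index/value protocol and the outl()/inl()-style port accessors are assumptions not spelled out in this header; ioBase stands for the device's BAR0 I/O base.

static uint32 svga_read_reg(unsigned long ioBase, uint32 index)
{
   outl(index, ioBase + SVGA_INDEX_PORT);   /* select the register ... */
   return inl(ioBase + SVGA_VALUE_PORT);    /* ... then read its value */
}

static void svga_write_reg(unsigned long ioBase, uint32 index, uint32 value)
{
   outl(index, ioBase + SVGA_INDEX_PORT);
   outl(value, ioBase + SVGA_VALUE_PORT);
}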
+
+
+/*
+ * Guest memory regions (GMRs):
+ *
+ * This is a new memory mapping feature available in SVGA devices
+ * which have the SVGA_CAP_GMR bit set. Previously, there were two
+ * fixed memory regions available with which to share data between the
+ * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
+ * are our name for an extensible way of providing arbitrary DMA
+ * buffers for use between the driver and the SVGA device. They are a
+ * new alternative to framebuffer memory, usable for both 2D and 3D
+ * graphics operations.
+ *
+ * Since GMR mapping must be done synchronously with guest CPU
+ * execution, we use a new pair of SVGA registers:
+ *
+ * SVGA_REG_GMR_ID --
+ *
+ * Read/write.
+ * This register holds the 32-bit ID (a small positive integer)
+ * of a GMR to create, delete, or redefine. Writing this register
+ * has no side-effects.
+ *
+ * SVGA_REG_GMR_DESCRIPTOR --
+ *
+ * Write-only.
+ * Writing this register will create, delete, or redefine the GMR
+ * specified by the above ID register. If this register is zero,
+ * the GMR is deleted. Any pointers into this GMR (including those
+ * currently being processed by FIFO commands) will be
+ * synchronously invalidated.
+ *
+ * If this register is nonzero, it must be the physical page
+ * number (PPN) of a data structure which describes the physical
+ * layout of the memory region this GMR should describe. The
+ * descriptor structure will be read synchronously by the SVGA
+ * device when this register is written. The descriptor need not
+ * remain allocated for the lifetime of the GMR.
+ *
+ * The guest driver should write SVGA_REG_GMR_ID first, then
+ * SVGA_REG_GMR_DESCRIPTOR.
+ *
+ * SVGA_REG_GMR_MAX_IDS --
+ *
+ * Read-only.
+ * The SVGA device may choose to support a maximum number of
+ * user-defined GMR IDs. This register holds the number of supported
+ * IDs. (The maximum supported ID plus 1)
+ *
+ * SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
+ *
+ * Read-only.
+ * The SVGA device may choose to put a limit on the total number
+ * of SVGAGuestMemDescriptor structures it will read when defining
+ * a single GMR.
+ *
+ * The descriptor structure is an array of SVGAGuestMemDescriptor
+ * structures. Each structure may do one of three things:
+ *
+ * - Terminate the GMR descriptor list.
+ * (ppn==0, numPages==0)
+ *
+ * - Add a PPN or range of PPNs to the GMR's virtual address space.
+ * (ppn != 0, numPages != 0)
+ *
+ * - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
+ * support multi-page GMR descriptor tables without forcing the
+ * driver to allocate physically contiguous memory.
+ * (ppn != 0, numPages == 0)
+ *
+ * Note that each physical page of SVGAGuestMemDescriptor structures
+ * can describe at least 2MB of guest memory. If the driver needs to
+ * use more than one page of descriptor structures, it must use one of
+ * its SVGAGuestMemDescriptors to point to an additional page. The
+ * device will never automatically cross a page boundary.
+ *
+ * Once the driver has described a GMR, it is immediately available
+ * for use via any FIFO command that uses an SVGAGuestPtr structure.
+ * These pointers include a GMR identifier plus an offset into that
+ * GMR.
+ *
+ * The driver must check the SVGA_CAP_GMR bit before using the GMR
+ * registers.
+ */
+
+/*
+ * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
+ * memory as well. In the future, these IDs could even be used to
+ * allow legacy memory regions to be redefined by the guest as GMRs.
+ *
+ * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
+ * is being phased out. Please try to use user-defined GMRs whenever
+ * possible.
+ */
+#define SVGA_GMR_NULL ((uint32) -1)
+#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
+
+typedef
+struct SVGAGuestMemDescriptor {
+ uint32 ppn;
+ uint32 numPages;
+} SVGAGuestMemDescriptor;
+
+typedef
+struct SVGAGuestPtr {
+ uint32 gmrId;
+ uint32 offset;
+} SVGAGuestPtr;
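
Editorial sketch (not part of the patch) of the GMR definition sequence described above: one descriptor page holding a single run of pages plus the {0, 0} terminator, then the ID register written before the descriptor register. write_reg() and the PPN of the descriptor page are assumptions.

static void fill_single_range(SVGAGuestMemDescriptor *desc,
                              uint32 firstPPN, uint32 numPages)
{
   desc[0].ppn      = firstPPN;   /* run of physically contiguous pages */
   desc[0].numPages = numPages;
   desc[1].ppn      = 0;          /* terminator: ppn == 0, numPages == 0 */
   desc[1].numPages = 0;
}

static void define_gmr(uint32 gmrId, uint32 descriptorPPN)
{
   write_reg(SVGA_REG_GMR_ID, gmrId);                   /* ID first ...        */
   write_reg(SVGA_REG_GMR_DESCRIPTOR, descriptorPPN);   /* ... then descriptor */
}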
+
+
+/*
+ * SVGAGMRImageFormat --
+ *
+ * This is a packed representation of the source 2D image format
+ * for a GMR-to-screen blit. Currently it is defined as an encoding
+ *    of the screen's color depth and bits-per-pixel; however, 16 bits
+ * are reserved for future use to identify other encodings (such as
+ * RGBA or higher-precision images).
+ *
+ * Currently supported formats:
+ *
+ * bpp depth Format Name
+ * --- ----- -----------
+ * 32 24 32-bit BGRX
+ * 24 24 24-bit BGR
+ * 16 16 RGB 5-6-5
+ * 16 15 RGB 5-5-5
+ *
+ */
+
+typedef
+struct SVGAGMRImageFormat {
+ union {
+ struct {
+ uint32 bitsPerPixel : 8;
+ uint32 colorDepth : 8;
+ uint32 reserved : 16; // Must be zero
+ };
+
+ uint32 value;
+ };
+} SVGAGMRImageFormat;
+
+/*
+ * SVGAColorBGRX --
+ *
+ * A 24-bit color format (BGRX), which does not depend on the
+ * format of the legacy guest framebuffer (GFB) or the current
+ * GMRFB state.
+ */
+
+typedef
+struct SVGAColorBGRX {
+ union {
+ struct {
+ uint32 b : 8;
+ uint32 g : 8;
+ uint32 r : 8;
+ uint32 x : 8; // Unused
+ };
+
+ uint32 value;
+ };
+} SVGAColorBGRX;
+
+
+/*
+ * SVGASignedRect --
+ * SVGASignedPoint --
+ *
+ * Signed rectangle and point primitives. These are used by the new
+ * 2D primitives for drawing to Screen Objects, which can occupy a
+ * signed virtual coordinate space.
+ *
+ * SVGASignedRect specifies a half-open interval: the (left, top)
+ * pixel is part of the rectangle, but the (right, bottom) pixel is
+ * not.
+ */
+
+typedef
+struct SVGASignedRect {
+ int32 left;
+ int32 top;
+ int32 right;
+ int32 bottom;
+} SVGASignedRect;
+
+typedef
+struct SVGASignedPoint {
+ int32 x;
+ int32 y;
+} SVGASignedPoint;
+
+
+/*
+ * Capabilities
+ *
+ * Note the holes in the bitfield. Missing bits have been deprecated,
+ * and must not be reused. Those capabilities will never be reported
+ * by new versions of the SVGA device.
+ */
+
+#define SVGA_CAP_NONE 0x00000000
+#define SVGA_CAP_RECT_COPY 0x00000002
+#define SVGA_CAP_CURSOR 0x00000020
+#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_8BIT_EMULATION 0x00000100
+#define SVGA_CAP_ALPHA_CURSOR 0x00000200
+#define SVGA_CAP_3D 0x00004000
+#define SVGA_CAP_EXTENDED_FIFO 0x00008000
+#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
+#define SVGA_CAP_PITCHLOCK 0x00020000
+#define SVGA_CAP_IRQMASK 0x00040000
+#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
+#define SVGA_CAP_GMR 0x00100000
+#define SVGA_CAP_TRACES 0x00200000
+
+
+/*
+ * FIFO register indices.
+ *
+ * The FIFO is a chunk of device memory mapped into guest physmem. It
+ * is always treated as 32-bit words.
+ *
+ * The guest driver gets to decide how to partition it between
+ * - FIFO registers (there are always at least 4, specifying where the
+ * following data area is and how much data it contains; there may be
+ * more registers following these, depending on the FIFO protocol
+ * version in use)
+ * - FIFO data, written by the guest and slurped out by the VMX.
+ * These indices are 32-bit word offsets into the FIFO.
+ */
+
+enum {
+ /*
+ * Block 1 (basic registers): The originally defined FIFO registers.
+ * These exist and are valid for all versions of the FIFO protocol.
+ */
+
+ SVGA_FIFO_MIN = 0,
+ SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */
+ SVGA_FIFO_NEXT_CMD,
+ SVGA_FIFO_STOP,
+
+ /*
+ * Block 2 (extended registers): Mandatory registers for the extended
+ * FIFO. These exist if the SVGA caps register includes
+ * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
+ * associated capability bit is enabled.
+ *
+ * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
+ * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
+ * This means that the guest has to test individually (in most cases
+ * using FIFO caps) for the presence of registers after this; the VMX
+ * can define "extended FIFO" to mean whatever it wants, and currently
+ * won't enable it unless there's room for that set and much more.
+ */
+
+ SVGA_FIFO_CAPABILITIES = 4,
+ SVGA_FIFO_FLAGS,
+ // Valid with SVGA_FIFO_CAP_FENCE:
+ SVGA_FIFO_FENCE,
+
+ /*
+ * Block 3a (optional extended registers): Additional registers for the
+ * extended FIFO, whose presence isn't actually implied by
+ * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
+ * leave room for them.
+ *
+    * The VMX currently considers these block 3a registers mandatory for
+    * the extended FIFO.
+ */
+
+ // Valid if exists (i.e. if extended FIFO enabled):
+ SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
+ // Valid with SVGA_FIFO_CAP_PITCHLOCK:
+ SVGA_FIFO_PITCHLOCK,
+
+ // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
+ SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
+ SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
+ SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
+ SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
+ SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
+
+ // Valid with SVGA_FIFO_CAP_RESERVE:
+ SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
+
+ /*
+ * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
+ *
+ * By default this is SVGA_ID_INVALID, to indicate that the cursor
+ * coordinates are specified relative to the virtual root. If this
+ * is set to a specific screen ID, cursor position is reinterpreted
+ * as a signed offset relative to that screen's origin. This is the
+ * only way to place the cursor on a non-rooted screen.
+ */
+ SVGA_FIFO_CURSOR_SCREEN_ID,
+
+ /*
+ * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
+ * registers, but this must be done carefully and with judicious use of
+ * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
+ * enough to tell you whether the register exists: we've shipped drivers
+ * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
+ * the earlier ones. The actual order of introduction was:
+ * - PITCHLOCK
+ * - 3D_CAPS
+ * - CURSOR_* (cursor bypass 3)
+ * - RESERVED
+ * So, code that wants to know whether it can use any of the
+ * aforementioned registers, or anything else added after PITCHLOCK and
+ * before 3D_CAPS, needs to reason about something other than
+ * SVGA_FIFO_MIN.
+ */
+
+ /*
+ * 3D caps block space; valid with 3D hardware version >=
+ * SVGA3D_HWVERSION_WS6_B1.
+ */
+ SVGA_FIFO_3D_CAPS = 32,
+ SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
+
+ /*
+ * End of VMX's current definition of "extended-FIFO registers".
+ * Registers before here are always enabled/disabled as a block; either
+ * the extended FIFO is enabled and includes all preceding registers, or
+ * it's disabled entirely.
+ *
+ * Block 3b (truly optional extended registers): Additional registers for
+ * the extended FIFO, which the VMX already knows how to enable and
+ * disable with correct granularity.
+ *
+ * Registers after here exist if and only if the guest SVGA driver
+ * sets SVGA_FIFO_MIN high enough to leave room for them.
+ */
+
+ // Valid if register exists:
+ SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
+ SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
+ SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
+
+ /*
+ * Always keep this last. This defines the maximum number of
+ * registers we know about. At power-on, this value is placed in
+ * the SVGA_REG_MEM_REGS register, and we expect the guest driver
+ * to allocate this much space in FIFO memory for registers.
+ */
+ SVGA_FIFO_NUM_REGS
+};
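
Editorial sketch (not part of the patch) of the presence test implied above for block 3b registers, assuming, as in the reference drivers, that the value stored at fifo[SVGA_FIFO_MIN] is a byte offset. Registers between PITCHLOCK and 3D_CAPS additionally need a capability check, as the comment above warns.

static int has_fifo_reg(const volatile uint32 *fifo, uint32 reg)
{
   /* There is room for word index 'reg' only if MIN leaves space for it. */
   return fifo[SVGA_FIFO_MIN] > reg * sizeof(uint32);
}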
+
+
+/*
+ * Definition of registers included in extended FIFO support.
+ *
+ * The guest SVGA driver gets to allocate the FIFO between registers
+ * and data. It must always allocate at least 4 registers, but old
+ * drivers stopped there.
+ *
+ * The VMX will enable extended FIFO support if and only if the guest
+ * left enough room for all registers defined as part of the mandatory
+ * set for the extended FIFO.
+ *
+ * Note that the guest drivers typically allocate the FIFO only at
+ * initialization time, not at mode switches, so it's likely that the
+ * number of FIFO registers won't change without a reboot.
+ *
+ * All registers less than this value are guaranteed to be present if
+ * svgaUser->fifo.extended is set. Any later registers must be tested
+ * individually for compatibility at each use (in the VMX).
+ *
+ * This value is used only by the VMX, so it can change without
+ * affecting driver compatibility; keep it that way?
+ */
+#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1)
+
+
+/*
+ * FIFO Synchronization Registers
+ *
+ * This explains the relationship between the various FIFO
+ * sync-related registers in IOSpace and in FIFO space.
+ *
+ * SVGA_REG_SYNC --
+ *
+ * The SYNC register can be used in two different ways by the guest:
+ *
+ * 1. If the guest wishes to fully sync (drain) the FIFO,
+ * it will write once to SYNC then poll on the BUSY
+ * register. The FIFO is sync'ed once BUSY is zero.
+ *
+ * 2. If the guest wants to asynchronously wake up the host,
+ * it will write once to SYNC without polling on BUSY.
+ * Ideally it will do this after some new commands have
+ * been placed in the FIFO, and after reading a zero
+ * from SVGA_FIFO_BUSY.
+ *
+ * (1) is the original behaviour that SYNC was designed to
+ * support. Originally, a write to SYNC would implicitly
+ * trigger a read from BUSY. This causes us to synchronously
+ * process the FIFO.
+ *
+ * This behaviour has since been changed so that writing SYNC
+ * will *not* implicitly cause a read from BUSY. Instead, it
+ * makes a channel call which asynchronously wakes up the MKS
+ * thread.
+ *
+ * New guests can use this new behaviour to implement (2)
+ * efficiently. This lets guests get the host's attention
+ * without waiting for the MKS to poll, which gives us much
+ * better CPU utilization on SMP hosts and on UP hosts while
+ * we're blocked on the host GPU.
+ *
+ * Old guests shouldn't notice the behaviour change. SYNC was
+ * never guaranteed to process the entire FIFO, since it was
+ * bounded to a particular number of CPU cycles. Old guests will
+ * still loop on the BUSY register until the FIFO is empty.
+ *
+ * Writing to SYNC currently has the following side-effects:
+ *
+ * - Sets SVGA_REG_BUSY to TRUE (in the monitor)
+ * - Asynchronously wakes up the MKS thread for FIFO processing
+ * - The value written to SYNC is recorded as a "reason", for
+ * stats purposes.
+ *
+ * If SVGA_FIFO_BUSY is available, drivers are advised to only
+ * write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
+ * SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
+ * eventually set SVGA_FIFO_BUSY on its own, but this approach
+ * lets the driver avoid sending multiple asynchronous wakeup
+ * messages to the MKS thread.
+ *
+ * SVGA_REG_BUSY --
+ *
+ * This register is set to TRUE when SVGA_REG_SYNC is written,
+ * and it reads as FALSE when the FIFO has been completely
+ * drained.
+ *
+ * Every read from this register causes us to synchronously
+ * process FIFO commands. There is no guarantee as to how many
+ * commands each read will process.
+ *
+ * CPU time spent processing FIFO commands will be billed to
+ * the guest.
+ *
+ * New drivers should avoid using this register unless they
+ * need to guarantee that the FIFO is completely drained. It
+ * is overkill for performing a sync-to-fence. Older drivers
+ * will use this register for any type of synchronization.
+ *
+ * SVGA_FIFO_BUSY --
+ *
+ * This register is a fast way for the guest driver to check
+ * whether the FIFO is already being processed. It reads and
+ * writes at normal RAM speeds, with no monitor intervention.
+ *
+ * If this register reads as TRUE, the host is guaranteeing that
+ * any new commands written into the FIFO will be noticed before
+ * the MKS goes back to sleep.
+ *
+ * If this register reads as FALSE, no such guarantee can be
+ * made.
+ *
+ * The guest should use this register to quickly determine
+ * whether or not it needs to wake up the host. If the guest
+ * just wrote a command or group of commands that it would like
+ * the host to begin processing, it should:
+ *
+ * 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
+ * action is necessary.
+ *
+ * 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
+ * code that we've already sent a SYNC to the host and we
+ * don't need to send a duplicate.
+ *
+ * 3. Write a reason to SVGA_REG_SYNC. This will send an
+ * asynchronous wakeup to the MKS thread.
+ */
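
Editorial sketch (not part of the patch) of the three-step wakeup described above, usable only when the SVGA_FIFO_BUSY register exists. 'fifo' is the mapped FIFO memory and svga_write_reg() is a hypothetical register-write helper.

static void svga_ring_doorbell(volatile uint32 *fifo, uint32 reason)
{
   if (fifo[SVGA_FIFO_BUSY])
      return;                              /* 1. host is already awake */

   fifo[SVGA_FIFO_BUSY] = 1;               /* 2. remember a SYNC is in flight */
   svga_write_reg(SVGA_REG_SYNC, reason);  /* 3. asynchronously wake the MKS */
}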
+
+
+/*
+ * FIFO Capabilities
+ *
+ * Fence -- Fence register and command are supported
+ * Accel Front -- Front buffer only commands are supported
+ * Pitch Lock -- Pitch lock register is supported
+ * Video -- SVGA Video overlay units are supported
+ * Escape -- Escape command is supported
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ * of the new features that each capability provides.
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT --
+ *
+ * Provides dynamic multi-screen rendering, for improved Unity and
+ * multi-monitor modes. With Screen Object, the guest can
+ * dynamically create and destroy 'screens', which can represent
+ * Unity windows or virtual monitors. Screen Object also provides
+ * strong guarantees that DMA operations happen only when
+ * guest-initiated. Screen Object deprecates the BAR1 guest
+ * framebuffer (GFB) and all commands that work only with the GFB.
+ *
+ * New registers:
+ * FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
+ *
+ * New 2D commands:
+ * DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
+ * BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
+ *
+ * New 3D commands:
+ * BLIT_SURFACE_TO_SCREEN
+ *
+ * New guarantees:
+ *
+ * - The host will not read or write guest memory, including the GFB,
+ * except when explicitly initiated by a DMA command.
+ *
+ * - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
+ * is guaranteed to complete before any subsequent FENCEs.
+ *
+ * - All legacy commands which affect a Screen (UPDATE, PRESENT,
+ * PRESENT_READBACK) as well as new Screen blit commands will
+ * all behave consistently as blits, and memory will be read
+ * or written in FIFO order.
+ *
+ * For example, if you PRESENT from one SVGA3D surface to multiple
+ * places on the screen, the data copied will always be from the
+ * SVGA3D surface at the time the PRESENT was issued in the FIFO.
+ * This was not necessarily true on devices without Screen Object.
+ *
+ * This means that on devices that support Screen Object, the
+ * PRESENT_READBACK command should not be necessary unless you
+ * actually want to read back the results of 3D rendering into
+ * system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
+ * command provides a strict superset of functionality.)
+ *
+ * - When a screen is resized, either using Screen Object commands or
+ * legacy multimon registers, its contents are preserved.
+ */
+
+#define SVGA_FIFO_CAP_NONE 0
+#define SVGA_FIFO_CAP_FENCE (1<<0)
+#define SVGA_FIFO_CAP_ACCELFRONT (1<<1)
+#define SVGA_FIFO_CAP_PITCHLOCK (1<<2)
+#define SVGA_FIFO_CAP_VIDEO (1<<3)
+#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4)
+#define SVGA_FIFO_CAP_ESCAPE (1<<5)
+#define SVGA_FIFO_CAP_RESERVE (1<<6)
+#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
+
+
+/*
+ * FIFO Flags
+ *
+ * Accel Front -- Driver should use front buffer only commands
+ */
+
+#define SVGA_FIFO_FLAG_NONE 0
+#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
+#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
+
+/*
+ * FIFO reservation sentinel value
+ */
+
+#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff
+
+
+/*
+ * Video overlay support
+ */
+
+#define SVGA_NUM_OVERLAY_UNITS 32
+
+
+/*
+ * Video capabilities that the guest is currently using
+ */
+
+#define SVGA_VIDEO_FLAG_COLORKEY 0x0001
+
+
+/*
+ * Offsets for the video overlay registers
+ */
+
+enum {
+ SVGA_VIDEO_ENABLED = 0,
+ SVGA_VIDEO_FLAGS,
+ SVGA_VIDEO_DATA_OFFSET,
+ SVGA_VIDEO_FORMAT,
+ SVGA_VIDEO_COLORKEY,
+ SVGA_VIDEO_SIZE, // Deprecated
+ SVGA_VIDEO_WIDTH,
+ SVGA_VIDEO_HEIGHT,
+ SVGA_VIDEO_SRC_X,
+ SVGA_VIDEO_SRC_Y,
+ SVGA_VIDEO_SRC_WIDTH,
+ SVGA_VIDEO_SRC_HEIGHT,
+ SVGA_VIDEO_DST_X, // Signed int32
+ SVGA_VIDEO_DST_Y, // Signed int32
+ SVGA_VIDEO_DST_WIDTH,
+ SVGA_VIDEO_DST_HEIGHT,
+ SVGA_VIDEO_PITCH_1,
+ SVGA_VIDEO_PITCH_2,
+ SVGA_VIDEO_PITCH_3,
+ SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER
+ SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
+ SVGA_VIDEO_NUM_REGS
+};
+
+
+/*
+ * SVGA Overlay Units
+ *
+ * width and height relate to the entire source video frame.
+ * srcX, srcY, srcWidth and srcHeight represent the subset of the source
+ * video frame to be displayed.
+ */
+
+typedef struct SVGAOverlayUnit {
+ uint32 enabled;
+ uint32 flags;
+ uint32 dataOffset;
+ uint32 format;
+ uint32 colorKey;
+ uint32 size;
+ uint32 width;
+ uint32 height;
+ uint32 srcX;
+ uint32 srcY;
+ uint32 srcWidth;
+ uint32 srcHeight;
+ int32 dstX;
+ int32 dstY;
+ uint32 dstWidth;
+ uint32 dstHeight;
+ uint32 pitches[3];
+ uint32 dataGMRId;
+ uint32 dstScreenId;
+} SVGAOverlayUnit;
+
+
+/*
+ * SVGAScreenObject --
+ *
+ * This is a new way to represent a guest's multi-monitor screen or
+ * Unity window. Screen objects are only supported if the
+ * SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
+ *
+ * If Screen Objects are supported, they can be used to fully
+ * replace the functionality provided by the framebuffer registers
+ * (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
+ *
+ * The screen object is a struct with guaranteed binary
+ * compatibility. New flags can be added, and the struct may grow,
+ * but existing fields must retain their meaning.
+ *
+ */
+
+#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
+#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
+
+typedef
+struct SVGAScreenObject {
+ uint32 structSize; // sizeof(SVGAScreenObject)
+ uint32 id;
+ uint32 flags;
+ struct {
+ uint32 width;
+ uint32 height;
+ } size;
+ struct {
+ int32 x;
+ int32 y;
+ } root; // Only used if SVGA_SCREEN_HAS_ROOT is set.
+} SVGAScreenObject;
+
+
+/*
+ * Commands in the command FIFO:
+ *
+ * Command IDs defined below are used for the traditional 2D FIFO
+ * communication (not all commands are available for all versions of the
+ * SVGA FIFO protocol).
+ *
+ * Note the holes in the command ID numbers: These commands have been
+ * deprecated, and the old IDs must not be reused.
+ *
+ * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
+ * protocol.
+ *
+ * Each command's parameters are described by the comments and
+ * structs below.
+ */
+
+typedef enum {
+ SVGA_CMD_INVALID_CMD = 0,
+ SVGA_CMD_UPDATE = 1,
+ SVGA_CMD_RECT_COPY = 3,
+ SVGA_CMD_DEFINE_CURSOR = 19,
+ SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
+ SVGA_CMD_UPDATE_VERBOSE = 25,
+ SVGA_CMD_FRONT_ROP_FILL = 29,
+ SVGA_CMD_FENCE = 30,
+ SVGA_CMD_ESCAPE = 33,
+ SVGA_CMD_DEFINE_SCREEN = 34,
+ SVGA_CMD_DESTROY_SCREEN = 35,
+ SVGA_CMD_DEFINE_GMRFB = 36,
+ SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37,
+ SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
+ SVGA_CMD_ANNOTATION_FILL = 39,
+ SVGA_CMD_ANNOTATION_COPY = 40,
+ SVGA_CMD_MAX
+} SVGAFifoCmdId;
+
+#define SVGA_CMD_MAX_ARGS 64
+
+
+/*
+ * SVGA_CMD_UPDATE --
+ *
+ * This is a DMA transfer which copies from the Guest Framebuffer
+ * (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
+ * intersect with the provided virtual rectangle.
+ *
+ * This command does not support using arbitrary guest memory as a
+ * data source; it only works with the pre-defined GFB memory.
+ * This command also does not support signed virtual coordinates.
+ * If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
+ * negative root x/y coordinates, the negative portion of those
+ * screens will not be reachable by this command.
+ *
+ * This command is not necessary when using framebuffer
+ * traces. Traces are automatically enabled if the SVGA FIFO is
+ * disabled, and you may explicitly enable/disable traces using
+ * SVGA_REG_TRACES. With traces enabled, any write to the GFB will
+ * automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
+ *
+ * Traces and SVGA_CMD_UPDATE are the only supported ways to render
+ * pseudocolor screen updates. The newer Screen Object commands
+ * only support true color formats.
+ *
+ * Availability:
+ * Always available.
+ */
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 width;
+ uint32 height;
+} SVGAFifoCmdUpdate;
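
Editorial sketch (not part of the patch) of emitting an UPDATE. The encoding assumed here (a 32-bit command ID immediately followed by its argument struct) and the fifo_reserve()/fifo_commit() helpers are not defined by this header.

static void svga_update(uint32 x, uint32 y, uint32 width, uint32 height)
{
   struct {
      uint32 cmdId;
      SVGAFifoCmdUpdate body;
   } *cmd = fifo_reserve(sizeof *cmd);  /* hypothetical FIFO space reservation */

   cmd->cmdId       = SVGA_CMD_UPDATE;
   cmd->body.x      = x;
   cmd->body.y      = y;
   cmd->body.width  = width;
   cmd->body.height = height;
   fifo_commit(sizeof *cmd);            /* hypothetical */
}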
+
+
+/*
+ * SVGA_CMD_RECT_COPY --
+ *
+ * Perform a rectangular DMA transfer from one area of the GFB to
+ * another, and copy the result to any screens which intersect it.
+ *
+ * Availability:
+ * SVGA_CAP_RECT_COPY
+ */
+
+typedef
+struct {
+ uint32 srcX;
+ uint32 srcY;
+ uint32 destX;
+ uint32 destY;
+ uint32 width;
+ uint32 height;
+} SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_CURSOR --
+ *
+ * Provide a new cursor image, as an AND/XOR mask.
+ *
+ * The recommended way to position the cursor overlay is by using
+ * the SVGA_FIFO_CURSOR_* registers, supported by the
+ * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ * SVGA_CAP_CURSOR
+ */
+
+typedef
+struct {
+ uint32 id; // Reserved, must be zero.
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
+ uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
+ /*
+ * Followed by scanline data for AND mask, then XOR mask.
+ * Each scanline is padded to a 32-bit boundary.
+ */
+} SVGAFifoCmdDefineCursor;
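
Editorial sketch (not part of the patch): the size in bytes of one cursor mask, given that each scanline is padded to a 32-bit boundary as the comment above states.

static uint32 cursor_mask_bytes(uint32 width, uint32 height, uint32 depth)
{
   uint32 wordsPerScanline = (width * depth + 31) / 32;
   return wordsPerScanline * sizeof(uint32) * height;
}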
+
+
+/*
+ * SVGA_CMD_DEFINE_ALPHA_CURSOR --
+ *
+ * Provide a new cursor image, in 32-bit BGRA format.
+ *
+ * The recommended way to position the cursor overlay is by using
+ * the SVGA_FIFO_CURSOR_* registers, supported by the
+ * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ * SVGA_CAP_ALPHA_CURSOR
+ */
+
+typedef
+struct {
+ uint32 id; // Reserved, must be zero.
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ /* Followed by scanline data */
+} SVGAFifoCmdDefineAlphaCursor;
+
+
+/*
+ * SVGA_CMD_UPDATE_VERBOSE --
+ *
+ * Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
+ * 'reason' value, an opaque cookie which is used by internal
+ * debugging tools. Third party drivers should not use this
+ * command.
+ *
+ * Availability:
+ * SVGA_CAP_EXTENDED_FIFO
+ */
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 width;
+ uint32 height;
+ uint32 reason;
+} SVGAFifoCmdUpdateVerbose;
+
+
+/*
+ * SVGA_CMD_FRONT_ROP_FILL --
+ *
+ * This is a hint which tells the SVGA device that the driver has
+ * just filled a rectangular region of the GFB with a solid
+ * color. Instead of reading these pixels from the GFB, the device
+ * can assume that they all equal 'color'. This is primarily used
+ * for remote desktop protocols.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_ACCELFRONT
+ */
+
+#define SVGA_ROP_COPY 0x03
+
+typedef
+struct {
+ uint32 color; // In the same format as the GFB
+ uint32 x;
+ uint32 y;
+ uint32 width;
+ uint32 height;
+ uint32 rop; // Must be SVGA_ROP_COPY
+} SVGAFifoCmdFrontRopFill;
+
+
+/*
+ * SVGA_CMD_FENCE --
+ *
+ * Insert a synchronization fence. When the SVGA device reaches
+ * this command, it will copy the 'fence' value into the
+ * SVGA_FIFO_FENCE register. It will also compare the fence against
+ * SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
+ * SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
+ * raise this interrupt.
+ *
+ * Availability:
+ * SVGA_FIFO_FENCE for this command,
+ * SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
+ */
+
+typedef
+struct {
+ uint32 fence;
+} SVGAFifoCmdFence;
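+
+/*
+ * Hedged usage sketch: the driver writes a monotonically increasing
+ * value here and later compares it against the SVGA_FIFO_FENCE
+ * register to learn how far the device has progressed.  In vmwgfx
+ * this is wrapped by vmw_fifo_send_fence() and vmw_wait_fence()
+ * (declared in vmwgfx_drv.h); the snippet below is only illustrative.
+ *
+ *   uint32_t seq;
+ *
+ *   if (vmw_fifo_send_fence(dev_priv, &seq) == 0)
+ *           (void) vmw_wait_fence(dev_priv, false, seq, true, 3 * HZ);
+ */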
+
+
+/*
+ * SVGA_CMD_ESCAPE --
+ *
+ * Send an extended or vendor-specific variable length command.
+ * This is used for video overlay, third party plugins, and
+ * internal debugging tools. See svga_escape.h
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_ESCAPE
+ */
+
+typedef
+struct {
+ uint32 nsid;
+ uint32 size;
+ /* followed by 'size' bytes of data */
+} SVGAFifoCmdEscape;
+
+
+/*
+ * SVGA_CMD_DEFINE_SCREEN --
+ *
+ * Define or redefine an SVGAScreenObject. See the description of
+ * SVGAScreenObject above. The video driver is responsible for
+ * generating new screen IDs. They should be small positive
+ * integers. The virtual device will have an implementation
+ * specific upper limit on the number of screen IDs
+ * supported. Drivers are responsible for recycling IDs. The first
+ * valid ID is zero.
+ *
+ * - Interaction with other registers:
+ *
+ * For backwards compatibility, when the GFB mode registers (WIDTH,
+ * HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
+ * deletes all screens other than screen #0, and redefines screen
+ * #0 according to the specified mode. Drivers that use
+ * SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
+ *
+ * If you use screen objects, do not use the legacy multi-mon
+ * registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGAScreenObject screen; // Variable-length according to version
+} SVGAFifoCmdDefineScreen;
+
+
+/*
+ * SVGA_CMD_DESTROY_SCREEN --
+ *
+ * Destroy an SVGAScreenObject. Its ID is immediately available for
+ * re-use.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ uint32 screenId;
+} SVGAFifoCmdDestroyScreen;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMRFB --
+ *
+ * This command sets a piece of SVGA device state called the
+ * Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
+ * piece of light-weight state which identifies the location and
+ * format of an image in guest memory or in BAR1. The GMRFB has
+ * an arbitrary size, and it doesn't need to match the geometry
+ * of the GFB or any screen object.
+ *
+ * The GMRFB can be redefined as often as you like. You could
+ * always use the same GMRFB, you could redefine it before
+ * rendering from a different guest screen, or you could even
+ * redefine it before every blit.
+ *
+ * There are multiple ways to use this command. The simplest way is
+ * to use it to move the framebuffer either to elsewhere in the GFB
+ * (BAR1) memory region, or to a user-defined GMR. This lets a
+ * driver use a framebuffer allocated entirely out of normal system
+ * memory, which we encourage.
+ *
+ * Another way to use this command is to set up a ring buffer of
+ * updates in GFB memory. If a driver wants to ensure that no
+ * frames are skipped by the SVGA device, it is important that the
+ * driver not modify the source data for a blit until the device is
+ * done processing the command. One efficient way to accomplish
+ * this is to use a ring of small DMA buffers. Each buffer is used
+ * for one blit, then we move on to the next buffer in the
+ * ring. The FENCE mechanism is used to protect each buffer from
+ * re-use until the device is finished with that buffer's
+ * corresponding blit.
+ *
+ * This command does not affect the meaning of SVGA_CMD_UPDATE.
+ * UPDATEs always occur from the legacy GFB memory area. This
+ * command has no support for pseudocolor GMRFBs. Currently only
+ * true-color 15, 16, and 24-bit depths are supported. Future
+ * devices may expose capabilities for additional framebuffer
+ * formats.
+ *
+ * The default GMRFB value is undefined. Drivers must always send
+ * this command at least once before performing any blit from the
+ * GMRFB.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGAGuestPtr ptr;
+ uint32 bytesPerLine;
+ SVGAGMRImageFormat format;
+} SVGAFifoCmdDefineGMRFB;
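+
+/*
+ * Illustrative flow only (the helpers named below are hypothetical,
+ * not part of this interface): the ring-buffer pattern described
+ * above amounts to "define the GMRFB, blit it to a screen, fence it,
+ * and do not touch that buffer again until the fence has signaled".
+ *
+ *   define_gmrfb(ring[i].ptr, bytes_per_line, format);
+ *   blit_gmrfb_to_screen(src_origin, dest_rect, dest_screen_id);
+ *   ring[i].fence = insert_fence();
+ *   ...
+ *   wait_for_fence(ring[i].fence);    -- before reusing ring[i]
+ */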
+
+
+/*
+ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
+ *
+ * This is a guest-to-host blit. It performs a DMA operation to
+ * copy a rectangular region of pixels from the current GMRFB to
+ * one or more Screen Objects.
+ *
+ * The destination coordinate may be specified relative to a
+ * screen's origin (if a screen ID is specified) or relative to the
+ * virtual coordinate system's origin (if the screen ID is
+ * SVGA_ID_INVALID). The actual destination may span zero or more
+ * screens, in the case of a virtual destination rect or a rect
+ * which extends off the edge of the specified screen.
+ *
+ * This command writes to the screen's "base layer": the underlying
+ * framebuffer which exists below any cursor or video overlays. No
+ * action is necessary to explicitly hide or update any overlays
+ * which exist on top of the updated region.
+ *
+ * The SVGA device is guaranteed to finish reading from the GMRFB
+ * by the time any subsequent FENCE commands are reached.
+ *
+ * This command consumes an annotation. See the
+ * SVGA_CMD_ANNOTATION_* commands for details.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGASignedPoint srcOrigin;
+ SVGASignedRect destRect;
+ uint32 destScreenId;
+} SVGAFifoCmdBlitGMRFBToScreen;
+
+
+/*
+ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
+ *
+ * This is a host-to-guest blit. It performs a DMA operation to
+ * copy a rectangular region of pixels from a single Screen Object
+ * back to the current GMRFB.
+ *
+ * Usage note: This command should be used rarely. It will
+ * typically be inefficient, but it is necessary for some types of
+ * synchronization between 3D (GPU) and 2D (CPU) rendering into
+ * overlapping areas of a screen.
+ *
+ * The source coordinate is specified relative to a screen's
+ * origin. The provided screen ID must be valid. If any parameters
+ * are invalid, the resulting pixel values are undefined.
+ *
+ * This command reads the screen's "base layer". Overlays like
+ * video and cursor are not included, but any data which was sent
+ * using a blit-to-screen primitive will be available, no matter
+ * whether the data's original source was the GMRFB or the 3D
+ * acceleration hardware.
+ *
+ * Note that our guest-to-host blits and host-to-guest blits aren't
+ * symmetric in their current implementation. While the parameters
+ * are identical, host-to-guest blits are a lot less featureful.
+ * They do not support clipping: If the source parameters don't
+ * fully fit within a screen, the blit fails. They must originate
+ * from exactly one screen. Virtual coordinates are not directly
+ * supported.
+ *
+ * Host-to-guest blits do support the same set of GMRFB formats
+ * offered by guest-to-host blits.
+ *
+ * The SVGA device is guaranteed to finish writing to the GMRFB by
+ * the time any subsequent FENCE commands are reached.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGASignedPoint destOrigin;
+ SVGASignedRect srcRect;
+ uint32 srcScreenId;
+} SVGAFifoCmdBlitScreenToGMRFB;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_FILL --
+ *
+ * This is a blit annotation. This command stores a small piece of
+ * device state which is consumed by the next blit-to-screen
+ * command. The state is only cleared by commands which are
+ * specifically documented as consuming an annotation. Other
+ * commands (such as ESCAPEs for debugging) may intervene between
+ * the annotation and its associated blit.
+ *
+ * This annotation is a promise about the contents of the next
+ * blit: The video driver is guaranteeing that all pixels in that
+ * blit will have the same value, specified here as a color in
+ * SVGAColorBGRX format.
+ *
+ * The SVGA device can still render the blit correctly even if it
+ * ignores this annotation, but the annotation may allow it to
+ * perform the blit more efficiently, for example by ignoring the
+ * source data and performing a fill in hardware.
+ *
+ * This annotation is most important for performance when the
+ * user's display is being remoted over a network connection.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGAColorBGRX color;
+} SVGAFifoCmdAnnotationFill;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_COPY --
+ *
+ * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
+ * information about annotations.
+ *
+ * This annotation is a promise about the contents of the next
+ * blit: The video driver is guaranteeing that all pixels in that
+ * blit will have the same value as those which already exist at an
+ * identically-sized region on the same or a different screen.
+ *
+ * Note that the source pixels for the COPY in this annotation are
+ * sampled before applying the annotation's associated blit. They
+ * are allowed to overlap with the blit's destination pixels.
+ *
+ * The copy source rectangle is specified the same way as the blit
+ * destination: it can be a rectangle which spans zero or more
+ * screens, specified relative to either a screen or to the virtual
+ * coordinate system's origin. If the source rectangle includes
+ * pixels which are not from exactly one screen, the results are
+ * undefined.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGASignedPoint srcOrigin;
+ uint32 srcScreenId;
+} SVGAFifoCmdAnnotationCopy;
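+
+/*
+ * Hedged usage sketch: an annotation is queued immediately before the
+ * blit that consumes it.  For a solid-colour region the driver would
+ * emit SVGA_CMD_ANNOTATION_FILL followed by
+ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN covering the same rectangle, allowing
+ * the device to skip the DMA read entirely.  The emit() helper below
+ * is hypothetical.
+ *
+ *   fill.color = bgrx;
+ *   emit(SVGA_CMD_ANNOTATION_FILL, &fill, sizeof(fill));
+ *   emit(SVGA_CMD_BLIT_GMRFB_TO_SCREEN, &blit, sizeof(blit));
+ */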
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
new file mode 100644
index 0000000..55836de
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_types.h
@@ -0,0 +1,45 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Silly typedefs for the svga headers. Currently the headers are shared
+ * between all components that talk to svga, and as such they are in a
+ * completely different style and use weird defines.
+ *
+ * This file lets all the ugly be prefixed with svga*.
+ */
+
+#ifndef _SVGA_TYPES_H_
+#define _SVGA_TYPES_H_
+
+typedef uint16_t uint16;
+typedef uint32_t uint32;
+typedef uint8_t uint8;
+typedef int32_t int32;
+typedef bool Bool;
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
new file mode 100644
index 0000000..825ebe3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -0,0 +1,252 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+
+static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
+ TTM_PL_FLAG_CACHED;
+
+static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
+ TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT;
+
+static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
+ TTM_PL_FLAG_CACHED;
+
+struct ttm_placement vmw_vram_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &vram_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &vram_placement_flags
+};
+
+struct ttm_placement vmw_vram_sys_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &vram_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_vram_ne_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &vram_ne_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &vram_ne_placement_flags
+};
+
+struct ttm_placement vmw_sys_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
+struct vmw_ttm_backend {
+ struct ttm_backend backend;
+};
+
+static int vmw_ttm_populate(struct ttm_backend *backend,
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page)
+{
+ return 0;
+}
+
+static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
+{
+ return 0;
+}
+
+static int vmw_ttm_unbind(struct ttm_backend *backend)
+{
+ return 0;
+}
+
+static void vmw_ttm_clear(struct ttm_backend *backend)
+{
+}
+
+static void vmw_ttm_destroy(struct ttm_backend *backend)
+{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ kfree(vmw_be);
+}
+
+static struct ttm_backend_func vmw_ttm_func = {
+ .populate = vmw_ttm_populate,
+ .clear = vmw_ttm_clear,
+ .bind = vmw_ttm_bind,
+ .unbind = vmw_ttm_unbind,
+ .destroy = vmw_ttm_destroy,
+};
+
+struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
+{
+ struct vmw_ttm_backend *vmw_be;
+
+ vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+ if (!vmw_be)
+ return NULL;
+
+ vmw_be->backend.func = &vmw_ttm_func;
+
+ return &vmw_be->backend;
+}
+
+int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+ return 0;
+}
+
+int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ /* "On-card" video ram */
+ man->gpu_offset = 0;
+ man->io_offset = dev_priv->vram_start;
+ man->io_size = dev_priv->vram_size;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->io_addr = NULL;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void vmw_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ *placement = vmw_sys_placement;
+}
+
+/**
+ * FIXME: Proper access checks on buffers.
+ */
+
+static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem)
+{
+ if (new_mem->mem_type != TTM_PL_SYSTEM)
+ vmw_dmabuf_gmr_unbind(bo);
+}
+
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+ vmw_dmabuf_gmr_unbind(bo);
+}
+
+/**
+ * FIXME: We're using the old vmware polling method to sync.
+ * Do this with fences instead.
+ */
+
+static void *vmw_sync_obj_ref(void *sync_obj)
+{
+ return sync_obj;
+}
+
+static void vmw_sync_obj_unref(void **sync_obj)
+{
+ *sync_obj = NULL;
+}
+
+static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
+{
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+ mutex_unlock(&dev_priv->hw_mutex);
+ return 0;
+}
+
+static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
+{
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+ uint32_t sequence = (unsigned long) sync_obj;
+
+ return vmw_fence_signaled(dev_priv, sequence);
+}
+
+static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
+ bool lazy, bool interruptible)
+{
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+ uint32_t sequence = (unsigned long) sync_obj;
+
+ return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
+}
+
+struct ttm_bo_driver vmw_bo_driver = {
+ .create_ttm_backend_entry = vmw_ttm_backend_init,
+ .invalidate_caches = vmw_invalidate_caches,
+ .init_mem_type = vmw_init_mem_type,
+ .evict_flags = vmw_evict_flags,
+ .move = NULL,
+ .verify_access = vmw_verify_access,
+ .sync_obj_signaled = vmw_sync_obj_signaled,
+ .sync_obj_wait = vmw_sync_obj_wait,
+ .sync_obj_flush = vmw_sync_obj_flush,
+ .sync_obj_unref = vmw_sync_obj_unref,
+ .sync_obj_ref = vmw_sync_obj_ref,
+ .move_notify = vmw_move_notify,
+ .swap_notify = vmw_swap_notify
+};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 0000000..0c9c081
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,783 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_module.h"
+
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
+#define VMWGFX_CHIP_SVGAII 0
+#define VMW_FB_RESERVATION 0
+
+/**
+ * Fully encoded drm commands. Might move to vmw_drm.h
+ */
+
+#define DRM_IOCTL_VMW_GET_PARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
+ struct drm_vmw_getparam_arg)
+#define DRM_IOCTL_VMW_ALLOC_DMABUF \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
+ union drm_vmw_alloc_dmabuf_arg)
+#define DRM_IOCTL_VMW_UNREF_DMABUF \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
+ struct drm_vmw_unref_dmabuf_arg)
+#define DRM_IOCTL_VMW_CURSOR_BYPASS \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
+ struct drm_vmw_cursor_bypass_arg)
+
+#define DRM_IOCTL_VMW_CONTROL_STREAM \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
+ struct drm_vmw_control_stream_arg)
+#define DRM_IOCTL_VMW_CLAIM_STREAM \
+ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
+ struct drm_vmw_stream_arg)
+#define DRM_IOCTL_VMW_UNREF_STREAM \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
+ struct drm_vmw_stream_arg)
+
+#define DRM_IOCTL_VMW_CREATE_CONTEXT \
+ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
+ struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_UNREF_CONTEXT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
+ struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_CREATE_SURFACE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
+ union drm_vmw_surface_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SURFACE \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
+ struct drm_vmw_surface_arg)
+#define DRM_IOCTL_VMW_REF_SURFACE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
+ union drm_vmw_surface_reference_arg)
+#define DRM_IOCTL_VMW_EXECBUF \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
+ struct drm_vmw_execbuf_arg)
+#define DRM_IOCTL_VMW_FIFO_DEBUG \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
+ struct drm_vmw_fifo_debug_arg)
+#define DRM_IOCTL_VMW_FENCE_WAIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
+ struct drm_vmw_fence_wait_arg)
+
+
+/**
+ * The core DRM version of this macro doesn't account for
+ * DRM_COMMAND_BASE.
+ */
+
+#define VMW_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+
+/**
+ * Ioctl definitions.
+ */
+
+static struct drm_ioctl_desc vmw_ioctls[] = {
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
+ vmw_kms_cursor_bypass_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+ DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+ DRM_AUTH | DRM_UNLOCKED)
+};
+
+static struct pci_device_id vmw_pci_id_list[] = {
+ {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
+ {0, 0, 0}
+};
+
+static char *vmw_devname = "vmwgfx";
+
+static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
+static void vmw_master_init(struct vmw_master *);
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+ void *ptr);
+
+static void vmw_print_capabilities(uint32_t capabilities)
+{
+ DRM_INFO("Capabilities:\n");
+ if (capabilities & SVGA_CAP_RECT_COPY)
+ DRM_INFO(" Rect copy.\n");
+ if (capabilities & SVGA_CAP_CURSOR)
+ DRM_INFO(" Cursor.\n");
+ if (capabilities & SVGA_CAP_CURSOR_BYPASS)
+ DRM_INFO(" Cursor bypass.\n");
+ if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
+ DRM_INFO(" Cursor bypass 2.\n");
+ if (capabilities & SVGA_CAP_8BIT_EMULATION)
+ DRM_INFO(" 8bit emulation.\n");
+ if (capabilities & SVGA_CAP_ALPHA_CURSOR)
+ DRM_INFO(" Alpha cursor.\n");
+ if (capabilities & SVGA_CAP_3D)
+ DRM_INFO(" 3D.\n");
+ if (capabilities & SVGA_CAP_EXTENDED_FIFO)
+ DRM_INFO(" Extended Fifo.\n");
+ if (capabilities & SVGA_CAP_MULTIMON)
+ DRM_INFO(" Multimon.\n");
+ if (capabilities & SVGA_CAP_PITCHLOCK)
+ DRM_INFO(" Pitchlock.\n");
+ if (capabilities & SVGA_CAP_IRQMASK)
+ DRM_INFO(" Irq mask.\n");
+ if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
+ DRM_INFO(" Display Topology.\n");
+ if (capabilities & SVGA_CAP_GMR)
+ DRM_INFO(" GMR.\n");
+ if (capabilities & SVGA_CAP_TRACES)
+ DRM_INFO(" Traces.\n");
+}
+
+static int vmw_request_device(struct vmw_private *dev_priv)
+{
+ int ret;
+
+ vmw_kms_save_vga(dev_priv);
+
+ ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to initialize FIFO.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vmw_release_device(struct vmw_private *dev_priv)
+{
+ vmw_fifo_release(dev_priv, &dev_priv->fifo);
+ vmw_kms_restore_vga(dev_priv);
+}
+
+
+static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+ struct vmw_private *dev_priv;
+ int ret;
+ uint32_t svga_id;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (unlikely(dev_priv == NULL)) {
+ DRM_ERROR("Failed allocating a device private struct.\n");
+ return -ENOMEM;
+ }
+ memset(dev_priv, 0, sizeof(*dev_priv));
+
+ dev_priv->dev = dev;
+ dev_priv->vmw_chipset = chipset;
+ dev_priv->last_read_sequence = (uint32_t) -100;
+ mutex_init(&dev_priv->hw_mutex);
+ mutex_init(&dev_priv->cmdbuf_mutex);
+ rwlock_init(&dev_priv->resource_lock);
+ idr_init(&dev_priv->context_idr);
+ idr_init(&dev_priv->surface_idr);
+ idr_init(&dev_priv->stream_idr);
+ ida_init(&dev_priv->gmr_ida);
+ mutex_init(&dev_priv->init_mutex);
+ init_waitqueue_head(&dev_priv->fence_queue);
+ init_waitqueue_head(&dev_priv->fifo_queue);
+ atomic_set(&dev_priv->fence_queue_waiters, 0);
+ atomic_set(&dev_priv->fifo_queue_waiters, 0);
+ INIT_LIST_HEAD(&dev_priv->gmr_lru);
+
+ dev_priv->io_start = pci_resource_start(dev->pdev, 0);
+ dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
+ dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+ svga_id = vmw_read(dev_priv, SVGA_REG_ID);
+ if (svga_id != SVGA_ID_2) {
+ ret = -ENOSYS;
+ DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id);
+ mutex_unlock(&dev_priv->hw_mutex);
+ goto out_err0;
+ }
+
+ dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+
+ if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ dev_priv->max_gmr_descriptors =
+ vmw_read(dev_priv,
+ SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+ dev_priv->max_gmr_ids =
+ vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
+ }
+
+ dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+ dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+ dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ vmw_print_capabilities(dev_priv->capabilities);
+
+ if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ DRM_INFO("Max GMR ids is %u\n",
+ (unsigned)dev_priv->max_gmr_ids);
+ DRM_INFO("Max GMR descriptors is %u\n",
+ (unsigned)dev_priv->max_gmr_descriptors);
+ }
+ DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
+ dev_priv->vram_start, dev_priv->vram_size / 1024);
+ DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
+ dev_priv->mmio_start, dev_priv->mmio_size / 1024);
+
+ ret = vmw_ttm_global_init(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+
+ vmw_master_init(&dev_priv->fbdev_master);
+ ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
+ dev_priv->active_master = &dev_priv->fbdev_master;
+
+
+ ret = ttm_bo_device_init(&dev_priv->bdev,
+ dev_priv->bo_global_ref.ref.object,
+ &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
+ false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+ goto out_err1;
+ }
+
+ ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+ (dev_priv->vram_size >> PAGE_SHIFT));
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+ goto out_err2;
+ }
+
+ dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
+ dev_priv->mmio_size, DRM_MTRR_WC);
+
+ dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
+ dev_priv->mmio_size);
+
+ if (unlikely(dev_priv->mmio_virt == NULL)) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed mapping MMIO.\n");
+ goto out_err3;
+ }
+
+ dev_priv->tdev = ttm_object_device_init
+ (dev_priv->mem_global_ref.object, 12);
+
+ if (unlikely(dev_priv->tdev == NULL)) {
+ DRM_ERROR("Unable to initialize TTM object management.\n");
+ ret = -ENOMEM;
+ goto out_err4;
+ }
+
+ dev->dev_private = dev_priv;
+
+ if (!dev->devname)
+ dev->devname = vmw_devname;
+
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+ ret = drm_irq_install(dev);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ goto out_no_irq;
+ }
+ }
+
+ ret = pci_request_regions(dev->pdev, "vmwgfx probe");
+ dev_priv->stealth = (ret != 0);
+ if (dev_priv->stealth) {
+ /**
+ * Request at least the mmio PCI resource.
+ */
+
+ DRM_INFO("It appears like vesafb is loaded. "
+ "Ignore above error if any.\n");
+ ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
+ goto out_no_device;
+ }
+ }
+ ret = vmw_request_device(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_no_device;
+ vmw_kms_init(dev_priv);
+ vmw_overlay_init(dev_priv);
+ vmw_fb_init(dev_priv);
+
+ dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
+ register_pm_notifier(&dev_priv->pm_nb);
+
+ DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
+
+ return 0;
+
+out_no_device:
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+ if (dev->devname == vmw_devname)
+ dev->devname = NULL;
+out_no_irq:
+ ttm_object_device_release(&dev_priv->tdev);
+out_err4:
+ iounmap(dev_priv->mmio_virt);
+out_err3:
+ drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
+ dev_priv->mmio_size, DRM_MTRR_WC);
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_err2:
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+out_err1:
+ vmw_ttm_global_release(dev_priv);
+out_err0:
+ ida_destroy(&dev_priv->gmr_ida);
+ idr_destroy(&dev_priv->surface_idr);
+ idr_destroy(&dev_priv->context_idr);
+ idr_destroy(&dev_priv->stream_idr);
+ kfree(dev_priv);
+ return ret;
+}
+
+static int vmw_driver_unload(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
+
+ unregister_pm_notifier(&dev_priv->pm_nb);
+
+ vmw_fb_close(dev_priv);
+ vmw_kms_close(dev_priv);
+ vmw_overlay_close(dev_priv);
+ vmw_release_device(dev_priv);
+ if (dev_priv->stealth)
+ pci_release_region(dev->pdev, 2);
+ else
+ pci_release_regions(dev->pdev);
+
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+ if (dev->devname == vmw_devname)
+ dev->devname = NULL;
+ ttm_object_device_release(&dev_priv->tdev);
+ iounmap(dev_priv->mmio_virt);
+ drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
+ dev_priv->mmio_size, DRM_MTRR_WC);
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+ vmw_ttm_global_release(dev_priv);
+ ida_destroy(&dev_priv->gmr_ida);
+ idr_destroy(&dev_priv->surface_idr);
+ idr_destroy(&dev_priv->context_idr);
+ idr_destroy(&dev_priv->stream_idr);
+
+ kfree(dev_priv);
+
+ return 0;
+}
+
+static void vmw_postclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct vmw_fpriv *vmw_fp;
+
+ vmw_fp = vmw_fpriv(file_priv);
+ ttm_object_file_release(&vmw_fp->tfile);
+ if (vmw_fp->locked_master)
+ drm_master_put(&vmw_fp->locked_master);
+ kfree(vmw_fp);
+}
+
+static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fpriv *vmw_fp;
+ int ret = -ENOMEM;
+
+ vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
+ if (unlikely(vmw_fp == NULL))
+ return ret;
+
+ vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
+ if (unlikely(vmw_fp->tfile == NULL))
+ goto out_no_tfile;
+
+ file_priv->driver_priv = vmw_fp;
+
+ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
+ dev_priv->bdev.dev_mapping =
+ file_priv->filp->f_path.dentry->d_inode->i_mapping;
+
+ return 0;
+
+out_no_tfile:
+ kfree(vmw_fp);
+ return ret;
+}
+
+static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+
+ /*
+ * Do extra checking on driver private ioctls.
+ */
+
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ struct drm_ioctl_desc *ioctl =
+ &vmw_ioctls[nr - DRM_COMMAND_BASE];
+
+ if (unlikely(ioctl->cmd != cmd)) {
+ DRM_ERROR("Invalid command format, ioctl %d\n",
+ nr - DRM_COMMAND_BASE);
+ return -EINVAL;
+ }
+ }
+
+ return drm_ioctl(filp, cmd, arg);
+}
+
+static int vmw_firstopen(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ dev_priv->is_opened = true;
+
+ return 0;
+}
+
+static void vmw_lastclose(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_crtc *crtc;
+ struct drm_mode_set set;
+ int ret;
+
+ /**
+ * Do nothing on the lastclose call from drm_unload.
+ */
+
+ if (!dev_priv->is_opened)
+ return;
+
+ dev_priv->is_opened = false;
+ set.x = 0;
+ set.y = 0;
+ set.fb = NULL;
+ set.mode = NULL;
+ set.connectors = NULL;
+ set.num_connectors = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ set.crtc = crtc;
+ ret = crtc->funcs->set_config(&set);
+ WARN_ON(ret != 0);
+ }
+
+}
+
+static void vmw_master_init(struct vmw_master *vmaster)
+{
+ ttm_lock_init(&vmaster->lock);
+}
+
+static int vmw_master_create(struct drm_device *dev,
+ struct drm_master *master)
+{
+ struct vmw_master *vmaster;
+
+ DRM_INFO("Master create.\n");
+ vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
+ if (unlikely(vmaster == NULL))
+ return -ENOMEM;
+
+ ttm_lock_init(&vmaster->lock);
+ ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+ master->driver_priv = vmaster;
+
+ return 0;
+}
+
+static void vmw_master_destroy(struct drm_device *dev,
+ struct drm_master *master)
+{
+ struct vmw_master *vmaster = vmw_master(master);
+
+ DRM_INFO("Master destroy.\n");
+ master->driver_priv = NULL;
+ kfree(vmaster);
+}
+
+
+static int vmw_master_set(struct drm_device *dev,
+ struct drm_file *file_priv,
+ bool from_open)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct vmw_master *active = dev_priv->active_master;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret = 0;
+
+ DRM_INFO("Master set.\n");
+
+ if (active) {
+ BUG_ON(active != &dev_priv->fbdev_master);
+ ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
+ if (unlikely(ret != 0))
+ goto out_no_active_lock;
+
+ ttm_lock_set_kill(&active->lock, true, SIGTERM);
+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to clean VRAM on "
+ "master drop.\n");
+ }
+
+ dev_priv->active_master = NULL;
+ }
+
+ ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+ if (!from_open) {
+ ttm_vt_unlock(&vmaster->lock);
+ BUG_ON(vmw_fp->locked_master != file_priv->master);
+ drm_master_put(&vmw_fp->locked_master);
+ }
+
+ dev_priv->active_master = vmaster;
+
+ return 0;
+
+out_no_active_lock:
+ vmw_release_device(dev_priv);
+ return ret;
+}
+
+static void vmw_master_drop(struct drm_device *dev,
+ struct drm_file *file_priv,
+ bool from_release)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+ DRM_INFO("Master drop.\n");
+
+ /**
+ * Make sure the master doesn't disappear while we have
+ * it locked.
+ */
+
+ vmw_fp->locked_master = drm_master_get(file_priv->master);
+ ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+
+ if (unlikely((ret != 0))) {
+ DRM_ERROR("Unable to lock TTM at VT switch.\n");
+ drm_master_put(&vmw_fp->locked_master);
+ }
+
+ ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+
+ dev_priv->active_master = &dev_priv->fbdev_master;
+ ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
+ ttm_vt_unlock(&dev_priv->fbdev_master.lock);
+
+ vmw_fb_on(dev_priv);
+}
+
+
+static void vmw_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+ void *ptr)
+{
+ struct vmw_private *dev_priv =
+ container_of(nb, struct vmw_private, pm_nb);
+ struct vmw_master *vmaster = dev_priv->active_master;
+
+ switch (val) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ttm_suspend_lock(&vmaster->lock);
+
+ /**
+ * This empties VRAM and unbinds all GMR bindings.
+ * Buffer contents is moved to swappable memory.
+ */
+ ttm_bo_swapout_all(&dev_priv->bdev);
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ ttm_suspend_unlock(&vmaster->lock);
+ break;
+ case PM_RESTORE_PREPARE:
+ break;
+ case PM_POST_RESTORE:
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * These might not be needed with the virtual SVGA device.
+ */
+
+int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+int vmw_pci_resume(struct pci_dev *pdev)
+{
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ return pci_enable_device(pdev);
+}
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_MODESET,
+ .load = vmw_driver_load,
+ .unload = vmw_driver_unload,
+ .firstopen = vmw_firstopen,
+ .lastclose = vmw_lastclose,
+ .irq_preinstall = vmw_irq_preinstall,
+ .irq_postinstall = vmw_irq_postinstall,
+ .irq_uninstall = vmw_irq_uninstall,
+ .irq_handler = vmw_irq_handler,
+ .reclaim_buffers_locked = NULL,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .ioctls = vmw_ioctls,
+ .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
+ .dma_quiescent = NULL, /*vmw_dma_quiescent, */
+ .master_create = vmw_master_create,
+ .master_destroy = vmw_master_destroy,
+ .master_set = vmw_master_set,
+ .master_drop = vmw_master_drop,
+ .open = vmw_driver_open,
+ .postclose = vmw_postclose,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = vmw_unlocked_ioctl,
+ .mmap = vmw_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ },
+ .pci_driver = {
+ .name = VMWGFX_DRIVER_NAME,
+ .id_table = vmw_pci_id_list,
+ .probe = vmw_probe,
+ .remove = vmw_remove,
+ .suspend = vmw_pci_suspend,
+ .resume = vmw_pci_resume
+ },
+ .name = VMWGFX_DRIVER_NAME,
+ .desc = VMWGFX_DRIVER_DESC,
+ .date = VMWGFX_DRIVER_DATE,
+ .major = VMWGFX_DRIVER_MAJOR,
+ .minor = VMWGFX_DRIVER_MINOR,
+ .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
+};
+
+static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_dev(pdev, ent, &driver);
+}
+
+static int __init vmwgfx_init(void)
+{
+ int ret;
+ ret = drm_init(&driver);
+ if (ret)
+ DRM_ERROR("Failed initializing DRM.\n");
+ return ret;
+}
+
+static void __exit vmwgfx_exit(void)
+{
+ drm_exit(&driver);
+}
+
+module_init(vmwgfx_init);
+module_exit(vmwgfx_exit);
+
+MODULE_AUTHOR("VMware Inc. and others");
+MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
new file mode 100644
index 0000000..356dc93
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,521 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_DRV_H_
+#define _VMWGFX_DRV_H_
+
+#include "vmwgfx_reg.h"
+#include "drmP.h"
+#include "vmwgfx_drm.h"
+#include "drm_hashtab.h"
+#include "linux/suspend.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_lock.h"
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_module.h"
+
+#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_MAJOR 1
+#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
+#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
+#define VMWGFX_MAX_RELOCATIONS 2048
+#define VMWGFX_MAX_GMRS 2048
+
+struct vmw_fpriv {
+ struct drm_master *locked_master;
+ struct ttm_object_file *tfile;
+};
+
+struct vmw_dma_buffer {
+ struct ttm_buffer_object base;
+ struct list_head validate_list;
+ struct list_head gmr_lru;
+ uint32_t gmr_id;
+ bool gmr_bound;
+ uint32_t cur_validate_node;
+ bool on_validate_list;
+};
+
+struct vmw_resource {
+ struct kref kref;
+ struct vmw_private *dev_priv;
+ struct idr *idr;
+ int id;
+ enum ttm_object_type res_type;
+ bool avail;
+ void (*hw_destroy) (struct vmw_resource *res);
+ void (*res_free) (struct vmw_resource *res);
+
+ /* TODO is a generic snooper needed? */
+#if 0
+ void (*snoop)(struct vmw_resource *res,
+ struct ttm_object_file *tfile,
+ SVGA3dCmdHeader *header);
+ void *snoop_priv;
+#endif
+};
+
+struct vmw_cursor_snooper {
+ struct drm_crtc *crtc;
+ size_t age;
+ uint32_t *image;
+};
+
+struct vmw_surface {
+ struct vmw_resource res;
+ uint32_t flags;
+ uint32_t format;
+ uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ struct drm_vmw_size *sizes;
+ uint32_t num_sizes;
+
+ bool scanout;
+
+ /* TODO so far just an extra pointer */
+ struct vmw_cursor_snooper snooper;
+};
+
+struct vmw_fifo_state {
+ unsigned long reserved_size;
+ __le32 *dynamic_buffer;
+ __le32 *static_buffer;
+ __le32 *last_buffer;
+ uint32_t last_data_size;
+ uint32_t last_buffer_size;
+ bool last_buffer_add;
+ unsigned long static_buffer_size;
+ bool using_bounce_buffer;
+ uint32_t capabilities;
+ struct mutex fifo_mutex;
+ struct rw_semaphore rwsem;
+};
+
+struct vmw_relocation {
+ SVGAGuestPtr *location;
+ uint32_t index;
+};
+
+struct vmw_sw_context{
+ struct ida bo_list;
+ uint32_t last_cid;
+ bool cid_valid;
+ uint32_t last_sid;
+ uint32_t sid_translation;
+ bool sid_valid;
+ struct ttm_object_file *tfile;
+ struct list_head validate_nodes;
+ struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
+ uint32_t cur_reloc;
+ struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
+ uint32_t cur_val_buf;
+};
+
+struct vmw_legacy_display;
+struct vmw_overlay;
+
+struct vmw_master {
+ struct ttm_lock lock;
+};
+
+struct vmw_private {
+ struct ttm_bo_device bdev;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_global_reference mem_global_ref;
+
+ struct vmw_fifo_state fifo;
+
+ struct drm_device *dev;
+ unsigned long vmw_chipset;
+ unsigned int io_start;
+ uint32_t vram_start;
+ uint32_t vram_size;
+ uint32_t mmio_start;
+ uint32_t mmio_size;
+ uint32_t fb_max_width;
+ uint32_t fb_max_height;
+ __le32 __iomem *mmio_virt;
+ int mmio_mtrr;
+ uint32_t capabilities;
+ uint32_t max_gmr_descriptors;
+ uint32_t max_gmr_ids;
+ struct mutex hw_mutex;
+
+ /*
+ * VGA registers.
+ */
+
+ uint32_t vga_width;
+ uint32_t vga_height;
+ uint32_t vga_depth;
+ uint32_t vga_bpp;
+ uint32_t vga_pseudo;
+ uint32_t vga_red_mask;
+ uint32_t vga_blue_mask;
+ uint32_t vga_green_mask;
+
+ /*
+ * Framebuffer info.
+ */
+
+ void *fb_info;
+ struct vmw_legacy_display *ldu_priv;
+ struct vmw_overlay *overlay_priv;
+
+ /*
+ * Context and surface management.
+ */
+
+ rwlock_t resource_lock;
+ struct idr context_idr;
+ struct idr surface_idr;
+ struct idr stream_idr;
+
+ /*
+ * Block lastclose from racing with firstopen.
+ */
+
+ struct mutex init_mutex;
+
+ /*
+ * A resource manager for kernel-only surfaces and
+ * contexts.
+ */
+
+ struct ttm_object_device *tdev;
+
+ /*
+ * Fencing and IRQs.
+ */
+
+ atomic_t fence_seq;
+ wait_queue_head_t fence_queue;
+ wait_queue_head_t fifo_queue;
+ atomic_t fence_queue_waiters;
+ atomic_t fifo_queue_waiters;
+ uint32_t last_read_sequence;
+ spinlock_t irq_lock;
+
+ /*
+ * Device state
+ */
+
+ uint32_t traces_state;
+ uint32_t enable_state;
+ uint32_t config_done_state;
+
+ /**
+ * Execbuf
+ */
+ /**
+ * Protected by the cmdbuf mutex.
+ */
+
+ struct vmw_sw_context ctx;
+ uint32_t val_seq;
+ struct mutex cmdbuf_mutex;
+
+ /**
+ * GMR management. Protected by the lru spinlock.
+ */
+
+ struct ida gmr_ida;
+ struct list_head gmr_lru;
+
+
+ /**
+ * Operating mode.
+ */
+
+ bool stealth;
+ bool is_opened;
+
+ /**
+ * Master management.
+ */
+
+ struct vmw_master *active_master;
+ struct vmw_master fbdev_master;
+ struct notifier_block pm_nb;
+};
+
+static inline struct vmw_private *vmw_priv(struct drm_device *dev)
+{
+ return (struct vmw_private *)dev->dev_private;
+}
+
+static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
+{
+ return (struct vmw_fpriv *)file_priv->driver_priv;
+}
+
+static inline struct vmw_master *vmw_master(struct drm_master *master)
+{
+ return (struct vmw_master *) master->driver_priv;
+}
+
+static inline void vmw_write(struct vmw_private *dev_priv,
+ unsigned int offset, uint32_t value)
+{
+ outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+ outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+}
+
+static inline uint32_t vmw_read(struct vmw_private *dev_priv,
+ unsigned int offset)
+{
+ uint32_t val;
+
+ outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+ val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+ return val;
+}
+
+/**
+ * GMR utilities - vmwgfx_gmr.c
+ */
+
+extern int vmw_gmr_bind(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo);
+extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
+
+/**
+ * Resource utilities - vmwgfx_resource.c
+ */
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+extern void vmw_resource_unreference(struct vmw_resource **p_res);
+extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ int id);
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res));
+extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, int *id);
+extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interuptable,
+ void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
+ uint32_t cur_validate_node);
+extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
+extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+ uint32_t id, struct vmw_dma_buffer **out);
+extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
+extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
+extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
+extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo);
+extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo);
+extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
+extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t *inout_id,
+ struct vmw_resource **out);
+
+
+/**
+ * Misc Ioctl functionality - vmwgfx_ioctl.c
+ */
+
+extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/**
+ * Fifo utilities - vmwgfx_fifo.c
+ */
+
+extern int vmw_fifo_init(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo);
+extern void vmw_fifo_release(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo);
+extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
+ uint32_t *sequence);
+extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
+extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
+extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+
+/**
+ * TTM glue - vmwgfx_ttm_glue.c
+ */
+
+extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
+extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
+extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/**
+ * TTM buffer object driver - vmwgfx_buffer.c
+ */
+
+extern struct ttm_placement vmw_vram_placement;
+extern struct ttm_placement vmw_vram_ne_placement;
+extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_bo_driver vmw_bo_driver;
+extern int vmw_dma_quiescent(struct drm_device *dev);
+
+/**
+ * Command submission - vmwgfx_execbuf.c
+ */
+
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/**
+ * IRQs and waiting - vmwgfx_irq.c
+ */
+
+extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
+ uint32_t sequence, bool interruptible,
+ unsigned long timeout);
+extern void vmw_irq_preinstall(struct drm_device *dev);
+extern int vmw_irq_postinstall(struct drm_device *dev);
+extern void vmw_irq_uninstall(struct drm_device *dev);
+extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
+ uint32_t sequence);
+extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_fallback_wait(struct vmw_private *dev_priv,
+ bool lazy,
+ bool fifo_idle,
+ uint32_t sequence,
+ bool interruptible,
+ unsigned long timeout);
+
+/**
+ * Kernel framebuffer - vmwgfx_fb.c
+ */
+
+int vmw_fb_init(struct vmw_private *vmw_priv);
+int vmw_fb_close(struct vmw_private *dev_priv);
+int vmw_fb_off(struct vmw_private *vmw_priv);
+int vmw_fb_on(struct vmw_private *vmw_priv);
+
+/**
+ * Kernel modesetting - vmwgfx_kms.c
+ */
+
+int vmw_kms_init(struct vmw_private *dev_priv);
+int vmw_kms_close(struct vmw_private *dev_priv);
+int vmw_kms_save_vga(struct vmw_private *vmw_priv);
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header);
+
+/**
+ * Overlay control - vmwgfx_overlay.c
+ */
+
+int vmw_overlay_init(struct vmw_private *dev_priv);
+int vmw_overlay_close(struct vmw_private *dev_priv);
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vmw_overlay_stop_all(struct vmw_private *dev_priv);
+int vmw_overlay_resume_all(struct vmw_private *dev_priv);
+int vmw_overlay_pause_all(struct vmw_private *dev_priv);
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
+
+/**
+ * Inline helper functions
+ */
+
+static inline void vmw_surface_unreference(struct vmw_surface **srf)
+{
+ struct vmw_surface *tmp_srf = *srf;
+ struct vmw_resource *res = &tmp_srf->res;
+ *srf = NULL;
+
+ vmw_resource_unreference(&res);
+}
+
+static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
+{
+ (void) vmw_resource_reference(&srf->res);
+ return srf;
+}
+
+static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+{
+ struct vmw_dma_buffer *tmp_buf = *buf;
+ struct ttm_buffer_object *bo = &tmp_buf->base;
+ *buf = NULL;
+
+ ttm_bo_unref(&bo);
+}
+
+static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+{
+ if (ttm_bo_reference(&buf->base))
+ return buf;
+ return NULL;
+}
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
new file mode 100644
index 0000000..0897359
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -0,0 +1,716 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_reg.h"
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_placement.h"
+
+static int vmw_cmd_invalid(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+}
+
+static int vmw_cmd_ok(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ return 0;
+}
+
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_cid_cmd {
+ SVGA3dCmdHeader header;
+ __le32 cid;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_cid_cmd, header);
+ if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
+ return 0;
+
+ ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use context %u\n",
+ (unsigned) cmd->cid);
+ return ret;
+ }
+
+ sw_context->last_cid = cmd->cid;
+ sw_context->cid_valid = true;
+
+ return 0;
+}
+
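+/*
+ * Validate a surface id embedded in the command stream. The last
+ * successfully looked-up id is cached, and the user-space id is
+ * patched with the corresponding device surface id.
+ */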
+static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ uint32_t *sid)
+{
+ if (*sid == SVGA3D_INVALID_ID)
+ return 0;
+
+ if (unlikely((!sw_context->sid_valid ||
+ *sid != sw_context->last_sid))) {
+ int real_id;
+ int ret = vmw_surface_check(dev_priv, sw_context->tfile,
+ *sid, &real_id);
+
+ if (unlikely(ret != 0)) {
+			DRM_ERROR("Could not find or use surface 0x%08x "
+ "address 0x%08lx\n",
+ (unsigned int) *sid,
+ (unsigned long) sid);
+ return ret;
+ }
+
+ sw_context->last_sid = *sid;
+ sw_context->sid_valid = true;
+ *sid = real_id;
+ sw_context->sid_translation = real_id;
+ } else
+ *sid = sw_context->sid_translation;
+
+ return 0;
+}
+
+
+static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetRenderTarget body;
+ } *cmd;
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+ return ret;
+}
+
+static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceCopy body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ if (unlikely(ret != 0))
+ return ret;
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+}
+
+static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceStretchBlt body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ if (unlikely(ret != 0))
+ return ret;
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+}
+
+static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBlitSurfaceToScreen body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+}
+
+static int vmw_cmd_present_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+}
+
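+/*
+ * Look up the DMA buffer backing a guest pointer, record a relocation
+ * for it, and add the buffer to the validation list if it is not
+ * already there. On success the caller holds a reference in @vmw_bo_p.
+ */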
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGAGuestPtr *ptr,
+ struct vmw_dma_buffer **vmw_bo_p)
+{
+ struct vmw_dma_buffer *vmw_bo = NULL;
+ struct ttm_buffer_object *bo;
+ uint32_t handle = ptr->gmrId;
+ struct vmw_relocation *reloc;
+ uint32_t cur_validate_node;
+ struct ttm_validate_buffer *val_buf;
+ int ret;
+
+ ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use GMR region.\n");
+ return -EINVAL;
+ }
+ bo = &vmw_bo->base;
+
+ if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+		DRM_ERROR("Max number of relocations per submission"
+ " exceeded\n");
+ ret = -EINVAL;
+ goto out_no_reloc;
+ }
+
+ reloc = &sw_context->relocs[sw_context->cur_reloc++];
+ reloc->location = ptr;
+
+ cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
+ if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
+ DRM_ERROR("Max number of DMA buffers per submission"
+ " exceeded.\n");
+ ret = -EINVAL;
+ goto out_no_reloc;
+ }
+
+ reloc->index = cur_validate_node;
+ if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
+ val_buf = &sw_context->val_bufs[cur_validate_node];
+ val_buf->bo = ttm_bo_reference(bo);
+ val_buf->new_sync_obj_arg = (void *) dev_priv;
+ list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+ ++sw_context->cur_val_buf;
+ }
+ *vmw_bo_p = vmw_bo;
+ return 0;
+
+out_no_reloc:
+ vmw_dmabuf_unreference(&vmw_bo);
+	*vmw_bo_p = NULL;
+ return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdEndQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->q.guestResult,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return 0;
+}
+
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->q.guestResult,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo = NULL;
+ struct ttm_buffer_object *bo;
+ struct vmw_surface *srf = NULL;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->dma.guest.ptr,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ bo = &vmw_bo->base;
+ ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+ cmd->dma.host.sid, &srf);
+ if (ret) {
+ DRM_ERROR("could not find surface\n");
+ goto out_no_reloc;
+ }
+
+ /**
+ * Patch command stream with device SID.
+ */
+
+ cmd->dma.host.sid = srf->res.id;
+ vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
+ /**
+ * FIXME: May deadlock here when called from the
+ * command parsing code.
+ */
+ vmw_surface_unreference(&srf);
+
+out_no_reloc:
+ vmw_dmabuf_unreference(&vmw_bo);
+ return ret;
+}
+
+static int vmw_cmd_draw(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_draw_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDrawPrimitives body;
+ } *cmd;
+ SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
+ (unsigned long)header + sizeof(*cmd));
+ SVGA3dPrimitiveRange *range;
+ uint32_t i;
+ uint32_t maxnum;
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ cmd = container_of(header, struct vmw_draw_cmd, header);
+ maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
+
+ if (unlikely(cmd->body.numVertexDecls > maxnum)) {
+ DRM_ERROR("Illegal number of vertex declarations.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &decl->array.surfaceId);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ maxnum = (header->size - sizeof(cmd->body) -
+ cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
+ if (unlikely(cmd->body.numRanges > maxnum)) {
+ DRM_ERROR("Illegal number of index ranges.\n");
+ return -EINVAL;
+ }
+
+ range = (SVGA3dPrimitiveRange *) decl;
+ for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &range->indexArray.surfaceId);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+
+static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_tex_state_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetTextureState state;
+ };
+
+ SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+ ((unsigned long) header + header->size + sizeof(header));
+ SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+ ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ for (; cur_state < last_state; ++cur_state) {
+ if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
+ continue;
+
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &cur_state->value);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ return 0;
+}
+
+
+typedef int (*vmw_cmd_func) (struct vmw_private *,
+ struct vmw_sw_context *,
+ SVGA3dCmdHeader *);
+
+#define VMW_CMD_DEF(cmd, func) \
+ [cmd - SVGA_3D_CMD_BASE] = func
+
+static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
+ &vmw_cmd_set_render_target_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+ VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
+ &vmw_cmd_blt_surf_screen_check)
+};
+
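+/*
+ * Verify a single command: the 2D SVGA_CMD_UPDATE is passed through as-is,
+ * while 3D commands are dispatched to their per-command check functions.
+ */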
+static int vmw_cmd_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf, uint32_t *size)
+{
+ uint32_t cmd_id;
+ uint32_t size_remaining = *size;
+ SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+ int ret;
+
+ cmd_id = ((uint32_t *)buf)[0];
+ if (cmd_id == SVGA_CMD_UPDATE) {
+ *size = 5 << 2;
+ return 0;
+ }
+
+ cmd_id = le32_to_cpu(header->id);
+ *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+
+ cmd_id -= SVGA_3D_CMD_BASE;
+ if (unlikely(*size > size_remaining))
+ goto out_err;
+
+ if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
+ goto out_err;
+
+ ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ return 0;
+out_err:
+ DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EINVAL;
+}
+
+static int vmw_cmd_check_all(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf, uint32_t size)
+{
+ int32_t cur_size = size;
+ int ret;
+
+ while (cur_size > 0) {
+ size = cur_size;
+ ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
+ if (unlikely(ret != 0))
+ return ret;
+ buf = (void *)((unsigned long) buf + size);
+ cur_size -= size;
+ }
+
+ if (unlikely(cur_size != 0)) {
+ DRM_ERROR("Command verifier out of sync.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vmw_free_relocations(struct vmw_sw_context *sw_context)
+{
+ sw_context->cur_reloc = 0;
+}
+
+static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
+{
+ uint32_t i;
+ struct vmw_relocation *reloc;
+ struct ttm_validate_buffer *validate;
+ struct ttm_buffer_object *bo;
+
+ for (i = 0; i < sw_context->cur_reloc; ++i) {
+ reloc = &sw_context->relocs[i];
+ validate = &sw_context->val_bufs[reloc->index];
+ bo = validate->bo;
+ reloc->location->offset += bo->offset;
+ reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+ }
+ vmw_free_relocations(sw_context);
+}
+
+static void vmw_clear_validations(struct vmw_sw_context *sw_context)
+{
+ struct ttm_validate_buffer *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
+ head) {
+ list_del(&entry->head);
+ vmw_dmabuf_validate_clear(entry->bo);
+ ttm_bo_unref(&entry->bo);
+ sw_context->cur_val_buf--;
+ }
+ BUG_ON(sw_context->cur_val_buf != 0);
+}
+
+static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo)
+{
+ int ret;
+
+ if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
+ return 0;
+
+ /**
+ * Put BO in VRAM, only if there is space.
+ */
+
+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+ if (unlikely(ret == -ERESTARTSYS))
+ return ret;
+
+ /**
+ * Otherwise, set it up as GMR.
+ */
+
+ if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
+ return 0;
+
+ ret = vmw_gmr_bind(dev_priv, bo);
+ if (likely(ret == 0 || ret == -ERESTARTSYS))
+ return ret;
+
+ /**
+ * If that failed, try VRAM again, this time evicting
+ * previous contents.
+ */
+
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ return ret;
+}
+
+
+static int vmw_validate_buffers(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context)
+{
+ struct ttm_validate_buffer *entry;
+ int ret;
+
+ list_for_each_entry(entry, &sw_context->validate_nodes, head) {
+ ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+ struct drm_vmw_fence_rep fence_rep;
+ struct drm_vmw_fence_rep __user *user_fence_rep;
+ int ret;
+ void *user_cmd;
+ void *cmd;
+ uint32_t sequence;
+ struct vmw_sw_context *sw_context = &dev_priv->ctx;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_no_cmd_mutex;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving fifo space for commands.\n");
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ user_cmd = (void __user *)(unsigned long)arg->commands;
+ ret = copy_from_user(cmd, user_cmd, arg->command_size);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed copying commands.\n");
+ goto out_commit;
+ }
+
+ sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+ sw_context->cid_valid = false;
+ sw_context->sid_valid = false;
+ sw_context->cur_reloc = 0;
+ sw_context->cur_val_buf = 0;
+
+ INIT_LIST_HEAD(&sw_context->validate_nodes);
+
+ ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
+ dev_priv->val_seq++);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ ret = vmw_validate_buffers(dev_priv, sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ vmw_apply_relocations(sw_context);
+ vmw_fifo_commit(dev_priv, arg->command_size);
+
+ ret = vmw_fifo_send_fence(dev_priv, &sequence);
+
+ ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+ (void *)(unsigned long) sequence);
+ vmw_clear_validations(sw_context);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * This error is harmless, because if fence submission fails,
+ * vmw_fifo_send_fence will sync.
+ */
+
+ if (ret != 0)
+ DRM_ERROR("Fence submission error. Syncing.\n");
+
+ fence_rep.error = ret;
+ fence_rep.fence_seq = (uint64_t) sequence;
+
+ user_fence_rep = (struct drm_vmw_fence_rep __user *)
+ (unsigned long)arg->fence_rep;
+
+ /*
+ * copy_to_user errors will be detected by user space not
+ * seeing fence_rep::error filled in.
+ */
+
+ ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
+
+ vmw_kms_cursor_post_execbuf(dev_priv);
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_err:
+ vmw_free_relocations(sw_context);
+ ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+ vmw_clear_validations(sw_context);
+out_commit:
+ vmw_fifo_commit(dev_priv, 0);
+out_unlock:
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+out_no_cmd_mutex:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
new file mode 100644
index 0000000..a933670
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -0,0 +1,737 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 David Airlie
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#include "ttm/ttm_placement.h"
+
+#define VMW_DIRTY_DELAY (HZ / 30)
+
+struct vmw_fb_par {
+ struct vmw_private *vmw_priv;
+
+ void *vmalloc;
+
+ struct vmw_dma_buffer *vmw_bo;
+ struct ttm_bo_kmap_obj map;
+
+ u32 pseudo_palette[17];
+
+ unsigned depth;
+ unsigned bpp;
+
+ unsigned max_width;
+ unsigned max_height;
+
+ void *bo_ptr;
+ unsigned bo_size;
+ bool bo_iowrite;
+
+ struct {
+ spinlock_t lock;
+ bool active;
+ unsigned x1;
+ unsigned y1;
+ unsigned x2;
+ unsigned y2;
+ } dirty;
+};
+
+static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+ u32 *pal = par->pseudo_palette;
+
+ if (regno > 15) {
+ DRM_ERROR("Bad regno %u.\n", regno);
+ return 1;
+ }
+
+ switch (par->depth) {
+ case 24:
+ case 32:
+ pal[regno] = ((red & 0xff00) << 8) |
+ (green & 0xff00) |
+ ((blue & 0xff00) >> 8);
+ break;
+ default:
+ DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int vmw_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ int depth = var->bits_per_pixel;
+ struct vmw_fb_par *par = info->par;
+ struct vmw_private *vmw_priv = par->vmw_priv;
+
+ switch (var->bits_per_pixel) {
+ case 32:
+ depth = (var->transp.length > 0) ? 32 : 24;
+ break;
+ default:
+ DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ switch (depth) {
+ case 24:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 8;
+ var->transp.offset = 24;
+ break;
+ default:
+ DRM_ERROR("Bad depth %u.\n", depth);
+ return -EINVAL;
+ }
+
+	/* without multimon it's hard to resize */
+ if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
+ (var->xres != par->max_width ||
+ var->yres != par->max_height)) {
+ DRM_ERROR("Tried to resize, but we don't have multimon\n");
+ return -EINVAL;
+ }
+
+ if (var->xres > par->max_width ||
+ var->yres > par->max_height) {
+		DRM_ERROR("Requested geometry cannot fit in the framebuffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+ struct vmw_private *vmw_priv = par->vmw_priv;
+
+ if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+ vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+
+		/* TODO: check if pitch and offset change */
+
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ } else {
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
+
+		/* TODO: check if pitch and offset change */
+ }
+
+ return 0;
+}
+
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static int vmw_fb_blank(int blank, struct fb_info *info)
+{
+ return 0;
+}
+
+/*
+ * Dirty code
+ */
+
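+/*
+ * Copy the dirty region from the vmalloc shadow framebuffer into the
+ * VRAM-backed buffer object and emit an SVGA_CMD_UPDATE for it.
+ */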
+static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
+{
+ struct vmw_private *vmw_priv = par->vmw_priv;
+ struct fb_info *info = vmw_priv->fb_info;
+ int stride = (info->fix.line_length / 4);
+ int *src = (int *)info->screen_base;
+ __le32 __iomem *vram_mem = par->bo_ptr;
+ unsigned long flags;
+ unsigned x, y, w, h;
+ int i, k;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdUpdate body;
+ } *cmd;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ if (!par->dirty.active) {
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+ return;
+ }
+ x = par->dirty.x1;
+ y = par->dirty.y1;
+ w = min(par->dirty.x2, info->var.xres) - x;
+ h = min(par->dirty.y2, info->var.yres) - y;
+ par->dirty.x1 = par->dirty.x2 = 0;
+ par->dirty.y1 = par->dirty.y2 = 0;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+ for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
+ for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
+ iowrite32(src[k], vram_mem + k);
+ }
+
+#if 0
+ DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
+#endif
+
+ cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return;
+ }
+
+ cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
+ cmd->body.x = cpu_to_le32(x);
+ cmd->body.y = cpu_to_le32(y);
+ cmd->body.width = cpu_to_le32(w);
+ cmd->body.height = cpu_to_le32(h);
+ vmw_fifo_commit(vmw_priv, sizeof(*cmd));
+}
+
+static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
+ unsigned x1, unsigned y1,
+ unsigned width, unsigned height)
+{
+ struct fb_info *info = par->vmw_priv->fb_info;
+ unsigned long flags;
+ unsigned x2 = x1 + width;
+ unsigned y2 = y1 + height;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ if (par->dirty.x1 == par->dirty.x2) {
+ par->dirty.x1 = x1;
+ par->dirty.y1 = y1;
+ par->dirty.x2 = x2;
+ par->dirty.y2 = y2;
+		/* If we are active, start the dirty work;
+		 * we share the work with the defio system. */
+ if (par->dirty.active)
+ schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
+ } else {
+ if (x1 < par->dirty.x1)
+ par->dirty.x1 = x1;
+ if (y1 < par->dirty.y1)
+ par->dirty.y1 = y1;
+ if (x2 > par->dirty.x2)
+ par->dirty.x2 = x2;
+ if (y2 > par->dirty.y2)
+ par->dirty.y2 = y2;
+ }
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+}
+
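+/*
+ * Deferred I/O callback: turn the list of touched pages into a dirty
+ * scanline range and flush it.
+ */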
+static void vmw_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct vmw_fb_par *par = info->par;
+ unsigned long start, end, min, max;
+ unsigned long flags;
+ struct page *page;
+ int y1, y2;
+
+ min = ULONG_MAX;
+ max = 0;
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ min = min(min, start);
+ max = max(max, end);
+ }
+
+ if (min < max) {
+ y1 = min / info->fix.line_length;
+ y2 = (max / info->fix.line_length) + 1;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ par->dirty.x1 = 0;
+ par->dirty.y1 = y1;
+ par->dirty.x2 = info->var.xres;
+ par->dirty.y2 = y2;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+ }
+
+ vmw_fb_dirty_flush(par);
+};
+
+struct fb_deferred_io vmw_defio = {
+ .delay = VMW_DIRTY_DELAY,
+ .deferred_io = vmw_deferred_io,
+};
+
+/*
+ * Draw code
+ */
+
+static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ cfb_fillrect(info, rect);
+ vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
+ rect->width, rect->height);
+}
+
+static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ cfb_copyarea(info, region);
+ vmw_fb_dirty_mark(info->par, region->dx, region->dy,
+ region->width, region->height);
+}
+
+static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ cfb_imageblit(info, image);
+ vmw_fb_dirty_mark(info->par, image->dx, image->dy,
+ image->width, image->height);
+}
+
+/*
+ * Bring up code
+ */
+
+static struct fb_ops vmw_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = vmw_fb_check_var,
+ .fb_set_par = vmw_fb_set_par,
+ .fb_setcolreg = vmw_fb_setcolreg,
+ .fb_fillrect = vmw_fb_fillrect,
+ .fb_copyarea = vmw_fb_copyarea,
+ .fb_imageblit = vmw_fb_imageblit,
+ .fb_pan_display = vmw_fb_pan_display,
+ .fb_blank = vmw_fb_blank,
+};
+
+static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
+ size_t size, struct vmw_dma_buffer **out)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct ttm_placement ne_placement = vmw_vram_ne_placement;
+ int ret;
+
+ ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	/* interruptible? */
+ ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
+ if (!vmw_bo)
+ goto err_unlock;
+
+ ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+ &ne_placement,
+ false,
+ &vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto err_unlock; /* init frees the buffer on failure */
+
+ *out = vmw_bo;
+
+ ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+
+ return 0;
+
+err_unlock:
+ ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+ return ret;
+}
+
+int vmw_fb_init(struct vmw_private *vmw_priv)
+{
+ struct device *device = &vmw_priv->dev->pdev->dev;
+ struct vmw_fb_par *par;
+ struct fb_info *info;
+ unsigned initial_width, initial_height;
+ unsigned fb_width, fb_height;
+ unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
+ int ret;
+
+ initial_width = 800;
+ initial_height = 600;
+
+ fb_bbp = 32;
+ fb_depth = 24;
+
+ if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
+ fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+ fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
+ } else {
+ fb_width = min(vmw_priv->fb_max_width, initial_width);
+ fb_height = min(vmw_priv->fb_max_height, initial_height);
+ }
+
+ initial_width = min(fb_width, initial_width);
+ initial_height = min(fb_height, initial_height);
+
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+
+ fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+ fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
+ fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
+
+ DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
+ DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
+ DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
+ DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
+ DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
+ DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
+ DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
+ DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
+ DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
+ DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
+ DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
+ DRM_DEBUG("fb_pitch %u\n", fb_pitch);
+ DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
+
+ info = framebuffer_alloc(sizeof(*par), device);
+ if (!info)
+ return -ENOMEM;
+
+ /*
+ * Par
+ */
+ vmw_priv->fb_info = info;
+ par = info->par;
+ par->vmw_priv = vmw_priv;
+ par->depth = fb_depth;
+ par->bpp = fb_bbp;
+ par->vmalloc = NULL;
+ par->max_width = fb_width;
+ par->max_height = fb_height;
+
+ /*
+ * Create buffers and alloc memory
+ */
+ par->vmalloc = vmalloc(fb_size);
+ if (unlikely(par->vmalloc == NULL)) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
+ if (unlikely(ret != 0))
+ goto err_free;
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base,
+ 0,
+ par->vmw_bo->base.num_pages,
+ &par->map);
+ if (unlikely(ret != 0))
+ goto err_unref;
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+ par->bo_size = fb_size;
+
+ /*
+ * Fixed and var
+ */
+ strcpy(info->fix.id, "svgadrmfb");
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.line_length = fb_pitch;
+
+ info->fix.smem_start = 0;
+ info->fix.smem_len = fb_size;
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+
+ info->pseudo_palette = par->pseudo_palette;
+ info->screen_base = par->vmalloc;
+ info->screen_size = fb_size;
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &vmw_fb_ops;
+
+	/* 24-bit depth by default */
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+
+ info->var.xres_virtual = fb_width;
+ info->var.yres_virtual = fb_height;
+ info->var.bits_per_pixel = par->bpp;
+ info->var.xoffset = 0;
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->var.height = -1;
+ info->var.width = -1;
+
+ info->var.xres = initial_width;
+ info->var.yres = initial_height;
+
+#if 0
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+#else
+ info->pixmap.size = 0;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+#endif
+
+ info->aperture_base = vmw_priv->vram_start;
+ info->aperture_size = vmw_priv->vram_size;
+
+ /*
+ * Dirty & Deferred IO
+ */
+ par->dirty.x1 = par->dirty.x2 = 0;
+	par->dirty.y1 = par->dirty.y2 = 0;
+ par->dirty.active = true;
+ spin_lock_init(&par->dirty.lock);
+ info->fbdefio = &vmw_defio;
+ fb_deferred_io_init(info);
+
+ ret = register_framebuffer(info);
+ if (unlikely(ret != 0))
+ goto err_defio;
+
+ return 0;
+
+err_defio:
+ fb_deferred_io_cleanup(info);
+ ttm_bo_kunmap(&par->map);
+err_unref:
+ ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
+err_free:
+ vfree(par->vmalloc);
+ framebuffer_release(info);
+ vmw_priv->fb_info = NULL;
+
+ return ret;
+}
+
+int vmw_fb_close(struct vmw_private *vmw_priv)
+{
+ struct fb_info *info;
+ struct vmw_fb_par *par;
+ struct ttm_buffer_object *bo;
+
+ if (!vmw_priv->fb_info)
+ return 0;
+
+ info = vmw_priv->fb_info;
+ par = info->par;
+ bo = &par->vmw_bo->base;
+ par->vmw_bo = NULL;
+
+ /* ??? order */
+ fb_deferred_io_cleanup(info);
+ unregister_framebuffer(info);
+
+ ttm_bo_kunmap(&par->map);
+ ttm_bo_unref(&bo);
+
+ vfree(par->vmalloc);
+ framebuffer_release(info);
+
+ return 0;
+}
+
+int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *vmw_bo)
+{
+ struct ttm_buffer_object *bo = &vmw_bo->base;
+ int ret = 0;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
+int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *vmw_bo)
+{
+ struct ttm_buffer_object *bo = &vmw_bo->base;
+ struct ttm_placement ne_placement = vmw_vram_ne_placement;
+ int ret = 0;
+
+ ne_placement.lpfn = bo->num_pages;
+
+	/* interruptible? */
+ ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err_unlock;
+
+ ret = ttm_bo_validate(bo, &ne_placement, false, false);
+ ttm_bo_unreserve(bo);
+err_unlock:
+ ttm_write_unlock(&vmw_priv->active_master->lock);
+
+ return ret;
+}
+
+int vmw_fb_off(struct vmw_private *vmw_priv)
+{
+ struct fb_info *info;
+ struct vmw_fb_par *par;
+ unsigned long flags;
+
+ if (!vmw_priv->fb_info)
+ return -EINVAL;
+
+ info = vmw_priv->fb_info;
+ par = info->par;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ par->dirty.active = false;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+ flush_scheduled_work();
+
+ par->bo_ptr = NULL;
+ ttm_bo_kunmap(&par->map);
+
+ vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
+
+ return 0;
+}
+
+int vmw_fb_on(struct vmw_private *vmw_priv)
+{
+ struct fb_info *info;
+ struct vmw_fb_par *par;
+ unsigned long flags;
+ bool dummy;
+ int ret;
+
+ if (!vmw_priv->fb_info)
+ return -EINVAL;
+
+ info = vmw_priv->fb_info;
+ par = info->par;
+
+ /* we are already active */
+ if (par->bo_ptr != NULL)
+ return 0;
+
+	/* Make sure that all overlays are stopped when we take over */
+ vmw_overlay_stop_all(vmw_priv);
+
+ ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("could not move buffer to start of VRAM\n");
+ goto err_no_buffer;
+ }
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base,
+ 0,
+ par->vmw_bo->base.num_pages,
+ &par->map);
+ BUG_ON(ret != 0);
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ par->dirty.active = true;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+err_no_buffer:
+ vmw_fb_set_par(info);
+
+ vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
+
+	/* If there already was stuff dirty we won't
+	 * schedule new work, so let's do it now */
+ schedule_delayed_work(&info->deferred_work, 0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 0000000..39d43a0
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,538 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "drmP.h"
+#include "ttm/ttm_placement.h"
+
+bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t fifo_min, hwversion;
+
+ fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
+ return false;
+
+ hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ if (hwversion == 0)
+ return false;
+
+ if (hwversion < SVGA3D_HWVERSION_WS65_B1)
+ return false;
+
+ return true;
+}
+
+int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t max;
+ uint32_t min;
+ uint32_t dummy;
+ int ret;
+
+ fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
+ fifo->static_buffer = vmalloc(fifo->static_buffer_size);
+ if (unlikely(fifo->static_buffer == NULL))
+ return -ENOMEM;
+
+ fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
+ fifo->last_data_size = 0;
+ fifo->last_buffer_add = false;
+ fifo->last_buffer = vmalloc(fifo->last_buffer_size);
+ if (unlikely(fifo->last_buffer == NULL)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ fifo->dynamic_buffer = NULL;
+ fifo->reserved_size = 0;
+ fifo->using_bounce_buffer = false;
+
+ mutex_init(&fifo->fifo_mutex);
+ init_rwsem(&fifo->rwsem);
+
+ /*
+ * Allow mapping the first page read-only to user-space.
+ */
+
+ DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
+ DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
+ DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
+
+ mutex_lock(&dev_priv->hw_mutex);
+ dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
+ dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+ vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+ min = 4;
+ if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
+ min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
+ min <<= 2;
+
+ if (min < PAGE_SIZE)
+ min = PAGE_SIZE;
+
+ iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
+ iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+ wmb();
+ iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
+ iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
+ mb();
+
+ vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+
+ DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
+ (unsigned int) max,
+ (unsigned int) min,
+ (unsigned int) fifo->capabilities);
+
+ atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
+ iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
+
+ return vmw_fifo_send_fence(dev_priv, &dummy);
+out_err:
+ vfree(fifo->static_buffer);
+ fifo->static_buffer = NULL;
+ return ret;
+}
+
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
+ iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
+ vmw_write(dev_priv, SVGA_REG_SYNC, reason);
+ }
+
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+
+ dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+ vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
+ dev_priv->config_done_state);
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ dev_priv->enable_state);
+
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ if (likely(fifo->last_buffer != NULL)) {
+ vfree(fifo->last_buffer);
+ fifo->last_buffer = NULL;
+ }
+
+ if (likely(fifo->static_buffer != NULL)) {
+ vfree(fifo->static_buffer);
+ fifo->static_buffer = NULL;
+ }
+
+ if (likely(fifo->dynamic_buffer != NULL)) {
+ vfree(fifo->dynamic_buffer);
+ fifo->dynamic_buffer = NULL;
+ }
+}
+
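+/*
+ * Check whether @bytes of data would not fit in the space left between
+ * the FIFO write pointer (NEXT_CMD) and the device read pointer (STOP).
+ */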
+static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+ uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+
+ return ((max - next_cmd) + (stop - min) <= bytes);
+}
+
+static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
+ uint32_t bytes, bool interruptible,
+ unsigned long timeout)
+{
+ int ret = 0;
+ unsigned long end_jiffies = jiffies + timeout;
+ DEFINE_WAIT(__wait);
+
+ DRM_INFO("Fifo wait noirq.\n");
+
+ for (;;) {
+ prepare_to_wait(&dev_priv->fifo_queue, &__wait,
+ (interruptible) ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (!vmw_fifo_is_full(dev_priv, bytes))
+ break;
+ if (time_after_eq(jiffies, end_jiffies)) {
+ ret = -EBUSY;
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+ schedule_timeout(1);
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&dev_priv->fifo_queue, &__wait);
+ wake_up_all(&dev_priv->fifo_queue);
+ DRM_INFO("Fifo noirq exit.\n");
+ return ret;
+}
+
+static int vmw_fifo_wait(struct vmw_private *dev_priv,
+ uint32_t bytes, bool interruptible,
+ unsigned long timeout)
+{
+ long ret = 1L;
+ unsigned long irq_flags;
+
+ if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
+ return 0;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return vmw_fifo_wait_noirq(dev_priv, bytes,
+ interruptible, timeout);
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_FIFO_PROGRESS,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) |
+ SVGA_IRQFLAG_FIFO_PROGRESS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ if (interruptible)
+ ret = wait_event_interruptible_timeout
+ (dev_priv->fifo_queue,
+ !vmw_fifo_is_full(dev_priv, bytes), timeout);
+ else
+ ret = wait_event_timeout
+ (dev_priv->fifo_queue,
+ !vmw_fifo_is_full(dev_priv, bytes), timeout);
+
+ if (unlikely(ret == 0))
+ ret = -EBUSY;
+ else if (likely(ret > 0))
+ ret = 0;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) &
+ ~SVGA_IRQFLAG_FIFO_PROGRESS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return ret;
+}
+
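+/*
+ * Reserve @bytes of FIFO space for command data. When an in-place
+ * reservation is not possible (for example, the request would wrap and
+ * the device lacks SVGA_FIFO_CAP_RESERVE), a bounce buffer is handed
+ * out instead and copied into the FIFO by vmw_fifo_commit().
+ */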
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t max;
+ uint32_t min;
+ uint32_t next_cmd;
+ uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+ int ret;
+
+ mutex_lock(&fifo_state->fifo_mutex);
+ max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+
+ if (unlikely(bytes >= (max - min)))
+ goto out_err;
+
+ BUG_ON(fifo_state->reserved_size != 0);
+ BUG_ON(fifo_state->dynamic_buffer != NULL);
+
+ fifo_state->reserved_size = bytes;
+
+ while (1) {
+ uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+ bool need_bounce = false;
+ bool reserve_in_place = false;
+
+ if (next_cmd >= stop) {
+ if (likely((next_cmd + bytes < max ||
+ (next_cmd + bytes == max && stop > min))))
+ reserve_in_place = true;
+
+ else if (vmw_fifo_is_full(dev_priv, bytes)) {
+ ret = vmw_fifo_wait(dev_priv, bytes,
+ false, 3 * HZ);
+ if (unlikely(ret != 0))
+ goto out_err;
+ } else
+ need_bounce = true;
+
+ } else {
+
+ if (likely((next_cmd + bytes < stop)))
+ reserve_in_place = true;
+ else {
+ ret = vmw_fifo_wait(dev_priv, bytes,
+ false, 3 * HZ);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+ }
+
+ if (reserve_in_place) {
+ if (reserveable || bytes <= sizeof(uint32_t)) {
+ fifo_state->using_bounce_buffer = false;
+
+ if (reserveable)
+ iowrite32(bytes, fifo_mem +
+ SVGA_FIFO_RESERVED);
+ return fifo_mem + (next_cmd >> 2);
+ } else {
+ need_bounce = true;
+ }
+ }
+
+ if (need_bounce) {
+ fifo_state->using_bounce_buffer = true;
+ if (bytes < fifo_state->static_buffer_size)
+ return fifo_state->static_buffer;
+ else {
+ fifo_state->dynamic_buffer = vmalloc(bytes);
+ return fifo_state->dynamic_buffer;
+ }
+ }
+ }
+out_err:
+ fifo_state->reserved_size = 0;
+ mutex_unlock(&fifo_state->fifo_mutex);
+ return NULL;
+}
+
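+/*
+ * Copy a bounce buffer into the FIFO, using the RESERVED register so
+ * that the device only processes the data once the whole copy,
+ * including any wrap-around, has completed.
+ */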
+static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
+ __le32 __iomem *fifo_mem,
+ uint32_t next_cmd,
+ uint32_t max, uint32_t min, uint32_t bytes)
+{
+ uint32_t chunk_size = max - next_cmd;
+ uint32_t rest;
+ uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+ fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
+ if (bytes < chunk_size)
+ chunk_size = bytes;
+
+ iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+ mb();
+ memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
+ rest = bytes - chunk_size;
+ if (rest)
+ memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
+ rest);
+}
+
+static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
+ __le32 __iomem *fifo_mem,
+ uint32_t next_cmd,
+ uint32_t max, uint32_t min, uint32_t bytes)
+{
+ uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+ fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
+ while (bytes > 0) {
+ iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
+ next_cmd += sizeof(uint32_t);
+ if (unlikely(next_cmd == max))
+ next_cmd = min;
+ mb();
+ iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ mb();
+ bytes -= sizeof(uint32_t);
+ }
+}
+
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+ uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+
+ BUG_ON((bytes & 3) != 0);
+ BUG_ON(bytes > fifo_state->reserved_size);
+
+ fifo_state->reserved_size = 0;
+
+ if (fifo_state->using_bounce_buffer) {
+ if (reserveable)
+ vmw_fifo_res_copy(fifo_state, fifo_mem,
+ next_cmd, max, min, bytes);
+ else
+ vmw_fifo_slow_copy(fifo_state, fifo_mem,
+ next_cmd, max, min, bytes);
+
+ if (fifo_state->dynamic_buffer) {
+ vfree(fifo_state->dynamic_buffer);
+ fifo_state->dynamic_buffer = NULL;
+ }
+
+ }
+
+ down_write(&fifo_state->rwsem);
+ if (fifo_state->using_bounce_buffer || reserveable) {
+ next_cmd += bytes;
+ if (next_cmd >= max)
+ next_cmd -= max - min;
+ mb();
+ iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ }
+
+ if (reserveable)
+ iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
+ mb();
+ up_write(&fifo_state->rwsem);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ mutex_unlock(&fifo_state->fifo_mutex);
+}
+
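+/*
+ * Emit a fence command and return its sequence number in @sequence.
+ * If the FIFO lacks fence support, nothing is emitted and the irq
+ * code emulates fence waits instead.
+ */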
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct svga_fifo_cmd_fence *cmd_fence;
+ void *fm;
+ int ret = 0;
+ uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+
+ fm = vmw_fifo_reserve(dev_priv, bytes);
+ if (unlikely(fm == NULL)) {
+ *sequence = atomic_read(&dev_priv->fence_seq);
+ ret = -ENOMEM;
+ (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
+ false, 3*HZ);
+ goto out_err;
+ }
+
+ do {
+ *sequence = atomic_add_return(1, &dev_priv->fence_seq);
+ } while (*sequence == 0);
+
+ if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+
+ /*
+ * Don't request hardware to send a fence. The
+ * waiting code in vmwgfx_irq.c will emulate this.
+ */
+
+ vmw_fifo_commit(dev_priv, 0);
+ return 0;
+ }
+
+ *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
+ cmd_fence = (struct svga_fifo_cmd_fence *)
+ ((unsigned long)fm + sizeof(__le32));
+
+ iowrite32(*sequence, &cmd_fence->fence);
+ fifo_state->last_buffer_add = true;
+ vmw_fifo_commit(dev_priv, bytes);
+ fifo_state->last_buffer_add = false;
+
+out_err:
+ return ret;
+}
+
+/**
+ * Map the first page of the FIFO read-only to user-space.
+ */
+
+static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int ret;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+
+ if (address != vma->vm_start)
+ return VM_FAULT_SIGBUS;
+
+ ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
+ if (likely(ret == -EBUSY || ret == 0))
+ return VM_FAULT_NOPAGE;
+ else if (ret == -ENOMEM)
+ return VM_FAULT_OOM;
+
+ return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct vmw_fifo_vm_ops = {
+ .fault = vmw_fifo_vm_fault,
+ .open = NULL,
+ .close = NULL
+};
+
+int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vmw_private *dev_priv;
+
+ file_priv = (struct drm_file *)filp->private_data;
+ dev_priv = vmw_priv(file_priv->minor->dev);
+
+ if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
+ (vma->vm_end - vma->vm_start) != PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
+ vma->vm_page_prot);
+ vma->vm_ops = &vmw_fifo_vm_ops;
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
new file mode 100644
index 0000000..5f8908a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -0,0 +1,213 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "drmP.h"
+#include "ttm/ttm_bo_driver.h"
+
+/**
+ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
+ * the number of used descriptors.
+ */
+
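+/*
+ * Build a chain of guest memory descriptor pages for the given page
+ * array. Runs of consecutive pfns are coalesced into a single
+ * descriptor; the last descriptor of each page points to the next
+ * descriptor page, and a zero descriptor terminates the chain.
+ */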
+static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
+ struct page *pages[],
+ unsigned long num_pages)
+{
+ struct page *page, *next;
+ struct svga_guest_mem_descriptor *page_virtual = NULL;
+ struct svga_guest_mem_descriptor *desc_virtual = NULL;
+ unsigned int desc_per_page;
+ unsigned long prev_pfn;
+ unsigned long pfn;
+ int ret;
+
+ desc_per_page = PAGE_SIZE /
+ sizeof(struct svga_guest_mem_descriptor) - 1;
+
+ while (likely(num_pages != 0)) {
+ page = alloc_page(__GFP_HIGHMEM);
+ if (unlikely(page == NULL)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ list_add_tail(&page->lru, desc_pages);
+
+ /*
+ * Point previous page terminating descriptor to this
+ * page before unmapping it.
+ */
+
+ if (likely(page_virtual != NULL)) {
+ desc_virtual->ppn = page_to_pfn(page);
+ kunmap_atomic(page_virtual, KM_USER0);
+ }
+
+ page_virtual = kmap_atomic(page, KM_USER0);
+ desc_virtual = page_virtual - 1;
+ prev_pfn = ~(0UL);
+
+ while (likely(num_pages != 0)) {
+ pfn = page_to_pfn(*pages);
+
+ if (pfn != prev_pfn + 1) {
+
+ if (desc_virtual - page_virtual ==
+ desc_per_page - 1)
+ break;
+
+ (++desc_virtual)->ppn = cpu_to_le32(pfn);
+ desc_virtual->num_pages = cpu_to_le32(1);
+ } else {
+ uint32_t tmp =
+ le32_to_cpu(desc_virtual->num_pages);
+ desc_virtual->num_pages = cpu_to_le32(tmp + 1);
+ }
+ prev_pfn = pfn;
+ --num_pages;
+ ++pages;
+ }
+
+ (++desc_virtual)->ppn = cpu_to_le32(0);
+ desc_virtual->num_pages = cpu_to_le32(0);
+ }
+
+ if (likely(page_virtual != NULL))
+ kunmap_atomic(page_virtual, KM_USER0);
+
+ return 0;
+out_err:
+ list_for_each_entry_safe(page, next, desc_pages, lru) {
+ list_del_init(&page->lru);
+ __free_page(page);
+ }
+ return ret;
+}
+
+static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, desc_pages, lru) {
+ list_del_init(&page->lru);
+ __free_page(page);
+ }
+}
+
+static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
+ int gmr_id, struct list_head *desc_pages)
+{
+ struct page *page;
+
+ if (unlikely(list_empty(desc_pages)))
+ return;
+
+ page = list_entry(desc_pages->next, struct page, lru);
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
+ wmb();
+ vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
+ mb();
+
+ mutex_unlock(&dev_priv->hw_mutex);
+
+}
+
+/**
+ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
+ * the number of used descriptors.
+ */
+
+static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
+ unsigned long num_pages)
+{
+ unsigned long prev_pfn = ~(0UL);
+ unsigned long pfn;
+ unsigned long descriptors = 0;
+
+ while (num_pages--) {
+ pfn = page_to_pfn(*pages++);
+ if (prev_pfn + 1 != pfn)
+ ++descriptors;
+ prev_pfn = pfn;
+ }
+
+ return descriptors;
+}
+
+int vmw_gmr_bind(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_tt *ttm = bo->ttm;
+ unsigned long descriptors;
+ int ret;
+ uint32_t id;
+ struct list_head desc_pages;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_GMR))
+ return -EINVAL;
+
+ ret = ttm_tt_populate(ttm);
+ if (unlikely(ret != 0))
+ return ret;
+
+ descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
+ if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&desc_pages);
+ ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
+ ttm->num_pages);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_gmr_id_alloc(dev_priv, &id);
+ if (unlikely(ret != 0))
+ goto out_no_id;
+
+ vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
+ vmw_gmr_free_descriptors(&desc_pages);
+ vmw_dmabuf_set_gmr(bo, id);
+ return 0;
+
+out_no_id:
+ vmw_gmr_free_descriptors(&desc_pages);
+ return ret;
+}
+
+void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
+ wmb();
+ vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
+ mb();
+ mutex_unlock(&dev_priv->hw_mutex);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
new file mode 100644
index 0000000..1c7a316
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -0,0 +1,87 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_drm.h"
+
+int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_getparam_arg *param =
+ (struct drm_vmw_getparam_arg *)data;
+
+ switch (param->param) {
+ case DRM_VMW_PARAM_NUM_STREAMS:
+ param->value = vmw_overlay_num_overlays(dev_priv);
+ break;
+ case DRM_VMW_PARAM_NUM_FREE_STREAMS:
+ param->value = vmw_overlay_num_free_overlays(dev_priv);
+ break;
+ case DRM_VMW_PARAM_3D:
+ param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
+ break;
+ case DRM_VMW_PARAM_FIFO_OFFSET:
+ param->value = dev_priv->mmio_start;
+ break;
+ case DRM_VMW_PARAM_HW_CAPS:
+ param->value = dev_priv->capabilities;
+ break;
+ case DRM_VMW_PARAM_FIFO_CAPS:
+ param->value = dev_priv->fifo.capabilities;
+ break;
+ default:
+ DRM_ERROR("Illegal vmwgfx get param request: %d\n",
+ param->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct drm_vmw_fifo_debug_arg *arg =
+ (struct drm_vmw_fifo_debug_arg *)data;
+ __le32 __user *buffer = (__le32 __user *)
+ (unsigned long)arg->debug_buffer;
+
+ if (unlikely(fifo_state->last_buffer == NULL))
+ return -EINVAL;
+
+ if (arg->debug_buffer_size < fifo_state->last_data_size) {
+ arg->used_size = arg->debug_buffer_size;
+ arg->did_not_fit = 1;
+ } else {
+ arg->used_size = fifo_state->last_data_size;
+ arg->did_not_fit = 0;
+ }
+ return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
new file mode 100644
index 0000000..4d7cb53
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -0,0 +1,286 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#define VMW_FENCE_WRAP (1 << 24)
+
+irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ uint32_t status;
+
+ spin_lock(&dev_priv->irq_lock);
+ status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ spin_unlock(&dev_priv->irq_lock);
+
+ if (status & SVGA_IRQFLAG_ANY_FENCE)
+ wake_up_all(&dev_priv->fence_queue);
+ if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
+ wake_up_all(&dev_priv->fifo_queue);
+
+ if (likely(status)) {
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
+{
+ uint32_t busy;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ busy = vmw_read(dev_priv, SVGA_REG_BUSY);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return (busy == 0);
+}
+
+
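+/*
+ * Returns true if the given fence sequence has passed. Handles
+ * sequence wrap-around by treating differences smaller than
+ * VMW_FENCE_WRAP as already signaled, and falls back to a device
+ * busy check when the FIFO lacks fence support.
+ */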
+bool vmw_fence_signaled(struct vmw_private *dev_priv,
+ uint32_t sequence)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ struct vmw_fifo_state *fifo_state;
+ bool ret;
+
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ return true;
+
+ dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ return true;
+
+ fifo_state = &dev_priv->fifo;
+ if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
+ vmw_fifo_idle(dev_priv, sequence))
+ return true;
+
+ /**
+ * Then check if the sequence is higher than what we've actually
+	 * emitted. If so, the fence is stale and signaled.
+ */
+
+ ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+ > VMW_FENCE_WRAP);
+
+ return ret;
+}
+
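+/*
+ * Polling fallback used when the device cannot signal fences through
+ * an interrupt. Optionally blocks new command submission (fifo_idle)
+ * and, on success with fifo_idle set, writes the last emitted sequence
+ * to the FIFO fence register so subsequent waits can use the fast path.
+ */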
+int vmw_fallback_wait(struct vmw_private *dev_priv,
+ bool lazy,
+ bool fifo_idle,
+ uint32_t sequence,
+ bool interruptible,
+ unsigned long timeout)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+
+ uint32_t count = 0;
+ uint32_t signal_seq;
+ int ret;
+ unsigned long end_jiffies = jiffies + timeout;
+ bool (*wait_condition)(struct vmw_private *, uint32_t);
+ DEFINE_WAIT(__wait);
+
+ wait_condition = (fifo_idle) ? &vmw_fifo_idle :
+ &vmw_fence_signaled;
+
+ /**
+ * Block command submission while waiting for idle.
+ */
+
+ if (fifo_idle)
+ down_read(&fifo_state->rwsem);
+ signal_seq = atomic_read(&dev_priv->fence_seq);
+ ret = 0;
+
+ for (;;) {
+ prepare_to_wait(&dev_priv->fence_queue, &__wait,
+ (interruptible) ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (wait_condition(dev_priv, sequence))
+ break;
+ if (time_after_eq(jiffies, end_jiffies)) {
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+ if (lazy)
+ schedule_timeout(1);
+ else if ((++count & 0x0F) == 0) {
+ /**
+ * FIXME: Use schedule_hr_timeout here for
+ * newer kernels and lower CPU utilization.
+ */
+
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ __set_current_state((interruptible) ?
+ TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+ }
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&dev_priv->fence_queue, &__wait);
+ if (ret == 0 && fifo_idle) {
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
+ }
+ wake_up_all(&dev_priv->fence_queue);
+ if (fifo_idle)
+ up_read(&fifo_state->rwsem);
+
+ return ret;
+}
+
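+/*
+ * Wait for a fence sequence to signal. Uses the ANY_FENCE interrupt
+ * when the device supports IRQ masking and FIFO fences, and falls
+ * back to vmw_fallback_wait() otherwise.
+ */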
+int vmw_wait_fence(struct vmw_private *dev_priv,
+ bool lazy, uint32_t sequence,
+ bool interruptible, unsigned long timeout)
+{
+ long ret;
+ unsigned long irq_flags;
+ struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ return 0;
+
+ if (likely(vmw_fence_signaled(dev_priv, sequence)))
+ return 0;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+
+ if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
+ return vmw_fallback_wait(dev_priv, lazy, true, sequence,
+ interruptible, timeout);
+
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return vmw_fallback_wait(dev_priv, lazy, false, sequence,
+ interruptible, timeout);
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_ANY_FENCE,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) |
+ SVGA_IRQFLAG_ANY_FENCE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ if (interruptible)
+ ret = wait_event_interruptible_timeout
+ (dev_priv->fence_queue,
+ vmw_fence_signaled(dev_priv, sequence),
+ timeout);
+ else
+ ret = wait_event_timeout
+ (dev_priv->fence_queue,
+ vmw_fence_signaled(dev_priv, sequence),
+ timeout);
+
+ if (unlikely(ret == 0))
+ ret = -EBUSY;
+ else if (likely(ret > 0))
+ ret = 0;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) &
+ ~SVGA_IRQFLAG_ANY_FENCE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return ret;
+}
+
+void vmw_irq_preinstall(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ uint32_t status;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return;
+
+ spin_lock_init(&dev_priv->irq_lock);
+ status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+}
+
+int vmw_irq_postinstall(struct drm_device *dev)
+{
+ return 0;
+}
+
+void vmw_irq_uninstall(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ uint32_t status;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+}
+
+#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)
+
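+/*
+ * The kernel cookie records the absolute expiry time of the wait, so
+ * a wait that is restarted by user-space does not extend the original
+ * timeout.
+ */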
+int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_fence_wait_arg *arg =
+ (struct drm_vmw_fence_wait_arg *)data;
+ unsigned long timeout;
+
+ if (!arg->cookie_valid) {
+ arg->cookie_valid = 1;
+ arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
+ }
+
+ timeout = jiffies;
+ if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
+ return -EBUSY;
+
+ timeout = (unsigned long)arg->kernel_cookie - timeout;
+ return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
new file mode 100644
index 0000000..31f9afe
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -0,0 +1,880 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+/* Might need a hrtimer here? */
+#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+
+void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+{
+ if (du->cursor_surface)
+ vmw_surface_unreference(&du->cursor_surface);
+ if (du->cursor_dmabuf)
+ vmw_dmabuf_unreference(&du->cursor_dmabuf);
+ drm_crtc_cleanup(&du->crtc);
+ drm_encoder_cleanup(&du->encoder);
+ drm_connector_cleanup(&du->connector);
+}
+
+/*
+ * Display Unit Cursor functions
+ */
+
+int vmw_cursor_update_image(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+ } *cmd;
+ u32 image_size = width * height * 4;
+ u32 cmd_size = sizeof(*cmd) + image_size;
+
+ if (!image)
+ return -EINVAL;
+
+ cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
+ cmd->cursor.id = cpu_to_le32(0);
+ cmd->cursor.width = cpu_to_le32(width);
+ cmd->cursor.height = cpu_to_le32(height);
+ cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
+ cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+
+ vmw_fifo_commit(dev_priv, cmd_size);
+
+ return 0;
+}
+
+void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t count;
+
+ iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
+ iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
+ iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
+ count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+ iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+}
+
+int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_surface *surface = NULL;
+ struct vmw_dma_buffer *dmabuf = NULL;
+ int ret;
+
+ if (handle) {
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+ handle, &surface);
+ if (!ret) {
+ if (!surface->snooper.image) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ } else {
+ ret = vmw_user_dmabuf_lookup(tfile,
+ handle, &dmabuf);
+ if (ret) {
+ DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* takedown old cursor */
+ if (du->cursor_surface) {
+ du->cursor_surface->snooper.crtc = NULL;
+ vmw_surface_unreference(&du->cursor_surface);
+ }
+ if (du->cursor_dmabuf)
+ vmw_dmabuf_unreference(&du->cursor_dmabuf);
+
+ /* setup new image */
+ if (surface) {
+ /* vmw_user_surface_lookup takes one reference */
+ du->cursor_surface = surface;
+
+ du->cursor_surface->snooper.crtc = crtc;
+ du->cursor_age = du->cursor_surface->snooper.age;
+ vmw_cursor_update_image(dev_priv, surface->snooper.image,
+ 64, 64, du->hotspot_x, du->hotspot_y);
+ } else if (dmabuf) {
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ void *virtual;
+ bool dummy;
+
+		/* vmw_user_dmabuf_lookup takes one reference */
+ du->cursor_dmabuf = dmabuf;
+
+ kmap_offset = 0;
+ kmap_num = (64*64*4) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ vmw_cursor_update_image(dev_priv, virtual, 64, 64,
+ du->hotspot_x, du->hotspot_y);
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(&dmabuf->base);
+
+ } else {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return 0;
+ }
+
+ vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
+
+ return 0;
+}
+
+int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
+
+ du->cursor_x = x + crtc->x;
+ du->cursor_y = y + crtc->y;
+
+ vmw_cursor_update_position(dev_priv, shown,
+ du->cursor_x, du->cursor_y);
+
+ return 0;
+}
+
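+/*
+ * Snoop SVGA3D surface DMA commands that target a cursor surface and
+ * copy the 64x64 cursor image into the surface's snooper buffer, so
+ * the cursor can be updated after command submission.
+ */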
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ unsigned box_count;
+ void *virtual;
+ bool dummy;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.pitch != (64 * 4) ||
+ cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->w != 64 || box->h != 64 || box->d != 1 ||
+ box_count != 1) {
+		/* TODO handle non-page-aligned offsets */
+		/* TODO handle partial uploads and pitch != 256 */
+		/* TODO handle more than one copy (size != 64) */
+		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (64*64*4) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+
+ memcpy(srf->snooper.image, virtual, 64*64*4);
+ srf->snooper.age++;
+
+	/* we can't call the cursor update function from here since execbuf has
+ * reserved fifo space.
+ *
+ * if (srf->snooper.crtc)
+ * vmw_ldu_crtc_cursor_update_image(dev_priv,
+ * srf->snooper.image, 64, 64,
+ * du->hotspot_x, du->hotspot_y);
+ */
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
+
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_display_unit *du;
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ if (!du->cursor_surface ||
+ du->cursor_age == du->cursor_surface->snooper.age)
+ continue;
+
+ du->cursor_age = du->cursor_surface->snooper.age;
+ vmw_cursor_update_image(dev_priv,
+ du->cursor_surface->snooper.image,
+ 64, 64, du->hotspot_x, du->hotspot_y);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+/*
+ * Generic framebuffer code
+ */
+
+int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ if (handle)
+		*handle = 0;
+
+ return 0;
+}
+
+/*
+ * Surface framebuffer code
+ */
+
+#define vmw_framebuffer_to_vfbs(x) \
+ container_of(x, struct vmw_framebuffer_surface, base.base)
+
+struct vmw_framebuffer_surface {
+ struct vmw_framebuffer base;
+ struct vmw_surface *surface;
+ struct delayed_work d_work;
+ struct mutex work_lock;
+ bool present_fs;
+};
+
+void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+{
+ struct vmw_framebuffer_surface *vfb =
+ vmw_framebuffer_to_vfbs(framebuffer);
+
+ cancel_delayed_work_sync(&vfb->d_work);
+ drm_framebuffer_cleanup(framebuffer);
+ vmw_surface_unreference(&vfb->surface);
+
+ kfree(framebuffer);
+}
+
+static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
+{
+ struct delayed_work *d_work =
+ container_of(work, struct delayed_work, work);
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(d_work, struct vmw_framebuffer_surface, d_work);
+ struct vmw_surface *surf = vfbs->surface;
+ struct drm_framebuffer *framebuffer = &vfbs->base.base;
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ SVGA3dCopyRect cr;
+ } *cmd;
+
+ mutex_lock(&vfbs->work_lock);
+ if (!vfbs->present_fs)
+ goto out_unlock;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
+ goto out_resched;
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
+ cmd->body.sid = cpu_to_le32(surf->res.id);
+ cmd->cr.x = cpu_to_le32(0);
+ cmd->cr.y = cpu_to_le32(0);
+ cmd->cr.srcx = cmd->cr.x;
+ cmd->cr.srcy = cmd->cr.y;
+ cmd->cr.w = cpu_to_le32(framebuffer->width);
+ cmd->cr.h = cpu_to_le32(framebuffer->height);
+ vfbs->present_fs = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+out_resched:
+ /**
+ * Will not re-add if already pending.
+ */
+ schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
+out_unlock:
+ mutex_unlock(&vfbs->work_lock);
+}
+
+
+int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_surface *surf = vfbs->surface;
+ struct drm_clip_rect norect;
+ SVGA3dCopyRect *cr;
+ int i, inc = 1;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ SVGA3dCopyRect cr;
+ } *cmd;
+
+ if (!num_clips ||
+ !(dev_priv->fifo.capabilities &
+ SVGA_FIFO_CAP_SCREEN_OBJECT)) {
+ int ret;
+
+ mutex_lock(&vfbs->work_lock);
+ vfbs->present_fs = true;
+ ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
+ mutex_unlock(&vfbs->work_lock);
+ if (ret) {
+ /**
+			 * No work was pending; force an immediate present.
+ */
+ vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
+ }
+ return 0;
+ }
+
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = framebuffer->width;
+ norect.y2 = framebuffer->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ inc = 2; /* skip source rects */
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
+ cmd->body.sid = cpu_to_le32(surf->res.id);
+
+ for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
+ cr->x = cpu_to_le16(clips->x1);
+ cr->y = cpu_to_le16(clips->y1);
+ cr->srcx = cr->x;
+ cr->srcy = cr->y;
+ cr->w = cpu_to_le16(clips->x2 - clips->x1);
+ cr->h = cpu_to_le16(clips->y2 - clips->y1);
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
+
+ return 0;
+}
+
+static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+ .destroy = vmw_framebuffer_surface_destroy,
+ .dirty = vmw_framebuffer_surface_dirty,
+ .create_handle = vmw_framebuffer_create_handle,
+};
+
+int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+ struct vmw_surface *surface,
+ struct vmw_framebuffer **out,
+ unsigned width, unsigned height)
+
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_framebuffer_surface *vfbs;
+ int ret;
+
+ vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
+ if (!vfbs) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = drm_framebuffer_init(dev, &vfbs->base.base,
+ &vmw_framebuffer_surface_funcs);
+ if (ret)
+ goto out_err2;
+
+ if (!vmw_surface_reference(surface)) {
+ DRM_ERROR("failed to reference surface %p\n", surface);
+ goto out_err3;
+ }
+
+ /* XXX get the first 3 from the surface info */
+ vfbs->base.base.bits_per_pixel = 32;
+ vfbs->base.base.pitch = width * 32 / 4;
+ vfbs->base.base.depth = 24;
+ vfbs->base.base.width = width;
+ vfbs->base.base.height = height;
+ vfbs->base.pin = NULL;
+ vfbs->base.unpin = NULL;
+ vfbs->surface = surface;
+ mutex_init(&vfbs->work_lock);
+ INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
+ *out = &vfbs->base;
+
+ return 0;
+
+out_err3:
+ drm_framebuffer_cleanup(&vfbs->base.base);
+out_err2:
+ kfree(vfbs);
+out_err1:
+ return ret;
+}
+
+/*
+ * Dmabuf framebuffer code
+ */
+
+#define vmw_framebuffer_to_vfbd(x) \
+ container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+
+struct vmw_framebuffer_dmabuf {
+ struct vmw_framebuffer base;
+ struct vmw_dma_buffer *buffer;
+};
+
+void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+{
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(framebuffer);
+
+ drm_framebuffer_cleanup(framebuffer);
+ vmw_dmabuf_unreference(&vfbd->buffer);
+
+ kfree(vfbd);
+}
+
+int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct drm_clip_rect norect;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdUpdate body;
+ } *cmd;
+ int i, increment = 1;
+
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = framebuffer->width;
+ norect.y2 = framebuffer->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ increment = 2;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_clips; i++, clips += increment) {
+ cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
+ cmd[i].body.x = cpu_to_le32(clips->x1);
+ cmd[i].body.y = cpu_to_le32(clips->y1);
+ cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
+ cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+
+ return 0;
+}
+
+static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+ .destroy = vmw_framebuffer_dmabuf_destroy,
+ .dirty = vmw_framebuffer_dmabuf_dirty,
+ .create_handle = vmw_framebuffer_create_handle,
+};
+
+static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(&vfb->base);
+ int ret;
+
+ vmw_overlay_pause_all(dev_priv);
+
+ ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
+
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+ vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+ vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
+ vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
+ vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
+ vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
+ vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+ } else
+ WARN_ON(true);
+
+ vmw_overlay_resume_all(dev_priv);
+
+ return 0;
+}
+
+static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(&vfb->base);
+
+ if (!vfbd->buffer) {
+ WARN_ON(!vfbd->buffer);
+ return 0;
+ }
+
+ return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
+}
+
+int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_framebuffer **out,
+ unsigned width, unsigned height)
+
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_framebuffer_dmabuf *vfbd;
+ int ret;
+
+ vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
+ if (!vfbd) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = drm_framebuffer_init(dev, &vfbd->base.base,
+ &vmw_framebuffer_dmabuf_funcs);
+ if (ret)
+ goto out_err2;
+
+ if (!vmw_dmabuf_reference(dmabuf)) {
+ DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
+ goto out_err3;
+ }
+
+ /* XXX get the first 3 from the surface info */
+ vfbd->base.base.bits_per_pixel = 32;
+ vfbd->base.base.pitch = width * 32 / 4;
+ vfbd->base.base.depth = 24;
+ vfbd->base.base.width = width;
+ vfbd->base.base.height = height;
+ vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
+ vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+ vfbd->buffer = dmabuf;
+ *out = &vfbd->base;
+
+ return 0;
+
+out_err3:
+ drm_framebuffer_cleanup(&vfbd->base.base);
+out_err2:
+ kfree(vfbd);
+out_err1:
+ return ret;
+}
+
+/*
+ * Generic Kernel modesetting functions
+ */
+
+static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_framebuffer *vfb = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_dma_buffer *bo = NULL;
+ int ret;
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+ mode_cmd->handle, &surface);
+ if (ret)
+ goto try_dmabuf;
+
+ if (!surface->scanout)
+ goto err_not_scanout;
+
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+ mode_cmd->width, mode_cmd->height);
+
+ /* vmw_user_surface_lookup takes one ref so does new_fb */
+ vmw_surface_unreference(&surface);
+
+ if (ret) {
+ DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ return NULL;
+ }
+ return &vfb->base;
+
+try_dmabuf:
+ DRM_INFO("%s: trying buffer\n", __func__);
+
+ ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
+ if (ret) {
+ DRM_ERROR("failed to find buffer: %i\n", ret);
+ return NULL;
+ }
+
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+ mode_cmd->width, mode_cmd->height);
+
+ /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
+ vmw_dmabuf_unreference(&bo);
+
+ if (ret) {
+ DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ return NULL;
+ }
+
+ return &vfb->base;
+
+err_not_scanout:
+ DRM_ERROR("surface not marked as scanout\n");
+ /* vmw_user_surface_lookup takes one ref */
+ vmw_surface_unreference(&surface);
+
+ return NULL;
+}
+
+static int vmw_kms_fb_changed(struct drm_device *dev)
+{
+ return 0;
+}
+
+static struct drm_mode_config_funcs vmw_kms_funcs = {
+ .fb_create = vmw_kms_fb_create,
+ .fb_changed = vmw_kms_fb_changed,
+};
+
+int vmw_kms_init(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int ret;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = &vmw_kms_funcs;
+ dev->mode_config.min_width = 1;
+ dev->mode_config.min_height = 1;
+ dev->mode_config.max_width = dev_priv->fb_max_width;
+ dev->mode_config.max_height = dev_priv->fb_max_height;
+
+ ret = vmw_kms_init_legacy_display_system(dev_priv);
+
+ return 0;
+}
+
+int vmw_kms_close(struct vmw_private *dev_priv)
+{
+ /*
+	 * Docs say we should take the lock before calling this function,
+	 * but since it destroys encoders, and our destructor calls
+	 * drm_encoder_cleanup which takes the lock, we would deadlock.
+ */
+ drm_mode_config_cleanup(dev_priv->dev);
+ vmw_kms_close_legacy_display_system(dev_priv);
+ return 0;
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ du->hotspot_x = arg->xhot;
+ du->hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ crtc = obj_to_crtc(obj);
+ du = vmw_crtc_to_du(crtc);
+
+ du->hotspot_x = arg->xhot;
+ du->hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+{
+ /*
+	 * Set up a single multimon monitor with a size of 0x0; this
+	 * stops the UI from resizing when we change the framebuffer
+	 * size.
+ */
+ if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+ vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
+ vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
+ vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
+ vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
+ vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
+ vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
+ vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
+ vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+
+ return 0;
+}
+
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
+{
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
+ vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
+
+ /* TODO check for multimon */
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
new file mode 100644
index 0000000..8b95249
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -0,0 +1,102 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_KMS_H_
+#define VMWGFX_KMS_H_
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+
+#define vmw_framebuffer_to_vfb(x) \
+ container_of(x, struct vmw_framebuffer, base)
+
+/**
+ * Base class for framebuffers
+ *
+ * @pin is called whenever a crtc uses this framebuffer
+ * @unpin is called when the framebuffer is no longer used by any crtc
+ */
+struct vmw_framebuffer {
+ struct drm_framebuffer base;
+ int (*pin)(struct vmw_framebuffer *fb);
+ int (*unpin)(struct vmw_framebuffer *fb);
+};
+
+
+#define vmw_crtc_to_du(x) \
+ container_of(x, struct vmw_display_unit, crtc)
+
+/*
+ * Basic cursor manipulation
+ */
+int vmw_cursor_update_image(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY);
+void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y);
+
+/**
+ * Base class display unit.
+ *
+ * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
+ * the display unit is all of them at the same time. This is true for both
+ * legacy multimon and screen objects.
+ */
+struct vmw_display_unit {
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct vmw_surface *cursor_surface;
+ struct vmw_dma_buffer *cursor_dmabuf;
+ size_t cursor_age;
+
+ int cursor_x;
+ int cursor_y;
+
+ int hotspot_x;
+ int hotspot_y;
+
+ unsigned unit;
+};
+
+/*
+ * Shared display unit functions - vmwgfx_kms.c
+ */
+void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+
+/*
+ * Legacy display unit functions - vmwgfx_ldu.h
+ */
+int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
new file mode 100644
index 0000000..9089159
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -0,0 +1,516 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+#define vmw_crtc_to_ldu(x) \
+ container_of(x, struct vmw_legacy_display_unit, base.crtc)
+#define vmw_encoder_to_ldu(x) \
+ container_of(x, struct vmw_legacy_display_unit, base.encoder)
+#define vmw_connector_to_ldu(x) \
+ container_of(x, struct vmw_legacy_display_unit, base.connector)
+
+struct vmw_legacy_display {
+ struct list_head active;
+
+ unsigned num_active;
+
+ struct vmw_framebuffer *fb;
+};
+
+/**
+ * Display unit using the legacy register interface.
+ */
+struct vmw_legacy_display_unit {
+ struct vmw_display_unit base;
+
+ struct list_head active;
+
+ unsigned unit;
+};
+
+static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
+{
+ list_del_init(&ldu->active);
+ vmw_display_unit_cleanup(&ldu->base);
+ kfree(ldu);
+}
+
+
+/*
+ * Legacy Display Unit CRTC functions
+ */
+
+static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
+{
+}
+
+static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
+{
+}
+
+static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t size)
+{
+}
+
+static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
+{
+ vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
+}
+
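+/*
+ * Program the legacy multimon registers for all active display units:
+ * first invalidate every display definition, then write position and
+ * mode size for each unit on the active list.
+ */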
+static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
+{
+ struct vmw_legacy_display *lds = dev_priv->ldu_priv;
+ struct vmw_legacy_display_unit *entry;
+ struct drm_crtc *crtc;
+ int i = 0;
+
+ /* to stop the screen from changing size on resize */
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+ for (i = 0; i < lds->num_active; i++) {
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+ /* Now set the mode */
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+ i = 0;
+ list_for_each_entry(entry, &lds->active, active) {
+ crtc = &entry->base.crtc;
+
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
+ struct vmw_legacy_display_unit *ldu)
+{
+ struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+ if (list_empty(&ldu->active))
+ return 0;
+
+ list_del_init(&ldu->active);
+ if (--(ld->num_active) == 0) {
+ BUG_ON(!ld->fb);
+ if (ld->fb->unpin)
+ ld->fb->unpin(ld->fb);
+ ld->fb = NULL;
+ }
+
+ return 0;
+}
+
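+/*
+ * Add the unit to the active list, kept sorted by unit number. The
+ * framebuffer is pinned when the first unit becomes active.
+ */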
+static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
+ struct vmw_legacy_display_unit *ldu,
+ struct vmw_framebuffer *vfb)
+{
+ struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+ struct vmw_legacy_display_unit *entry;
+ struct list_head *at;
+
+ if (!list_empty(&ldu->active))
+ return 0;
+
+ at = &ld->active;
+ list_for_each_entry(entry, &ld->active, active) {
+ if (entry->unit > ldu->unit)
+ break;
+
+ at = &entry->active;
+ }
+
+ list_add(&ldu->active, at);
+ if (ld->num_active++ == 0) {
+ BUG_ON(ld->fb);
+ if (vfb->pin)
+ vfb->pin(vfb);
+ ld->fb = vfb;
+ }
+
+ return 0;
+}
+
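+/*
+ * Legacy set_config: only a single connector per crtc and a single
+ * active framebuffer are supported. An empty mode set turns the crtc
+ * off; otherwise the crtc is programmed and committed through
+ * vmw_ldu_commit_list().
+ */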
+static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct vmw_framebuffer *vfb;
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+
+ if (!set)
+ return -EINVAL;
+
+ if (!set->crtc)
+ return -EINVAL;
+
+ /* get the ldu */
+ crtc = set->crtc;
+ ldu = vmw_crtc_to_ldu(crtc);
+ vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
+ dev_priv = vmw_priv(crtc->dev);
+
+ if (set->num_connectors > 1) {
+ DRM_ERROR("to many connectors\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &ldu->base.connector) {
+ DRM_ERROR("connector doesn't match %p %p\n",
+ set->connectors[0], &ldu->base.connector);
+ return -EINVAL;
+ }
+
+	/* ldu only supports one fb active at a time */
+ if (dev_priv->ldu_priv->fb && vfb &&
+ dev_priv->ldu_priv->fb != vfb) {
+ DRM_ERROR("Multiple framebuffers not supported\n");
+ return -EINVAL;
+ }
+
+ /* since they always map one to one these are safe */
+ connector = &ldu->base.connector;
+ encoder = &ldu->base.encoder;
+
+ /* should we turn the crtc off? */
+ if (set->num_connectors == 0 || !set->mode || !set->fb) {
+
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ crtc->fb = NULL;
+
+ vmw_ldu_del_active(dev_priv, ldu);
+
+ vmw_ldu_commit_list(dev_priv);
+
+ return 0;
+ }
+
+
+ /* we now know we want to set a mode */
+ mode = set->mode;
+ fb = set->fb;
+
+ if (set->x + mode->hdisplay > fb->width ||
+ set->y + mode->vdisplay > fb->height) {
+ DRM_ERROR("set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ vmw_fb_off(dev_priv);
+
+ crtc->fb = fb;
+ encoder->crtc = crtc;
+ connector->encoder = encoder;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ crtc->mode = *mode;
+
+ vmw_ldu_add_active(dev_priv, ldu, vfb);
+
+ vmw_ldu_commit_list(dev_priv);
+
+ return 0;
+}
+
+static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
+ .save = vmw_ldu_crtc_save,
+ .restore = vmw_ldu_crtc_restore,
+ .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_move = vmw_du_crtc_cursor_move,
+ .gamma_set = vmw_ldu_crtc_gamma_set,
+ .destroy = vmw_ldu_crtc_destroy,
+ .set_config = vmw_ldu_crtc_set_config,
+};
+
+/*
+ * Legacy Display Unit encoder functions
+ */
+
+static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
+{
+ vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
+ .destroy = vmw_ldu_encoder_destroy,
+};
+
+/*
+ * Legacy Display Unit connector functions
+ */
+
+static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+static void vmw_ldu_connector_save(struct drm_connector *connector)
+{
+}
+
+static void vmw_ldu_connector_restore(struct drm_connector *connector)
+{
+}
+
+static enum drm_connector_status
+ vmw_ldu_connector_detect(struct drm_connector *connector)
+{
+ /* XXX vmwctrl should control connection status */
+ if (vmw_connector_to_ldu(connector)->base.unit == 0)
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static struct drm_display_mode vmw_ldu_connector_builtin[] = {
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
+ 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* Terminate */
+ { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+};
+
+static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
+ uint32_t max_width, uint32_t max_height)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ int i;
+
+ for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
+ if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
+ vmw_ldu_connector_builtin[i].vdisplay > max_height)
+ continue;
+
+ mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
+ if (!mode)
+ return 0;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ drm_mode_connector_list_update(connector);
+
+ return 1;
+}
+
+static int vmw_ldu_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void vmw_ldu_connector_destroy(struct drm_connector *connector)
+{
+ vmw_ldu_destroy(vmw_connector_to_ldu(connector));
+}
+
+static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+ .dpms = vmw_ldu_connector_dpms,
+ .save = vmw_ldu_connector_save,
+ .restore = vmw_ldu_connector_restore,
+ .detect = vmw_ldu_connector_detect,
+ .fill_modes = vmw_ldu_connector_fill_modes,
+ .set_property = vmw_ldu_connector_set_property,
+ .destroy = vmw_ldu_connector_destroy,
+};
+
+static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
+ if (!ldu)
+ return -ENOMEM;
+
+ ldu->unit = unit;
+ crtc = &ldu->base.crtc;
+ encoder = &ldu->base.encoder;
+ connector = &ldu->base.connector;
+
+ drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ /* Initial status */
+ if (unit == 0)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+
+ drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ drm_mode_connector_attach_encoder(connector, encoder);
+ encoder->possible_crtcs = (1 << unit);
+ encoder->possible_clones = 0;
+
+ INIT_LIST_HEAD(&ldu->active);
+
+ drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.dirty_info_property,
+ 1);
+
+ return 0;
+}
+
+int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
+{
+ if (dev_priv->ldu_priv) {
+ DRM_INFO("ldu system already on\n");
+ return -EINVAL;
+ }
+
+ dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
+
+ if (!dev_priv->ldu_priv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
+ dev_priv->ldu_priv->num_active = 0;
+ dev_priv->ldu_priv->fb = NULL;
+
+ drm_mode_create_dirty_info_property(dev_priv->dev);
+
+ vmw_ldu_init(dev_priv, 0);
+ vmw_ldu_init(dev_priv, 1);
+ vmw_ldu_init(dev_priv, 2);
+ vmw_ldu_init(dev_priv, 3);
+ vmw_ldu_init(dev_priv, 4);
+ vmw_ldu_init(dev_priv, 5);
+ vmw_ldu_init(dev_priv, 6);
+ vmw_ldu_init(dev_priv, 7);
+
+ return 0;
+}
+
+int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
+{
+ if (!dev_priv->ldu_priv)
+ return -ENOSYS;
+
+ BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
+
+ kfree(dev_priv->ldu_priv);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
new file mode 100644
index 0000000..5b6eabe
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -0,0 +1,625 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#include "ttm/ttm_placement.h"
+
+#include "svga_overlay.h"
+#include "svga_escape.h"
+
+#define VMW_MAX_NUM_STREAMS 1
+
+struct vmw_stream {
+ struct vmw_dma_buffer *buf;
+ bool claimed;
+ bool paused;
+ struct drm_vmw_control_stream_arg saved;
+};
+
+/**
+ * Overlay control
+ */
+struct vmw_overlay {
+ /*
+ * Each stream is a single overlay. In Xv these are called ports.
+ */
+ struct mutex mutex;
+ struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
+};
+
+static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ return dev_priv ? dev_priv->overlay_priv : NULL;
+}
+
+struct vmw_escape_header {
+ uint32_t cmd;
+ SVGAFifoCmdEscape body;
+};
+
+struct vmw_escape_video_flush {
+ struct vmw_escape_header escape;
+ SVGAEscapeVideoFlush flush;
+};
+
+static inline void fill_escape(struct vmw_escape_header *header,
+ uint32_t size)
+{
+ header->cmd = SVGA_CMD_ESCAPE;
+ header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
+ header->body.size = size;
+}
+
+static inline void fill_flush(struct vmw_escape_video_flush *cmd,
+ uint32_t stream_id)
+{
+ fill_escape(&cmd->escape, sizeof(cmd->flush));
+ cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
+ cmd->flush.streamId = stream_id;
+}
+
+/**
+ * Pin or unpin a buffer in vram.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to pin or unpin.
+ * @pin: Pin buffer in vram if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Takes the current master's ttm lock in read mode.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_placement *overlay_placement = &vmw_vram_placement;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (pin)
+ overlay_placement = &vmw_vram_ne_placement;
+
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_read_unlock(&dev_priv->active_master->lock);
+
+ return ret;
+}
+
+/**
+ * Send put command to hw.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_overlay_send_put(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct drm_vmw_control_stream_arg *arg,
+ bool interruptible)
+{
+ struct {
+ struct vmw_escape_header escape;
+ struct {
+ struct {
+ uint32_t cmdType;
+ uint32_t streamId;
+ } header;
+ struct {
+ uint32_t registerId;
+ uint32_t value;
+ } items[SVGA_VIDEO_PITCH_3 + 1];
+ } body;
+ struct vmw_escape_video_flush flush;
+ } *cmds;
+ uint32_t offset;
+ int i, ret;
+
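+ /* The FIFO may be full; keep retrying the reservation and wait
+ * for space to become available between attempts.
+ */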
+ for (;;) {
+ cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ if (cmds)
+ break;
+
+ ret = vmw_fallback_wait(dev_priv, false, true, 0,
+ interruptible, 3*HZ);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
+ }
+
+ fill_escape(&cmds->escape, sizeof(cmds->body));
+ cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ cmds->body.header.streamId = arg->stream_id;
+
+ for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
+ cmds->body.items[i].registerId = i;
+
+ offset = buf->base.offset + arg->offset;
+
+ cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
+ cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
+ cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
+ cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
+ cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
+ cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
+ cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
+ cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
+ cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
+ cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
+ cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
+ cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
+ cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
+ cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
+ cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
+ cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
+ cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
+ cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
+ cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
+
+ fill_flush(&cmds->flush, arg->stream_id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmds));
+
+ return 0;
+}
+
+/**
+ * Send stop command to hw.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
+ uint32_t stream_id,
+ bool interruptible)
+{
+ struct {
+ struct vmw_escape_header escape;
+ SVGAEscapeVideoSetRegs body;
+ struct vmw_escape_video_flush flush;
+ } *cmds;
+ int ret;
+
+ for (;;) {
+ cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ if (cmds)
+ break;
+
+ ret = vmw_fallback_wait(dev_priv, false, true, 0,
+ interruptible, 3*HZ);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
+ }
+
+ fill_escape(&cmds->escape, sizeof(cmds->body));
+ cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ cmds->body.header.streamId = stream_id;
+ cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
+ cmds->body.items[0].value = false;
+ fill_flush(&cmds->flush, stream_id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmds));
+
+ return 0;
+}
+
+/**
+ * Stop or pause a stream.
+ *
+ * If the stream is paused, the no-evict flag is removed from the buffer,
+ * but the buffer is left in vram. This allows, for instance, mode_set
+ * to evict it should it need to.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * @stream_id: which stream to stop/pause.
+ * @pause: true to pause, false to stop completely.
+ */
+static int vmw_overlay_stop(struct vmw_private *dev_priv,
+ uint32_t stream_id, bool pause,
+ bool interruptible)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ struct vmw_stream *stream = &overlay->stream[stream_id];
+ int ret;
+
+ /* No buffer attached: the stream is already completely stopped */
+ if (!stream->buf)
+ return 0;
+
+ /* If the stream is paused this is already done */
+ if (!stream->paused) {
+ ret = vmw_overlay_send_stop(dev_priv, stream_id,
+ interruptible);
+ if (ret)
+ return ret;
+
+ /* We just remove the NO_EVICT flag so no -ENOMEM */
+ ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
+ interruptible);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
+ }
+
+ if (!pause) {
+ vmw_dmabuf_unreference(&stream->buf);
+ stream->paused = false;
+ } else {
+ stream->paused = true;
+ }
+
+ return 0;
+}
+
+/**
+ * Update a stream and send any put or stop fifo commands needed.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * Returns
+ * -ENOMEM if buffer doesn't fit in vram.
+ * -ERESTARTSYS if interrupted.
+ */
+static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct drm_vmw_control_stream_arg *arg,
+ bool interruptible)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ struct vmw_stream *stream = &overlay->stream[arg->stream_id];
+ int ret = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
+ stream->buf, buf, stream->paused ? "" : "not ");
+
+ if (stream->buf != buf) {
+ ret = vmw_overlay_stop(dev_priv, arg->stream_id,
+ false, interruptible);
+ if (ret)
+ return ret;
+ } else if (!stream->paused) {
+ /* If the buffers match and not paused then just send
+ * the put command, no need to do anything else.
+ */
+ ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+ if (ret == 0)
+ stream->saved = *arg;
+ else
+ BUG_ON(!interruptible);
+
+ return ret;
+ }
+
+ /* We don't start the old stream if we are interrupted.
+ * Might return -ENOMEM if it can't fit the buffer in vram.
+ */
+ ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
+ if (ret)
+ return ret;
+
+ ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+ if (ret) {
+ /* This one needs to happen no matter what. We only remove
+ * the NO_EVICT flag so this is safe from -ENOMEM.
+ */
+ BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
+ return ret;
+ }
+
+ if (stream->buf != buf)
+ stream->buf = vmw_dmabuf_reference(buf);
+ stream->saved = *arg;
+
+ return 0;
+}
+
+/**
+ * Stop all streams.
+ *
+ * Used by the fb code when starting.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_stop_all(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, ret;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ struct vmw_stream *stream = &overlay->stream[i];
+ if (!stream->buf)
+ continue;
+
+ ret = vmw_overlay_stop(dev_priv, i, false, false);
+ WARN_ON(ret != 0);
+ }
+
+ mutex_unlock(&overlay->mutex);
+
+ return 0;
+}
+
+/**
+ * Try to resume all paused streams.
+ *
+ * Used by the kms code after moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_resume_all(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, ret;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ struct vmw_stream *stream = &overlay->stream[i];
+ if (!stream->paused)
+ continue;
+
+ ret = vmw_overlay_update_stream(dev_priv, stream->buf,
+ &stream->saved, false);
+ if (ret != 0)
+ DRM_INFO("%s: *warning* failed to resume stream %i\n",
+ __func__, i);
+ }
+
+ mutex_unlock(&overlay->mutex);
+
+ return 0;
+}
+
+/**
+ * Pauses all active streams.
+ *
+ * Used by the kms code when moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_pause_all(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, ret;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ if (overlay->stream[i].paused)
+ DRM_INFO("%s: *warning* stream %i already paused\n",
+ __func__, i);
+ ret = vmw_overlay_stop(dev_priv, i, true, false);
+ WARN_ON(ret != 0);
+ }
+
+ mutex_unlock(&overlay->mutex);
+
+ return 0;
+}
+
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ struct drm_vmw_control_stream_arg *arg =
+ (struct drm_vmw_control_stream_arg *)data;
+ struct vmw_dma_buffer *buf;
+ struct vmw_resource *res;
+ int ret;
+
+ if (!overlay)
+ return -ENOSYS;
+
+ ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
+ if (ret)
+ return ret;
+
+ mutex_lock(&overlay->mutex);
+
+ if (!arg->enabled) {
+ ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
+ goto out_unlock;
+ }
+
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+ if (ret)
+ goto out_unlock;
+
+ ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
+
+ vmw_dmabuf_unreference(&buf);
+
+out_unlock:
+ mutex_unlock(&overlay->mutex);
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
+{
+ if (!dev_priv->overlay_priv)
+ return 0;
+
+ return VMW_MAX_NUM_STREAMS;
+}
+
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, k;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
+ if (!overlay->stream[i].claimed)
+ k++;
+
+ mutex_unlock(&overlay->mutex);
+
+ return k;
+}
+
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i;
+
+ if (!overlay)
+ return -ENOSYS;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+
+ if (overlay->stream[i].claimed)
+ continue;
+
+ overlay->stream[i].claimed = true;
+ *out = i;
+ mutex_unlock(&overlay->mutex);
+ return 0;
+ }
+
+ mutex_unlock(&overlay->mutex);
+ return -ESRCH;
+}
+
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+
+ BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
+
+ if (!overlay)
+ return -ENOSYS;
+
+ mutex_lock(&overlay->mutex);
+
+ WARN_ON(!overlay->stream[stream_id].claimed);
+ vmw_overlay_stop(dev_priv, stream_id, false, false);
+ overlay->stream[stream_id].claimed = false;
+
+ mutex_unlock(&overlay->mutex);
+ return 0;
+}
+
+int vmw_overlay_init(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay;
+ int i;
+
+ if (dev_priv->overlay_priv)
+ return -EINVAL;
+
+ if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
+ (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
+ DRM_INFO("hardware doesn't support overlays\n");
+ return -ENOSYS;
+ }
+
+ overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return -ENOMEM;
+
+ memset(overlay, 0, sizeof(*overlay));
+ mutex_init(&overlay->mutex);
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ overlay->stream[i].buf = NULL;
+ overlay->stream[i].paused = false;
+ overlay->stream[i].claimed = false;
+ }
+
+ dev_priv->overlay_priv = overlay;
+
+ return 0;
+}
+
+int vmw_overlay_close(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ bool forgotten_buffer = false;
+ int i;
+
+ if (!overlay)
+ return -ENOSYS;
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ if (overlay->stream[i].buf) {
+ forgotten_buffer = true;
+ vmw_overlay_stop(dev_priv, i, false, false);
+ }
+ }
+
+ WARN_ON(forgotten_buffer);
+
+ dev_priv->overlay_priv = NULL;
+ kfree(overlay);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
new file mode 100644
index 0000000..9d0dd3a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -0,0 +1,57 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * This file contains virtual hardware defines for kernel space.
+ */
+
+#ifndef _VMWGFX_REG_H_
+#define _VMWGFX_REG_H_
+
+#include <linux/types.h>
+
+#define VMWGFX_INDEX_PORT 0x0
+#define VMWGFX_VALUE_PORT 0x1
+#define VMWGFX_IRQSTATUS_PORT 0x8
+
+struct svga_guest_mem_descriptor {
+ __le32 ppn;
+ __le32 num_pages;
+};
+
+struct svga_fifo_cmd_fence {
+ __le32 fence;
+};
+
+#define SVGA_SYNC_GENERIC 1
+#define SVGA_SYNC_FIFOFULL 2
+
+#include "svga_types.h"
+
+#include "svga3d_reg.h"
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 0000000..f8fbbc6
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1187 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_drm.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_placement.h"
+#include "drmP.h"
+
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+
+struct vmw_user_context {
+ struct ttm_base_object base;
+ struct vmw_resource res;
+};
+
+struct vmw_user_surface {
+ struct ttm_base_object base;
+ struct vmw_surface srf;
+};
+
+struct vmw_user_dma_buffer {
+ struct ttm_base_object base;
+ struct vmw_dma_buffer dma;
+};
+
+struct vmw_bo_user_rep {
+ uint32_t handle;
+ uint64_t map_handle;
+};
+
+struct vmw_stream {
+ struct vmw_resource res;
+ uint32_t stream_id;
+};
+
+struct vmw_user_stream {
+ struct ttm_base_object base;
+ struct vmw_stream stream;
+};
+
+static inline struct vmw_dma_buffer *
+vmw_dma_buffer(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vmw_dma_buffer, base);
+}
+
+static inline struct vmw_user_dma_buffer *
+vmw_user_dma_buffer(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
+}
+
+struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
+{
+ kref_get(&res->kref);
+ return res;
+}
+
+static void vmw_resource_release(struct kref *kref)
+{
+ struct vmw_resource *res =
+ container_of(kref, struct vmw_resource, kref);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ idr_remove(res->idr, res->id);
+ write_unlock(&dev_priv->resource_lock);
+
+ if (likely(res->hw_destroy != NULL))
+ res->hw_destroy(res);
+
+ if (res->res_free != NULL)
+ res->res_free(res);
+ else
+ kfree(res);
+
+ write_lock(&dev_priv->resource_lock);
+}
+
+void vmw_resource_unreference(struct vmw_resource **p_res)
+{
+ struct vmw_resource *res = *p_res;
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ *p_res = NULL;
+ write_lock(&dev_priv->resource_lock);
+ kref_put(&res->kref, vmw_resource_release);
+ write_unlock(&dev_priv->resource_lock);
+}
+
+static int vmw_resource_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ struct idr *idr,
+ enum ttm_object_type obj_type,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ kref_init(&res->kref);
+ res->hw_destroy = NULL;
+ res->res_free = res_free;
+ res->res_type = obj_type;
+ res->idr = idr;
+ res->avail = false;
+ res->dev_priv = dev_priv;
+
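+ /* Allocate an id for the resource, retrying whenever the idr
+ * needs to be replenished.
+ */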
+ do {
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ write_lock(&dev_priv->resource_lock);
+ ret = idr_get_new_above(idr, res, 1, &res->id);
+ write_unlock(&dev_priv->resource_lock);
+
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_activate
+ *
+ * @res: Pointer to the newly created resource
+ * @hw_destroy: Destroy function. NULL if none.
+ *
+ * Activate a resource after the hardware has been made aware of it.
+ * Set the destroy function to @hw_destroy. Typically this frees the
+ * resource and destroys the hardware resources associated with it.
+ * Activate basically means that the function vmw_resource_lookup will
+ * find it.
+ */
+
+static void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *))
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ write_lock(&dev_priv->resource_lock);
+ res->avail = true;
+ res->hw_destroy = hw_destroy;
+ write_unlock(&dev_priv->resource_lock);
+}
+
+struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+ struct idr *idr, int id)
+{
+ struct vmw_resource *res;
+
+ read_lock(&dev_priv->resource_lock);
+ res = idr_find(idr, id);
+ if (res && res->avail)
+ kref_get(&res->kref);
+ else
+ res = NULL;
+ read_unlock(&dev_priv->resource_lock);
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ return res;
+}
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyContext body;
+ } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineContext body;
+ } *cmd;
+
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
+ VMW_RES_CONTEXT, res_free);
+
+ if (unlikely(ret != 0)) {
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+ struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ ret = vmw_context_init(dev_priv, res, NULL);
+ return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+
+ kfree(ctx);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_context *ctx =
+ container_of(base, struct vmw_user_context, base);
+ struct vmw_resource *res = &ctx->res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_resource *res;
+ struct vmw_user_context *ctx;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = 0;
+
+ res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
+ if (unlikely(res == NULL))
+ return -EINVAL;
+
+ if (res->res_free != &vmw_user_context_free) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx = container_of(res, struct vmw_user_context, res);
+ if (ctx->base.tfile != tfile && !ctx->base.shareable) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
+out:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if (unlikely(ctx == NULL))
+ return -ENOMEM;
+
+ res = &ctx->res;
+ ctx->base.shareable = false;
+ ctx->base.tfile = NULL;
+
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = vmw_resource_reference(&ctx->res);
+ ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+ &vmw_user_context_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->cid = res->id;
+out_err:
+ vmw_resource_unreference(&res);
+ return ret;
+
+}
+
+int vmw_context_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ int id)
+{
+ struct vmw_resource *res;
+ int ret = 0;
+
+ read_lock(&dev_priv->resource_lock);
+ res = idr_find(&dev_priv->context_idr, id);
+ if (res && res->avail) {
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+ if (ctx->base.tfile != tfile && !ctx->base.shareable)
+ ret = -EPERM;
+ } else
+ ret = -EINVAL;
+ read_unlock(&dev_priv->resource_lock);
+
+ return ret;
+}
+
+
+/**
+ * Surface management.
+ */
+
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+ } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.sid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+}
+
+void vmw_surface_res_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ kfree(srf);
+}
+
+int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+ } *cmd;
+ SVGA3dSize *cmd_size;
+ struct vmw_resource *res = &srf->res;
+ struct drm_vmw_size *src_size;
+ size_t submit_size;
+ uint32_t cmd_len;
+ int i;
+
+ BUG_ON(res_free == NULL);
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
+ VMW_RES_SURFACE, res_free);
+
+ if (unlikely(ret != 0)) {
+ res_free(res);
+ return ret;
+ }
+
+ submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed for create surface.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
+ cmd->header.size = cpu_to_le32(cmd_len);
+ cmd->body.sid = cpu_to_le32(res->id);
+ cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ cmd->body.face[i].numMipLevels =
+ cpu_to_le32(srf->mip_levels[i]);
+ }
+
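+ /* The per-mip-level SVGA3dSize array follows directly after the
+ * fixed-size define-surface command body.
+ */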
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
+
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = cpu_to_le32(src_size->width);
+ cmd_size->height = cpu_to_le32(src_size->height);
+ cmd_size->depth = cpu_to_le32(src_size->depth);
+ }
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ vmw_resource_activate(res, vmw_hw_surface_destroy);
+ return 0;
+}
+
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+ struct vmw_user_surface *user_srf =
+ container_of(srf, struct vmw_user_surface, srf);
+
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ kfree(user_srf);
+}
+
+int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_surface **out)
+{
+ struct vmw_resource *res;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL))
+ return -EINVAL;
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ read_lock(&dev_priv->resource_lock);
+
+ if (!res->avail || res->res_free != &vmw_user_surface_free) {
+ read_unlock(&dev_priv->resource_lock);
+ goto out_bad_resource;
+ }
+
+ kref_get(&res->kref);
+ read_unlock(&dev_priv->resource_lock);
+
+ *out = srf;
+ ret = 0;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
+
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_surface *user_srf =
+ container_of(base, struct vmw_user_surface, base);
+ struct vmw_resource *res = &user_srf->srf.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf =
+ kmalloc(sizeof(*user_srf), GFP_KERNEL);
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_surface_create_arg *arg =
+ (union drm_vmw_surface_create_arg *)data;
+ struct drm_vmw_surface_create_req *req = &arg->req;
+ struct drm_vmw_surface_arg *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct drm_vmw_size __user *user_sizes;
+ int ret;
+ int i;
+
+ if (unlikely(user_srf == NULL))
+ return -ENOMEM;
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->flags;
+ srf->format = req->format;
+ srf->scanout = req->scanout;
+ memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+ srf->num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ srf->num_sizes += srf->mip_levels[i];
+
+ if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS) {
+ ret = -EINVAL;
+ goto out_err0;
+ }
+
+ srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+ if (unlikely(srf->sizes == NULL)) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr;
+
+ ret = copy_from_user(srf->sizes, user_sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ goto out_err1;
+ }
+
+ if (srf->scanout &&
+ srf->num_sizes == 1 &&
+ srf->sizes[0].width == 64 &&
+ srf->sizes[0].height == 64 &&
+ srf->format == SVGA3D_A8R8G8B8) {
+
+ srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+ /* clear the image */
+ if (srf->snooper.image) {
+ memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+ } else {
+ DRM_ERROR("Failed to allocate cursor_image\n");
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+ } else {
+ srf->snooper.image = NULL;
+ }
+ srf->snooper.crtc = NULL;
+
+ user_srf->base.shareable = false;
+ user_srf->base.tfile = NULL;
+
+ /**
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_base_object_init(tfile, &user_srf->base,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ return ret;
+ }
+
+ rep->sid = user_srf->base.hash.key;
+ if (rep->sid == SVGA3D_INVALID_ID)
+ DRM_ERROR("Created bad Surface ID.\n");
+
+ vmw_resource_unreference(&res);
+ return 0;
+out_err1:
+ kfree(srf->sizes);
+out_err0:
+ kfree(user_srf);
+ return ret;
+}
+
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_surface_reference_arg *arg =
+ (union drm_vmw_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_surface_create_req *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct drm_vmw_size __user *user_sizes;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+
+ ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a surface.\n");
+ goto out_no_reference;
+ }
+
+ rep->flags = srf->flags;
+ rep->format = srf->format;
+ memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ rep->size_addr;
+
+ if (user_sizes)
+ ret = copy_to_user(user_sizes, srf->sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0))
+ DRM_ERROR("copy_to_user failed %p %u\n",
+ user_sizes, srf->num_sizes);
+out_bad_resource:
+out_no_reference:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
+
+int vmw_surface_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, int *id)
+{
+ struct ttm_base_object *base;
+ struct vmw_user_surface *user_srf;
+
+ int ret = -EPERM;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL))
+ return -EINVAL;
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_surface;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ *id = user_srf->srf.res.id;
+ ret = 0;
+
+out_bad_surface:
+ /**
+ * FIXME: May deadlock here when called from the
+ * command parsing code.
+ */
+
+ ttm_base_object_unref(&base);
+ return ret;
+}
+
+/**
+ * Buffer management.
+ */
+
+static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
+ unsigned long num_pages)
+{
+ static size_t bo_user_size = ~0;
+
+ size_t page_array_size =
+ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
+
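+ /* Compute the fixed per-buffer-object accounting overhead once and cache it. */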
+ if (unlikely(bo_user_size == ~0)) {
+ bo_user_size = glob->ttm_bo_extra_size +
+ ttm_round_pot(sizeof(struct vmw_dma_buffer));
+ }
+
+ return bo_user_size + page_array_size;
+}
+
+void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+ struct vmw_private *dev_priv =
+ container_of(bo->bdev, struct vmw_private, bdev);
+
+ if (vmw_bo->gmr_bound) {
+ vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
+ spin_lock(&glob->lru_lock);
+ ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
+ spin_unlock(&glob->lru_lock);
+ vmw_bo->gmr_bound = false;
+ }
+}
+
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+
+ vmw_dmabuf_gmr_unbind(bo);
+ ttm_mem_global_free(glob->mem_glob, bo->acc_size);
+ kfree(vmw_bo);
+}
+
+int vmw_dmabuf_init(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free) (struct ttm_buffer_object *bo))
+{
+ struct ttm_bo_device *bdev = &dev_priv->bdev;
+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ size_t acc_size;
+ int ret;
+
+ BUG_ON(!bo_free);
+
+ acc_size =
+ vmw_dmabuf_acc_size(bdev->glob,
+ (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ if (unlikely(ret != 0)) {
+ /* We must free the bo here ourselves, since on failure
+ * ttm_bo_init() below would likewise free it. */
+ bo_free(&vmw_bo->base);
+ return ret;
+ }
+
+ memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+ INIT_LIST_HEAD(&vmw_bo->gmr_lru);
+ INIT_LIST_HEAD(&vmw_bo->validate_list);
+ vmw_bo->gmr_id = 0;
+ vmw_bo->gmr_bound = false;
+
+ ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+ ttm_bo_type_device, placement,
+ 0, 0, interruptible,
+ NULL, acc_size, bo_free);
+ return ret;
+}
+
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+
+ vmw_dmabuf_gmr_unbind(bo);
+ ttm_mem_global_free(glob->mem_glob, bo->acc_size);
+ kfree(vmw_user_bo);
+}
+
+static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_base_object *base = *p_base;
+ struct ttm_buffer_object *bo;
+
+ *p_base = NULL;
+
+ if (unlikely(base == NULL))
+ return;
+
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ bo = &vmw_user_bo->dma.base;
+ ttm_bo_unref(&bo);
+}
+
+int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_alloc_dmabuf_arg *arg =
+ (union drm_vmw_alloc_dmabuf_arg *)data;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+ struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_buffer_object *tmp;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+ vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
+ if (unlikely(vmw_user_bo == NULL))
+ return -ENOMEM;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0)) {
+ kfree(vmw_user_bo);
+ return ret;
+ }
+
+ ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
+ &vmw_vram_sys_placement, true,
+ &vmw_user_dmabuf_destroy);
+ if (unlikely(ret != 0)) {
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+ }
+
+ tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
+ ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
+ &vmw_user_bo->base,
+ false,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unref(&tmp);
+ } else {
+ rep->handle = vmw_user_bo->base.hash.key;
+ rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
+ rep->cur_gmr_id = vmw_user_bo->base.hash.key;
+ rep->cur_gmr_offset = 0;
+ }
+ ttm_bo_unref(&tmp);
+
+ ttm_read_unlock(&vmaster->lock);
+
+ return 0;
+}
+
+int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_unref_dmabuf_arg *arg =
+ (struct drm_vmw_unref_dmabuf_arg *)data;
+
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ arg->handle,
+ TTM_REF_USAGE);
+}
+
+uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
+ uint32_t cur_validate_node)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+
+ if (likely(vmw_bo->on_validate_list))
+ return vmw_bo->cur_validate_node;
+
+ vmw_bo->cur_validate_node = cur_validate_node;
+ vmw_bo->on_validate_list = true;
+
+ return cur_validate_node;
+}
+
+void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+
+ vmw_bo->on_validate_list = false;
+}
+
+uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo;
+
+ if (bo->mem.mem_type == TTM_PL_VRAM)
+ return SVGA_GMR_FRAMEBUFFER;
+
+ vmw_bo = vmw_dma_buffer(bo);
+
+ return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
+}
+
+void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ vmw_bo->gmr_bound = true;
+ vmw_bo->gmr_id = id;
+}
+
+int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_dma_buffer **out)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL)) {
+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -ESRCH;
+ }
+
+ if (unlikely(base->object_type != ttm_buffer_type)) {
+ ttm_base_object_unref(&base);
+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -EINVAL;
+ }
+
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ (void)ttm_bo_reference(&vmw_user_bo->dma.base);
+ ttm_base_object_unref(&base);
+ *out = &vmw_user_bo->dma;
+
+ return 0;
+}
+
+/**
+ * TODO: Implement a gmr id eviction mechanism. Currently we just fail
+ * when we're out of ids, causing GMR space to be allocated
+ * out of VRAM.
+ */
+
+int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
+{
+ struct ttm_bo_global *glob = dev_priv->bdev.glob;
+ int id;
+ int ret;
+
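+ /* Allocate a GMR id, retrying whenever the ida needs to be replenished. */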
+ do {
+ if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ spin_lock(&glob->lru_lock);
+ ret = ida_get_new(&dev_priv->gmr_ida, &id);
+ spin_unlock(&glob->lru_lock);
+ } while (ret == -EAGAIN);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(id >= dev_priv->max_gmr_ids)) {
+ spin_lock(&glob->lru_lock);
+ ida_remove(&dev_priv->gmr_ida, id);
+ spin_unlock(&glob->lru_lock);
+ return -EBUSY;
+ }
+
+ *p_id = (uint32_t) id;
+ return 0;
+}
+
+/*
+ * Stream management
+ */
+
+static void vmw_stream_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_stream *stream;
+ int ret;
+
+ DRM_INFO("%s: unref\n", __func__);
+ stream = container_of(res, struct vmw_stream, res);
+
+ ret = vmw_overlay_unref(dev_priv, stream->stream_id);
+ WARN_ON(ret != 0);
+}
+
+static int vmw_stream_init(struct vmw_private *dev_priv,
+ struct vmw_stream *stream,
+ void (*res_free) (struct vmw_resource *res))
+{
+ struct vmw_resource *res = &stream->res;
+ int ret;
+
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
+ VMW_RES_STREAM, res_free);
+
+ if (unlikely(ret != 0)) {
+ if (res_free == NULL)
+ kfree(stream);
+ else
+ res_free(&stream->res);
+ return ret;
+ }
+
+ ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
+ if (ret) {
+ vmw_resource_unreference(&res);
+ return ret;
+ }
+
+ DRM_INFO("%s: claimed\n", __func__);
+
+ vmw_resource_activate(&stream->res, vmw_stream_destroy);
+ return 0;
+}
+
+/**
+ * User-space stream management:
+ */
+
+static void vmw_user_stream_free(struct vmw_resource *res)
+{
+ struct vmw_user_stream *stream =
+ container_of(res, struct vmw_user_stream, stream.res);
+
+ kfree(stream);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_stream *stream =
+ container_of(base, struct vmw_user_stream, base);
+ struct vmw_resource *res = &stream->stream.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_resource *res;
+ struct vmw_user_stream *stream;
+ struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = 0;
+
+ res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+ if (unlikely(res == NULL))
+ return -EINVAL;
+
+ if (res->res_free != &vmw_user_stream_free) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stream = container_of(res, struct vmw_user_stream, stream.res);
+ if (stream->base.tfile != tfile) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
+out:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if (unlikely(stream == NULL))
+ return -ENOMEM;
+
+ res = &stream->stream.res;
+ stream->base.shareable = false;
+ stream->base.tfile = NULL;
+
+ ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
+ &vmw_user_stream_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->stream_id = res->id;
+out_err:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t *inout_id, struct vmw_resource **out)
+{
+ struct vmw_user_stream *stream;
+ struct vmw_resource *res;
+ int ret;
+
+ res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+ if (unlikely(res == NULL))
+ return -EINVAL;
+
+ if (res->res_free != &vmw_user_stream_free) {
+ ret = -EINVAL;
+ goto err_ref;
+ }
+
+ stream = container_of(res, struct vmw_user_stream, stream.res);
+ if (stream->base.tfile != tfile) {
+ ret = -EPERM;
+ goto err_ref;
+ }
+
+ *inout_id = stream->stream.stream_id;
+ *out = res;
+ return 0;
+err_ref:
+ vmw_resource_unreference(&res);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
new file mode 100644
index 0000000..e3df4ad
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -0,0 +1,99 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vmw_private *dev_priv;
+
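+ /* Offsets below the TTM aperture are either FIFO mappings or
+ * legacy DRM mappings; everything else is handled by TTM.
+ */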
+ if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
+ if (vmw_fifo_mmap(filp, vma) == 0)
+ return 0;
+ return drm_mmap(filp, vma);
+ }
+
+ file_priv = (struct drm_file *)filp->private_data;
+ dev_priv = vmw_priv(file_priv->minor->dev);
+ return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+}
+
+static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
+{
+ DRM_INFO("global init.\n");
+ return ttm_mem_global_init(ref->object);
+}
+
+static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+int vmw_ttm_global_init(struct vmw_private *dev_priv)
+{
+ struct ttm_global_reference *global_ref;
+ int ret;
+
+ global_ref = &dev_priv->mem_global_ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &vmw_ttm_mem_global_init;
+ global_ref->release = &vmw_ttm_mem_global_release;
+
+ ret = ttm_global_item_ref(global_ref);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM memory accounting.\n");
+ return ret;
+ }
+
+ dev_priv->bo_global_ref.mem_glob =
+ dev_priv->mem_global_ref.object;
+ global_ref = &dev_priv->bo_global_ref.ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ ret = ttm_global_item_ref(global_ref);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM buffer objects.\n");
+ goto out_no_bo;
+ }
+
+ return 0;
+out_no_bo:
+ ttm_global_item_unref(&dev_priv->mem_global_ref);
+ return ret;
+}
+
+void vmw_ttm_global_release(struct vmw_private *dev_priv)
+{
+ ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
+ ttm_global_item_unref(&dev_priv->mem_global_ref);
+}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1ac0c93..2f6cf69 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
remaining -= 7;
pr_devel("client 0x%p called 'target'\n", priv);
/* if target is default */
- if (!strncmp(buf, "default", 7))
+ if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
else {
if (!vga_pci_str_to_vars(curr_pos, remaining,
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 4b96e7a..5b4d66d 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -431,6 +431,13 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ APPLE_ISO_KEYBOARD },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 80792d3..eabe5f8 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1285,6 +1285,9 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
@@ -1553,6 +1556,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3839340e..010368e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -88,6 +88,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
@@ -166,6 +169,9 @@
#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
+#define USB_VENDOR_ID_ETT 0x0664
+#define USB_DEVICE_ID_TC5UH 0x0309
+
#define USB_VENDOR_ID_EZKEY 0x0518
#define USB_DEVICE_ID_BTC_8193 0x0002
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index 27ae750..bf31592 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -1,8 +1,6 @@
#ifndef __HID_LG_H
#define __HID_LG_H
-#include <linux/autoconf.h>
-
#ifdef CONFIG_LOGITECH_FF
int lgff_init(struct hid_device *hdev);
#else
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 5b222ee..510dd13 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -39,7 +39,17 @@
*
* 3. 135 byte report descriptor
* Report #4 has an array field with logical range 0..17 instead of 1..14.
+ *
+ * 4. 171 byte report descriptor
+ * Report #3 has an array field with logical range 0..1 instead of 1..3.
*/
+static inline void samsung_dev_trace(struct hid_device *hdev,
+ unsigned int rsize)
+{
+ dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
+ "descriptor\n", rsize);
+}
+
static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int rsize)
{
@@ -47,8 +57,7 @@ static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
rdesc[182] == 0x40) {
- dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
- "descriptor\n", 184);
+ samsung_dev_trace(hdev, 184);
rdesc[176] = 0xff;
rdesc[178] = 0x08;
rdesc[180] = 0x06;
@@ -56,17 +65,21 @@ static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
} else
if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
rdesc[194] == 0x25 && rdesc[195] == 0x12) {
- dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
- "descriptor\n", 203);
+ samsung_dev_trace(hdev, 203);
rdesc[193] = 0x1;
rdesc[195] = 0xf;
} else
if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
rdesc[126] == 0x25 && rdesc[127] == 0x11) {
- dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
- "descriptor\n", 135);
+ samsung_dev_trace(hdev, 135);
rdesc[125] = 0x1;
rdesc[127] = 0xe;
+ } else
+ if (rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 &&
+ rdesc[162] == 0x25 && rdesc[163] == 0x01) {
+ samsung_dev_trace(hdev, 171);
+ rdesc[161] = 0x1;
+ rdesc[163] = 0x3;
}
}
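
For context on what those byte patches mean: in the usual HID short-item encoding, 0x15 introduces a one-byte Logical Minimum and 0x25 a one-byte Logical Maximum, so the new 171-byte branch rewrites a bogus 0..1 range into 1..3. A standalone sketch of just that branch, under that encoding assumption:

#include <stdio.h>

static void fixup_range(unsigned char *rdesc)
{
	if (rdesc[160] == 0x15 && rdesc[161] == 0x00 &&
	    rdesc[162] == 0x25 && rdesc[163] == 0x01) {
		rdesc[161] = 0x01;	/* Logical Minimum = 1 */
		rdesc[163] = 0x03;	/* Logical Maximum = 3 */
	}
}

int main(void)
{
	unsigned char rdesc[171] = { 0 };

	rdesc[160] = 0x15;	/* Logical Minimum item, 1 data byte */
	rdesc[161] = 0x00;
	rdesc[162] = 0x25;	/* Logical Maximum item, 1 data byte */
	rdesc[163] = 0x01;

	fixup_range(rdesc);
	printf("logical range now %d..%d\n", rdesc[161], rdesc[163]);
	return 0;
}
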
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 7475421..12dcda5 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -142,6 +142,7 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
wdata->butstate = rw;
input_report_key(input, BTN_0, rw & 0x02);
input_report_key(input, BTN_1, rw & 0x01);
+ input_report_key(input, BTN_TOOL_FINGER, 0xf0);
input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
input_sync(input);
}
@@ -196,6 +197,9 @@ static int wacom_probe(struct hid_device *hdev,
/* Pad */
input->evbit[0] |= BIT(EV_MSC);
input->mscbit[0] |= BIT(MSC_SERIAL);
+ set_bit(BTN_0, input->keybit);
+ set_bit(BTN_1, input->keybit);
+ set_bit(BTN_TOOL_FINGER, input->keybit);
/* Distance, rubber and mouse */
input->absbit[0] |= BIT(ABS_DISTANCE);
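
The three set_bit() calls added to the probe path matter because the input core ignores key events whose codes were never declared as capabilities; BTN_0, BTN_1 and BTN_TOOL_FINGER reports would otherwise be dropped silently. A simplified, self-contained illustration (the names mirror the kernel API, but the plumbing is faked):

#include <stdbool.h>
#include <stdio.h>

enum { BTN_0 = 0x100, BTN_1 = 0x101, BTN_TOOL_FINGER = 0x145, KEY_MAX = 0x2ff };

static bool keybit[KEY_MAX + 1];	/* stands in for input_dev->keybit */

static void report_key(int code, int value)
{
	if (!keybit[code]) {
		printf("event 0x%x dropped (capability not declared)\n", code);
		return;
	}
	printf("event 0x%x = %d delivered\n", code, value);
}

int main(void)
{
	report_key(BTN_0, 1);		/* dropped */
	keybit[BTN_0] = true;		/* what the probe hook now does */
	report_key(BTN_0, 1);		/* delivered */
	return 0;
}
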
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 95ccbe3..68cf877 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -228,6 +228,18 @@ config SENSORS_K8TEMP
This driver can also be built as a module. If so, the module
will be called k8temp.
+config SENSORS_K10TEMP
+ tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+ depends on X86 && PCI
+ help
+ If you say yes here you get support for the temperature
+ sensor(s) inside your CPU. Supported are later revisions of
+ the AMD Family 10h and all revisions of the AMD Family 11h
+ microarchitectures.
+
+ This driver can also be built as a module. If so, the module
+ will be called k10temp.
+
config SENSORS_AMS
tristate "Apple Motion Sensor driver"
depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL
@@ -380,7 +392,7 @@ config SENSORS_GL520SM
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
- depends on X86 && EXPERIMENTAL
+ depends on X86 && PCI && EXPERIMENTAL
help
If you say yes here you get support for the temperature
sensor inside your CPU. Most of the family 6 CPUs
@@ -780,6 +792,16 @@ config SENSORS_ADS7828
This driver can also be built as a module. If so, the module
will be called ads7828.
+config SENSORS_AMC6821
+ tristate "Texas Instruments AMC6821"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Texas Instruments
+ AMC6821 hardware monitoring chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called amc6821.
+
config SENSORS_THMC50
tristate "Texas Instruments THMC50 / Analog Devices ADM1022"
depends on I2C && EXPERIMENTAL
@@ -810,6 +832,14 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_VIA_CPUTEMP
+ tristate "VIA CPU temperature sensor"
+ depends on X86
+ help
+ If you say yes here you get support for the temperature
+ sensor inside your CPU. Supported are all known variants of
+ the VIA C7 and Nano.
+
config SENSORS_VIA686A
tristate "VIA686A"
depends on PCI
@@ -998,6 +1028,23 @@ config SENSORS_LIS3_SPI
will be called lis3lv02d and a specific module for the SPI transport
is called lis3lv02d_spi.
+config SENSORS_LIS3_I2C
+ tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
+ depends on I2C && INPUT
+ select INPUT_POLLDEV
+ default n
+ help
+ This driver provides support for the LIS3LV02Dx accelerometer connected
+ via I2C. The accelerometer data is readable via
+ /sys/devices/platform/lis3lv02d.
+
+ This driver also provides an absolute input class device, allowing
+ the device to act as a pinball machine-esque joystick.
+
+ This driver can also be built as modules. If so, the core module
+ will be called lis3lv02d and a specific module for the I2C transport
+ is called lis3lv02d_i2c.
+
config SENSORS_APPLESMC
tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
depends on INPUT && X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 33c2ee1..4bc215c 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -53,8 +53,10 @@ obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
+obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o
obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d.o lis3lv02d_spi.o
+obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d.o lis3lv02d_i2c.o
obj-$(CONFIG_SENSORS_LM63) += lm63.o
obj-$(CONFIG_SENSORS_LM70) += lm70.o
obj-$(CONFIG_SENSORS_LM73) += lm73.o
@@ -84,9 +86,11 @@ obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
+obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
obj-$(CONFIG_SENSORS_VT8231) += vt8231.o
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index a1a7ef1..b8156b4 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -94,7 +94,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
#define ADT7462_PIN24_SHIFT 6
#define ADT7462_PIN26_VOLT_INPUT 0x08
#define ADT7462_PIN25_VOLT_INPUT 0x20
-#define ADT7462_PIN28_SHIFT 6 /* cfg3 */
+#define ADT7462_PIN28_SHIFT 4 /* cfg3 */
#define ADT7462_PIN28_VOLT 0x5
#define ADT7462_REG_ALARM1 0xB8
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
*
* Some, but not all, of these voltages have low/high limits.
*/
-#define ADT7462_VOLT_COUNT 12
+#define ADT7462_VOLT_COUNT 13
#define ADT7462_VENDOR 0x41
#define ADT7462_DEVICE 0x62
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
new file mode 100644
index 0000000..fa9708c
--- /dev/null
+++ b/drivers/hwmon/amc6821.c
@@ -0,0 +1,1115 @@
+/*
+ amc6821.c - Part of lm_sensors, Linux kernel modules for hardware
+ monitoring
+ Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si>
+
+ Based on max6650.c:
+ Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+
+#include <linux/kernel.h> /* Needed for KERN_INFO */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+
+/*
+ * Addresses to scan.
+ */
+
+static const unsigned short normal_i2c[] = {0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e,
+ 0x4c, 0x4d, 0x4e, I2C_CLIENT_END};
+
+
+
+/*
+ * Insmod parameters
+ */
+
+static int pwminv = 0; /*Inverted PWM output. */
+module_param(pwminv, int, S_IRUGO);
+
+static int init = 1; /*Power-on initialization.*/
+module_param(init, int, S_IRUGO);
+
+
+enum chips { amc6821 };
+
+#define AMC6821_REG_DEV_ID 0x3D
+#define AMC6821_REG_COMP_ID 0x3E
+#define AMC6821_REG_CONF1 0x00
+#define AMC6821_REG_CONF2 0x01
+#define AMC6821_REG_CONF3 0x3F
+#define AMC6821_REG_CONF4 0x04
+#define AMC6821_REG_STAT1 0x02
+#define AMC6821_REG_STAT2 0x03
+#define AMC6821_REG_TDATA_LOW 0x08
+#define AMC6821_REG_TDATA_HI 0x09
+#define AMC6821_REG_LTEMP_HI 0x0A
+#define AMC6821_REG_RTEMP_HI 0x0B
+#define AMC6821_REG_LTEMP_LIMIT_MIN 0x15
+#define AMC6821_REG_LTEMP_LIMIT_MAX 0x14
+#define AMC6821_REG_RTEMP_LIMIT_MIN 0x19
+#define AMC6821_REG_RTEMP_LIMIT_MAX 0x18
+#define AMC6821_REG_LTEMP_CRIT 0x1B
+#define AMC6821_REG_RTEMP_CRIT 0x1D
+#define AMC6821_REG_PSV_TEMP 0x1C
+#define AMC6821_REG_DCY 0x22
+#define AMC6821_REG_LTEMP_FAN_CTRL 0x24
+#define AMC6821_REG_RTEMP_FAN_CTRL 0x25
+#define AMC6821_REG_DCY_LOW_TEMP 0x21
+
+#define AMC6821_REG_TACH_LLIMITL 0x10
+#define AMC6821_REG_TACH_LLIMITH 0x11
+#define AMC6821_REG_TACH_HLIMITL 0x12
+#define AMC6821_REG_TACH_HLIMITH 0x13
+
+#define AMC6821_CONF1_START 0x01
+#define AMC6821_CONF1_FAN_INT_EN 0x02
+#define AMC6821_CONF1_FANIE 0x04
+#define AMC6821_CONF1_PWMINV 0x08
+#define AMC6821_CONF1_FAN_FAULT_EN 0x10
+#define AMC6821_CONF1_FDRC0 0x20
+#define AMC6821_CONF1_FDRC1 0x40
+#define AMC6821_CONF1_THERMOVIE 0x80
+
+#define AMC6821_CONF2_PWM_EN 0x01
+#define AMC6821_CONF2_TACH_MODE 0x02
+#define AMC6821_CONF2_TACH_EN 0x04
+#define AMC6821_CONF2_RTFIE 0x08
+#define AMC6821_CONF2_LTOIE 0x10
+#define AMC6821_CONF2_RTOIE 0x20
+#define AMC6821_CONF2_PSVIE 0x40
+#define AMC6821_CONF2_RST 0x80
+
+#define AMC6821_CONF3_THERM_FAN_EN 0x80
+#define AMC6821_CONF3_REV_MASK 0x0F
+
+#define AMC6821_CONF4_OVREN 0x10
+#define AMC6821_CONF4_TACH_FAST 0x20
+#define AMC6821_CONF4_PSPR 0x40
+#define AMC6821_CONF4_MODE 0x80
+
+#define AMC6821_STAT1_RPM_ALARM 0x01
+#define AMC6821_STAT1_FANS 0x02
+#define AMC6821_STAT1_RTH 0x04
+#define AMC6821_STAT1_RTL 0x08
+#define AMC6821_STAT1_R_THERM 0x10
+#define AMC6821_STAT1_RTF 0x20
+#define AMC6821_STAT1_LTH 0x40
+#define AMC6821_STAT1_LTL 0x80
+
+#define AMC6821_STAT2_RTC 0x08
+#define AMC6821_STAT2_LTC 0x10
+#define AMC6821_STAT2_LPSV 0x20
+#define AMC6821_STAT2_L_THERM 0x40
+#define AMC6821_STAT2_THERM_IN 0x80
+
+enum {IDX_TEMP1_INPUT = 0, IDX_TEMP1_MIN, IDX_TEMP1_MAX,
+ IDX_TEMP1_CRIT, IDX_TEMP2_INPUT, IDX_TEMP2_MIN,
+ IDX_TEMP2_MAX, IDX_TEMP2_CRIT,
+ TEMP_IDX_LEN, };
+
+static const u8 temp_reg[] = {AMC6821_REG_LTEMP_HI,
+ AMC6821_REG_LTEMP_LIMIT_MIN,
+ AMC6821_REG_LTEMP_LIMIT_MAX,
+ AMC6821_REG_LTEMP_CRIT,
+ AMC6821_REG_RTEMP_HI,
+ AMC6821_REG_RTEMP_LIMIT_MIN,
+ AMC6821_REG_RTEMP_LIMIT_MAX,
+ AMC6821_REG_RTEMP_CRIT, };
+
+enum {IDX_FAN1_INPUT = 0, IDX_FAN1_MIN, IDX_FAN1_MAX,
+ FAN1_IDX_LEN, };
+
+static const u8 fan_reg_low[] = {AMC6821_REG_TDATA_LOW,
+ AMC6821_REG_TACH_LLIMITL,
+ AMC6821_REG_TACH_HLIMITL, };
+
+
+static const u8 fan_reg_hi[] = {AMC6821_REG_TDATA_HI,
+ AMC6821_REG_TACH_LLIMITH,
+ AMC6821_REG_TACH_HLIMITH, };
+
+static int amc6821_probe(
+ struct i2c_client *client,
+ const struct i2c_device_id *id);
+static int amc6821_detect(
+ struct i2c_client *client,
+ struct i2c_board_info *info);
+static int amc6821_init_client(struct i2c_client *client);
+static int amc6821_remove(struct i2c_client *client);
+static struct amc6821_data *amc6821_update_device(struct device *dev);
+
+/*
+ * Driver data (common to all clients)
+ */
+
+static const struct i2c_device_id amc6821_id[] = {
+ { "amc6821", amc6821 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, amc6821_id);
+
+static struct i2c_driver amc6821_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "amc6821",
+ },
+ .probe = amc6821_probe,
+ .remove = amc6821_remove,
+ .id_table = amc6821_id,
+ .detect = amc6821_detect,
+ .address_list = normal_i2c,
+};
+
+
+/*
+ * Client data (each client gets its own)
+ */
+
+struct amc6821_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ char valid; /* zero until following fields are valid */
+ unsigned long last_updated; /* in jiffies */
+
+ /* register values */
+ int temp[TEMP_IDX_LEN];
+
+ u16 fan[FAN1_IDX_LEN];
+ u8 fan1_div;
+
+ u8 pwm1;
+ u8 temp1_auto_point_temp[3];
+ u8 temp2_auto_point_temp[3];
+ u8 pwm1_auto_point_pwm[3];
+ u8 pwm1_enable;
+ u8 pwm1_auto_channels_temp;
+
+ u8 stat1;
+ u8 stat2;
+};
+
+
+static ssize_t get_temp(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ int ix = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->temp[ix] * 1000);
+}
+
+
+
+static ssize_t set_temp(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ int ix = to_sensor_dev_attr(attr)->index;
+ long val;
+
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+ val = SENSORS_LIMIT(val / 1000, -128, 127);
+
+ mutex_lock(&data->update_lock);
+ data->temp[ix] = val;
+ if (i2c_smbus_write_byte_data(client, temp_reg[ix], data->temp[ix])) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ count = -EIO;
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+
+
+static ssize_t get_temp_alarm(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ int ix = to_sensor_dev_attr(devattr)->index;
+ u8 flag;
+
+ switch (ix) {
+ case IDX_TEMP1_MIN:
+ flag = data->stat1 & AMC6821_STAT1_LTL;
+ break;
+ case IDX_TEMP1_MAX:
+ flag = data->stat1 & AMC6821_STAT1_LTH;
+ break;
+ case IDX_TEMP1_CRIT:
+ flag = data->stat2 & AMC6821_STAT2_LTC;
+ break;
+ case IDX_TEMP2_MIN:
+ flag = data->stat1 & AMC6821_STAT1_RTL;
+ break;
+ case IDX_TEMP2_MAX:
+ flag = data->stat1 & AMC6821_STAT1_RTH;
+ break;
+ case IDX_TEMP2_CRIT:
+ flag = data->stat2 & AMC6821_STAT2_RTC;
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
+ return -EINVAL;
+ }
+ if (flag)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+
+
+
+static ssize_t get_temp2_fault(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ if (data->stat1 & AMC6821_STAT1_RTF)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t get_pwm1(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm1);
+}
+
+static ssize_t set_pwm1(
+ struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ long val;
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ data->pwm1 = SENSORS_LIMIT(val, 0, 255);
+ i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t get_pwm1_enable(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm1_enable);
+}
+
+static ssize_t set_pwm1_enable(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ long val;
+ int config = strict_strtol(buf, 10, &val);
+ if (config)
+ return config;
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return -EIO;
+ }
+
+ switch (val) {
+ case 1:
+ config &= ~AMC6821_CONF1_FDRC0;
+ config &= ~AMC6821_CONF1_FDRC1;
+ break;
+ case 2:
+ config &= ~AMC6821_CONF1_FDRC0;
+ config |= AMC6821_CONF1_FDRC1;
+ break;
+ case 3:
+ config |= AMC6821_CONF1_FDRC0;
+ config |= AMC6821_CONF1_FDRC1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mutex_lock(&data->update_lock);
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ count = -EIO;
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+static ssize_t get_pwm1_auto_channels_temp(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm1_auto_channels_temp);
+}
+
+
+static ssize_t get_temp_auto_point_temp(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int ix = to_sensor_dev_attr_2(devattr)->index;
+ int nr = to_sensor_dev_attr_2(devattr)->nr;
+ struct amc6821_data *data = amc6821_update_device(dev);
+ switch (nr) {
+ case 1:
+ return sprintf(buf, "%d\n",
+ data->temp1_auto_point_temp[ix] * 1000);
+ break;
+ case 2:
+ return sprintf(buf, "%d\n",
+ data->temp2_auto_point_temp[ix] * 1000);
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
+ return -EINVAL;
+ }
+}
+
+
+static ssize_t get_pwm1_auto_point_pwm(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int ix = to_sensor_dev_attr(devattr)->index;
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm1_auto_point_pwm[ix]);
+}
+
+
+static inline ssize_t set_slope_register(struct i2c_client *client,
+ u8 reg,
+ u8 dpwm,
+ u8 *ptemp)
+{
+ int dt;
+ u8 tmp;
+
+ dt = ptemp[2]-ptemp[1];
+ for (tmp = 4; tmp > 0; tmp--) {
+ if (dt * (0x20 >> tmp) >= dpwm)
+ break;
+ }
+ tmp |= (ptemp[1] & 0x7C) << 1;
+ if (i2c_smbus_write_byte_data(client,
+ reg, tmp)) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+
+
+static ssize_t set_temp_auto_point_temp(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = amc6821_update_device(dev);
+ int ix = to_sensor_dev_attr_2(attr)->index;
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+ u8 *ptemp;
+ u8 reg;
+ int dpwm;
+ long val;
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (nr) {
+ case 1:
+ ptemp = data->temp1_auto_point_temp;
+ reg = AMC6821_REG_LTEMP_FAN_CTRL;
+ break;
+ case 2:
+ ptemp = data->temp2_auto_point_temp;
+ reg = AMC6821_REG_RTEMP_FAN_CTRL;
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
+ return -EINVAL;
+ }
+
+ data->valid = 0;
+ mutex_lock(&data->update_lock);
+ switch (ix) {
+ case 0:
+ ptemp[0] = SENSORS_LIMIT(val / 1000, 0,
+ data->temp1_auto_point_temp[1]);
+ ptemp[0] = SENSORS_LIMIT(ptemp[0], 0,
+ data->temp2_auto_point_temp[1]);
+ ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, 63);
+ if (i2c_smbus_write_byte_data(
+ client,
+ AMC6821_REG_PSV_TEMP,
+ ptemp[0])) {
+ dev_err(&client->dev,
+ "Register write error, aborting.\n");
+ count = -EIO;
+ }
+ goto EXIT;
+ break;
+ case 1:
+ ptemp[1] = SENSORS_LIMIT(
+ val / 1000,
+ (ptemp[0] & 0x7C) + 4,
+ 124);
+ ptemp[1] &= 0x7C;
+ ptemp[2] = SENSORS_LIMIT(
+ ptemp[2], ptemp[1] + 1,
+ 255);
+ break;
+ case 2:
+ ptemp[2] = SENSORS_LIMIT(
+ val / 1000,
+ ptemp[1]+1,
+ 255);
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
+ count = -EINVAL;
+ goto EXIT;
+ }
+ dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1];
+ if (set_slope_register(client, reg, dpwm, ptemp))
+ count = -EIO;
+
+EXIT:
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+
+static ssize_t set_pwm1_auto_point_pwm(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ int dpwm;
+ long val;
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ data->pwm1_auto_point_pwm[1] = SENSORS_LIMIT(val, 0, 254);
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP,
+ data->pwm1_auto_point_pwm[1])) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ count = -EIO;
+ goto EXIT;
+ }
+ dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1];
+ if (set_slope_register(client, AMC6821_REG_LTEMP_FAN_CTRL, dpwm,
+ data->temp1_auto_point_temp)) {
+ count = -EIO;
+ goto EXIT;
+ }
+ if (set_slope_register(client, AMC6821_REG_RTEMP_FAN_CTRL, dpwm,
+ data->temp2_auto_point_temp)) {
+ count = -EIO;
+ goto EXIT;
+ }
+
+EXIT:
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t get_fan(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ int ix = to_sensor_dev_attr(devattr)->index;
+ if (0 == data->fan[ix])
+ return sprintf(buf, "0\n");
+ return sprintf(buf, "%d\n", (int)(6000000 / data->fan[ix]));
+}
+
+
+
+static ssize_t get_fan1_fault(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ if (data->stat1 & AMC6821_STAT1_FANS)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+
+
+static ssize_t set_fan(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ long val;
+ int ix = to_sensor_dev_attr(attr)->index;
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+ val = 1 > val ? 0xFFFF : 6000000/val;
+
+ mutex_lock(&data->update_lock);
+ data->fan[ix] = (u16) SENSORS_LIMIT(val, 1, 0xFFFF);
+ if (i2c_smbus_write_byte_data(client, fan_reg_low[ix],
+ data->fan[ix] & 0xFF)) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ count = -EIO;
+ goto EXIT;
+ }
+ if (i2c_smbus_write_byte_data(client,
+ fan_reg_hi[ix], data->fan[ix] >> 8)) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ count = -EIO;
+ }
+EXIT:
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+
+static ssize_t get_fan1_div(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->fan1_div);
+}
+
+static ssize_t set_fan1_div(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ long val;
+ int config = strict_strtol(buf, 10, &val);
+ if (config)
+ return config;
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return -EIO;
+ }
+ mutex_lock(&data->update_lock);
+ switch (val) {
+ case 2:
+ config &= ~AMC6821_CONF4_PSPR;
+ data->fan1_div = 2;
+ break;
+ case 4:
+ config |= AMC6821_CONF4_PSPR;
+ data->fan1_div = 4;
+ break;
+ default:
+ count = -EINVAL;
+ goto EXIT;
+ }
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ count = -EIO;
+ }
+EXIT:
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ get_temp, NULL, IDX_TEMP1_INPUT);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP1_MIN);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP1_MAX);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP1_CRIT);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP1_MIN);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP1_MAX);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP1_CRIT);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
+ get_temp, NULL, IDX_TEMP2_INPUT);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP2_MIN);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP2_MAX);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP2_CRIT);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO,
+ get_temp2_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP2_MIN);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP2_MAX);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP2_CRIT);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, IDX_FAN1_INPUT);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR,
+ get_fan, set_fan, IDX_FAN1_MIN);
+static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO | S_IWUSR,
+ get_fan, set_fan, IDX_FAN1_MAX);
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan1_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
+ get_fan1_div, set_fan1_div, 0);
+
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm1, set_pwm1, 0);
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+ get_pwm1_enable, set_pwm1_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO,
+ get_pwm1_auto_point_pwm, NULL, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
+ get_pwm1_auto_point_pwm, set_pwm1_auto_point_pwm, 1);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO,
+ get_pwm1_auto_point_pwm, NULL, 2);
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO,
+ get_pwm1_auto_channels_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_point1_temp, S_IRUGO,
+ get_temp_auto_point_temp, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_point2_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 1);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_point3_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 2);
+
+static SENSOR_DEVICE_ATTR_2(temp2_auto_point1_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_point2_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_point3_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 2);
+
+
+
+static struct attribute *amc6821_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan1_max.dev_attr.attr,
+ &sensor_dev_attr_fan1_fault.dev_attr.attr,
+ &sensor_dev_attr_fan1_div.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point3_temp.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group amc6821_attr_grp = {
+ .attrs = amc6821_attrs,
+};
+
+
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int amc6821_detect(
+ struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int address = client->addr;
+ int dev_id, comp_id;
+
+ dev_dbg(&adapter->dev, "amc6821_detect called.\n");
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_dbg(&adapter->dev,
+ "amc6821: I2C bus doesn't support byte mode, "
+ "skipping.\n");
+ return -ENODEV;
+ }
+
+ dev_id = i2c_smbus_read_byte_data(client, AMC6821_REG_DEV_ID);
+ comp_id = i2c_smbus_read_byte_data(client, AMC6821_REG_COMP_ID);
+ if (dev_id != 0x21 || comp_id != 0x49) {
+ dev_dbg(&adapter->dev,
+ "amc6821: detection failed at 0x%02x.\n",
+ address);
+ return -ENODEV;
+ }
+
+ /* Bit 7 of the address register is ignored, so we can check the
+ ID registers again */
+ dev_id = i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_DEV_ID);
+ comp_id = i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_COMP_ID);
+ if (dev_id != 0x21 || comp_id != 0x49) {
+ dev_dbg(&adapter->dev,
+ "amc6821: detection failed at 0x%02x.\n",
+ address);
+ return -ENODEV;
+ }
+
+ dev_info(&adapter->dev, "amc6821: chip found at 0x%02x.\n", address);
+ strlcpy(info->type, "amc6821", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int amc6821_probe(
+ struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct amc6821_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct amc6821_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&client->dev, "out of memory.\n");
+ return -ENOMEM;
+ }
+
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * Initialize the amc6821 chip
+ */
+ err = amc6821_init_client(client);
+ if (err)
+ goto err_free;
+
+ err = sysfs_create_group(&client->dev.kobj, &amc6821_attr_grp);
+ if (err)
+ goto err_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (!IS_ERR(data->hwmon_dev))
+ return 0;
+
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&client->dev, "error registering hwmon device.\n");
+ sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp);
+err_free:
+ kfree(data);
+ return err;
+}
+
+static int amc6821_remove(struct i2c_client *client)
+{
+ struct amc6821_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp);
+
+ kfree(data);
+
+ return 0;
+}
+
+
+static int amc6821_init_client(struct i2c_client *client)
+{
+ int config;
+ int err = -EIO;
+
+ if (init) {
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
+
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return err;
+ }
+
+ config |= AMC6821_CONF4_MODE;
+
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4,
+ config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ return err;
+ }
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF3);
+
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return err;
+ }
+
+ dev_info(&client->dev, "Revision %d\n", config & 0x0f);
+
+ config &= ~AMC6821_CONF3_THERM_FAN_EN;
+
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF3,
+ config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ return err;
+ }
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF2);
+
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return err;
+ }
+
+ config &= ~AMC6821_CONF2_RTFIE;
+ config &= ~AMC6821_CONF2_LTOIE;
+ config &= ~AMC6821_CONF2_RTOIE;
+ if (i2c_smbus_write_byte_data(client,
+ AMC6821_REG_CONF2, config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ return err;
+ }
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
+
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return err;
+ }
+
+ config &= ~AMC6821_CONF1_THERMOVIE;
+ config &= ~AMC6821_CONF1_FANIE;
+ config |= AMC6821_CONF1_START;
+ if (pwminv)
+ config |= AMC6821_CONF1_PWMINV;
+ else
+ config &= ~AMC6821_CONF1_PWMINV;
+
+ if (i2c_smbus_write_byte_data(
+ client, AMC6821_REG_CONF1, config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ return err;
+ }
+ }
+ return 0;
+}
+
+
+static struct amc6821_data *amc6821_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ int timeout = HZ;
+ u8 reg;
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + timeout) ||
+ !data->valid) {
+
+ for (i = 0; i < TEMP_IDX_LEN; i++)
+ data->temp[i] = i2c_smbus_read_byte_data(client,
+ temp_reg[i]);
+
+ data->stat1 = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_STAT1);
+ data->stat2 = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_STAT2);
+
+ data->pwm1 = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_DCY);
+ for (i = 0; i < FAN1_IDX_LEN; i++) {
+ data->fan[i] = i2c_smbus_read_byte_data(
+ client,
+ fan_reg_low[i]);
+ data->fan[i] += i2c_smbus_read_byte_data(
+ client,
+ fan_reg_hi[i]) << 8;
+ }
+ data->fan1_div = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_CONF4);
+ data->fan1_div = data->fan1_div & AMC6821_CONF4_PSPR ? 4 : 2;
+
+ data->pwm1_auto_point_pwm[0] = 0;
+ data->pwm1_auto_point_pwm[2] = 255;
+ data->pwm1_auto_point_pwm[1] = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_DCY_LOW_TEMP);
+
+ data->temp1_auto_point_temp[0] =
+ i2c_smbus_read_byte_data(client,
+ AMC6821_REG_PSV_TEMP);
+ data->temp2_auto_point_temp[0] =
+ data->temp1_auto_point_temp[0];
+ reg = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_LTEMP_FAN_CTRL);
+ data->temp1_auto_point_temp[1] = (reg & 0xF8) >> 1;
+ reg &= 0x07;
+ reg = 0x20 >> reg;
+ if (reg > 0)
+ data->temp1_auto_point_temp[2] =
+ data->temp1_auto_point_temp[1] +
+ (data->pwm1_auto_point_pwm[2] -
+ data->pwm1_auto_point_pwm[1]) / reg;
+ else
+ data->temp1_auto_point_temp[2] = 255;
+
+ reg = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_RTEMP_FAN_CTRL);
+ data->temp2_auto_point_temp[1] = (reg & 0xF8) >> 1;
+ reg &= 0x07;
+ reg = 0x20 >> reg;
+ if (reg > 0)
+ data->temp2_auto_point_temp[2] =
+ data->temp2_auto_point_temp[1] +
+ (data->pwm1_auto_point_pwm[2] -
+ data->pwm1_auto_point_pwm[1]) / reg;
+ else
+ data->temp2_auto_point_temp[2] = 255;
+
+ reg = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
+ reg = (reg >> 5) & 0x3;
+ switch (reg) {
+ case 0: /*open loop: software sets pwm1*/
+ data->pwm1_auto_channels_temp = 0;
+ data->pwm1_enable = 1;
+ break;
+ case 2: /*closed loop: remote T (temp2)*/
+ data->pwm1_auto_channels_temp = 2;
+ data->pwm1_enable = 2;
+ break;
+ case 3: /*closed loop: local and remote T (temp2)*/
+ data->pwm1_auto_channels_temp = 3;
+ data->pwm1_enable = 3;
+ break;
+ case 1: /*semi-open loop: software sets rpm, chip controls pwm1,
+ *currently not implemented
+ */
+ data->pwm1_auto_channels_temp = 0;
+ data->pwm1_enable = 0;
+ break;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+
+static int __init amc6821_init(void)
+{
+ return i2c_add_driver(&amc6821_driver);
+}
+
+static void __exit amc6821_exit(void)
+{
+ i2c_del_driver(&amc6821_driver);
+}
+
+module_init(amc6821_init);
+module_exit(amc6821_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("T. Mertelj <tomaz.mertelj@guest.arnes.si>");
+MODULE_DESCRIPTION("Texas Instruments amc6821 hwmon driver");
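
A note on the fan arithmetic in get_fan()/set_fan() above: both directions use 6000000 / x, which is its own inverse, with the raw tachometer count clamped to 1..0xFFFF (the constant presumably encodes the tach reference clock). A tiny standalone sketch with a worked example:

#include <stdio.h>

static unsigned int tach_to_rpm(unsigned int tach)
{
	return tach ? 6000000u / tach : 0;	/* 0 means "no reading" */
}

static unsigned int rpm_to_tach(long rpm)
{
	long tach = rpm < 1 ? 0xFFFF : 6000000 / rpm;

	if (tach < 1)
		tach = 1;
	if (tach > 0xFFFF)
		tach = 0xFFFF;			/* same bounds as the driver */
	return (unsigned int)tach;
}

int main(void)
{
	printf("tach 6000 -> %u RPM\n", tach_to_rpm(6000));	/* 1000 */
	printf("rpm 2000  -> tach %u\n", rpm_to_tach(2000));	/* 3000 */
	return 0;
}
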
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 5a3ee00..028284f 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -5,6 +5,7 @@
* See COPYING in the top level directory of the kernel tree.
*/
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/hwmon.h>
#include <linux/list.h>
@@ -101,6 +102,11 @@ struct atk_data {
int temperature_count;
int fan_count;
struct list_head sensor_list;
+
+ struct {
+ struct dentry *root;
+ u32 id;
+ } debugfs;
};
@@ -624,6 +630,187 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
return err;
}
+#ifdef CONFIG_DEBUG_FS
+static int atk_debugfs_gitm_get(void *p, u64 *val)
+{
+ struct atk_data *data = p;
+ union acpi_object *ret;
+ struct atk_acpi_ret_buffer *buf;
+ int err = 0;
+
+ if (!data->read_handle)
+ return -ENODEV;
+
+ if (!data->debugfs.id)
+ return -EINVAL;
+
+ ret = atk_gitm(data, data->debugfs.id);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ buf = (struct atk_acpi_ret_buffer *)ret->buffer.pointer;
+ if (buf->flags)
+ *val = buf->value;
+ else
+ err = -EIO;
+
+ return err;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(atk_debugfs_gitm,
+ atk_debugfs_gitm_get,
+ NULL,
+ "0x%08llx\n");
+
+static int atk_acpi_print(char *buf, size_t sz, union acpi_object *obj)
+{
+ int ret = 0;
+
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ ret = snprintf(buf, sz, "0x%08llx\n", obj->integer.value);
+ break;
+ case ACPI_TYPE_STRING:
+ ret = snprintf(buf, sz, "%s\n", obj->string.pointer);
+ break;
+ }
+
+ return ret;
+}
+
+static void atk_pack_print(char *buf, size_t sz, union acpi_object *pack)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pack->package.count; i++) {
+ union acpi_object *obj = &pack->package.elements[i];
+
+ ret = atk_acpi_print(buf, sz, obj);
+ if (ret >= sz)
+ break;
+ buf += ret;
+ sz -= ret;
+ }
+}
+
+static int atk_debugfs_ggrp_open(struct inode *inode, struct file *file)
+{
+ struct atk_data *data = inode->i_private;
+ char *buf = NULL;
+ union acpi_object *ret;
+ u8 cls;
+ int i;
+
+ if (!data->enumerate_handle)
+ return -ENODEV;
+ if (!data->debugfs.id)
+ return -EINVAL;
+
+ cls = (data->debugfs.id & 0xff000000) >> 24;
+ ret = atk_ggrp(data, cls);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ for (i = 0; i < ret->package.count; i++) {
+ union acpi_object *pack = &ret->package.elements[i];
+ union acpi_object *id;
+
+ if (pack->type != ACPI_TYPE_PACKAGE)
+ continue;
+ if (!pack->package.count)
+ continue;
+ id = &pack->package.elements[0];
+ if (id->integer.value == data->debugfs.id) {
+ /* Print the package */
+ buf = kzalloc(512, GFP_KERNEL);
+ if (!buf) {
+ ACPI_FREE(ret);
+ return -ENOMEM;
+ }
+ atk_pack_print(buf, 512, pack);
+ break;
+ }
+ }
+ ACPI_FREE(ret);
+
+ if (!buf)
+ return -EINVAL;
+
+ file->private_data = buf;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t atk_debugfs_ggrp_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ char *str = file->private_data;
+ size_t len = strlen(str);
+
+ return simple_read_from_buffer(buf, count, pos, str, len);
+}
+
+static int atk_debugfs_ggrp_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations atk_debugfs_ggrp_fops = {
+ .read = atk_debugfs_ggrp_read,
+ .open = atk_debugfs_ggrp_open,
+ .release = atk_debugfs_ggrp_release,
+};
+
+static void atk_debugfs_init(struct atk_data *data)
+{
+ struct dentry *d;
+ struct dentry *f;
+
+ data->debugfs.id = 0;
+
+ d = debugfs_create_dir("asus_atk0110", NULL);
+ if (!d || IS_ERR(d))
+ return;
+
+ f = debugfs_create_x32("id", S_IRUSR | S_IWUSR, d, &data->debugfs.id);
+ if (!f || IS_ERR(f))
+ goto cleanup;
+
+ f = debugfs_create_file("gitm", S_IRUSR, d, data,
+ &atk_debugfs_gitm);
+ if (!f || IS_ERR(f))
+ goto cleanup;
+
+ f = debugfs_create_file("ggrp", S_IRUSR, d, data,
+ &atk_debugfs_ggrp_fops);
+ if (!f || IS_ERR(f))
+ goto cleanup;
+
+ data->debugfs.root = d;
+
+ return;
+cleanup:
+ debugfs_remove_recursive(d);
+}
+
+static void atk_debugfs_cleanup(struct atk_data *data)
+{
+ debugfs_remove_recursive(data->debugfs.root);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static void atk_debugfs_init(struct atk_data *data)
+{
+}
+
+static void atk_debugfs_cleanup(struct atk_data *data)
+{
+}
+#endif
+
static int atk_add_sensor(struct atk_data *data, union acpi_object *obj)
{
struct device *dev = &data->acpi_dev->dev;
@@ -1047,76 +1234,75 @@ remove:
return err;
}
-static int atk_check_old_if(struct atk_data *data)
+static int atk_probe_if(struct atk_data *data)
{
struct device *dev = &data->acpi_dev->dev;
acpi_handle ret;
acpi_status status;
+ int err = 0;
/* RTMP: read temperature */
status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_TMP, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->rtmp_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_OLD_READ_TMP " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->rtmp_handle = ret;
/* RVLT: read voltage */
status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_VLT, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->rvlt_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_OLD_READ_VLT " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->rvlt_handle = ret;
/* RFAN: read fan status */
status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_FAN, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->rfan_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_OLD_READ_FAN " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->rfan_handle = ret;
-
- return 0;
-}
-
-static int atk_check_new_if(struct atk_data *data)
-{
- struct device *dev = &data->acpi_dev->dev;
- acpi_handle ret;
- acpi_status status;
/* Enumeration */
status = acpi_get_handle(data->atk_handle, METHOD_ENUMERATE, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->enumerate_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_ENUMERATE " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->enumerate_handle = ret;
/* De-multiplexer (read) */
status = acpi_get_handle(data->atk_handle, METHOD_READ, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->read_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_READ " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->read_handle = ret;
/* De-multiplexer (write) */
status = acpi_get_handle(data->atk_handle, METHOD_WRITE, &ret);
- if (status != AE_OK) {
- dev_dbg(dev, "method " METHOD_READ " not found: %s\n",
+ if (ACPI_SUCCESS(status))
+ data->write_handle = ret;
+ else
+ dev_dbg(dev, "method " METHOD_WRITE " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->write_handle = ret;
- return 0;
+ /* Check for hwmon methods: first check "old" style methods; note that
+ * both may be present: in this case we stick to the old interface;
+ * analysis of multiple DSDTs indicates that when both interfaces
+ * are present the new one (GGRP/GITM) is not functional.
+ */
+ if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle)
+ data->old_interface = true;
+ else if (data->enumerate_handle && data->read_handle &&
+ data->write_handle)
+ data->old_interface = false;
+ else
+ err = -ENODEV;
+
+ return err;
}
static int atk_add(struct acpi_device *device)
@@ -1143,40 +1329,30 @@ static int atk_add(struct acpi_device *device)
&buf, ACPI_TYPE_PACKAGE);
if (ret != AE_OK) {
dev_dbg(&device->dev, "atk: method MBIF not found\n");
- err = -ENODEV;
- goto out;
+ } else {
+ obj = buf.pointer;
+ if (obj->package.count >= 2) {
+ union acpi_object *id = &obj->package.elements[1];
+ if (id->type == ACPI_TYPE_STRING)
+ dev_dbg(&device->dev, "board ID = %s\n",
+ id->string.pointer);
+ }
+ ACPI_FREE(buf.pointer);
}
- obj = buf.pointer;
- if (obj->package.count >= 2 &&
- obj->package.elements[1].type == ACPI_TYPE_STRING) {
- dev_dbg(&device->dev, "board ID = %s\n",
- obj->package.elements[1].string.pointer);
+ err = atk_probe_if(data);
+ if (err) {
+ dev_err(&device->dev, "No usable hwmon interface detected\n");
+ goto out;
}
- ACPI_FREE(buf.pointer);
- /* Check for hwmon methods: first check "old" style methods; note that
- * both may be present: in this case we stick to the old interface;
- * analysis of multiple DSDTs indicates that when both interfaces
- * are present the new one (GGRP/GITM) is not functional.
- */
- err = atk_check_old_if(data);
- if (!err) {
+ if (data->old_interface) {
dev_dbg(&device->dev, "Using old hwmon interface\n");
- data->old_interface = true;
+ err = atk_enumerate_old_hwmon(data);
} else {
- err = atk_check_new_if(data);
- if (err)
- goto out;
-
dev_dbg(&device->dev, "Using new hwmon interface\n");
- data->old_interface = false;
- }
-
- if (data->old_interface)
- err = atk_enumerate_old_hwmon(data);
- else
err = atk_enumerate_new_hwmon(data);
+ }
if (err < 0)
goto out;
if (err == 0) {
@@ -1190,6 +1366,8 @@ static int atk_add(struct acpi_device *device)
if (err)
goto cleanup;
+ atk_debugfs_init(data);
+
device->driver_data = data;
return 0;
cleanup:
@@ -1208,6 +1386,8 @@ static int atk_remove(struct acpi_device *device, int type)
device->driver_data = NULL;
+ atk_debugfs_cleanup(data);
+
atk_remove_files(data);
atk_free_sensors(data);
hwmon_device_unregister(data->hwmon_dev);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index caef39c..2d7bcee 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
+#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -161,6 +162,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
int usemsr_ee = 1;
int err;
u32 eax, edx;
+ struct pci_dev *host_bridge;
/* Early chips have no MSR for TjMax */
@@ -168,11 +170,21 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
usemsr_ee = 0;
}
- /* Atoms seems to have TjMax at 90C */
+ /* Atom CPUs */
if (c->x86_model == 0x1c) {
usemsr_ee = 0;
- tjmax = 90000;
+
+ host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+
+ if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
+ && (host_bridge->device == 0xa000 /* NM10 based nettop */
+ || host_bridge->device == 0xa010)) /* NM10 based netbook */
+ tjmax = 100000;
+ else
+ tjmax = 90000;
+
+ pci_dev_put(host_bridge);
}
if ((c->x86_model > 0xe) && (usemsr_ee)) {
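
Why the TjMax choice above matters, assuming the usual Intel digital-readout scheme: the sensor reports only the distance below TjMax (a 7-bit field taken from the thermal status MSR), and the driver reports TjMax minus that delta in millidegrees, so a wrong TjMax shifts every reading by a constant. A worked example under that assumption:

#include <stdio.h>

int main(void)
{
	unsigned long long eax = 35ULL << 16;	/* 35 degrees below TjMax */
	int delta = (eax >> 16) & 0x7f;
	int tjmax_nm10 = 100000;		/* NM10-based Atom */
	int tjmax_other = 90000;		/* other Atoms */

	printf("NM10:  %d mC\n", tjmax_nm10 - delta * 1000);	/* 65000 */
	printf("other: %d mC\n", tjmax_other - delta * 1000);	/* 55000 */
	return 0;
}
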
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index bd0fc67..fa07282 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -768,6 +768,7 @@ leave:
static int watchdog_open(struct inode *inode, struct file *filp)
{
struct fschmd_data *pos, *data = NULL;
+ int watchdog_is_open;
/* We get called from drivers/char/misc.c with misc_mtx hold, and we
call misc_register() from fschmd_probe() with watchdog_data_mutex
@@ -782,10 +783,12 @@ static int watchdog_open(struct inode *inode, struct file *filp)
}
}
/* Note we can never not have found data, so we don't check for this */
- kref_get(&data->kref);
+ watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
+ if (!watchdog_is_open)
+ kref_get(&data->kref);
mutex_unlock(&watchdog_data_mutex);
- if (test_and_set_bit(0, &data->watchdog_is_open))
+ if (watchdog_is_open)
return -EBUSY;
/* Start the watchdog */
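
The watchdog_open() change above reorders things so that the reference count is bumped only for the opener that wins the test_and_set_bit() race; the old sequence took the reference first and then turned the second opener away with -EBUSY, leaving that extra reference behind. A minimal standalone model of the corrected pattern:

#include <stdio.h>

static int watchdog_is_open;	/* stands in for the open bit */
static int refcount;		/* stands in for the kref */

static int open_watchdog(void)
{
	int already_open = watchdog_is_open;	/* test_and_set_bit() */

	watchdog_is_open = 1;
	if (!already_open)
		refcount++;			/* kref_get() only once */

	if (already_open)
		return -1;			/* -EBUSY */
	return 0;
}

int main(void)
{
	printf("first open:  %d, refcount %d\n", open_watchdog(), refcount);
	printf("second open: %d, refcount %d\n", open_watchdog(), refcount);
	return 0;
}
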
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
new file mode 100644
index 0000000..099a213
--- /dev/null
+++ b/drivers/hwmon/k10temp.c
@@ -0,0 +1,223 @@
+/*
+ * k10temp.c - AMD Family 10h/11h processor hardware monitoring
+ *
+ * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
+ *
+ *
+ * This driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this driver; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <asm/processor.h>
+
+MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
+MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+MODULE_LICENSE("GPL");
+
+static bool force;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
+
+/* CPUID function 0x80000001, ebx */
+#define CPUID_PKGTYPE_MASK 0xf0000000
+#define CPUID_PKGTYPE_F 0x00000000
+#define CPUID_PKGTYPE_AM2R2_AM3 0x10000000
+
+/* DRAM controller (PCI function 2) */
+#define REG_DCT0_CONFIG_HIGH 0x094
+#define DDR3_MODE 0x00000100
+
+/* miscellaneous (PCI function 3) */
+#define REG_HARDWARE_THERMAL_CONTROL 0x64
+#define HTC_ENABLE 0x00000001
+
+#define REG_REPORTED_TEMPERATURE 0xa4
+
+#define REG_NORTHBRIDGE_CAPABILITIES 0xe8
+#define NB_CAP_HTC 0x00000400
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 regval;
+
+ pci_read_config_dword(to_pci_dev(dev),
+ REG_REPORTED_TEMPERATURE, &regval);
+ return sprintf(buf, "%u\n", (regval >> 21) * 125);
+}
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", 70 * 1000);
+}
+
+static ssize_t show_temp_crit(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int show_hyst = attr->index;
+ u32 regval;
+ int value;
+
+ pci_read_config_dword(to_pci_dev(dev),
+ REG_HARDWARE_THERMAL_CONTROL, &regval);
+ value = ((regval >> 16) & 0x7f) * 500 + 52000;
+ if (show_hyst)
+ value -= ((regval >> 24) & 0xf) * 500;
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "k10temp\n");
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static bool __devinit has_erratum_319(struct pci_dev *pdev)
+{
+ u32 pkg_type, reg_dram_cfg;
+
+ if (boot_cpu_data.x86 != 0x10)
+ return false;
+
+ /*
+ * Erratum 319: The thermal sensor of Socket F/AM2+ processors
+ * may be unreliable.
+ */
+ pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
+ if (pkg_type == CPUID_PKGTYPE_F)
+ return true;
+ if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
+ return false;
+
+ /* Differentiate between AM2+ (bad) and AM3 (good) */
+ pci_bus_read_config_dword(pdev->bus,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
+ REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
+ return !(reg_dram_cfg & DDR3_MODE);
+}
+
+static int __devinit k10temp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *hwmon_dev;
+ u32 reg_caps, reg_htc;
+ int unreliable = has_erratum_319(pdev);
+ int err;
+
+ if (unreliable && !force) {
+ dev_err(&pdev->dev,
+ "unreliable CPU thermal sensor; monitoring disabled\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ err = device_create_file(&pdev->dev, &dev_attr_temp1_input);
+ if (err)
+ goto exit;
+ err = device_create_file(&pdev->dev, &dev_attr_temp1_max);
+ if (err)
+ goto exit_remove;
+
+ pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, &reg_caps);
+ pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, &reg_htc);
+ if ((reg_caps & NB_CAP_HTC) && (reg_htc & HTC_ENABLE)) {
+ err = device_create_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit.dev_attr);
+ if (err)
+ goto exit_remove;
+ err = device_create_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+
+ err = device_create_file(&pdev->dev, &dev_attr_name);
+ if (err)
+ goto exit_remove;
+
+ hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto exit_remove;
+ }
+ dev_set_drvdata(&pdev->dev, hwmon_dev);
+
+ if (unreliable && force)
+ dev_warn(&pdev->dev,
+ "unreliable CPU thermal sensor; check erratum 319\n");
+ return 0;
+
+exit_remove:
+ device_remove_file(&pdev->dev, &dev_attr_name);
+ device_remove_file(&pdev->dev, &dev_attr_temp1_input);
+ device_remove_file(&pdev->dev, &dev_attr_temp1_max);
+ device_remove_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit.dev_attr);
+ device_remove_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr);
+exit:
+ return err;
+}
+
+static void __devexit k10temp_remove(struct pci_dev *pdev)
+{
+ hwmon_device_unregister(dev_get_drvdata(&pdev->dev));
+ device_remove_file(&pdev->dev, &dev_attr_name);
+ device_remove_file(&pdev->dev, &dev_attr_temp1_input);
+ device_remove_file(&pdev->dev, &dev_attr_temp1_max);
+ device_remove_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit.dev_attr);
+ device_remove_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr);
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct pci_device_id k10temp_id_table[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, k10temp_id_table);
+
+static struct pci_driver k10temp_driver = {
+ .name = "k10temp",
+ .id_table = k10temp_id_table,
+ .probe = k10temp_probe,
+ .remove = __devexit_p(k10temp_remove),
+};
+
+static int __init k10temp_init(void)
+{
+ return pci_register_driver(&k10temp_driver);
+}
+
+static void __exit k10temp_exit(void)
+{
+ pci_unregister_driver(&k10temp_driver);
+}
+
+module_init(k10temp_init)
+module_exit(k10temp_exit)
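
A worked example of the decoding in show_temp() above, assuming the reported-temperature register keeps the current reading in its top 11 bits in steps of 0.125 degrees C; multiplying the raw field by 125 then yields the millidegree value hwmon expects:

#include <stdio.h>

int main(void)
{
	unsigned int raw = 400;			/* 400 * 0.125 C = 50 C */
	unsigned int regval = raw << 21;	/* as read over PCI config */

	printf("temp1_input = %u\n", (regval >> 21) * 125);	/* 50000 */
	return 0;
}
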
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 1fe9951..0ceb6d6 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -136,7 +136,7 @@ static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0);
static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static struct pci_device_id k8temp_ids[] = {
+static const struct pci_device_id k8temp_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ 0 },
};
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
new file mode 100644
index 0000000..dc1f540
--- /dev/null
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -0,0 +1,183 @@
+/*
+ * drivers/hwmon/lis3lv02d_i2c.c
+ *
+ * Implements I2C interface for lis3lv02d (STMicroelectronics) accelerometer.
+ * Driver is based on corresponding SPI driver written by Daniel Mack
+ * (lis3lv02d_spi.c (C) 2009 Daniel Mack <daniel@caiaq.de> ).
+ *
+ * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "lis3lv02d.h"
+
+#define DRV_NAME "lis3lv02d_i2c"
+
+static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ return i2c_smbus_write_byte_data(c, reg, value);
+}
+
+static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ *v = i2c_smbus_read_byte_data(c, reg);
+ return 0;
+}
+
+static int lis3_i2c_init(struct lis3lv02d *lis3)
+{
+ u8 reg;
+ int ret;
+
+ /* power up the device */
+ ret = lis3->read(lis3, CTRL_REG1, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg |= CTRL1_PD0;
+ return lis3->write(lis3, CTRL_REG1, reg);
+}
+
+/* Default axis mapping but it can be overwritten by platform data */
+static struct axis_conversion lis3lv02d_axis_map = { LIS3_DEV_X,
+ LIS3_DEV_Y,
+ LIS3_DEV_Z };
+
+static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
+
+ if (pdata) {
+ if (pdata->axis_x)
+ lis3lv02d_axis_map.x = pdata->axis_x;
+
+ if (pdata->axis_y)
+ lis3lv02d_axis_map.y = pdata->axis_y;
+
+ if (pdata->axis_z)
+ lis3lv02d_axis_map.z = pdata->axis_z;
+
+ if (pdata->setup_resources)
+ ret = pdata->setup_resources();
+
+ if (ret)
+ goto fail;
+ }
+
+ lis3_dev.pdata = pdata;
+ lis3_dev.bus_priv = client;
+ lis3_dev.init = lis3_i2c_init;
+ lis3_dev.read = lis3_i2c_read;
+ lis3_dev.write = lis3_i2c_write;
+ lis3_dev.irq = client->irq;
+ lis3_dev.ac = lis3lv02d_axis_map;
+
+ i2c_set_clientdata(client, &lis3_dev);
+ ret = lis3lv02d_init_device(&lis3_dev);
+fail:
+ return ret;
+}
+
+static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
+{
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+ struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
+
+ if (pdata && pdata->release_resources)
+ pdata->release_resources();
+
+ lis3lv02d_joystick_disable();
+ lis3lv02d_poweroff(lis3);
+
+ return lis3lv02d_remove_fs(&lis3_dev);
+}
+
+#ifdef CONFIG_PM
+static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ if (!lis3->pdata->wakeup_flags)
+ lis3lv02d_poweroff(lis3);
+ return 0;
+}
+
+static int lis3lv02d_i2c_resume(struct i2c_client *client)
+{
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ if (!lis3->pdata->wakeup_flags)
+ lis3lv02d_poweron(lis3);
+ return 0;
+}
+
+static void lis3lv02d_i2c_shutdown(struct i2c_client *client)
+{
+ lis3lv02d_i2c_suspend(client, PMSG_SUSPEND);
+}
+#else
+#define lis3lv02d_i2c_suspend NULL
+#define lis3lv02d_i2c_resume NULL
+#define lis3lv02d_i2c_shutdown NULL
+#endif
+
+static const struct i2c_device_id lis3lv02d_id[] = {
+ {"lis3lv02d", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);
+
+static struct i2c_driver lis3lv02d_i2c_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .suspend = lis3lv02d_i2c_suspend,
+ .shutdown = lis3lv02d_i2c_shutdown,
+ .resume = lis3lv02d_i2c_resume,
+ .probe = lis3lv02d_i2c_probe,
+ .remove = __devexit_p(lis3lv02d_i2c_remove),
+ .id_table = lis3lv02d_id,
+};
+
+static int __init lis3lv02d_init(void)
+{
+ return i2c_add_driver(&lis3lv02d_i2c_driver);
+}
+
+static void __exit lis3lv02d_exit(void)
+{
+ i2c_del_driver(&lis3lv02d_i2c_driver);
+}
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("lis3lv02d I2C interface");
+MODULE_LICENSE("GPL");
+
+module_init(lis3lv02d_init);
+module_exit(lis3lv02d_exit);
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index cadcbd9..72ff2c4 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev)
static int __init lm78_isa_found(unsigned short address)
{
int val, save, found = 0;
-
- /* We have to request the region in two parts because some
- boards declare base+4 to base+7 as a PNP device */
- if (!request_region(address, 4, "lm78")) {
- pr_debug("lm78: Failed to request low part of region\n");
- return 0;
- }
- if (!request_region(address + 4, 4, "lm78")) {
- pr_debug("lm78: Failed to request high part of region\n");
- release_region(address, 4);
- return 0;
+ int port;
+
+ /* Some boards declare base+0 to base+7 as a PNP device, some base+4
+ * to base+7 and some base+5 to base+6. So we better request each port
+ * individually for the probing phase. */
+ for (port = address; port < address + LM78_EXTENT; port++) {
+ if (!request_region(port, 1, "lm78")) {
+ pr_debug("lm78: Failed to request port 0x%x\n", port);
+ goto release;
+ }
}
#define REALLY_SLOW_IO
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address)
val & 0x80 ? "LM79" : "LM78", (int)address);
release:
- release_region(address + 4, 4);
- release_region(address, 4);
+ for (port--; port >= address; port--)
+ release_region(port, 1);
return found;
}
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index ebe38b6..864a371 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data)
int d1 = 0;
int i;
- for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
+ for (i = 1; i < ARRAY_SIZE(temppoints); i++)
/* Find pointer to interpolate */
if (data->supply_uV > temppoints[i - 1].vdd) {
d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data)
const int c1 = -4;
const int c2 = 40500; /* x 10 ^ -6 */
- const int c3 = 2800; /* x10 ^ -9 */
+ const int c3 = -2800; /* x10 ^ -9 */
RHlinear = c1*1000
+ c2 * data->val_humid/1000
+ (data->val_humid * data->val_humid * c3)/1000000;
- return (temp - 25000) * (10000 + 800 * data->val_humid)
+ return (temp - 25000) * (10000 + 80 * data->val_humid)
/ 1000000 + RHlinear;
}
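
For reference, the corrected constants follow the form of the SHT1x datasheet conversion formulas; the sketch below uses the coefficient set the driver is written against (SO_RH is the raw humidity word, T the temperature in degrees C) and is not part of the patch itself:

	RH_linear = c1 + c2 * SO_RH + c3 * SO_RH^2            (c1 = -4, c2 = 0.0405, c3 = -2.8e-6)
	RH_true   = (T - 25) * (t1 + t2 * SO_RH) + RH_linear   (t1 = 0.01, t2 = 8e-5)

The driver works in thousandths (milli-%RH and milli-degrees C), so c3 shows up as -2800 x 10^-9 and the compensation slope as 80 x 10^-6; the old values had the wrong sign on c3 and an extra factor of ten on the slope.
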
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 12f2e70..79c2931 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -697,7 +697,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev)
return data;
}
-static struct pci_device_id sis5595_pci_ids[] = {
+static const struct pci_device_id sis5595_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 8ad50fd..8fa462f 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -136,11 +136,11 @@ struct smsc47m1_data {
struct smsc47m1_sio_data {
enum chips type;
+ u8 activate; /* Remember initial device state */
};
-static int smsc47m1_probe(struct platform_device *pdev);
-static int __devexit smsc47m1_remove(struct platform_device *pdev);
+static int __exit smsc47m1_remove(struct platform_device *pdev);
static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
int init);
@@ -160,8 +160,7 @@ static struct platform_driver smsc47m1_driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
},
- .probe = smsc47m1_probe,
- .remove = __devexit_p(smsc47m1_remove),
+ .remove = __exit_p(smsc47m1_remove),
};
static ssize_t get_fan(struct device *dev, struct device_attribute
@@ -470,24 +469,126 @@ static int __init smsc47m1_find(unsigned short *addr,
superio_select();
*addr = (superio_inb(SUPERIO_REG_BASE) << 8)
| superio_inb(SUPERIO_REG_BASE + 1);
- val = superio_inb(SUPERIO_REG_ACT);
- if (*addr == 0 || (val & 0x01) == 0) {
- pr_info(DRVNAME ": Device is disabled, will not use\n");
+ if (*addr == 0) {
+ pr_info(DRVNAME ": Device address not set, will not use\n");
superio_exit();
return -ENODEV;
}
+ /* Enable only if address is set (needed at least on the
+ * Compaq Presario S4000NX) */
+ sio_data->activate = superio_inb(SUPERIO_REG_ACT);
+ if ((sio_data->activate & 0x01) == 0) {
+ pr_info(DRVNAME ": Enabling device\n");
+ superio_outb(SUPERIO_REG_ACT, sio_data->activate | 0x01);
+ }
+
superio_exit();
return 0;
}
-static int __devinit smsc47m1_probe(struct platform_device *pdev)
+/* Restore device to its initial state */
+static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
+{
+ if ((sio_data->activate & 0x01) == 0) {
+ superio_enter();
+ superio_select();
+
+ pr_info(DRVNAME ": Disabling device\n");
+ superio_outb(SUPERIO_REG_ACT, sio_data->activate);
+
+ superio_exit();
+ }
+}
+
+#define CHECK 1
+#define REQUEST 2
+#define RELEASE 3
+
+/*
+ * This function can be used to:
+ * - test for resource conflicts with ACPI
+ * - request the resources
+ * - release the resources
+ * We only allocate the I/O ports we really need, to minimize the risk of
+ * conflicts with ACPI or with other drivers.
+ */
+static int smsc47m1_handle_resources(unsigned short address, enum chips type,
+ int action, struct device *dev)
+{
+ static const u8 ports_m1[] = {
+ /* register, region length */
+ 0x04, 1,
+ 0x33, 4,
+ 0x56, 7,
+ };
+
+ static const u8 ports_m2[] = {
+ /* register, region length */
+ 0x04, 1,
+ 0x09, 1,
+ 0x2c, 2,
+ 0x35, 4,
+ 0x56, 7,
+ 0x69, 4,
+ };
+
+ int i, ports_size, err;
+ const u8 *ports;
+
+ switch (type) {
+ case smsc47m1:
+ default:
+ ports = ports_m1;
+ ports_size = ARRAY_SIZE(ports_m1);
+ break;
+ case smsc47m2:
+ ports = ports_m2;
+ ports_size = ARRAY_SIZE(ports_m2);
+ break;
+ }
+
+ for (i = 0; i + 1 < ports_size; i += 2) {
+ unsigned short start = address + ports[i];
+ unsigned short len = ports[i + 1];
+
+ switch (action) {
+ case CHECK:
+ /* Only check for conflicts */
+ err = acpi_check_region(start, len, DRVNAME);
+ if (err)
+ return err;
+ break;
+ case REQUEST:
+ /* Request the resources */
+ if (!request_region(start, len, DRVNAME)) {
+ dev_err(dev, "Region 0x%hx-0x%hx already in "
+ "use!\n", start, start + len);
+
+ /* Undo all requests */
+ for (i -= 2; i >= 0; i -= 2)
+ release_region(address + ports[i],
+ ports[i + 1]);
+ return -EBUSY;
+ }
+ break;
+ case RELEASE:
+ /* Release the resources */
+ release_region(start, len);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int __init smsc47m1_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct smsc47m1_sio_data *sio_data = dev->platform_data;
struct smsc47m1_data *data;
struct resource *res;
- int err = 0;
+ int err;
int fan1, fan2, fan3, pwm1, pwm2, pwm3;
static const char *names[] = {
@@ -496,12 +597,10 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev)
};
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, SMSC_EXTENT, DRVNAME)) {
- dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
- (unsigned long)res->start,
- (unsigned long)res->end);
- return -EBUSY;
- }
+ err = smsc47m1_handle_resources(res->start, sio_data->type,
+ REQUEST, dev);
+ if (err < 0)
+ return err;
if (!(data = kzalloc(sizeof(struct smsc47m1_data), GFP_KERNEL))) {
err = -ENOMEM;
@@ -637,11 +736,11 @@ error_free:
platform_set_drvdata(pdev, NULL);
kfree(data);
error_release:
- release_region(res->start, SMSC_EXTENT);
+ smsc47m1_handle_resources(res->start, sio_data->type, RELEASE, dev);
return err;
}
-static int __devexit smsc47m1_remove(struct platform_device *pdev)
+static int __exit smsc47m1_remove(struct platform_device *pdev)
{
struct smsc47m1_data *data = platform_get_drvdata(pdev);
struct resource *res;
@@ -650,7 +749,7 @@ static int __devexit smsc47m1_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &smsc47m1_group);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, SMSC_EXTENT);
+ smsc47m1_handle_resources(res->start, data->type, RELEASE, &pdev->dev);
platform_set_drvdata(pdev, NULL);
kfree(data);
@@ -717,7 +816,7 @@ static int __init smsc47m1_device_add(unsigned short address,
};
int err;
- err = acpi_check_resource_conflict(&res);
+ err = smsc47m1_handle_resources(address, sio_data->type, CHECK, NULL);
if (err)
goto exit;
@@ -766,27 +865,29 @@ static int __init sm_smsc47m1_init(void)
if (smsc47m1_find(&address, &sio_data))
return -ENODEV;
- err = platform_driver_register(&smsc47m1_driver);
+ /* Sets global pdev as a side effect */
+ err = smsc47m1_device_add(address, &sio_data);
if (err)
goto exit;
- /* Sets global pdev as a side effect */
- err = smsc47m1_device_add(address, &sio_data);
+ err = platform_driver_probe(&smsc47m1_driver, smsc47m1_probe);
if (err)
- goto exit_driver;
+ goto exit_device;
return 0;
-exit_driver:
- platform_driver_unregister(&smsc47m1_driver);
+exit_device:
+ platform_device_unregister(pdev);
+ smsc47m1_restore(&sio_data);
exit:
return err;
}
static void __exit sm_smsc47m1_exit(void)
{
- platform_device_unregister(pdev);
platform_driver_unregister(&smsc47m1_driver);
+ smsc47m1_restore(pdev->dev.platform_data);
+ platform_device_unregister(pdev);
}
MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
new file mode 100644
index 0000000..7442cf7
--- /dev/null
+++ b/drivers/hwmon/via-cputemp.c
@@ -0,0 +1,356 @@
+/*
+ * via-cputemp.c - Driver for VIA CPU core temperature monitoring
+ * Copyright (C) 2009 VIA Technologies, Inc.
+ *
+ * based on existing coretemp.c, which is
+ *
+ * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+
+#define DRVNAME "via_cputemp"
+
+enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW;
+
+/*
+ * Functions declaration
+ */
+
+struct via_cputemp_data {
+ struct device *hwmon_dev;
+ const char *name;
+ u32 id;
+ u32 msr;
+};
+
+/*
+ * Sysfs stuff
+ */
+
+static ssize_t show_name(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ int ret;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct via_cputemp_data *data = dev_get_drvdata(dev);
+
+ if (attr->index == SHOW_NAME)
+ ret = sprintf(buf, "%s\n", data->name);
+ else /* show label */
+ ret = sprintf(buf, "Core %d\n", data->id);
+ return ret;
+}
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct via_cputemp_data *data = dev_get_drvdata(dev);
+ u32 eax, edx;
+ int err;
+
+ err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
+ if (err)
+ return -EAGAIN;
+
+ return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
+ SHOW_TEMP);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);
+
+static struct attribute *via_cputemp_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group via_cputemp_group = {
+ .attrs = via_cputemp_attributes,
+};
+
+static int __devinit via_cputemp_probe(struct platform_device *pdev)
+{
+ struct via_cputemp_data *data;
+ struct cpuinfo_x86 *c = &cpu_data(pdev->id);
+ int err;
+ u32 eax, edx;
+
+ data = kzalloc(sizeof(struct via_cputemp_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Out of memory\n");
+ goto exit;
+ }
+
+ data->id = pdev->id;
+ data->name = "via_cputemp";
+
+ switch (c->x86_model) {
+ case 0xA:
+ /* C7 A */
+ case 0xD:
+ /* C7 D */
+ data->msr = 0x1169;
+ break;
+ case 0xF:
+ /* Nano */
+ data->msr = 0x1423;
+ break;
+ default:
+ err = -ENODEV;
+ goto exit_free;
+ }
+
+ /* test if we can access the TEMPERATURE MSR */
+ err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Unable to access TEMPERATURE MSR, giving up\n");
+ goto exit_free;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &via_cputemp_group);
+ if (err)
+ goto exit_free;
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n",
+ err);
+ goto exit_remove;
+ }
+
+ return 0;
+
+exit_remove:
+ sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
+exit_free:
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+exit:
+ return err;
+}
+
+static int __devexit via_cputemp_remove(struct platform_device *pdev)
+{
+ struct via_cputemp_data *data = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+ return 0;
+}
+
+static struct platform_driver via_cputemp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRVNAME,
+ },
+ .probe = via_cputemp_probe,
+ .remove = __devexit_p(via_cputemp_remove),
+};
+
+struct pdev_entry {
+ struct list_head list;
+ struct platform_device *pdev;
+ unsigned int cpu;
+};
+
+static LIST_HEAD(pdev_list);
+static DEFINE_MUTEX(pdev_list_mutex);
+
+static int __cpuinit via_cputemp_device_add(unsigned int cpu)
+{
+ int err;
+ struct platform_device *pdev;
+ struct pdev_entry *pdev_entry;
+
+ pdev = platform_device_alloc(DRVNAME, cpu);
+ if (!pdev) {
+ err = -ENOMEM;
+ printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ goto exit;
+ }
+
+ pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
+ if (!pdev_entry) {
+ err = -ENOMEM;
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
+ err);
+ goto exit_device_free;
+ }
+
+ pdev_entry->pdev = pdev;
+ pdev_entry->cpu = cpu;
+ mutex_lock(&pdev_list_mutex);
+ list_add_tail(&pdev_entry->list, &pdev_list);
+ mutex_unlock(&pdev_list_mutex);
+
+ return 0;
+
+exit_device_free:
+ kfree(pdev_entry);
+exit_device_put:
+ platform_device_put(pdev);
+exit:
+ return err;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void via_cputemp_device_remove(unsigned int cpu)
+{
+ struct pdev_entry *p, *n;
+ mutex_lock(&pdev_list_mutex);
+ list_for_each_entry_safe(p, n, &pdev_list, list) {
+ if (p->cpu == cpu) {
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ kfree(p);
+ }
+ }
+ mutex_unlock(&pdev_list_mutex);
+}
+
+static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long) hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ via_cputemp_device_add(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ via_cputemp_device_remove(cpu);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block via_cputemp_cpu_notifier __refdata = {
+ .notifier_call = via_cputemp_cpu_callback,
+};
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+static int __init via_cputemp_init(void)
+{
+ int i, err;
+ struct pdev_entry *p, *n;
+
+ if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) {
+ printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ err = platform_driver_register(&via_cputemp_driver);
+ if (err)
+ goto exit;
+
+ for_each_online_cpu(i) {
+ struct cpuinfo_x86 *c = &cpu_data(i);
+
+ if (c->x86 != 6)
+ continue;
+
+ if (c->x86_model < 0x0a)
+ continue;
+
+ if (c->x86_model > 0x0f) {
+ printk(KERN_WARNING DRVNAME ": Unknown CPU "
+ "model 0x%x\n", c->x86_model);
+ continue;
+ }
+
+ err = via_cputemp_device_add(i);
+ if (err)
+ goto exit_devices_unreg;
+ }
+ if (list_empty(&pdev_list)) {
+ err = -ENODEV;
+ goto exit_driver_unreg;
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ register_hotcpu_notifier(&via_cputemp_cpu_notifier);
+#endif
+ return 0;
+
+exit_devices_unreg:
+ mutex_lock(&pdev_list_mutex);
+ list_for_each_entry_safe(p, n, &pdev_list, list) {
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ kfree(p);
+ }
+ mutex_unlock(&pdev_list_mutex);
+exit_driver_unreg:
+ platform_driver_unregister(&via_cputemp_driver);
+exit:
+ return err;
+}
+
+static void __exit via_cputemp_exit(void)
+{
+ struct pdev_entry *p, *n;
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
+#endif
+ mutex_lock(&pdev_list_mutex);
+ list_for_each_entry_safe(p, n, &pdev_list, list) {
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ kfree(p);
+ }
+ mutex_unlock(&pdev_list_mutex);
+ platform_driver_unregister(&via_cputemp_driver);
+}
+
+MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
+MODULE_DESCRIPTION("VIA CPU temperature monitor");
+MODULE_LICENSE("GPL");
+
+module_init(via_cputemp_init)
+module_exit(via_cputemp_exit)
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 39e82a4..f397ce7 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -767,7 +767,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
return data;
}
-static struct pci_device_id via686a_pci_ids[] = {
+static const struct pci_device_id via686a_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
{ 0, }
};
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 470a122..d47b4c9 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -697,7 +697,7 @@ static struct platform_driver vt8231_driver = {
.remove = __devexit_p(vt8231_remove),
};
-static struct pci_device_id vt8231_pci_ids[] = {
+static const struct pci_device_id vt8231_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
{ 0, }
};
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index b257c72..38e2805 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1135,6 +1135,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
"W83687THF",
};
+ sio_data->sioaddr = sioaddr;
superio_enter(sio_data);
val = force_id ? force_id : superio_inb(sio_data, DEVID);
switch (val) {
@@ -1177,7 +1178,6 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
}
err = 0;
- sio_data->sioaddr = sioaddr;
pr_info(DRVNAME ": Found %s chip at %#x\n",
names[sio_data->type], *addr);
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 05f9225..32d4ade 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1793,17 +1793,17 @@ static int __init
w83781d_isa_found(unsigned short address)
{
int val, save, found = 0;
-
- /* We have to request the region in two parts because some
- boards declare base+4 to base+7 as a PNP device */
- if (!request_region(address, 4, "w83781d")) {
- pr_debug("w83781d: Failed to request low part of region\n");
- return 0;
- }
- if (!request_region(address + 4, 4, "w83781d")) {
- pr_debug("w83781d: Failed to request high part of region\n");
- release_region(address, 4);
- return 0;
+ int port;
+
+ /* Some boards declare base+0 to base+7 as a PNP device, some base+4
+ * to base+7 and some base+5 to base+6. So we better request each port
+ * individually for the probing phase. */
+ for (port = address; port < address + W83781D_EXTENT; port++) {
+ if (!request_region(port, 1, "w83781d")) {
+ pr_debug("w83781d: Failed to request port 0x%x\n",
+ port);
+ goto release;
+ }
}
#define REALLY_SLOW_IO
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address)
val == 0x30 ? "W83782D" : "W83781D", (int)address);
release:
- release_region(address + 4, 4);
- release_region(address, 4);
+ for (port--; port >= address; port--)
+ release_region(port, 1);
return found;
}
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index f70f465..4687af4 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -87,9 +87,9 @@ static int ali1563_transaction(struct i2c_adapter * a, int size)
outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2);
timeout = ALI1563_MAX_TIMEOUT;
- do
+ do {
msleep(1);
- while (((data = inb_p(SMB_HST_STS)) & HST_STS_BUSY) && --timeout);
+ } while (((data = inb_p(SMB_HST_STS)) & HST_STS_BUSY) && --timeout);
dev_dbg(&a->dev, "Transaction (post): STS=%02x, CNTL1=%02x, "
"CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
@@ -157,9 +157,9 @@ static int ali1563_block_start(struct i2c_adapter * a)
outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2);
timeout = ALI1563_MAX_TIMEOUT;
- do
+ do {
msleep(1);
- while (!((data = inb_p(SMB_HST_STS)) & HST_STS_DONE) && --timeout);
+ } while (!((data = inb_p(SMB_HST_STS)) & HST_STS_DONE) && --timeout);
dev_dbg(&a->dev, "Block (post): STS=%02x, CNTL1=%02x, "
"CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index b309ac2..fe3fb56 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -693,13 +693,13 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
}
/* Set TWI internal clock as 10MHz */
- write_CONTROL(iface, ((get_sclk() / 1024 / 1024 + 5) / 10) & 0x7F);
+ write_CONTROL(iface, ((get_sclk() / 1000 / 1000 + 5) / 10) & 0x7F);
/*
* We will not end up with a CLKDIV=0 because no one will specify
- * 20kHz SCL or less in Kconfig now. (5 * 1024 / 20 = 0x100)
+ * 20kHz SCL or less in Kconfig now. (5 * 1000 / 20 = 250)
*/
- clkhilow = 5 * 1024 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ;
+ clkhilow = ((10 * 1000 / CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ) + 1) / 2;
/* Set Twi interface clock as specified */
write_CLKDIV(iface, (clkhilow << 8) | clkhilow);
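
As a quick check of the new arithmetic, assuming the 10 MHz internal reference programmed just above and taking the default 100 kHz Kconfig value as an example:

	clkhilow   = ((10 * 1000 / 100) + 1) / 2 = 50
	SCL period = (CLKHI + CLKLOW) reference clocks = 100 / 10 MHz = 10 us  ->  100 kHz

The +1 rounds the divider up, so rates that do not divide evenly come out at or below the requested frequency (400 kHz gives clkhilow = 13, roughly 385 kHz) rather than overshooting it.
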
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e3654d6..75bf820 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -226,7 +226,6 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
temp = readb(i2c_imx->base + IMX_I2C_I2CR);
temp &= ~(I2CR_MSTA | I2CR_MTX);
writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
- i2c_imx->stopped = 1;
}
if (cpu_is_mx1()) {
/*
@@ -236,8 +235,10 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
udelay(i2c_imx->disable_delay);
}
- if (!i2c_imx->stopped)
+ if (!i2c_imx->stopped) {
i2c_imx_bus_busy(i2c_imx, 0);
+ i2c_imx->stopped = 1;
+ }
/* Disable I2C controller */
writeb(0, i2c_imx->base + IMX_I2C_I2CR);
@@ -496,22 +497,23 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
}
res_size = resource_size(res);
+
+ if (!request_mem_region(res->start, res_size, DRIVER_NAME)) {
+ ret = -EBUSY;
+ goto fail0;
+ }
+
base = ioremap(res->start, res_size);
if (!base) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -EIO;
- goto fail0;
+ goto fail1;
}
i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL);
if (!i2c_imx) {
dev_err(&pdev->dev, "can't allocate interface\n");
ret = -ENOMEM;
- goto fail1;
- }
-
- if (!request_mem_region(res->start, res_size, DRIVER_NAME)) {
- ret = -EBUSY;
goto fail2;
}
@@ -582,11 +584,11 @@ fail5:
fail4:
clk_put(i2c_imx->clk);
fail3:
- release_mem_region(i2c_imx->res->start, resource_size(res));
-fail2:
kfree(i2c_imx);
-fail1:
+fail2:
iounmap(base);
+fail1:
+ release_mem_region(res->start, resource_size(res));
fail0:
if (pdata && pdata->exit)
pdata->exit(&pdev->dev);
@@ -618,8 +620,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
clk_put(i2c_imx->clk);
- release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res));
iounmap(i2c_imx->base);
+ release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res));
kfree(i2c_imx);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 75bf3ad..0037e31 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -247,7 +247,13 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
}
dev->idle = 0;
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
+
+ /*
+ * Don't write to this register if the IE state is 0 as it can
+ * cause deadlock.
+ */
+ if (dev->iestate)
+ omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
}
static void omap_i2c_idle(struct omap_i2c_dev *dev)
@@ -280,6 +286,11 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
unsigned long internal_clk = 0;
if (dev->rev >= OMAP_I2C_REV_2) {
+ /* Disable I2C controller before soft reset */
+ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
+ omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
+ ~(OMAP_I2C_CON_EN));
+
omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK);
/* For some reason we need to set the EN bit before the
* reset done bit gets set. */
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 0ed68e2..f7346a9 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -75,7 +75,7 @@ static int pca_isa_waitforcompletion(void *pd)
unsigned long timeout;
if (irq > -1) {
- ret = wait_event_interruptible_timeout(pca_wait,
+ ret = wait_event_timeout(pca_wait,
pca_isa_readbyte(pd, I2C_PCA_CON)
& I2C_PCA_CON_SI, pca_isa_ops.timeout);
} else {
@@ -96,7 +96,7 @@ static void pca_isa_resetchip(void *pd)
}
static irqreturn_t pca_handler(int this_irq, void *dev_id) {
- wake_up_interruptible(&pca_wait);
+ wake_up(&pca_wait);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index c4df9d4..5b2213d 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -84,7 +84,7 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
unsigned long timeout;
if (i2c->irq) {
- ret = wait_event_interruptible_timeout(i2c->wait,
+ ret = wait_event_timeout(i2c->wait,
i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
& I2C_PCA_CON_SI, i2c->adap.timeout);
} else {
@@ -122,7 +122,7 @@ static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id)
if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
return IRQ_NONE;
- wake_up_interruptible(&i2c->wait);
+ wake_up(&i2c->wait);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 1e245e9..e56e4b6 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -324,12 +324,12 @@ static int piix4_transaction(void)
else
msleep(1);
- while ((timeout++ < MAX_TIMEOUT) &&
+ while ((++timeout < MAX_TIMEOUT) &&
((temp = inb_p(SMBHSTSTS)) & 0x01))
msleep(1);
/* If the SMBus is still busy, we give up */
- if (timeout >= MAX_TIMEOUT) {
+ if (timeout == MAX_TIMEOUT) {
dev_err(&piix4_adapter.dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
}
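
The off-by-one being fixed here is easy to miss, so a short walk-through of the old loop (my reading, not text from the patch): with timeout at MAX_TIMEOUT - 1, the post-increment test still passes and bumps timeout to MAX_TIMEOUT; if the busy bit clears on that very poll, the loop exits on a completed transfer, yet the old check still fires:

	timeout == MAX_TIMEOUT - 1       /* enter the old loop condition       */
	timeout++ < MAX_TIMEOUT          /* true; timeout is now MAX_TIMEOUT   */
	(inb_p(SMBHSTSTS) & 0x01) == 0   /* bus just went idle; loop exits     */
	timeout >= MAX_TIMEOUT           /* old check: spurious -ETIMEDOUT     */

Pre-incrementing and testing for equality keeps the counter and the error check consistent; the identical fix is applied to i2c-viapro below.
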
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index b1c050f..e29b6d5 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/types.h>
/* include interfaces to usb layer */
#include <linux/usb.h>
@@ -31,8 +32,8 @@
#define CMD_I2C_IO_END (1<<1)
/* i2c bit delay, default is 10us -> 100kHz */
-static int delay = 10;
-module_param(delay, int, 0);
+static unsigned short delay = 10;
+module_param(delay, ushort, 0);
MODULE_PARM_DESC(delay, "bit delay in microseconds, "
"e.g. 10 for 100kHz (default is 100kHz)");
@@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
static u32 usb_func(struct i2c_adapter *adapter)
{
- u32 func;
+ __le32 func;
/* get functionality from adapter */
if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
@@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter)
return 0;
}
- return func;
+ return le32_to_cpu(func);
}
/* This is the actual algorithm we define */
@@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
"i2c-tiny-usb at bus %03d device %03d",
dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
- if (usb_write(&dev->adapter, CMD_SET_DELAY,
- cpu_to_le16(delay), 0, NULL, 0) != 0) {
+ if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
dev_err(&dev->adapter.dev,
"failure setting delay to %dus\n", delay);
retval = -EIO;
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index e4b1543..a84a909 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -165,10 +165,10 @@ static int vt596_transaction(u8 size)
do {
msleep(1);
temp = inb_p(SMBHSTSTS);
- } while ((temp & 0x01) && (timeout++ < MAX_TIMEOUT));
+ } while ((temp & 0x01) && (++timeout < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
- if (timeout >= MAX_TIMEOUT) {
+ if (timeout == MAX_TIMEOUT) {
result = -ETIMEDOUT;
dev_err(&vt596_adapter.dev, "SMBus timeout!\n");
}
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0ac2f90..10be7b5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -248,7 +248,7 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
NULL
};
-const static struct dev_pm_ops i2c_device_pm_ops = {
+static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
};
@@ -843,6 +843,9 @@ int i2c_del_adapter(struct i2c_adapter *adap)
adap->dev.parent);
#endif
+ /* device name is gone after device_unregister */
+ dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
+
/* clean up the sysfs representation */
init_completion(&adap->dev_released);
device_unregister(&adap->dev);
@@ -855,8 +858,6 @@ int i2c_del_adapter(struct i2c_adapter *adap)
idr_remove(&i2c_adapter_idr, adap->nr);
mutex_unlock(&core_lock);
- dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
-
/* Clear the device structure in case this adapter is ever going to be
added again */
memset(&adap->dev, 0, sizeof(adap->dev));
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index 1f20a04..dd25300 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -81,7 +81,7 @@ static u8 i7300_idle_thrtctl_saved;
static u8 i7300_idle_thrtlow_saved;
static u32 i7300_idle_mc_saved;
-static cpumask_t idle_cpumask;
+static cpumask_var_t idle_cpumask;
static ktime_t start_ktime;
static unsigned long avg_idle_us;
@@ -459,9 +459,9 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
spin_lock_irqsave(&i7300_idle_lock, flags);
if (val == IDLE_START) {
- cpu_set(smp_processor_id(), idle_cpumask);
+ cpumask_set_cpu(smp_processor_id(), idle_cpumask);
- if (cpus_weight(idle_cpumask) != num_online_cpus())
+ if (cpumask_weight(idle_cpumask) != num_online_cpus())
goto end;
now_ktime = ktime_get();
@@ -478,8 +478,8 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
i7300_idle_ioat_start();
} else if (val == IDLE_END) {
- cpu_clear(smp_processor_id(), idle_cpumask);
- if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
+ cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
+ if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
/* First CPU coming out of idle */
u64 idle_duration_us;
@@ -553,7 +553,6 @@ struct debugfs_file_info {
static int __init i7300_idle_init(void)
{
spin_lock_init(&i7300_idle_lock);
- cpus_clear(idle_cpumask);
total_us = 0;
if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
@@ -565,6 +564,9 @@ static int __init i7300_idle_init(void)
if (i7300_idle_ioat_init())
return -ENODEV;
+ if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
if (debugfs_dir) {
int i = 0;
@@ -589,6 +591,7 @@ static int __init i7300_idle_init(void)
static void __exit i7300_idle_exit(void)
{
idle_notifier_unregister(&i7300_idle_nb);
+ free_cpumask_var(idle_cpumask);
if (debugfs_dir) {
int i = 0;
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index f102fcc..e02096c 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,8 +1,3 @@
-menu "IEEE 1394 (FireWire) support"
- depends on PCI || BROKEN
-
-source "drivers/firewire/Kconfig"
-
config IEEE1394
tristate "Legacy alternative FireWire driver stack"
depends on PCI || BROKEN
@@ -16,8 +11,13 @@ config IEEE1394
is the core support only, you will also need to select a driver for
your IEEE 1394 adapter.
- To compile this driver as a module, say M here: the
- module will be called ieee1394.
+ To compile this driver as a module, say M here: the module will be
+ called ieee1394.
+
+ NOTE:
+ ieee1394 is superseded by the newer firewire-core driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
config IEEE1394_OHCI1394
tristate "OHCI-1394 controllers"
@@ -29,19 +29,23 @@ config IEEE1394_OHCI1394
use one of these chipsets. It should work with any OHCI-1394
compliant card, however.
- To compile this driver as a module, say M here: the
- module will be called ohci1394.
+ To compile this driver as a module, say M here: the module will be
+ called ohci1394.
NOTE:
+ ohci1394 is superseded by the newer firewire-ohci driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
If you want to install firewire-ohci and ohci1394 together, you
should configure them only as modules and blacklist the driver(s)
which you don't want to have auto-loaded. Add either
- blacklist firewire-ohci
- or
blacklist ohci1394
blacklist video1394
blacklist dv1394
+ or
+ blacklist firewire-ohci
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
depending on your distribution.
@@ -58,8 +62,8 @@ config IEEE1394_PCILYNX
Instruments PCILynx chip. Note: this driver is written for revision
2 of this chip and may not work with revision 0.
- To compile this driver as a module, say M here: the
- module will be called pcilynx.
+ To compile this driver as a module, say M here: the module will be
+ called pcilynx.
Only some old and now very rare PCI and CardBus cards and
PowerMacs G3 B&W contain the PCILynx controller. Therefore
@@ -79,6 +83,14 @@ config IEEE1394_SBP2
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
+ To compile this driver as a module, say M here: the module will be
+ called sbp2.
+
+ NOTE:
+ sbp2 is superseded by the newer firewire-sbp2 driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
config IEEE1394_SBP2_PHYS_DMA
bool "Enable replacement for physical DMA in SBP2"
depends on IEEE1394_SBP2 && VIRT_TO_BUS && EXPERIMENTAL
@@ -111,6 +123,11 @@ config IEEE1394_ETH1394
The module is called eth1394 although it does not emulate Ethernet.
+ NOTE:
+ eth1394 is superseded by the newer firewire-net driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
config IEEE1394_RAWIO
tristate "raw1394 userspace interface"
depends on IEEE1394
@@ -123,6 +140,11 @@ config IEEE1394_RAWIO
To compile this driver as a module, say M here: the module will be
called raw1394.
+ NOTE:
+ raw1394 is superseded by the newer firewire-core driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
config IEEE1394_VIDEO1394
tristate "video1394 userspace interface"
depends on IEEE1394 && IEEE1394_OHCI1394
@@ -136,13 +158,18 @@ config IEEE1394_VIDEO1394
To compile this driver as a module, say M here: the module will be
called video1394.
+ NOTE:
+ video1394 is superseded by the newer firewire-core driver. See
+ http://ieee1394.wiki.kernel.org/index.php/Juju_Migration for
+ further information on how to switch to the new FireWire drivers.
+
config IEEE1394_DV1394
tristate "dv1394 userspace interface (deprecated)"
depends on IEEE1394 && IEEE1394_OHCI1394
help
The dv1394 driver is unsupported and may be removed from Linux in a
- future release. Its functionality is now provided by raw1394 together
- with libraries such as libiec61883.
+ future release. Its functionality is now provided by either
+ raw1394 or firewire-core together with libraries such as libiec61883.
config IEEE1394_VERBOSEDEBUG
bool "Excessive debugging output"
@@ -153,5 +180,3 @@ config IEEE1394_VERBOSEDEBUG
will quickly result in large amounts of data sent to the system log.
Say Y if you really need the debugging output. Everyone else says N.
-
-endmenu
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index bd07803..abbb069 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -36,7 +36,6 @@
#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/workqueue.h>
-#include <linux/if_arp.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
@@ -92,22 +91,12 @@ EXPORT_SYMBOL(rdma_addr_unregister_client);
int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
const unsigned char *dst_dev_addr)
{
- switch (dev->type) {
- case ARPHRD_INFINIBAND:
- dev_addr->dev_type = RDMA_NODE_IB_CA;
- break;
- case ARPHRD_ETHER:
- dev_addr->dev_type = RDMA_NODE_RNIC;
- break;
- default:
- return -EADDRNOTAVAIL;
- }
-
+ dev_addr->dev_type = dev->type;
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
if (dst_dev_addr)
memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
- dev_addr->src_dev = dev;
+ dev_addr->bound_dev_if = dev->ifindex;
return 0;
}
EXPORT_SYMBOL(rdma_copy_addr);
@@ -117,6 +106,15 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
struct net_device *dev;
int ret = -EADDRNOTAVAIL;
+ if (dev_addr->bound_dev_if) {
+ dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ if (!dev)
+ return -ENODEV;
+ ret = rdma_copy_addr(dev_addr, dev, NULL);
+ dev_put(dev);
+ return ret;
+ }
+
switch (addr->sa_family) {
case AF_INET:
dev = ip_dev_find(&init_net,
@@ -131,6 +129,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
+ read_lock(&dev_base_lock);
for_each_netdev(&init_net, dev) {
if (ipv6_chk_addr(&init_net,
&((struct sockaddr_in6 *) addr)->sin6_addr,
@@ -139,6 +138,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
break;
}
}
+ read_unlock(&dev_base_lock);
break;
#endif
}
@@ -176,48 +176,9 @@ static void queue_req(struct addr_req *req)
mutex_unlock(&lock);
}
-static void addr_send_arp(struct sockaddr *dst_in)
-{
- struct rtable *rt;
- struct flowi fl;
-
- memset(&fl, 0, sizeof fl);
-
- switch (dst_in->sa_family) {
- case AF_INET:
- fl.nl_u.ip4_u.daddr =
- ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
-
- if (ip_route_output_key(&init_net, &rt, &fl))
- return;
-
- neigh_event_send(rt->u.dst.neighbour, NULL);
- ip_rt_put(rt);
- break;
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- case AF_INET6:
- {
- struct dst_entry *dst;
-
- fl.nl_u.ip6_u.daddr =
- ((struct sockaddr_in6 *) dst_in)->sin6_addr;
-
- dst = ip6_route_output(&init_net, NULL, &fl);
- if (!dst)
- return;
-
- neigh_event_send(dst->neighbour, NULL);
- dst_release(dst);
- break;
- }
-#endif
- }
-}
-
-static int addr4_resolve_remote(struct sockaddr_in *src_in,
- struct sockaddr_in *dst_in,
- struct rdma_dev_addr *addr)
+static int addr4_resolve(struct sockaddr_in *src_in,
+ struct sockaddr_in *dst_in,
+ struct rdma_dev_addr *addr)
{
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
@@ -229,10 +190,22 @@ static int addr4_resolve_remote(struct sockaddr_in *src_in,
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = dst_ip;
fl.nl_u.ip4_u.saddr = src_ip;
+ fl.oif = addr->bound_dev_if;
+
ret = ip_route_output_key(&init_net, &rt, &fl);
if (ret)
goto out;
+ src_in->sin_family = AF_INET;
+ src_in->sin_addr.s_addr = rt->rt_src;
+
+ if (rt->idev->dev->flags & IFF_LOOPBACK) {
+ ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+ if (!ret)
+ memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
+ goto put;
+ }
+
/* If the device does ARP internally, return 'done' */
if (rt->idev->dev->flags & IFF_NOARP) {
rdma_copy_addr(addr, rt->idev->dev, NULL);
@@ -240,21 +213,14 @@ static int addr4_resolve_remote(struct sockaddr_in *src_in,
}
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
- if (!neigh) {
+ if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+ neigh_event_send(rt->u.dst.neighbour, NULL);
ret = -ENODATA;
+ if (neigh)
+ goto release;
goto put;
}
- if (!(neigh->nud_state & NUD_VALID)) {
- ret = -ENODATA;
- goto release;
- }
-
- if (!src_ip) {
- src_in->sin_family = dst_in->sin_family;
- src_in->sin_addr.s_addr = rt->rt_src;
- }
-
ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
release:
neigh_release(neigh);
@@ -265,52 +231,77 @@ out:
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
- struct sockaddr_in6 *dst_in,
- struct rdma_dev_addr *addr)
+static int addr6_resolve(struct sockaddr_in6 *src_in,
+ struct sockaddr_in6 *dst_in,
+ struct rdma_dev_addr *addr)
{
struct flowi fl;
struct neighbour *neigh;
struct dst_entry *dst;
- int ret = -ENODATA;
+ int ret;
memset(&fl, 0, sizeof fl);
- fl.nl_u.ip6_u.daddr = dst_in->sin6_addr;
- fl.nl_u.ip6_u.saddr = src_in->sin6_addr;
+ ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr);
+ ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr);
+ fl.oif = addr->bound_dev_if;
dst = ip6_route_output(&init_net, NULL, &fl);
- if (!dst)
- return ret;
+ if ((ret = dst->error))
+ goto put;
+
+ if (ipv6_addr_any(&fl.fl6_src)) {
+ ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
+ &fl.fl6_dst, 0, &fl.fl6_src);
+ if (ret)
+ goto put;
+
+ src_in->sin6_family = AF_INET6;
+ ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src);
+ }
+
+ if (dst->dev->flags & IFF_LOOPBACK) {
+ ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+ if (!ret)
+ memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
+ goto put;
+ }
+ /* If the device does ARP internally, return 'done' */
if (dst->dev->flags & IFF_NOARP) {
ret = rdma_copy_addr(addr, dst->dev, NULL);
- } else {
- neigh = dst->neighbour;
- if (neigh && (neigh->nud_state & NUD_VALID))
- ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
+ goto put;
+ }
+
+ neigh = dst->neighbour;
+ if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+ neigh_event_send(dst->neighbour, NULL);
+ ret = -ENODATA;
+ goto put;
}
+ ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+put:
dst_release(dst);
return ret;
}
#else
-static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
- struct sockaddr_in6 *dst_in,
- struct rdma_dev_addr *addr)
+static int addr6_resolve(struct sockaddr_in6 *src_in,
+ struct sockaddr_in6 *dst_in,
+ struct rdma_dev_addr *addr)
{
return -EADDRNOTAVAIL;
}
#endif
-static int addr_resolve_remote(struct sockaddr *src_in,
- struct sockaddr *dst_in,
- struct rdma_dev_addr *addr)
+static int addr_resolve(struct sockaddr *src_in,
+ struct sockaddr *dst_in,
+ struct rdma_dev_addr *addr)
{
if (src_in->sa_family == AF_INET) {
- return addr4_resolve_remote((struct sockaddr_in *) src_in,
+ return addr4_resolve((struct sockaddr_in *) src_in,
(struct sockaddr_in *) dst_in, addr);
} else
- return addr6_resolve_remote((struct sockaddr_in6 *) src_in,
+ return addr6_resolve((struct sockaddr_in6 *) src_in,
(struct sockaddr_in6 *) dst_in, addr);
}
@@ -327,8 +318,7 @@ static void process_req(struct work_struct *work)
if (req->status == -ENODATA) {
src_in = (struct sockaddr *) &req->src_addr;
dst_in = (struct sockaddr *) &req->dst_addr;
- req->status = addr_resolve_remote(src_in, dst_in,
- req->addr);
+ req->status = addr_resolve(src_in, dst_in, req->addr);
if (req->status && time_after_eq(jiffies, req->timeout))
req->status = -ETIMEDOUT;
else if (req->status == -ENODATA)
@@ -352,82 +342,6 @@ static void process_req(struct work_struct *work)
}
}
-static int addr_resolve_local(struct sockaddr *src_in,
- struct sockaddr *dst_in,
- struct rdma_dev_addr *addr)
-{
- struct net_device *dev;
- int ret;
-
- switch (dst_in->sa_family) {
- case AF_INET:
- {
- __be32 src_ip = ((struct sockaddr_in *) src_in)->sin_addr.s_addr;
- __be32 dst_ip = ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
-
- dev = ip_dev_find(&init_net, dst_ip);
- if (!dev)
- return -EADDRNOTAVAIL;
-
- if (ipv4_is_zeronet(src_ip)) {
- src_in->sa_family = dst_in->sa_family;
- ((struct sockaddr_in *) src_in)->sin_addr.s_addr = dst_ip;
- ret = rdma_copy_addr(addr, dev, dev->dev_addr);
- } else if (ipv4_is_loopback(src_ip)) {
- ret = rdma_translate_ip(dst_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
- } else {
- ret = rdma_translate_ip(src_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
- }
- dev_put(dev);
- break;
- }
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- case AF_INET6:
- {
- struct in6_addr *a;
-
- for_each_netdev(&init_net, dev)
- if (ipv6_chk_addr(&init_net,
- &((struct sockaddr_in6 *) dst_in)->sin6_addr,
- dev, 1))
- break;
-
- if (!dev)
- return -EADDRNOTAVAIL;
-
- a = &((struct sockaddr_in6 *) src_in)->sin6_addr;
-
- if (ipv6_addr_any(a)) {
- src_in->sa_family = dst_in->sa_family;
- ((struct sockaddr_in6 *) src_in)->sin6_addr =
- ((struct sockaddr_in6 *) dst_in)->sin6_addr;
- ret = rdma_copy_addr(addr, dev, dev->dev_addr);
- } else if (ipv6_addr_loopback(a)) {
- ret = rdma_translate_ip(dst_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
- } else {
- ret = rdma_translate_ip(src_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
- }
- break;
- }
-#endif
-
- default:
- ret = -EADDRNOTAVAIL;
- break;
- }
-
- return ret;
-}
-
int rdma_resolve_ip(struct rdma_addr_client *client,
struct sockaddr *src_addr, struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
@@ -443,22 +357,28 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
if (!req)
return -ENOMEM;
- if (src_addr)
- memcpy(&req->src_addr, src_addr, ip_addr_size(src_addr));
- memcpy(&req->dst_addr, dst_addr, ip_addr_size(dst_addr));
+ src_in = (struct sockaddr *) &req->src_addr;
+ dst_in = (struct sockaddr *) &req->dst_addr;
+
+ if (src_addr) {
+ if (src_addr->sa_family != dst_addr->sa_family) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ memcpy(src_in, src_addr, ip_addr_size(src_addr));
+ } else {
+ src_in->sa_family = dst_addr->sa_family;
+ }
+
+ memcpy(dst_in, dst_addr, ip_addr_size(dst_addr));
req->addr = addr;
req->callback = callback;
req->context = context;
req->client = client;
atomic_inc(&client->refcount);
- src_in = (struct sockaddr *) &req->src_addr;
- dst_in = (struct sockaddr *) &req->dst_addr;
-
- req->status = addr_resolve_local(src_in, dst_in, addr);
- if (req->status == -EADDRNOTAVAIL)
- req->status = addr_resolve_remote(src_in, dst_in, addr);
-
+ req->status = addr_resolve(src_in, dst_in, addr);
switch (req->status) {
case 0:
req->timeout = jiffies;
@@ -467,15 +387,16 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
case -ENODATA:
req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
queue_req(req);
- addr_send_arp(dst_in);
break;
default:
ret = req->status;
atomic_dec(&client->refcount);
- kfree(req);
- break;
+ goto err;
}
return ret;
+err:
+ kfree(req);
+ return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 0753178..875e34e 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -330,17 +330,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
union ib_gid gid;
int ret = -ENODEV;
- switch (rdma_node_get_transport(dev_addr->dev_type)) {
- case RDMA_TRANSPORT_IB:
- ib_addr_get_sgid(dev_addr, &gid);
- break;
- case RDMA_TRANSPORT_IWARP:
- iw_addr_get_sgid(dev_addr, &gid);
- break;
- default:
- return -ENODEV;
- }
-
+ rdma_addr_get_sgid(dev_addr, &gid);
list_for_each_entry(cma_dev, &dev_list, list) {
ret = ib_find_cached_gid(cma_dev->device, &gid,
&id_priv->id.port_num, NULL);
@@ -1032,11 +1022,17 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (rt->num_paths == 2)
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
- ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
- ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
- &id->route.addr.dev_addr);
- if (ret)
- goto destroy_id;
+ if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
+ rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
+ rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
+ ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
+ } else {
+ ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
+ &rt->addr.dev_addr);
+ if (ret)
+ goto destroy_id;
+ }
+ rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
id_priv = container_of(id, struct rdma_id_private, id);
id_priv->state = CMA_CONNECT;
@@ -1071,10 +1067,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
cma_save_net_info(&id->route.addr, &listen_id->route.addr,
ip_ver, port, src, dst);
- ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
- &id->route.addr.dev_addr);
- if (ret)
- goto err;
+ if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
+ ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
+ &id->route.addr.dev_addr);
+ if (ret)
+ goto err;
+ }
id_priv = container_of(id, struct rdma_id_private, id);
id_priv->state = CMA_CONNECT;
@@ -1474,15 +1472,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
-static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
-{
- struct sockaddr_storage addr_in;
-
- memset(&addr_in, 0, sizeof addr_in);
- addr_in.ss_family = af;
- return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
-}
-
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
struct rdma_id_private *id_priv;
@@ -1490,7 +1479,8 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
id_priv = container_of(id, struct rdma_id_private, id);
if (id_priv->state == CMA_IDLE) {
- ret = cma_bind_any(id, AF_INET);
+ ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
+ ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
if (ret)
return ret;
}
@@ -1565,8 +1555,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
struct sockaddr_in6 *sin6;
memset(&path_rec, 0, sizeof path_rec);
- ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
- ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
+ rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
+ rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
path_rec.numb_path = 1;
path_rec.reversible = 1;
@@ -1781,7 +1771,11 @@ port_found:
if (ret)
goto out;
- ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+ id_priv->id.route.addr.dev_addr.dev_type =
+ (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
+ ARPHRD_INFINIBAND : ARPHRD_ETHER;
+
+ rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
id_priv->id.port_num = p;
cma_attach_to_dev(id_priv, cma_dev);
@@ -1839,7 +1833,7 @@ out:
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
struct cma_work *work;
- struct sockaddr_in *src_in, *dst_in;
+ struct sockaddr *src, *dst;
union ib_gid gid;
int ret;
@@ -1853,14 +1847,19 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
goto err;
}
- ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
- ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
+ rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+ rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
- src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
- dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
- src_in->sin_family = dst_in->sin_family;
- src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
+ src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
+ if (cma_zero_addr(src)) {
+ dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
+ if ((src->sa_family = dst->sa_family) == AF_INET) {
+ ((struct sockaddr_in *) src)->sin_addr.s_addr =
+ ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+ } else {
+ ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
+ &((struct sockaddr_in6 *) dst)->sin6_addr);
+ }
}
work->id = id_priv;
@@ -1878,10 +1877,14 @@ err:
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
struct sockaddr *dst_addr)
{
- if (src_addr && src_addr->sa_family)
- return rdma_bind_addr(id, src_addr);
- else
- return cma_bind_any(id, dst_addr->sa_family);
+ if (!src_addr || !src_addr->sa_family) {
+ src_addr = (struct sockaddr *) &id->route.addr.src_addr;
+ if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
+ ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
+ ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
+ }
+ }
+ return rdma_bind_addr(id, src_addr);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
@@ -2077,6 +2080,25 @@ static int cma_get_port(struct rdma_id_private *id_priv)
return ret;
}
+static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
+ struct sockaddr *addr)
+{
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct sockaddr_in6 *sin6;
+
+ if (addr->sa_family != AF_INET6)
+ return 0;
+
+ sin6 = (struct sockaddr_in6 *) addr;
+ if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+ !sin6->sin6_scope_id)
+ return -EINVAL;
+
+ dev_addr->bound_dev_if = sin6->sin6_scope_id;
+#endif
+ return 0;
+}
+
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
struct rdma_id_private *id_priv;
@@ -2089,6 +2111,10 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
return -EINVAL;
+ ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
+ if (ret)
+ goto err1;
+
if (!cma_any_addr(addr)) {
ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
if (ret)
@@ -2108,7 +2134,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
return 0;
err2:
- if (!cma_any_addr(addr)) {
+ if (id_priv->cma_dev) {
mutex_lock(&lock);
cma_detach_from_dev(id_priv);
mutex_unlock(&lock);
@@ -2687,10 +2713,15 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
if (cma_any_addr(addr)) {
memset(mgid, 0, sizeof *mgid);
} else if ((addr->sa_family == AF_INET6) &&
- ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
+ ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
0xFF10A01B)) {
/* IPv6 address is an SA assigned MGID. */
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
+ } else if ((addr->sa_family == AF_INET6)) {
+ ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
+ if (id_priv->id.ps == RDMA_PS_UDP)
+ mc_map[7] = 0x01; /* Use RDMA CM signature */
+ *mgid = *(union ib_gid *) (mc_map + 4);
} else {
ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
if (id_priv->id.ps == RDMA_PS_UDP)
@@ -2716,7 +2747,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
if (id_priv->id.ps == RDMA_PS_UDP)
rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
- ib_addr_get_sgid(dev_addr, &rec.port_gid);
+ rdma_addr_get_sgid(dev_addr, &rec.port_gid);
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
rec.join_state = 1;
@@ -2815,7 +2846,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
dev_addr = &id_priv->id.route.addr.dev_addr;
- if ((dev_addr->src_dev == ndev) &&
+ if ((dev_addr->bound_dev_if == ndev->ifindex) &&
memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
ndev->name, &id_priv->id);
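
Context note (not part of the patch): the cma.c hunks above drop the per-transport switch and use the generic rdma_addr_get_sgid()/rdma_addr_set_dgid() helpers, and with IPv6 enabled rdma_bind_addr() now rejects link-local addresses whose sin6_scope_id is zero (the scope id is copied into dev_addr->bound_dev_if). A minimal caller sketch using the new accessors; variable names are illustrative:

    union ib_gid sgid, dgid;

    /* the same calls work for both IB and iWARP dev_addr layouts */
    rdma_addr_get_sgid(&id->route.addr.dev_addr, &sgid);
    rdma_addr_get_dgid(&id->route.addr.dev_addr, &dgid);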
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8254371..7e1ffd8c 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -604,6 +604,12 @@ retry:
return ret ? ret : id;
}
+void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
+{
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
+}
+EXPORT_SYMBOL(ib_sa_unpack_path);
+
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
int status,
struct ib_sa_mad *mad)
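
Aside (illustrative only): ib_sa_unpack_path(), exported above, expands a packed on-the-wire SA path record into a struct ib_sa_path_rec. The ucma hunk later in this patch uses it roughly as sketched below (error handling trimmed; names follow that hunk):

    struct ib_sa_path_rec rec;

    /* path_data->path_rec holds the packed SA-format record */
    ib_sa_unpack_path(path_data->path_rec, &rec);
    ret = rdma_set_ib_paths(cm_id, &rec, 1);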
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index bb96d3c..b2e16c3 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -43,6 +43,7 @@
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
+#include <rdma/rdma_cm_ib.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
@@ -562,10 +563,10 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
switch (route->num_paths) {
case 0:
dev_addr = &route->addr.dev_addr;
- ib_addr_get_dgid(dev_addr,
- (union ib_gid *) &resp->ib_route[0].dgid);
- ib_addr_get_sgid(dev_addr,
- (union ib_gid *) &resp->ib_route[0].sgid);
+ rdma_addr_get_dgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].dgid);
+ rdma_addr_get_sgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].sgid);
resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
break;
case 2:
@@ -812,6 +813,51 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
return ret;
}
+static int ucma_set_ib_path(struct ucma_context *ctx,
+ struct ib_path_rec_data *path_data, size_t optlen)
+{
+ struct ib_sa_path_rec sa_path;
+ struct rdma_cm_event event;
+ int ret;
+
+ if (optlen % sizeof(*path_data))
+ return -EINVAL;
+
+ for (; optlen; optlen -= sizeof(*path_data), path_data++) {
+ if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
+ IB_PATH_BIDIRECTIONAL))
+ break;
+ }
+
+ if (!optlen)
+ return -EINVAL;
+
+ ib_sa_unpack_path(path_data->path_rec, &sa_path);
+ ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
+ if (ret)
+ return ret;
+
+ memset(&event, 0, sizeof event);
+ event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ return ucma_event_handler(ctx->cm_id, &event);
+}
+
+static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
+ void *optval, size_t optlen)
+{
+ int ret;
+
+ switch (optname) {
+ case RDMA_OPTION_IB_PATH:
+ ret = ucma_set_ib_path(ctx, optval, optlen);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
static int ucma_set_option_level(struct ucma_context *ctx, int level,
int optname, void *optval, size_t optlen)
{
@@ -821,6 +867,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
case RDMA_OPTION_ID:
ret = ucma_set_option_id(ctx, optname, optval, optlen);
break;
+ case RDMA_OPTION_IB:
+ ret = ucma_set_option_ib(ctx, optname, optval, optlen);
+ break;
default:
ret = -ENOSYS;
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 56feab6..112d397 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -285,7 +285,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
ucontext = ibdev->alloc_ucontext(ibdev, &udata);
if (IS_ERR(ucontext)) {
- ret = PTR_ERR(file->ucontext);
+ ret = PTR_ERR(ucontext);
goto err;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index aec0fbd..5f284ff 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -492,6 +492,7 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
int is_async, int *fd)
{
struct ib_uverbs_event_file *ev_file;
+ struct path path;
struct file *filp;
int ret;
@@ -519,8 +520,10 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
* system call on a uverbs file, which will already have a
* module reference.
*/
- filp = alloc_file(uverbs_event_mnt, dget(uverbs_event_mnt->mnt_root),
- FMODE_READ, fops_get(&uverbs_event_fops));
+ path.mnt = uverbs_event_mnt;
+ path.dentry = uverbs_event_mnt->mnt_root;
+ path_get(&path);
+ filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops));
if (!filp) {
ret = -ENFILE;
goto err_fd;
@@ -531,6 +534,8 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
return filp;
err_fd:
+ fops_put(&uverbs_event_fops);
+ path_put(&path);
put_unused_fd(*fd);
err:
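
Aside (not part of the patch): the uverbs_main.c hunk tracks the VFS API in which alloc_file() takes a struct path rather than a vfsmount/dentry pair; the caller takes the path reference and must drop it if allocation fails, as the new err_fd path does. A condensed sketch of the pattern, assuming a mount 'mnt' and file operations 'fops':

    struct path path;
    struct file *filp;

    path.mnt = mnt;
    path.dentry = mnt->mnt_root;
    path_get(&path);                /* reference is handed to the file on success */

    filp = alloc_file(&path, FMODE_READ, fops);
    if (!filp) {
            path_put(&path);        /* drop it ourselves on failure */
            return ERR_PTR(-ENFILE);
    }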
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index a6d8944..ad51886 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -798,8 +798,10 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
u8 actual_sge_count;
u32 msg_size;
- if (qp->state > IB_QPS_RTS)
- return -EINVAL;
+ if (qp->state > IB_QPS_RTS) {
+ err = -EINVAL;
+ goto out;
+ }
while (ib_wr) {
@@ -930,6 +932,7 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
ib_wr = ib_wr->next;
}
+out:
if (err)
*bad_wr = ib_wr;
return err;
@@ -944,8 +947,10 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
unsigned long lock_flags;
int err = 0;
- if (qp->state > IB_QPS_RTS)
- return -EINVAL;
+ if (qp->state > IB_QPS_RTS) {
+ err = -EINVAL;
+ goto out;
+ }
/*
* Try and post each work request
@@ -998,6 +1003,7 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
ib_wr = ib_wr->next;
}
+out:
if (err)
*bad_wr = ib_wr;
return err;
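
The c2_post_send()/c2_post_receive() hunks above (and the matching cxgb3 and ehca hunks further down) converge on one convention: on any failure, *bad_wr is set to the first work request that was not posted, including early validation failures. A generic sketch of that contract; post_one() and invalid_qp_state() are hypothetical helpers, not part of the patch:

    static int example_post(struct ib_qp *qp, struct ib_send_wr *wr,
                            struct ib_send_wr **bad_wr)
    {
            int err = 0;

            if (invalid_qp_state(qp)) {     /* hypothetical state check */
                    err = -EINVAL;
                    goto out;
            }

            while (wr) {
                    err = post_one(qp, wr); /* hypothetical per-WR helper */
                    if (err)
                            break;
                    wr = wr->next;
            }
    out:
            if (err)
                    *bad_wr = wr;           /* first WR that failed or was never attempted */
            return err;
    }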
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index bfd03bf..f3d440c 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -34,6 +34,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/kfifo.h>
#include "t3_cpl.h"
#include "t3cdev.h"
@@ -75,13 +76,13 @@ struct cxio_hal_ctrl_qp {
};
struct cxio_hal_resource {
- struct kfifo *tpt_fifo;
+ struct kfifo tpt_fifo;
spinlock_t tpt_fifo_lock;
- struct kfifo *qpid_fifo;
+ struct kfifo qpid_fifo;
spinlock_t qpid_fifo_lock;
- struct kfifo *cqid_fifo;
+ struct kfifo cqid_fifo;
spinlock_t cqid_fifo_lock;
- struct kfifo *pdid_fifo;
+ struct kfifo pdid_fifo;
spinlock_t pdid_fifo_lock;
};
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index bd233c0..31f9201 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -39,12 +39,12 @@
#include "cxio_resource.h"
#include "cxio_hal.h"
-static struct kfifo *rhdl_fifo;
+static struct kfifo rhdl_fifo;
static spinlock_t rhdl_fifo_lock;
#define RANDOM_SIZE 16
-static int __cxio_init_resource_fifo(struct kfifo **fifo,
+static int __cxio_init_resource_fifo(struct kfifo *fifo,
spinlock_t *fifo_lock,
u32 nr, u32 skip_low,
u32 skip_high,
@@ -55,12 +55,11 @@ static int __cxio_init_resource_fifo(struct kfifo **fifo,
u32 rarray[16];
spin_lock_init(fifo_lock);
- *fifo = kfifo_alloc(nr * sizeof(u32), GFP_KERNEL, fifo_lock);
- if (IS_ERR(*fifo))
+ if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
return -ENOMEM;
for (i = 0; i < skip_low + skip_high; i++)
- __kfifo_put(*fifo, (unsigned char *) &entry, sizeof(u32));
+ kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
if (random) {
j = 0;
random_bytes = random32();
@@ -72,33 +71,35 @@ static int __cxio_init_resource_fifo(struct kfifo **fifo,
random_bytes = random32();
}
idx = (random_bytes >> (j * 2)) & 0xF;
- __kfifo_put(*fifo,
+ kfifo_in(fifo,
(unsigned char *) &rarray[idx],
sizeof(u32));
rarray[idx] = i;
j++;
}
for (i = 0; i < RANDOM_SIZE; i++)
- __kfifo_put(*fifo,
+ kfifo_in(fifo,
(unsigned char *) &rarray[i],
sizeof(u32));
} else
for (i = skip_low; i < nr - skip_high; i++)
- __kfifo_put(*fifo, (unsigned char *) &i, sizeof(u32));
+ kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
for (i = 0; i < skip_low + skip_high; i++)
- kfifo_get(*fifo, (unsigned char *) &entry, sizeof(u32));
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry,
+ sizeof(u32), fifo_lock) != sizeof(u32))
+ break;
return 0;
}
-static int cxio_init_resource_fifo(struct kfifo **fifo, spinlock_t * fifo_lock,
+static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
u32 nr, u32 skip_low, u32 skip_high)
{
return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
skip_high, 0));
}
-static int cxio_init_resource_fifo_random(struct kfifo **fifo,
+static int cxio_init_resource_fifo_random(struct kfifo *fifo,
spinlock_t * fifo_lock,
u32 nr, u32 skip_low, u32 skip_high)
{
@@ -113,15 +114,13 @@ static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
- rdev_p->rscp->qpid_fifo = kfifo_alloc(T3_MAX_NUM_QP * sizeof(u32),
- GFP_KERNEL,
- &rdev_p->rscp->qpid_fifo_lock);
- if (IS_ERR(rdev_p->rscp->qpid_fifo))
+ if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
+ GFP_KERNEL))
return -ENOMEM;
for (i = 16; i < T3_MAX_NUM_QP; i++)
if (!(i & rdev_p->qpmask))
- __kfifo_put(rdev_p->rscp->qpid_fifo,
+ kfifo_in(&rdev_p->rscp->qpid_fifo,
(unsigned char *) &i, sizeof(u32));
return 0;
}
@@ -134,7 +133,7 @@ int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
void cxio_hal_destroy_rhdl_resource(void)
{
- kfifo_free(rhdl_fifo);
+ kfifo_free(&rhdl_fifo);
}
/* nr_* must be power of 2 */
@@ -167,11 +166,11 @@ int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
goto pdid_err;
return 0;
pdid_err:
- kfifo_free(rscp->cqid_fifo);
+ kfifo_free(&rscp->cqid_fifo);
cqid_err:
- kfifo_free(rscp->qpid_fifo);
+ kfifo_free(&rscp->qpid_fifo);
qpid_err:
- kfifo_free(rscp->tpt_fifo);
+ kfifo_free(&rscp->tpt_fifo);
tpt_err:
return -ENOMEM;
}
@@ -179,33 +178,37 @@ tpt_err:
/*
* returns 0 if no resource available
*/
-static u32 cxio_hal_get_resource(struct kfifo *fifo)
+static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock)
{
u32 entry;
- if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32)))
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
return entry;
else
return 0; /* fifo empty */
}
-static void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
+static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock,
+ u32 entry)
{
- BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0);
+ BUG_ON(
+ kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)
+ == 0);
}
u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
{
- return cxio_hal_get_resource(rscp->tpt_fifo);
+ return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock);
}
void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
{
- cxio_hal_put_resource(rscp->tpt_fifo, stag);
+ cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag);
}
u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
{
- u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo);
+ u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo,
+ &rscp->qpid_fifo_lock);
PDBG("%s qpid 0x%x\n", __func__, qpid);
return qpid;
}
@@ -213,35 +216,35 @@ u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
{
PDBG("%s qpid 0x%x\n", __func__, qpid);
- cxio_hal_put_resource(rscp->qpid_fifo, qpid);
+ cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid);
}
u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
{
- return cxio_hal_get_resource(rscp->cqid_fifo);
+ return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock);
}
void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
{
- cxio_hal_put_resource(rscp->cqid_fifo, cqid);
+ cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid);
}
u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
{
- return cxio_hal_get_resource(rscp->pdid_fifo);
+ return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock);
}
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
{
- cxio_hal_put_resource(rscp->pdid_fifo, pdid);
+ cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid);
}
void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
{
- kfifo_free(rscp->tpt_fifo);
- kfifo_free(rscp->cqid_fifo);
- kfifo_free(rscp->qpid_fifo);
- kfifo_free(rscp->pdid_fifo);
+ kfifo_free(&rscp->tpt_fifo);
+ kfifo_free(&rscp->cqid_fifo);
+ kfifo_free(&rscp->qpid_fifo);
+ kfifo_free(&rscp->pdid_fifo);
kfree(rscp);
}
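
For reference (illustrative, mirroring the calls used in the hunks above): with the reworked kfifo API the fifo is embedded by value and locking is supplied by the caller through the *_locked variants. A minimal usage sketch:

    struct kfifo fifo;
    spinlock_t lock;
    u32 entry = 42;

    spin_lock_init(&lock);
    if (kfifo_alloc(&fifo, 128 * sizeof(u32), GFP_KERNEL))
            return -ENOMEM;

    kfifo_in_locked(&fifo, (unsigned char *) &entry, sizeof(u32), &lock);

    if (kfifo_out_locked(&fifo, (unsigned char *) &entry,
                         sizeof(u32), &lock) != sizeof(u32))
            ;       /* fifo was empty */

    kfifo_free(&fifo);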
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 1cecf98..3eb8cec 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -365,18 +365,19 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (qhp->attr.state > IWCH_QP_STATE_RTS) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
qhp->wq.sq_size_log2);
if (num_wrs <= 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
while (wr) {
if (num_wrs == 0) {
err = -ENOMEM;
- *bad_wr = wr;
break;
}
idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -428,10 +429,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wr->opcode);
err = -EINVAL;
}
- if (err) {
- *bad_wr = wr;
+ if (err)
break;
- }
wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
sqp->wr_id = wr->wr_id;
sqp->opcode = wr2opcode(t3_wr_opcode);
@@ -454,6 +453,10 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
spin_unlock_irqrestore(&qhp->lock, flag);
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+
+out:
+ if (err)
+ *bad_wr = wr;
return err;
}
@@ -471,18 +474,19 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (qhp->attr.state > IWCH_QP_STATE_RTS) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
qhp->wq.rq_size_log2) - 1;
if (!wr) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ err = -ENOMEM;
+ goto out;
}
while (wr) {
if (wr->num_sge > T3_MAX_SGE) {
err = -EINVAL;
- *bad_wr = wr;
break;
}
idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -494,10 +498,10 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
err = build_zero_stag_recv(qhp, wqe, wr);
else
err = -ENOMEM;
- if (err) {
- *bad_wr = wr;
+
+ if (err)
break;
- }
+
build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
@@ -511,6 +515,10 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
spin_unlock_irqrestore(&qhp->lock, flag);
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+
+out:
+ if (err)
+ *bad_wr = wr;
return err;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index c825142..0136abd 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -375,6 +375,7 @@ extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;
+extern spinlock_t shca_list_lock;
extern int ehca_static_rate;
extern int ehca_port_act_time;
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 523e733c..3b87589 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -169,12 +169,15 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
unsigned long flags;
u64 h_ret;
- spin_lock_irqsave(&eq->spinlock, flags);
ibmebus_free_irq(eq->ist, (void *)shca);
- h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
+ spin_lock_irqsave(&shca_list_lock, flags);
+ eq->is_initialized = 0;
+ spin_unlock_irqrestore(&shca_list_lock, flags);
- spin_unlock_irqrestore(&eq->spinlock, flags);
+ tasklet_kill(&eq->interrupt_task);
+
+ h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't free EQ resources.");
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fb2d83c..129a6be 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -123,7 +123,7 @@ DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);
static LIST_HEAD(shca_list); /* list of all registered ehcas */
-static DEFINE_SPINLOCK(shca_list_lock);
+DEFINE_SPINLOCK(shca_list_lock);
static struct timer_list poll_eqs_timer;
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 8fd88cd..e3ec7fd 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -400,7 +400,6 @@ static inline void map_ib_wc_status(u32 cqe_status,
static inline int post_one_send(struct ehca_qp *my_qp,
struct ib_send_wr *cur_send_wr,
- struct ib_send_wr **bad_send_wr,
int hidden)
{
struct ehca_wqe *wqe_p;
@@ -412,8 +411,6 @@ static inline int post_one_send(struct ehca_qp *my_qp,
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
- if (bad_send_wr)
- *bad_send_wr = cur_send_wr;
ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -ENOMEM;
@@ -433,8 +430,6 @@ static inline int post_one_send(struct ehca_qp *my_qp,
*/
if (unlikely(ret)) {
my_qp->ipz_squeue.current_q_offset = start_offset;
- if (bad_send_wr)
- *bad_send_wr = cur_send_wr;
ehca_err(my_qp->ib_qp.device, "Could not write WQE "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -EINVAL;
@@ -448,7 +443,6 @@ int ehca_post_send(struct ib_qp *qp,
struct ib_send_wr **bad_send_wr)
{
struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
- struct ib_send_wr *cur_send_wr;
int wqe_cnt = 0;
int ret = 0;
unsigned long flags;
@@ -457,7 +451,8 @@ int ehca_post_send(struct ib_qp *qp,
if (unlikely(my_qp->state < IB_QPS_RTS)) {
ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
my_qp->state, qp->qp_num);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* LOCK the QUEUE */
@@ -476,24 +471,21 @@ int ehca_post_send(struct ib_qp *qp,
struct ib_send_wr circ_wr;
memset(&circ_wr, 0, sizeof(circ_wr));
circ_wr.opcode = IB_WR_RDMA_READ;
- post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
+ post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
wqe_cnt++;
ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
my_qp->message_count = my_qp->packet_count = 0;
}
/* loop processes list of send reqs */
- for (cur_send_wr = send_wr; cur_send_wr != NULL;
- cur_send_wr = cur_send_wr->next) {
- ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
+ while (send_wr) {
+ ret = post_one_send(my_qp, send_wr, 0);
if (unlikely(ret)) {
- /* if one or more WQEs were successful, don't fail */
- if (wqe_cnt)
- ret = 0;
goto post_send_exit0;
}
wqe_cnt++;
- } /* eof for cur_send_wr */
+ send_wr = send_wr->next;
+ }
post_send_exit0:
iosync(); /* serialize GAL register access */
@@ -503,6 +495,10 @@ post_send_exit0:
my_qp, qp->qp_num, wqe_cnt, ret);
my_qp->message_count += wqe_cnt;
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
+
+out:
+ if (ret)
+ *bad_send_wr = send_wr;
return ret;
}
@@ -511,7 +507,6 @@ static int internal_post_recv(struct ehca_qp *my_qp,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr)
{
- struct ib_recv_wr *cur_recv_wr;
struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
@@ -522,27 +517,23 @@ static int internal_post_recv(struct ehca_qp *my_qp,
if (unlikely(!HAS_RQ(my_qp))) {
ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
my_qp, my_qp->real_qp_num, my_qp->ext_type);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
/* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_r, flags);
- /* loop processes list of send reqs */
- for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
- cur_recv_wr = cur_recv_wr->next) {
+ /* loop processes list of recv reqs */
+ while (recv_wr) {
u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
/* get pointer next to free WQE */
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
- if (bad_recv_wr)
- *bad_recv_wr = cur_recv_wr;
- if (wqe_cnt == 0) {
- ret = -ENOMEM;
- ehca_err(dev, "Too many posted WQEs "
- "qp_num=%x", my_qp->real_qp_num);
- }
+ ret = -ENOMEM;
+ ehca_err(dev, "Too many posted WQEs "
+ "qp_num=%x", my_qp->real_qp_num);
goto post_recv_exit0;
}
/*
@@ -552,7 +543,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
/* write a RECV WQE into the QUEUE */
- ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr,
+ ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
rq_map_idx);
/*
* if something failed,
@@ -560,22 +551,20 @@ static int internal_post_recv(struct ehca_qp *my_qp,
*/
if (unlikely(ret)) {
my_qp->ipz_rqueue.current_q_offset = start_offset;
- *bad_recv_wr = cur_recv_wr;
- if (wqe_cnt == 0) {
- ret = -EINVAL;
- ehca_err(dev, "Could not write WQE "
- "qp_num=%x", my_qp->real_qp_num);
- }
+ ret = -EINVAL;
+ ehca_err(dev, "Could not write WQE "
+ "qp_num=%x", my_qp->real_qp_num);
goto post_recv_exit0;
}
qmap_entry = &my_qp->rq_map.map[rq_map_idx];
- qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
+ qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
qmap_entry->reported = 0;
qmap_entry->cqe_req = 1;
wqe_cnt++;
- } /* eof for cur_recv_wr */
+ recv_wr = recv_wr->next;
+ } /* eof for recv_wr */
post_recv_exit0:
iosync(); /* serialize GAL register access */
@@ -584,6 +573,11 @@ post_recv_exit0:
ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
my_qp, my_qp->real_qp_num, wqe_cnt, ret);
spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
+
+out:
+ if (ret)
+ *bad_recv_wr = recv_wr;
+
return ret;
}
@@ -597,6 +591,7 @@ int ehca_post_recv(struct ib_qp *qp,
if (unlikely(my_qp->state == IB_QPS_RESET)) {
ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
my_qp->state, qp->qp_num);
+ *bad_recv_wr = recv_wr;
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 013d1380..d2787fe 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -39,6 +39,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include "ipath_kernel.h"
#include "ipath_verbs.h"
@@ -1697,7 +1698,7 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
unsigned len, int avail)
{
unsigned long flags;
- unsigned end, cnt = 0, next;
+ unsigned end, cnt = 0;
/* There are two bits per send buffer (busy and generation) */
start *= 2;
@@ -1748,12 +1749,7 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
if (dd->ipath_pioupd_thresh) {
end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
- next = find_first_bit(dd->ipath_pioavailkernel, end);
- while (next < end) {
- cnt++;
- next = find_next_bit(dd->ipath_pioavailkernel, end,
- next + 1);
- }
+ cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
}
spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
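
Aside: the loop removed above counted set bits by walking the bitmap with find_first_bit()/find_next_bit(); bitmap_weight() (from <linux/bitmap.h>, included at the top of this hunk) returns the same count in one call. Illustrative comparison, with 'map' and 'end' standing in for the driver's bitmap and bit count:

    /* old: walk the bitmap bit by bit */
    for (next = find_first_bit(map, end); next < end;
         next = find_next_bit(map, end, next + 1))
            cnt++;

    /* new: one call, same result */
    cnt = bitmap_weight(map, end);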
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index b368406..100da85 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -346,10 +346,8 @@ static int ipathfs_fill_super(struct super_block *sb, void *data,
list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
spin_unlock_irqrestore(&ipath_devs_lock, flags);
ret = create_device_files(sb, dd);
- if (ret) {
- deactivate_locked_super(sb);
+ if (ret)
goto bail;
- }
spin_lock_irqsave(&ipath_devs_lock, flags);
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3cb3f47..e596537 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -103,7 +103,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
- if (dev->dev->caps.max_gso_sz)
+ if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
props->device_cap_flags |= IB_DEVICE_UD_TSO;
if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 256a00c..2a97c96 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -54,7 +54,8 @@ enum {
/*
* Largest possible UD header: send with GRH and immediate data.
*/
- MLX4_IB_UD_HEADER_SIZE = 72
+ MLX4_IB_UD_HEADER_SIZE = 72,
+ MLX4_IB_LSO_HEADER_SPARE = 128,
};
struct mlx4_ib_sqp {
@@ -67,7 +68,8 @@ struct mlx4_ib_sqp {
};
enum {
- MLX4_IB_MIN_SQ_STRIDE = 6
+ MLX4_IB_MIN_SQ_STRIDE = 6,
+ MLX4_IB_CACHE_LINE_SIZE = 64,
};
static const __be32 mlx4_ib_opcode[] = {
@@ -261,7 +263,7 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
case IB_QPT_UD:
return sizeof (struct mlx4_wqe_ctrl_seg) +
sizeof (struct mlx4_wqe_datagram_seg) +
- ((flags & MLX4_IB_QP_LSO) ? 64 : 0);
+ ((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
case IB_QPT_UC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
@@ -897,7 +899,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
(to_mlx4_st(ibqp->qp_type) << 16));
- context->flags |= cpu_to_be32(1 << 8); /* DE? */
if (!(attr_mask & IB_QP_PATH_MIG_STATE))
context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
@@ -1467,16 +1468,12 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
- __be32 *lso_hdr_sz)
+ __be32 *lso_hdr_sz, __be32 *blh)
{
unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
- /*
- * This is a temporary limitation and will be removed in
- * a forthcoming FW release:
- */
- if (unlikely(halign > 64))
- return -EINVAL;
+ if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
+ *blh = cpu_to_be32(1 << 6);
if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
wr->num_sge > qp->sq.max_gs - (halign >> 4)))
@@ -1522,6 +1519,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
__be32 dummy;
__be32 *lso_wqe;
__be32 uninitialized_var(lso_hdr_sz);
+ __be32 blh;
int i;
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1530,6 +1528,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
for (nreq = 0; wr; ++nreq, wr = wr->next) {
lso_wqe = &dummy;
+ blh = 0;
if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
err = -ENOMEM;
@@ -1616,7 +1615,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
if (wr->opcode == IB_WR_LSO) {
- err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz);
+ err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -1687,7 +1686,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
- (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
+ (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
stamp = ind + qp->sq_spare_wqes;
ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
@@ -1753,7 +1752,7 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) {
+ if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
err = -ENOMEM;
*bad_wr = wr;
goto out;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index d425652..cf8085b 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -74,6 +74,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
struct mlx4_ib_dev *dev = to_mdev(pd->device);
struct mlx4_ib_srq *srq;
struct mlx4_wqe_srq_next_seg *next;
+ struct mlx4_wqe_data_seg *scatter;
int desc_size;
int buf_size;
int err;
@@ -149,6 +150,11 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
next = get_wqe(srq, i);
next->next_wqe_index =
cpu_to_be16((i + 1) & (srq->msrq.max - 1));
+
+ for (scatter = (void *) (next + 1);
+ (void *) scatter < (void *) next + desc_size;
+ ++scatter)
+ scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
}
err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
index d449eb6..846dc97 100644
--- a/drivers/infiniband/hw/nes/Kconfig
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -4,14 +4,13 @@ config INFINIBAND_NES
select LIBCRC32C
select INET_LRO
---help---
- This is a low-level driver for NetEffect RDMA enabled
- Network Interface Cards (RNIC).
+ This is the RDMA Network Interface Card (RNIC) driver for
+ NetEffect Ethernet Cluster Server Adapters.
config INFINIBAND_NES_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_NES
default n
---help---
- This option causes the NetEffect RNIC driver to produce debug
- messages. Select this if you are developing the driver
- or trying to diagnose a problem.
+ This option enables debug messages from the NetEffect RNIC
+ driver. Select this if you are diagnosing a problem.
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index cbde0cf..b9d09ba 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -521,7 +521,8 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
spin_lock_init(&nesdev->indexed_regs_lock);
/* Remap the PCI registers in adapter BAR0 to kernel VA space */
- mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), sizeof(mmio_regs));
+ mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0),
+ pci_resource_len(pcidev, BAR_0));
if (mmio_regs == NULL) {
printk(KERN_ERR PFX "Unable to remap BAR0\n");
ret = -EIO;
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bcc6abc..9884056 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 73473db..39468c27 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -52,6 +52,7 @@
#include <linux/random.h>
#include <linux/list.h>
#include <linux/threads.h>
+#include <linux/highmem.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
@@ -251,6 +252,33 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
mpa_frame = (struct ietf_mpa_frame *)buffer;
cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
+ /* make sure mpa private data len is less than 512 bytes */
+ if (cm_node->mpa_frame_size > IETF_MAX_PRIV_DATA_LEN) {
+ nes_debug(NES_DBG_CM, "The received Length of Private"
+ " Data field exceeds 512 octets\n");
+ return -EINVAL;
+ }
+ /*
+ * make sure the MPA receiver can interoperate with the
+ * received MPA version and MPA key information
+ */
+ if (mpa_frame->rev != mpa_version) {
+ nes_debug(NES_DBG_CM, "The received mpa version"
+ " can not be interoperated\n");
+ return -EINVAL;
+ }
+ if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
+ nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
+ return -EINVAL;
+ }
+ } else {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
+ nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
+ return -EINVAL;
+ }
+ }
if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
@@ -486,6 +514,8 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
send_reset(cm_node, NULL);
break;
default:
+ add_ref_cm_node(cm_node);
+ send_reset(cm_node, NULL);
create_event(cm_node, NES_CM_EVENT_ABORTED);
}
}
@@ -949,6 +979,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
reset_entry);
{
struct nes_cm_node *loopback = cm_node->loopbackpartner;
+ enum nes_cm_node_state old_state;
if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) {
rem_ref_cm_node(cm_node->cm_core, cm_node);
} else {
@@ -960,11 +991,12 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
NES_CM_STATE_CLOSED;
WARN_ON(1);
} else {
- cm_node->state =
- NES_CM_STATE_CLOSED;
- rem_ref_cm_node(
- cm_node->cm_core,
- cm_node);
+ old_state = cm_node->state;
+ cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
+ if (old_state != NES_CM_STATE_MPAREQ_RCVD)
+ rem_ref_cm_node(
+ cm_node->cm_core,
+ cm_node);
}
} else {
struct nes_cm_event event;
@@ -980,20 +1012,9 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
loopback->loc_port;
event.cm_info.cm_id = loopback->cm_id;
cm_event_connect_error(&event);
+ cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
loopback->state = NES_CM_STATE_CLOSED;
- event.cm_node = cm_node;
- event.cm_info.rem_addr =
- cm_node->rem_addr;
- event.cm_info.loc_addr =
- cm_node->loc_addr;
- event.cm_info.rem_port =
- cm_node->rem_port;
- event.cm_info.loc_port =
- cm_node->loc_port;
- event.cm_info.cm_id = cm_node->cm_id;
- cm_event_reset(&event);
-
rem_ref_cm_node(cm_node->cm_core,
cm_node);
@@ -1077,12 +1098,13 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
/**
* nes_addr_resolve_neigh
*/
-static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
+static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex)
{
struct rtable *rt;
struct flowi fl;
struct neighbour *neigh;
- int rc = -1;
+ int rc = arpindex;
+ struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = htonl(dst_ip);
@@ -1098,6 +1120,21 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
" is %pM, Gateway is 0x%08X \n", dst_ip,
neigh->ha, ntohl(rt->rt_gateway));
+
+ if (arpindex >= 0) {
+ if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
+ neigh->ha, ETH_ALEN)){
+ /* Mac address same as in nes_arp_table */
+ neigh_release(neigh);
+ ip_rt_put(rt);
+ return rc;
+ }
+
+ nes_manage_arp_cache(nesvnic->netdev,
+ nesadapter->arp_table[arpindex].mac_addr,
+ dst_ip, NES_ARP_DELETE);
+ }
+
nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
dst_ip, NES_ARP_ADD);
rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
@@ -1113,7 +1150,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
return rc;
}
-
/**
* make_cm_node - create a new instance of a cm node
*/
@@ -1123,6 +1159,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
{
struct nes_cm_node *cm_node;
struct timespec ts;
+ int oldarpindex = 0;
int arpindex = 0;
struct nes_device *nesdev;
struct nes_adapter *nesadapter;
@@ -1176,17 +1213,18 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
nesadapter = nesdev->nesadapter;
cm_node->loopbackpartner = NULL;
+
/* get the mac addr for the remote node */
if (ipv4_is_loopback(htonl(cm_node->rem_addr)))
arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE);
- else
- arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
+ else {
+ oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
+ arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
+
+ }
if (arpindex < 0) {
- arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr);
- if (arpindex < 0) {
- kfree(cm_node);
- return NULL;
- }
+ kfree(cm_node);
+ return NULL;
}
/* copy the mac addr to node context */
@@ -1333,13 +1371,20 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node)
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_SYN_SENT:
case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_MPAREQ_SENT:
case NES_CM_STATE_MPAREJ_RCVD:
cm_node->tcp_cntxt.rcv_nxt++;
cleanup_retrans_entry(cm_node);
cm_node->state = NES_CM_STATE_LAST_ACK;
send_fin(cm_node, NULL);
break;
+ case NES_CM_STATE_MPAREQ_SENT:
+ create_event(cm_node, NES_CM_EVENT_ABORTED);
+ cm_node->tcp_cntxt.rcv_nxt++;
+ cleanup_retrans_entry(cm_node);
+ cm_node->state = NES_CM_STATE_CLOSED;
+ add_ref_cm_node(cm_node);
+ send_reset(cm_node, NULL);
+ break;
case NES_CM_STATE_FIN_WAIT1:
cm_node->tcp_cntxt.rcv_nxt++;
cleanup_retrans_entry(cm_node);
@@ -1590,6 +1635,7 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
break;
case NES_CM_STATE_CLOSED:
cleanup_retrans_entry(cm_node);
+ add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
break;
case NES_CM_STATE_TSA:
@@ -1641,9 +1687,15 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
passive_open_err(cm_node, skb, 1);
break;
case NES_CM_STATE_LISTENING:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ cleanup_retrans_entry(cm_node);
+ cm_node->state = NES_CM_STATE_CLOSED;
+ send_reset(cm_node, skb);
+ break;
case NES_CM_STATE_CLOSED:
cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
cleanup_retrans_entry(cm_node);
+ add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
break;
case NES_CM_STATE_ESTABLISHED:
@@ -1712,8 +1764,13 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
dev_kfree_skb_any(skb);
break;
case NES_CM_STATE_LISTENING:
+ cleanup_retrans_entry(cm_node);
+ cm_node->state = NES_CM_STATE_CLOSED;
+ send_reset(cm_node, skb);
+ break;
case NES_CM_STATE_CLOSED:
cleanup_retrans_entry(cm_node);
+ add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
break;
case NES_CM_STATE_LAST_ACK:
@@ -1974,7 +2031,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
if (!cm_node)
return NULL;
mpa_frame = &cm_node->mpa_frame;
- strcpy(mpa_frame->key, IEFT_MPA_KEY_REQ);
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
mpa_frame->flags = IETF_MPA_FLAGS_CRC;
mpa_frame->rev = IETF_MPA_VERSION;
mpa_frame->priv_data_len = htons(private_data_len);
@@ -2102,30 +2159,39 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
cm_node->state = NES_CM_STATE_CLOSED;
rem_ref_cm_node(cm_core, cm_node);
} else {
- ret = send_mpa_reject(cm_node);
- if (ret) {
- cm_node->state = NES_CM_STATE_CLOSED;
- err = send_reset(cm_node, NULL);
- if (err)
- WARN_ON(1);
- } else
- cm_id->add_ref(cm_id);
+ if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
+ rem_ref_cm_node(cm_core, cm_node);
+ } else {
+ ret = send_mpa_reject(cm_node);
+ if (ret) {
+ cm_node->state = NES_CM_STATE_CLOSED;
+ err = send_reset(cm_node, NULL);
+ if (err)
+ WARN_ON(1);
+ } else
+ cm_id->add_ref(cm_id);
+ }
}
} else {
cm_node->cm_id = NULL;
- event.cm_node = loopback;
- event.cm_info.rem_addr = loopback->rem_addr;
- event.cm_info.loc_addr = loopback->loc_addr;
- event.cm_info.rem_port = loopback->rem_port;
- event.cm_info.loc_port = loopback->loc_port;
- event.cm_info.cm_id = loopback->cm_id;
- cm_event_mpa_reject(&event);
- rem_ref_cm_node(cm_core, cm_node);
- loopback->state = NES_CM_STATE_CLOSING;
+ if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
+ rem_ref_cm_node(cm_core, cm_node);
+ rem_ref_cm_node(cm_core, loopback);
+ } else {
+ event.cm_node = loopback;
+ event.cm_info.rem_addr = loopback->rem_addr;
+ event.cm_info.loc_addr = loopback->loc_addr;
+ event.cm_info.rem_port = loopback->rem_port;
+ event.cm_info.loc_port = loopback->loc_port;
+ event.cm_info.cm_id = loopback->cm_id;
+ cm_event_mpa_reject(&event);
+ rem_ref_cm_node(cm_core, cm_node);
+ loopback->state = NES_CM_STATE_CLOSING;
- cm_id = loopback->cm_id;
- rem_ref_cm_node(cm_core, loopback);
- cm_id->rem_ref(cm_id);
+ cm_id = loopback->cm_id;
+ rem_ref_cm_node(cm_core, loopback);
+ cm_id->rem_ref(cm_id);
+ }
}
return ret;
@@ -2164,11 +2230,15 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
case NES_CM_STATE_CLOSING:
ret = -1;
break;
- case NES_CM_STATE_MPAREJ_RCVD:
case NES_CM_STATE_LISTENING:
+ cleanup_retrans_entry(cm_node);
+ send_reset(cm_node, NULL);
+ break;
+ case NES_CM_STATE_MPAREJ_RCVD:
case NES_CM_STATE_UNKNOWN:
case NES_CM_STATE_INITED:
case NES_CM_STATE_CLOSED:
+ case NES_CM_STATE_LISTENER_DESTROYED:
ret = rem_ref_cm_node(cm_core, cm_node);
break;
case NES_CM_STATE_TSA:
@@ -2687,8 +2757,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct nes_pd *nespd;
u64 tagged_offset;
-
-
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
return -EINVAL;
@@ -2704,6 +2772,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
"%s\n", cm_node, nesvnic, nesvnic->netdev,
nesvnic->netdev->name);
+ if (NES_CM_STATE_LISTENER_DESTROYED == cm_node->state) {
+ if (cm_node->loopbackpartner)
+ rem_ref_cm_node(cm_node->cm_core, cm_node->loopbackpartner);
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ return -EINVAL;
+ }
+
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
cm_node->nesqp = nesqp;
@@ -2786,6 +2861,10 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cpu_to_le32(conn_param->private_data_len +
sizeof(struct ietf_mpa_frame));
wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
+ if (nesqp->sq_kmapped) {
+ nesqp->sq_kmapped = 0;
+ kunmap(nesqp->page);
+ }
nesqp->nesqp_context->ird_ord_sizes |=
cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
@@ -2929,7 +3008,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
if (cm_node->mpa_frame_size > MAX_CM_BUFFER)
return -EINVAL;
- strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP);
+ memcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
if (loopback) {
memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
loopback->mpa_frame.priv_data_len = pdata_len;
@@ -2974,6 +3053,9 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!nesdev)
return -EINVAL;
+ if (!(cm_id->local_addr.sin_port) || !(cm_id->remote_addr.sin_port))
+ return -EINVAL;
+
nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
"0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
ntohl(nesvnic->local_ipaddr),
@@ -3251,6 +3333,11 @@ static void cm_event_connected(struct nes_cm_event *event)
wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+ if (nesqp->sq_kmapped) {
+ nesqp->sq_kmapped = 0;
+ kunmap(nesqp->page);
+ }
+
/* use the reserved spot on the WQ for the extra first WQE */
nesqp->nesqp_context->ird_ord_sizes &=
cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
@@ -3346,7 +3433,7 @@ static void cm_event_connect_error(struct nes_cm_event *event)
nesqp->cm_id = NULL;
cm_id->provider_data = NULL;
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- cm_event.status = IW_CM_EVENT_STATUS_REJECTED;
+ cm_event.status = -ECONNRESET;
cm_event.provider_data = cm_id->provider_data;
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
@@ -3390,6 +3477,8 @@ static void cm_event_reset(struct nes_cm_event *event)
nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id);
nesqp = cm_id->provider_data;
+ if (!nesqp)
+ return;
nesqp->cm_id = NULL;
/* cm_id->provider_data = NULL; */
@@ -3401,8 +3490,8 @@ static void cm_event_reset(struct nes_cm_event *event)
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
- ret = cm_id->event_handler(cm_id, &cm_event);
cm_id->add_ref(cm_id);
+ ret = cm_id->event_handler(cm_id, &cm_event);
atomic_inc(&cm_closes);
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = IW_CM_EVENT_STATUS_OK;
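
Note (not part of the patch text): the MPA key strings are exactly IETF_MPA_KEY_SIZE (16) bytes with no NUL terminator, so the nes_cm.c hunks above switch from strcpy() to memcpy() to avoid writing one byte past the fixed-size key field:

    /* key[] is a fixed 16-byte field in struct ietf_mpa_frame */
    memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);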
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 90e8e4d..d9825fd 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -47,6 +47,8 @@
#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
#define IETF_MPA_KEY_SIZE 16
#define IETF_MPA_VERSION 1
+#define IETF_MAX_PRIV_DATA_LEN 512
+#define IETF_MPA_FRAME_SIZE 20
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
@@ -169,7 +171,7 @@ struct nes_timer_entry {
#define NES_CM_DEF_SEQ2 0x18ed5740
#define NES_CM_DEF_LOCAL_ID2 0xb807
-#define MAX_CM_BUFFER 512
+#define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_MAX_PRIV_DATA_LEN)
typedef u32 nes_addr_t;
@@ -198,6 +200,7 @@ enum nes_cm_node_state {
NES_CM_STATE_TIME_WAIT,
NES_CM_STATE_LAST_ACK,
NES_CM_STATE_CLOSING,
+ NES_CM_STATE_LISTENER_DESTROYED,
NES_CM_STATE_CLOSED
};
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
index 0fb8d81..b4393a1 100644
--- a/drivers/infiniband/hw/nes/nes_context.h
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 3512d6d..b1c2cbb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -424,8 +424,9 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->base_pd = 1;
- nesadapter->device_cap_flags =
- IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+ nesadapter->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+ IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
[(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
@@ -436,11 +437,12 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
- /* mark the usual suspect QPs and CQs as in use */
+ /* mark the usual suspect QPs, MR and CQs as in use */
for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) {
set_bit(u32temp, nesadapter->allocated_qps);
set_bit(u32temp, nesadapter->allocated_cqs);
}
+ set_bit(0, nesadapter->allocated_mrs);
for (u32temp = 0; u32temp < 20; u32temp++)
set_bit(u32temp, nesadapter->allocated_pds);
@@ -481,7 +483,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->max_irrq_wr = (u32temp >> 16) & 3;
nesadapter->max_sge = 4;
- nesadapter->max_cqe = 32767;
+ nesadapter->max_cqe = 32766;
if (nes_read_eeprom_values(nesdev, nesadapter)) {
printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
@@ -1355,6 +1357,8 @@ int nes_init_phy(struct nes_device *nesdev)
}
if ((phy_type == NES_PHY_TYPE_ARGUS) ||
(phy_type == NES_PHY_TYPE_SFP_D)) {
+ u32 first_time = 1;
+
/* Check firmware heartbeat */
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
@@ -1362,8 +1366,13 @@ int nes_init_phy(struct nes_device *nesdev)
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if (temp_phy_data != temp_phy_data2)
- return 0;
+ if (temp_phy_data != temp_phy_data2) {
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ if ((temp_phy_data & 0xff) > 0x20)
+ return 0;
+ printk(PFX "Reinitializing PHY\n");
+ }
/* no heartbeat, configure the PHY */
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
@@ -1399,7 +1408,7 @@ int nes_init_phy(struct nes_device *nesdev)
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
do {
if (counter++ > 150) {
- nes_debug(NES_DBG_PHY, "No PHY heartbeat\n");
+ printk(PFX "No PHY heartbeat\n");
break;
}
mdelay(1);
@@ -1413,11 +1422,20 @@ int nes_init_phy(struct nes_device *nesdev)
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
if (counter++ > 300) {
- nes_debug(NES_DBG_PHY, "PHY did not track\n");
- break;
+ if (((temp_phy_data & 0xff) == 0x0) && first_time) {
+ first_time = 0;
+ counter = 0;
+ /* reset AMCC PHY and try again */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
+ continue;
+ } else {
+ printk(PFX "PHY did not track\n");
+ break;
+ }
}
mdelay(10);
- } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70));
+ } while ((temp_phy_data & 0xff) < 0x30);
/* setup signal integrity */
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index f28a41b..084be0e 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+* Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -546,11 +546,23 @@ enum nes_iwarp_sq_fmr_wqe_word_idx {
NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
};
+enum nes_iwarp_sq_fmr_opcodes {
+ NES_IWARP_SQ_FMR_WQE_ZERO_BASED = (1<<6),
+ NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K = (0<<7),
+ NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M = (1<<7),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ = (1<<16),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE = (1<<17),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ = (1<<18),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE = (1<<19),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND = (1<<20),
+};
+
+#define NES_IWARP_SQ_FMR_WQE_MR_LENGTH_HIGH_MASK 0xFF;
+
enum nes_iwarp_sq_locinv_wqe_word_idx {
NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
};
-
enum nes_iwarp_rq_wqe_word_idx {
NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
@@ -1153,6 +1165,19 @@ struct nes_pbl {
/* TODO: need to add list for two level tables */
};
+#define NES_4K_PBL_CHUNK_SIZE 4096
+
+struct nes_fast_mr_wqe_pbl {
+ u64 *kva;
+ dma_addr_t paddr;
+};
+
+struct nes_ib_fast_reg_page_list {
+ struct ib_fast_reg_page_list ibfrpl;
+ struct nes_fast_mr_wqe_pbl nes_wqe_pbl;
+ u64 pbl;
+};
+
struct nes_listener {
struct work_struct work;
struct workqueue_struct *wq;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index de18fdf..ab11027 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
index cc90c14..71e133a 100644
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
@@ -86,6 +86,7 @@ enum iwnes_memreg_type {
IWNES_MEMREG_TYPE_CQ = 0x0002,
IWNES_MEMREG_TYPE_MW = 0x0003,
IWNES_MEMREG_TYPE_FMR = 0x0004,
+ IWNES_MEMREG_TYPE_FMEM = 0x0005,
};
struct nes_mem_reg_req {
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 9687c39..729d525 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index a680c42..64d3136 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -275,342 +275,236 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
}
-/**
- * nes_alloc_fmr
+/*
+ * alloc_fast_reg_mr
*/
-static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
- int ibmr_access_flags,
- struct ib_fmr_attr *ibfmr_attr)
+static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+ u32 stag, u32 page_count)
{
- unsigned long flags;
- struct nes_pd *nespd = to_nespd(ibpd);
- struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_fmr *nesfmr;
- struct nes_cqp_request *cqp_request;
struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
int ret;
- u32 stag;
- u32 stag_index = 0;
- u32 next_stag_index = 0;
- u32 driver_key = 0;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 opcode = 0;
- u8 stag_key = 0;
- int i=0;
- struct nes_vpbl vpbl;
-
- get_random_bytes(&next_stag_index, sizeof(next_stag_index));
- stag_key = (u8)next_stag_index;
-
- driver_key = 0;
-
- next_stag_index >>= 8;
- next_stag_index %= nesadapter->max_mr;
-
- ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
- nesadapter->max_mr, &stag_index, &next_stag_index);
- if (ret) {
- goto failed_resource_alloc;
- }
-
- nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL);
- if (!nesfmr) {
- ret = -ENOMEM;
- goto failed_fmr_alloc;
- }
-
- nesfmr->nesmr.mode = IWNES_MEMREG_TYPE_FMR;
- if (ibfmr_attr->max_pages == 1) {
- /* use zero length PBL */
- nesfmr->nesmr.pbl_4k = 0;
- nesfmr->nesmr.pbls_used = 0;
- } else if (ibfmr_attr->max_pages <= 32) {
- /* use PBL 256 */
- nesfmr->nesmr.pbl_4k = 0;
- nesfmr->nesmr.pbls_used = 1;
- } else if (ibfmr_attr->max_pages <= 512) {
- /* use 4K PBLs */
- nesfmr->nesmr.pbl_4k = 1;
- nesfmr->nesmr.pbls_used = 1;
- } else {
- /* use two level 4K PBLs */
- /* add support for two level 256B PBLs */
- nesfmr->nesmr.pbl_4k = 1;
- nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) +
- ((ibfmr_attr->max_pages & 511) ? 1 : 0);
- }
- /* Register the region with the adapter */
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
-
- /* track PBL resources */
- if (nesfmr->nesmr.pbls_used != 0) {
- if (nesfmr->nesmr.pbl_4k) {
- if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_vpbl_avail;
- } else {
- nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
- }
- } else {
- if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_vpbl_avail;
- } else {
- nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
- }
- }
- }
-
- /* one level pbl */
- if (nesfmr->nesmr.pbls_used == 0) {
- nesfmr->root_vpbl.pbl_vbase = NULL;
- nes_debug(NES_DBG_MR, "zero level pbl \n");
- } else if (nesfmr->nesmr.pbls_used == 1) {
- /* can change it to kmalloc & dma_map_single */
- nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
- &nesfmr->root_vpbl.pbl_pbase);
- if (!nesfmr->root_vpbl.pbl_vbase) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_vpbl_alloc;
- }
- nesfmr->leaf_pbl_cnt = 0;
- nes_debug(NES_DBG_MR, "one level pbl, root_vpbl.pbl_vbase=%p \n",
- nesfmr->root_vpbl.pbl_vbase);
- }
- /* two level pbl */
- else {
- nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
- &nesfmr->root_vpbl.pbl_pbase);
- if (!nesfmr->root_vpbl.pbl_vbase) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_vpbl_alloc;
- }
-
- nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
- nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
- if (!nesfmr->root_vpbl.leaf_vpbl) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_leaf_vpbl_alloc;
- }
-
- nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
- " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
- nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
-
- for (i=0; i<nesfmr->leaf_pbl_cnt; i++)
- nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase = NULL;
-
- for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
- vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
- &vpbl.pbl_pbase);
-
- if (!vpbl.pbl_vbase) {
- ret = -ENOMEM;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- goto failed_leaf_vpbl_pages_alloc;
- }
-
- nesfmr->root_vpbl.pbl_vbase[i].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
- nesfmr->root_vpbl.pbl_vbase[i].pa_high = cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
- nesfmr->root_vpbl.leaf_vpbl[i] = vpbl;
-
- nes_debug(NES_DBG_MR, "pbase_low=0x%x, pbase_high=0x%x, vpbl=%p\n",
- nesfmr->root_vpbl.pbl_vbase[i].pa_low,
- nesfmr->root_vpbl.pbl_vbase[i].pa_high,
- &nesfmr->root_vpbl.leaf_vpbl[i]);
- }
- }
- nesfmr->ib_qp = NULL;
- nesfmr->access_rights =0;
+ u16 major_code;
+ u64 region_length = page_count * PAGE_SIZE;
- stag = stag_index << 8;
- stag |= driver_key;
- stag += (u32)stag_key;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
cqp_request = nes_get_cqp_request(nesdev);
if (cqp_request == NULL) {
nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
- ret = -ENOMEM;
- goto failed_leaf_vpbl_pages_alloc;
+ return -ENOMEM;
}
+ nes_debug(NES_DBG_MR, "alloc_fast_reg_mr: page_count = %d, "
+ "region_length = %llu\n",
+ page_count, region_length);
cqp_request->waiting = 1;
cqp_wqe = &cqp_request->cqp_wqe;
- nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
- stag, stag_index);
-
- opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
-
- if (nesfmr->nesmr.pbl_4k == 1)
- opcode |= NES_CQP_STAG_PBL_BLK_SIZE;
-
- if (ibmr_access_flags & IB_ACCESS_REMOTE_WRITE) {
- opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE |
- NES_CQP_STAG_RIGHTS_LOCAL_WRITE | NES_CQP_STAG_REM_ACC_EN;
- nesfmr->access_rights |=
- NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_RIGHTS_LOCAL_WRITE |
- NES_CQP_STAG_REM_ACC_EN;
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ if (nesadapter->free_4kpbl > 0) {
+ nesadapter->free_4kpbl--;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ } else {
+ /* No 4kpbl's available: */
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ nes_debug(NES_DBG_MR, "Out of Pbls\n");
+ nes_free_cqp_request(nesdev, cqp_request);
+ return -ENOMEM;
}
- if (ibmr_access_flags & IB_ACCESS_REMOTE_READ) {
- opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ |
- NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_REM_ACC_EN;
- nesfmr->access_rights |=
- NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_RIGHTS_LOCAL_READ |
- NES_CQP_STAG_REM_ACC_EN;
- }
+ opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_MR |
+ NES_CQP_STAG_PBL_BLK_SIZE | NES_CQP_STAG_VA_TO |
+ NES_CQP_STAG_REM_ACC_EN;
+ /*
+ * The current OFED API does not support the zero based TO option.
+ * If it is added, the NES_CQP_STAG_VA* option will need to change. Also,
+ * the API does not support the ability to have the MR set for local
+ * access only when created and not allow the SQ op to override. Given
+ * this, the remote enable must be set here.
+ */
nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, 1);
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] =
- cpu_to_le32((nesfmr->nesmr.pbls_used>1) ?
- (nesfmr->nesmr.pbls_used-1) : nesfmr->nesmr.pbls_used);
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
+ cpu_to_le32((u32)(region_length >> 8) & 0xff000000);
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |=
+ cpu_to_le32(nespd->pd_id & 0x00007fff);
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, 0);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, 0);
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, 0);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (page_count * 8));
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
+ barrier();
atomic_set(&cqp_request->refcount, 2);
nes_post_cqp_request(nesdev, cqp_request);
/* Wait for CQP */
- ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
- " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
- stag, ret, cqp_request->major_code, cqp_request->minor_code);
-
- if ((!ret) || (cqp_request->major_code)) {
- nes_put_cqp_request(nesdev, cqp_request);
- ret = (!ret) ? -ETIME : -EIO;
- goto failed_leaf_vpbl_pages_alloc;
- }
+ ret = wait_event_timeout(cqp_request->waitq,
+ (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT);
+
+ nes_debug(NES_DBG_MR, "Allocate STag 0x%08X completed, "
+ "wait_event_timeout ret = %u, CQP Major:Minor codes = "
+ "0x%04X:0x%04X.\n", stag, ret, cqp_request->major_code,
+ cqp_request->minor_code);
+ major_code = cqp_request->major_code;
nes_put_cqp_request(nesdev, cqp_request);
- nesfmr->nesmr.ibfmr.lkey = stag;
- nesfmr->nesmr.ibfmr.rkey = stag;
- nesfmr->attr = *ibfmr_attr;
-
- return &nesfmr->nesmr.ibfmr;
-
- failed_leaf_vpbl_pages_alloc:
- /* unroll all allocated pages */
- for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
- if (nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase) {
- pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
- nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
- }
- }
- if (nesfmr->root_vpbl.leaf_vpbl)
- kfree(nesfmr->root_vpbl.leaf_vpbl);
- failed_leaf_vpbl_alloc:
- if (nesfmr->leaf_pbl_cnt == 0) {
- if (nesfmr->root_vpbl.pbl_vbase)
- pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
- nesfmr->root_vpbl.pbl_pbase);
- } else
- pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
- nesfmr->root_vpbl.pbl_pbase);
-
- failed_vpbl_alloc:
- if (nesfmr->nesmr.pbls_used != 0) {
+ if (!ret || major_code) {
spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- if (nesfmr->nesmr.pbl_4k)
- nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
- else
- nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+ nesadapter->free_4kpbl++;
spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
}
-failed_vpbl_avail:
- kfree(nesfmr);
-
- failed_fmr_alloc:
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
-
- failed_resource_alloc:
- return ERR_PTR(ret);
+ if (!ret)
+ return -ETIME;
+ else if (major_code)
+ return -EIO;
+ return 0;
}
-
-/**
- * nes_dealloc_fmr
+/*
+ * nes_alloc_fast_reg_mr
*/
-static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
+struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
{
- unsigned long flags;
- struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
- struct nes_fmr *nesfmr = to_nesfmr(nesmr);
- struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- int i = 0;
- int rc;
- /* free the resources */
- if (nesfmr->leaf_pbl_cnt == 0) {
- /* single PBL case */
- if (nesfmr->root_vpbl.pbl_vbase)
- pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
- nesfmr->root_vpbl.pbl_pbase);
- } else {
- for (i = 0; i < nesfmr->leaf_pbl_cnt; i++) {
- pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
- nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
- }
- kfree(nesfmr->root_vpbl.leaf_vpbl);
- pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
- nesfmr->root_vpbl.pbl_pbase);
- }
- nesmr->ibmw.device = ibfmr->device;
- nesmr->ibmw.pd = ibfmr->pd;
- nesmr->ibmw.rkey = ibfmr->rkey;
- nesmr->ibmw.uobject = NULL;
+ u32 next_stag_index;
+ u8 stag_key = 0;
+ u32 driver_key = 0;
+ int err = 0;
+ u32 stag_index = 0;
+ struct nes_mr *nesmr;
+ u32 stag;
+ int ret;
+ struct ib_mr *ibmr;
+/*
+ * Note: Set to always use a fixed length single page entry PBL. This allows
+ * the fast_reg_mr operation to always know the size of the PBL.
+ */
+ if (max_page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
+ return ERR_PTR(-E2BIG);
- rc = nes_dealloc_mw(&nesmr->ibmw);
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
- if ((rc == 0) && (nesfmr->nesmr.pbls_used != 0)) {
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- if (nesfmr->nesmr.pbl_4k) {
- nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
- WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl);
- } else {
- nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
- WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl);
- }
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+ nesadapter->max_mr, &stag_index,
+ &next_stag_index);
+ if (err)
+ return ERR_PTR(err);
+
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
}
- return rc;
-}
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+ nes_debug(NES_DBG_MR, "Allocating STag 0x%08X index = 0x%08X\n",
+ stag, stag_index);
-/**
- * nes_map_phys_fmr
+ ret = alloc_fast_reg_mr(nesdev, nespd, stag, max_page_list_len);
+
+ if (ret == 0) {
+ nesmr->ibmr.rkey = stag;
+ nesmr->ibmr.lkey = stag;
+ nesmr->mode = IWNES_MEMREG_TYPE_FMEM;
+ ibmr = &nesmr->ibmr;
+ } else {
+ kfree(nesmr);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ ibmr = ERR_PTR(-ENOMEM);
+ }
+ return ibmr;
+}
+
+/*
+ * nes_alloc_fast_reg_page_list
*/
-static int nes_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
+static struct ib_fast_reg_page_list *nes_alloc_fast_reg_page_list(
+ struct ib_device *ibdev,
+ int page_list_len)
{
- return 0;
-}
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct ib_fast_reg_page_list *pifrpl;
+ struct nes_ib_fast_reg_page_list *pnesfrpl;
+ if (page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
+ return ERR_PTR(-E2BIG);
+ /*
+ * Allocate the nes_ib_fast_reg_page_list structure (which embeds the
+ * ib_fast_reg_page_list) along with the PBL table.
+ */
+ pnesfrpl = kmalloc(sizeof(struct nes_ib_fast_reg_page_list) +
+ page_list_len * sizeof(u64), GFP_KERNEL);
+
+ if (!pnesfrpl)
+ return ERR_PTR(-ENOMEM);
-/**
- * nes_unmap_frm
+ pifrpl = &pnesfrpl->ibfrpl;
+ pifrpl->page_list = &pnesfrpl->pbl;
+ pifrpl->max_page_list_len = page_list_len;
+ /*
+ * Allocate the WQE PBL
+ */
+ pnesfrpl->nes_wqe_pbl.kva = pci_alloc_consistent(nesdev->pcidev,
+ page_list_len * sizeof(u64),
+ &pnesfrpl->nes_wqe_pbl.paddr);
+
+ if (!pnesfrpl->nes_wqe_pbl.kva) {
+ kfree(pnesfrpl);
+ return ERR_PTR(-ENOMEM);
+ }
+ nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, "
+ "ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, "
+ "pbl.paddr= %p\n", pnesfrpl, &pnesfrpl->ibfrpl,
+ pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva,
+ (void *)pnesfrpl->nes_wqe_pbl.paddr);
+
+ return pifrpl;
+}
+
+/*
+ * nes_free_fast_reg_page_list
*/
-static int nes_unmap_fmr(struct list_head *ibfmr_list)
+static void nes_free_fast_reg_page_list(struct ib_fast_reg_page_list *pifrpl)
{
- return 0;
+ struct nes_vnic *nesvnic = to_nesvnic(pifrpl->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_ib_fast_reg_page_list *pnesfrpl;
+
+ pnesfrpl = container_of(pifrpl, struct nes_ib_fast_reg_page_list, ibfrpl);
+ /*
+ * Free the WQE PBL.
+ */
+ pci_free_consistent(nesdev->pcidev,
+ pifrpl->max_page_list_len * sizeof(u64),
+ pnesfrpl->nes_wqe_pbl.kva,
+ pnesfrpl->nes_wqe_pbl.paddr);
+ /*
+ * Free the PBL structure
+ */
+ kfree(pnesfrpl);
}
-
-
/**
* nes_query_device
*/
@@ -633,23 +527,23 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2;
props->max_sge = nesdev->nesadapter->max_sge;
props->max_cq = nesibdev->max_cq;
- props->max_cqe = nesdev->nesadapter->max_cqe - 1;
+ props->max_cqe = nesdev->nesadapter->max_cqe;
props->max_mr = nesibdev->max_mr;
props->max_mw = nesibdev->max_mr;
props->max_pd = nesibdev->max_pd;
props->max_sge_rd = 1;
switch (nesdev->nesadapter->max_irrq_wr) {
case 0:
- props->max_qp_rd_atom = 1;
+ props->max_qp_rd_atom = 2;
break;
case 1:
- props->max_qp_rd_atom = 4;
+ props->max_qp_rd_atom = 8;
break;
case 2:
- props->max_qp_rd_atom = 16;
+ props->max_qp_rd_atom = 32;
break;
case 3:
- props->max_qp_rd_atom = 32;
+ props->max_qp_rd_atom = 64;
break;
default:
props->max_qp_rd_atom = 0;
@@ -1121,6 +1015,7 @@ static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
kunmap(nesqp->page);
return -ENOMEM;
}
+ nesqp->sq_kmapped = 1;
nesqp->hwqp.q2_vbase = mem;
mem += 256;
memset(nesqp->hwqp.q2_vbase, 0, 256);
@@ -1198,7 +1093,10 @@ static inline void nes_free_qp_mem(struct nes_device *nesdev,
pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase );
nesqp->pbl_vbase = NULL;
- kunmap(nesqp->page);
+ if (nesqp->sq_kmapped) {
+ nesqp->sq_kmapped = 0;
+ kunmap(nesqp->page);
+ }
}
}
@@ -1504,8 +1402,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
nes_debug(NES_DBG_QP, "QP%u structure located @%p.Size = %u.\n",
nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp));
spin_lock_init(&nesqp->lock);
- init_waitqueue_head(&nesqp->state_waitq);
- init_waitqueue_head(&nesqp->kick_waitq);
nes_add_ref(&nesqp->ibqp);
break;
default:
@@ -1513,6 +1409,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
return ERR_PTR(-EINVAL);
}
+ nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
+
/* update the QP table */
nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
nes_debug(NES_DBG_QP, "netdev refcnt=%u\n",
@@ -1607,8 +1505,10 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
}
}
- if (nesqp->pbl_pbase)
+ if (nesqp->pbl_pbase && nesqp->sq_kmapped) {
+ nesqp->sq_kmapped = 0;
kunmap(nesqp->page);
+ }
} else {
/* Clean any pending completions from the cq(s) */
if (nesqp->nesscq)
@@ -1649,6 +1549,9 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
unsigned long flags;
int ret;
+ if (entries > nesadapter->max_cqe)
+ return ERR_PTR(-EINVAL);
+
err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs,
nesadapter->max_cq, &cq_num, &nesadapter->next_cq);
if (err) {
@@ -2606,9 +2509,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
stag = stag_index << 8;
stag |= driver_key;
stag += (u32)stag_key;
- if (stag == 0) {
- stag = 1;
- }
iova_start = virt;
/* Make the leaf PBL the root if only one PBL */
@@ -3109,7 +3009,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
" already done based on hw state.\n",
nesqp->hwqp.qp_id);
issue_modify_qp = 0;
- nesqp->in_disconnect = 0;
}
switch (nesqp->hw_iwarp_state) {
case NES_AEQE_IWARP_STATE_CLOSING:
@@ -3122,7 +3021,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
break;
default:
next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
- nesqp->in_disconnect = 1;
nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
break;
}
@@ -3139,7 +3037,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
issue_modify_qp = 1;
- nesqp->in_disconnect = 1;
break;
case IB_QPS_ERR:
case IB_QPS_RESET:
@@ -3162,7 +3059,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
(nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
next_iwarp_state |= NES_CQP_QP_RESET;
- nesqp->in_disconnect = 1;
} else {
nes_debug(NES_DBG_MOD_QP, "QP%u NOT setting NES_CQP_QP_RESET since TCP state = %u\n",
nesqp->hwqp.qp_id, nesqp->hw_tcp_state);
@@ -3373,21 +3269,17 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_qp *nesqp = to_nesqp(ibqp);
struct nes_hw_qp_wqe *wqe;
- int err;
+ int err = 0;
u32 qsize = nesqp->hwqp.sq_size;
u32 head;
- u32 wqe_misc;
- u32 wqe_count;
+ u32 wqe_misc = 0;
+ u32 wqe_count = 0;
u32 counter;
- u32 total_payload_length;
-
- err = 0;
- wqe_misc = 0;
- wqe_count = 0;
- total_payload_length = 0;
- if (nesqp->ibqp_state > IB_QPS_RTS)
- return -EINVAL;
+ if (nesqp->ibqp_state > IB_QPS_RTS) {
+ err = -EINVAL;
+ goto out;
+ }
spin_lock_irqsave(&nesqp->lock, flags);
@@ -3413,94 +3305,208 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
u64temp = (u64)(ib_wr->wr_id);
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
u64temp);
- switch (ib_wr->opcode) {
- case IB_WR_SEND:
- if (ib_wr->send_flags & IB_SEND_SOLICITED) {
- wqe_misc = NES_IWARP_SQ_OP_SENDSE;
- } else {
- wqe_misc = NES_IWARP_SQ_OP_SEND;
- }
- if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
- err = -EINVAL;
- break;
- }
- if (ib_wr->send_flags & IB_SEND_FENCE) {
- wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
- }
- if ((ib_wr->send_flags & IB_SEND_INLINE) &&
- ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
- (ib_wr->sg_list[0].length <= 64)) {
- memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
- (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
- ib_wr->sg_list[0].length);
- wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
- } else {
- fill_wqe_sg_send(wqe, ib_wr, 1);
- }
+ switch (ib_wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ if (IB_WR_SEND == ib_wr->opcode) {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ wqe_misc = NES_IWARP_SQ_OP_SENDSE;
+ else
+ wqe_misc = NES_IWARP_SQ_OP_SEND;
+ } else {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ wqe_misc = NES_IWARP_SQ_OP_SENDSEINV;
+ else
+ wqe_misc = NES_IWARP_SQ_OP_SENDINV;
- break;
- case IB_WR_RDMA_WRITE:
- wqe_misc = NES_IWARP_SQ_OP_RDMAW;
- if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
- nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
- ib_wr->num_sge,
- nesdev->nesadapter->max_sge);
- err = -EINVAL;
- break;
- }
- if (ib_wr->send_flags & IB_SEND_FENCE) {
- wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
- }
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
+ ib_wr->ex.invalidate_rkey);
+ }
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
- ib_wr->wr.rdma.rkey);
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
- ib_wr->wr.rdma.remote_addr);
-
- if ((ib_wr->send_flags & IB_SEND_INLINE) &&
- ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
- (ib_wr->sg_list[0].length <= 64)) {
- memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
- (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
- ib_wr->sg_list[0].length);
- wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
- } else {
- fill_wqe_sg_send(wqe, ib_wr, 1);
- }
- wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
- wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
- break;
- case IB_WR_RDMA_READ:
- /* iWARP only supports 1 sge for RDMA reads */
- if (ib_wr->num_sge > 1) {
- nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
- ib_wr->num_sge);
- err = -EINVAL;
- break;
- }
- wqe_misc = NES_IWARP_SQ_OP_RDMAR;
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
- ib_wr->wr.rdma.remote_addr);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
- ib_wr->wr.rdma.rkey);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
- ib_wr->sg_list->length);
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
- ib_wr->sg_list->addr);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
- ib_wr->sg_list->lkey);
- break;
- default:
- /* error */
- err = -EINVAL;
- break;
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ err = -EINVAL;
+ break;
}
- if (ib_wr->send_flags & IB_SEND_SIGNALED) {
- wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
+ if (ib_wr->send_flags & IB_SEND_FENCE)
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+
+ if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+ (ib_wr->sg_list[0].length <= 64)) {
+ memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+ (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ ib_wr->sg_list[0].length);
+ wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+ } else {
+ fill_wqe_sg_send(wqe, ib_wr, 1);
+ }
+
+ break;
+ case IB_WR_RDMA_WRITE:
+ wqe_misc = NES_IWARP_SQ_OP_RDMAW;
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
+ ib_wr->num_sge, nesdev->nesadapter->max_sge);
+ err = -EINVAL;
+ break;
+ }
+
+ if (ib_wr->send_flags & IB_SEND_FENCE)
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+ ib_wr->wr.rdma.rkey);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+ ib_wr->wr.rdma.remote_addr);
+
+ if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+ (ib_wr->sg_list[0].length <= 64)) {
+ memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+ (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ ib_wr->sg_list[0].length);
+ wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+ } else {
+ fill_wqe_sg_send(wqe, ib_wr, 1);
+ }
+
+ wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ /* iWARP only supports 1 sge for RDMA reads */
+ if (ib_wr->num_sge > 1) {
+ nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
+ ib_wr->num_sge);
+ err = -EINVAL;
+ break;
+ }
+ if (ib_wr->opcode == IB_WR_RDMA_READ) {
+ wqe_misc = NES_IWARP_SQ_OP_RDMAR;
+ } else {
+ wqe_misc = NES_IWARP_SQ_OP_RDMAR_LOCINV;
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
+ ib_wr->ex.invalidate_rkey);
+ }
+
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+ ib_wr->wr.rdma.remote_addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+ ib_wr->wr.rdma.rkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
+ ib_wr->sg_list->length);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
+ ib_wr->sg_list->addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
+ ib_wr->sg_list->lkey);
+ break;
+ case IB_WR_LOCAL_INV:
+ wqe_misc = NES_IWARP_SQ_OP_LOCINV;
+ set_wqe_32bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX,
+ ib_wr->ex.invalidate_rkey);
+ break;
+ case IB_WR_FAST_REG_MR:
+ {
+ int i;
+ int flags = ib_wr->wr.fast_reg.access_flags;
+ struct nes_ib_fast_reg_page_list *pnesfrpl =
+ container_of(ib_wr->wr.fast_reg.page_list,
+ struct nes_ib_fast_reg_page_list,
+ ibfrpl);
+ u64 *src_page_list = pnesfrpl->ibfrpl.page_list;
+ u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva;
+
+ if (ib_wr->wr.fast_reg.page_list_len >
+ (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) {
+ nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n");
+ err = -EINVAL;
+ break;
+ }
+ wqe_misc = NES_IWARP_SQ_OP_FAST_REG;
+ set_wqe_64bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX,
+ ib_wr->wr.fast_reg.iova_start);
+ set_wqe_32bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
+ ib_wr->wr.fast_reg.length);
+ set_wqe_32bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
+ ib_wr->wr.fast_reg.rkey);
+ /* Set page size: */
+ if (ib_wr->wr.fast_reg.page_shift == 12) {
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K;
+ } else if (ib_wr->wr.fast_reg.page_shift == 21) {
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M;
+ } else {
+ nes_debug(NES_DBG_IW_TX, "Invalid page shift,"
+ " ib_wr=%u, max=1\n", ib_wr->num_sge);
+ err = -EINVAL;
+ break;
+ }
+ /* Set access_flags */
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ;
+ if (flags & IB_ACCESS_LOCAL_WRITE)
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE;
+
+ if (flags & IB_ACCESS_REMOTE_WRITE)
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE;
+
+ if (flags & IB_ACCESS_REMOTE_READ)
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ;
+
+ if (flags & IB_ACCESS_MW_BIND)
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND;
+
+ /* Fill in PBL info: */
+ if (ib_wr->wr.fast_reg.page_list_len >
+ pnesfrpl->ibfrpl.max_page_list_len) {
+ nes_debug(NES_DBG_IW_TX, "Invalid page list length,"
+ " ib_wr=%p, value=%u, max=%u\n",
+ ib_wr, ib_wr->wr.fast_reg.page_list_len,
+ pnesfrpl->ibfrpl.max_page_list_len);
+ err = -EINVAL;
+ break;
+ }
+
+ set_wqe_64bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX,
+ pnesfrpl->nes_wqe_pbl.paddr);
+
+ set_wqe_32bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX,
+ ib_wr->wr.fast_reg.page_list_len * 8);
+
+ for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)
+ dst_page_list[i] = cpu_to_le64(src_page_list[i]);
+
+ nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %p, "
+ "length: %d, rkey: %0x, pgl_paddr: %p, "
+ "page_list_len: %u, wqe_misc: %x\n",
+ (void *)ib_wr->wr.fast_reg.iova_start,
+ ib_wr->wr.fast_reg.length,
+ ib_wr->wr.fast_reg.rkey,
+ (void *)pnesfrpl->nes_wqe_pbl.paddr,
+ ib_wr->wr.fast_reg.page_list_len,
+ wqe_misc);
+ break;
+ }
+ default:
+ /* error */
+ err = -EINVAL;
+ break;
}
+
+ if (err)
+ break;
+
+ if ((ib_wr->send_flags & IB_SEND_SIGNALED) || nesqp->sig_all)
+ wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
+
wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc);
ib_wr = ib_wr->next;
@@ -3522,6 +3528,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
spin_unlock_irqrestore(&nesqp->lock, flags);
+out:
if (err)
*bad_wr = ib_wr;
return err;
@@ -3548,8 +3555,10 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
u32 counter;
u32 total_payload_length;
- if (nesqp->ibqp_state > IB_QPS_RTS)
- return -EINVAL;
+ if (nesqp->ibqp_state > IB_QPS_RTS) {
+ err = -EINVAL;
+ goto out;
+ }
spin_lock_irqsave(&nesqp->lock, flags);
@@ -3612,6 +3621,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
spin_unlock_irqrestore(&nesqp->lock, flags);
+out:
if (err)
*bad_wr = ib_wr;
return err;
@@ -3720,6 +3730,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
nes_debug(NES_DBG_CQ, "Operation = Send.\n");
entry->opcode = IB_WC_SEND;
break;
+ case NES_IWARP_SQ_OP_LOCINV:
+ entry->opcode = IB_WC_LOCAL_INV;
+ break;
+ case NES_IWARP_SQ_OP_FAST_REG:
+ entry->opcode = IB_WC_FAST_REG_MR;
+ break;
}
nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
@@ -3890,10 +3906,9 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
nesibdev->ibdev.bind_mw = nes_bind_mw;
- nesibdev->ibdev.alloc_fmr = nes_alloc_fmr;
- nesibdev->ibdev.unmap_fmr = nes_unmap_fmr;
- nesibdev->ibdev.dealloc_fmr = nes_dealloc_fmr;
- nesibdev->ibdev.map_phys_fmr = nes_map_phys_fmr;
+ nesibdev->ibdev.alloc_fast_reg_mr = nes_alloc_fast_reg_mr;
+ nesibdev->ibdev.alloc_fast_reg_page_list = nes_alloc_fast_reg_page_list;
+ nesibdev->ibdev.free_fast_reg_page_list = nes_free_fast_reg_page_list;
nesibdev->ibdev.attach_mcast = nes_multicast_attach;
nesibdev->ibdev.detach_mcast = nes_multicast_detach;
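For context, a minimal consumer-side sketch of the fast registration flow these new entry points back. It is illustrative only: the helper name frmr_example_register, the assumption of an existing PD and an already-connected QP, and the chosen access flags are not part of this patch.

#include <rdma/ib_verbs.h>

/* Hypothetical helper: register npages 4K pages at iova and post the FRMR WR. */
static int frmr_example_register(struct ib_pd *pd, struct ib_qp *qp,
				 u64 *pages, int npages, u64 iova, u32 len)
{
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr wr, *bad_wr;
	struct ib_mr *mr;
	int i, ret;

	mr = ib_alloc_fast_reg_mr(pd, npages);	/* lands in nes_alloc_fast_reg_mr() */
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	frpl = ib_alloc_fast_reg_page_list(pd->device, npages);
	if (IS_ERR(frpl)) {
		ib_dereg_mr(mr);
		return PTR_ERR(frpl);
	}

	for (i = 0; i < npages; i++)
		frpl->page_list[i] = pages[i];	/* 4K-aligned bus addresses */

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;		/* handled by nes_post_send() above */
	wr.send_flags = IB_SEND_SIGNALED;
	wr.wr.fast_reg.iova_start = iova;
	wr.wr.fast_reg.page_list = frpl;
	wr.wr.fast_reg.page_list_len = npages;
	wr.wr.fast_reg.page_shift = PAGE_SHIFT;	/* nes accepts 4K or 2M pages */
	wr.wr.fast_reg.length = len;
	wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
				      IB_ACCESS_REMOTE_WRITE;
	wr.wr.fast_reg.rkey = mr->rkey;

	ret = ib_post_send(qp, &wr, &bad_wr);
	if (ret) {
		ib_free_fast_reg_page_list(frpl);
		ib_dereg_mr(mr);
	}
	return ret;
}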
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 89822d7..2df9993e 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -135,19 +135,15 @@ struct nes_qp {
struct ib_qp ibqp;
void *allocated_buffer;
struct iw_cm_id *cm_id;
- struct workqueue_struct *wq;
struct nes_cq *nesscq;
struct nes_cq *nesrcq;
struct nes_pd *nespd;
void *cm_node; /* handle of the node this QP is associated with */
struct ietf_mpa_frame *ietf_frame;
dma_addr_t ietf_frame_pbase;
- wait_queue_head_t state_waitq;
struct ib_mr *lsmm_mr;
- unsigned long socket;
struct nes_hw_qp hwqp;
struct work_struct work;
- struct work_struct ae_work;
enum ib_qp_state ibqp_state;
u32 iwarp_state;
u32 hte_index;
@@ -165,19 +161,20 @@ struct nes_qp {
struct page *page;
struct timer_list terminate_timer;
enum ib_event_type terminate_eventtype;
- wait_queue_head_t kick_waitq;
- u16 in_disconnect;
+ u16 active_conn:1;
+ u16 skip_lsmm:1;
+ u16 user_mode:1;
+ u16 hte_added:1;
+ u16 flush_issued:1;
+ u16 destroyed:1;
+ u16 sig_all:1;
+ u16 rsvd:9;
u16 private_data_len;
u16 term_sq_flush_code;
u16 term_rq_flush_code;
- u8 active_conn;
- u8 skip_lsmm;
- u8 user_mode;
- u8 hte_added;
u8 hw_iwarp_state;
- u8 flush_issued;
u8 hw_tcp_state;
u8 term_flags;
- u8 destroyed;
+ u8 sq_kmapped;
};
#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2bf5116..df3eb8c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -884,6 +884,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
neigh->neighbour = neighbour;
neigh->dev = dev;
+ memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
*to_ipoib_neigh(neighbour) = neigh;
skb_queue_head_init(&neigh->queue);
ipoib_cm_set(neigh, NULL);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index b9453d0..274c883 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -209,6 +209,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
mem_copy->copy_buf = NULL;
}
+#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
+
/**
* iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
* and returns the length of resulting physical address array (may be less than
@@ -221,62 +223,52 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
* where --few fragments of the same page-- are present in the SG as
* consecutive elements. Also, it handles one entry SG.
*/
+
static int iser_sg_to_page_vec(struct iser_data_buf *data,
struct iser_page_vec *page_vec,
struct ib_device *ibdev)
{
- struct scatterlist *sgl = (struct scatterlist *)data->buf;
- struct scatterlist *sg;
- u64 first_addr, last_addr, page;
- int end_aligned;
- unsigned int cur_page = 0;
+ struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
+ u64 start_addr, end_addr, page, chunk_start = 0;
unsigned long total_sz = 0;
- int i;
+ unsigned int dma_len;
+ int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
/* compute the offset of first element */
page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
+ new_chunk = 1;
+ cur_page = 0;
for_each_sg(sgl, sg, data->dma_nents, i) {
- unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
+ start_addr = ib_sg_dma_address(ibdev, sg);
+ if (new_chunk)
+ chunk_start = start_addr;
+ dma_len = ib_sg_dma_len(ibdev, sg);
+ end_addr = start_addr + dma_len;
total_sz += dma_len;
- first_addr = ib_sg_dma_address(ibdev, sg);
- last_addr = first_addr + dma_len;
-
- end_aligned = !(last_addr & ~MASK_4K);
-
- /* continue to collect page fragments till aligned or SG ends */
- while (!end_aligned && (i + 1 < data->dma_nents)) {
- sg = sg_next(sg);
- i++;
- dma_len = ib_sg_dma_len(ibdev, sg);
- total_sz += dma_len;
- last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
- end_aligned = !(last_addr & ~MASK_4K);
+ /* collect page fragments until aligned or end of SG list */
+ if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
+ new_chunk = 0;
+ continue;
}
-
- /* handle the 1st page in the 1st DMA element */
- if (cur_page == 0) {
- page = first_addr & MASK_4K;
- page_vec->pages[cur_page] = page;
- cur_page++;
+ new_chunk = 1;
+
+ /* address of the first page in the contiguous chunk;
+ masking relevant for the very first SG entry,
+ which might be unaligned */
+ page = chunk_start & MASK_4K;
+ do {
+ page_vec->pages[cur_page++] = page;
page += SIZE_4K;
- } else
- page = first_addr;
-
- for (; page < last_addr; page += SIZE_4K) {
- page_vec->pages[cur_page] = page;
- cur_page++;
- }
-
+ } while (page < end_addr);
}
+
page_vec->data_size = total_sz;
iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
return cur_page;
}
-#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
/**
* iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
@@ -284,42 +276,40 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
* the number of entries which are aligned correctly. Supports the case where
 * consecutive SG elements are actually fragments of the same physical page.
*/
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
- struct ib_device *ibdev)
+static int iser_data_buf_aligned_len(struct iser_data_buf *data,
+ struct ib_device *ibdev)
{
- struct scatterlist *sgl, *sg;
- u64 end_addr, next_addr;
- int i, cnt;
- unsigned int ret_len = 0;
+ struct scatterlist *sgl, *sg, *next_sg = NULL;
+ u64 start_addr, end_addr;
+ int i, ret_len, start_check = 0;
+
+ if (data->dma_nents == 1)
+ return 1;
sgl = (struct scatterlist *)data->buf;
+ start_addr = ib_sg_dma_address(ibdev, sgl);
- cnt = 0;
for_each_sg(sgl, sg, data->dma_nents, i) {
- /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
- "offset: %ld sz: %ld\n", i,
- (unsigned long)sg_phys(sg),
- (unsigned long)sg->offset,
- (unsigned long)sg->length); */
- end_addr = ib_sg_dma_address(ibdev, sg) +
- ib_sg_dma_len(ibdev, sg);
- /* iser_dbg("Checking sg iobuf end address "
- "0x%08lX\n", end_addr); */
- if (i + 1 < data->dma_nents) {
- next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
- /* are i, i+1 fragments of the same page? */
- if (end_addr == next_addr) {
- cnt++;
- continue;
- } else if (!IS_4K_ALIGNED(end_addr)) {
- ret_len = cnt + 1;
- break;
- }
- }
- cnt++;
+ if (start_check && !IS_4K_ALIGNED(start_addr))
+ break;
+
+ next_sg = sg_next(sg);
+ if (!next_sg)
+ break;
+
+ end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
+ start_addr = ib_sg_dma_address(ibdev, next_sg);
+
+ if (end_addr == start_addr) {
+ start_check = 0;
+ continue;
+ } else
+ start_check = 1;
+
+ if (!IS_4K_ALIGNED(end_addr))
+ break;
}
- if (i == data->dma_nents)
- ret_len = cnt; /* loop ended */
+ ret_len = (next_sg) ? i : i+1;
iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
ret_len, data->dma_nents, data);
return ret_len;
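A small userspace model of the chunk-collection rule the reworked iser_sg_to_page_vec() implements: DMA segments are accumulated until the running end address is 4K aligned (or the list ends), then 4K page addresses are emitted for the whole contiguous chunk. The segment type and function name below are illustrative, not part of the driver.

#include <stdint.h>

#define SZ_4K		4096ULL
#define MASK_4K		(~(SZ_4K - 1))
#define ALIGNED_4K(a)	((((uint64_t)(a)) & ~MASK_4K) == 0)

struct seg { uint64_t addr; uint64_t len; };	/* stand-in for one SG entry */

static int seg_to_pages(const struct seg *sg, int nents, uint64_t *pages)
{
	uint64_t chunk_start = 0, end, page;
	int i, cur = 0, new_chunk = 1;

	for (i = 0; i < nents; i++) {
		if (new_chunk)
			chunk_start = sg[i].addr;
		end = sg[i].addr + sg[i].len;

		/* keep collecting while the end is unaligned and entries remain */
		if (!ALIGNED_4K(end) && i < nents - 1) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* emit every 4K page covered by the contiguous chunk */
		for (page = chunk_start & MASK_4K; page < end; page += SZ_4K)
			pages[cur++] = page;
	}
	return cur;	/* number of page entries produced */
}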
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index dee6706..258c639 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -59,7 +59,8 @@ static void evdev_pass_event(struct evdev_client *client,
client->head &= EVDEV_BUFFER_SIZE - 1;
spin_unlock(&client->buffer_lock);
- kill_fasync(&client->fasync, SIGIO, POLL_IN);
+ if (event->type == EV_SYN)
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
/*
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index b483b29..f967008 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -221,11 +221,27 @@ static int get_compatible_type(struct ff_device *ff, int effect_type)
}
/*
+ * Only left/right direction should be used (under/over 0x8000) for
+ * forward/reverse motor direction (to keep calculation fast & simple).
+ */
+static u16 ml_calculate_direction(u16 direction, u16 force,
+ u16 new_direction, u16 new_force)
+{
+ if (!force)
+ return new_direction;
+ if (!new_force)
+ return direction;
+ return (((u32)(direction >> 1) * force +
+ (new_direction >> 1) * new_force) /
+ (force + new_force)) << 1;
+}
+
+/*
* Combine two effects and apply gain.
*/
static void ml_combine_effects(struct ff_effect *effect,
struct ml_effect_state *state,
- unsigned int gain)
+ int gain)
{
struct ff_effect *new = state->effect;
unsigned int strong, weak, i;
@@ -252,8 +268,21 @@ static void ml_combine_effects(struct ff_effect *effect,
break;
case FF_RUMBLE:
- strong = new->u.rumble.strong_magnitude * gain / 0xffff;
- weak = new->u.rumble.weak_magnitude * gain / 0xffff;
+ strong = (u32)new->u.rumble.strong_magnitude * gain / 0xffff;
+ weak = (u32)new->u.rumble.weak_magnitude * gain / 0xffff;
+
+ if (effect->u.rumble.strong_magnitude + strong)
+ effect->direction = ml_calculate_direction(
+ effect->direction,
+ effect->u.rumble.strong_magnitude,
+ new->direction, strong);
+ else if (effect->u.rumble.weak_magnitude + weak)
+ effect->direction = ml_calculate_direction(
+ effect->direction,
+ effect->u.rumble.weak_magnitude,
+ new->direction, weak);
+ else
+ effect->direction = 0;
effect->u.rumble.strong_magnitude =
min(strong + effect->u.rumble.strong_magnitude,
0xffffU);
@@ -268,6 +297,13 @@ static void ml_combine_effects(struct ff_effect *effect,
/* here we also scale it 0x7fff => 0xffff */
i = i * gain / 0x7fff;
+ if (effect->u.rumble.strong_magnitude + i)
+ effect->direction = ml_calculate_direction(
+ effect->direction,
+ effect->u.rumble.strong_magnitude,
+ new->direction, i);
+ else
+ effect->direction = 0;
effect->u.rumble.strong_magnitude =
min(i + effect->u.rumble.strong_magnitude, 0xffffU);
effect->u.rumble.weak_magnitude =
@@ -411,8 +447,6 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
msecs_to_jiffies(state->effect->replay.length);
state->adj_at = state->play_at;
- ml_schedule_timer(ml);
-
} else {
debug("initiated stop");
@@ -420,10 +454,10 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
__set_bit(FF_EFFECT_ABORTING, &state->flags);
else
__clear_bit(FF_EFFECT_STARTED, &state->flags);
-
- ml_play_effects(ml);
}
+ ml_play_effects(ml);
+
return 0;
}
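A self-contained check of the weighted direction merge introduced above; the input values are made up. Two rumble effects, one at direction 0x4000 with strong magnitude 0x8000 and one at 0xc000 with magnitude 0x4000, combine to 0x6aaa, i.e. two thirds of the way toward the stronger effect.

#include <stdio.h>
#include <stdint.h>

/* mirrors ml_calculate_direction() */
static uint16_t calc_dir(uint16_t dir, uint16_t force,
			 uint16_t new_dir, uint16_t new_force)
{
	if (!force)
		return new_dir;
	if (!new_force)
		return dir;
	return (((uint32_t)(dir >> 1) * force +
		 (new_dir >> 1) * new_force) /
		(force + new_force)) << 1;
}

int main(void)
{
	/* prints 0x6aaa: weighted 2:1 toward the 0x4000 direction */
	printf("0x%04x\n", calc_dir(0x4000, 0x8000, 0xc000, 0x4000));
	return 0;
}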
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index aa6713b..291d939 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -100,6 +100,12 @@ static void input_close_polled_device(struct input_dev *input)
struct input_polled_dev *dev = input_get_drvdata(input);
cancel_delayed_work_sync(&dev->work);
+ /*
+ * Clean up work struct to remove references to the workqueue.
+ * It may be destroyed by the next call. This causes problems
+ * at next device open-close in case of poll_interval == 0.
+ */
+ INIT_DELAYED_WORK(&dev->work, dev->work.work.func);
input_polldev_stop_workqueue();
if (dev->close)
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 5c16001..86cb2d2 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -24,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/smp_lock.h>
+#include "input-compat.h"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("Input core");
@@ -45,6 +46,7 @@ static unsigned int input_abs_bypass_init_data[] __initdata = {
ABS_MT_TOOL_TYPE,
ABS_MT_BLOB_ID,
ABS_MT_TRACKING_ID,
+ ABS_MT_PRESSURE,
0
};
static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)];
@@ -296,9 +298,15 @@ static void input_handle_event(struct input_dev *dev,
* @value: value of the event
*
* This function should be used by drivers implementing various input
- * devices. See also input_inject_event().
+ * devices to report input events. See also input_inject_event().
+ *
+ * NOTE: input_event() may be safely used right after input device was
+ * allocated with input_allocate_device(), even before it is registered
+ * with input_register_device(), but the event will not reach any of the
+ * input handlers. Such early invocation of input_event() may be used
+ * to 'seed' initial state of a switch or initial position of absolute
+ * axis, etc.
*/
-
void input_event(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
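A short sketch of the early-seeding pattern the updated kernel-doc describes; the device name and the switch code used here are hypothetical.

static int example_probe(void)
{
	struct input_dev *dev;
	int error;

	dev = input_allocate_device();
	if (!dev)
		return -ENOMEM;

	dev->name = "example lid switch";
	input_set_capability(dev, EV_SW, SW_LID);

	/* allowed before registration: seeds state, reaches no handlers yet */
	input_event(dev, EV_SW, SW_LID, 0);

	error = input_register_device(dev);
	if (error)
		input_free_device(dev);
	return error;
}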
@@ -758,6 +766,40 @@ static int input_attach_handler(struct input_dev *dev, struct input_handler *han
return error;
}
+#ifdef CONFIG_COMPAT
+
+static int input_bits_to_string(char *buf, int buf_size,
+ unsigned long bits, bool skip_empty)
+{
+ int len = 0;
+
+ if (INPUT_COMPAT_TEST) {
+ u32 dword = bits >> 32;
+ if (dword || !skip_empty)
+ len += snprintf(buf, buf_size, "%x ", dword);
+
+ dword = bits & 0xffffffffUL;
+ if (dword || !skip_empty || len)
+ len += snprintf(buf + len, max(buf_size - len, 0),
+ "%x", dword);
+ } else {
+ if (bits || !skip_empty)
+ len += snprintf(buf, buf_size, "%lx", bits);
+ }
+
+ return len;
+}
+
+#else /* !CONFIG_COMPAT */
+
+static int input_bits_to_string(char *buf, int buf_size,
+ unsigned long bits, bool skip_empty)
+{
+ return bits || !skip_empty ?
+ snprintf(buf, buf_size, "%lx", bits) : 0;
+}
+
+#endif
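For reference, a worked example of what the helper above emits (the bitmap value is hypothetical): on a 64-bit kernel serving a 32-bit process (INPUT_COMPAT_TEST true) the word 0x0000000500000003 is printed as its two dwords, "5 3", while in the native case the same word prints as a single "500000003".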
#ifdef CONFIG_PROC_FS
@@ -826,14 +868,25 @@ static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
unsigned long *bitmap, int max)
{
int i;
-
- for (i = BITS_TO_LONGS(max) - 1; i > 0; i--)
- if (bitmap[i])
- break;
+ bool skip_empty = true;
+ char buf[18];
seq_printf(seq, "B: %s=", name);
- for (; i >= 0; i--)
- seq_printf(seq, "%lx%s", bitmap[i], i > 0 ? " " : "");
+
+ for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
+ if (input_bits_to_string(buf, sizeof(buf),
+ bitmap[i], skip_empty)) {
+ skip_empty = false;
+ seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
+ }
+ }
+
+ /*
+ * If no output was produced, print a single 0.
+ */
+ if (skip_empty)
+ seq_puts(seq, "0");
+
seq_putc(seq, '\n');
}
@@ -1122,14 +1175,23 @@ static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
{
int i;
int len = 0;
+ bool skip_empty = true;
+
+ for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
+ len += input_bits_to_string(buf + len, max(buf_size - len, 0),
+ bitmap[i], skip_empty);
+ if (len) {
+ skip_empty = false;
+ if (i > 0)
+ len += snprintf(buf + len, max(buf_size - len, 0), " ");
+ }
+ }
- for (i = BITS_TO_LONGS(max) - 1; i > 0; i--)
- if (bitmap[i])
- break;
-
- for (; i >= 0; i--)
- len += snprintf(buf + len, max(buf_size - len, 0),
- "%lx%s", bitmap[i], i > 0 ? " " : "");
+ /*
+ * If no output was produced, print a single 0.
+ */
+ if (len == 0)
+ len = snprintf(buf, buf_size, "%d", 0);
if (add_cr)
len += snprintf(buf + len, max(buf_size - len, 0), "\n");
@@ -1144,7 +1206,8 @@ static ssize_t input_dev_show_cap_##bm(struct device *dev, \
{ \
struct input_dev *input_dev = to_input_dev(dev); \
int len = input_print_bitmap(buf, PAGE_SIZE, \
- input_dev->bm##bit, ev##_MAX, 1); \
+ input_dev->bm##bit, ev##_MAX, \
+ true); \
return min_t(int, len, PAGE_SIZE); \
} \
static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
@@ -1208,7 +1271,7 @@ static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
len = input_print_bitmap(&env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen,
- bitmap, max, 0);
+ bitmap, max, false);
if (len >= (sizeof(env->buf) - env->buflen))
return -ENOMEM;
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 67c207f..45ac70e 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -277,7 +277,7 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
}
#ifdef RESET_WORKS
- if ((gf2k->id != (GB(19,2,0) | GB(15,3,2) | GB(12,3,5))) ||
+ if ((gf2k->id != (GB(19,2,0) | GB(15,3,2) | GB(12,3,5))) &&
(gf2k->id != (GB(31,2,0) | GB(27,3,2) | GB(24,3,5)))) {
err = -ENODEV;
goto fail2;
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index f6c688c..b1edd77 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -210,7 +210,7 @@ static int iforce_open(struct input_dev *dev)
return 0;
}
-static void iforce_release(struct input_dev *dev)
+static void iforce_close(struct input_dev *dev)
{
struct iforce *iforce = input_get_drvdata(dev);
int i;
@@ -228,30 +228,17 @@ static void iforce_release(struct input_dev *dev)
/* Disable force feedback playback */
iforce_send_packet(iforce, FF_CMD_ENABLE, "\001");
+ /* Wait for the command to complete */
+ wait_event_interruptible(iforce->wait,
+ !test_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags));
}
switch (iforce->bus) {
#ifdef CONFIG_JOYSTICK_IFORCE_USB
- case IFORCE_USB:
- usb_kill_urb(iforce->irq);
-
- /* The device was unplugged before the file
- * was released */
- if (iforce->usbdev == NULL) {
- iforce_delete_device(iforce);
- kfree(iforce);
- }
- break;
-#endif
- }
-}
-
-void iforce_delete_device(struct iforce *iforce)
-{
- switch (iforce->bus) {
-#ifdef CONFIG_JOYSTICK_IFORCE_USB
case IFORCE_USB:
- iforce_usb_delete(iforce);
+ usb_kill_urb(iforce->irq);
+ usb_kill_urb(iforce->out);
+ usb_kill_urb(iforce->ctrl);
break;
#endif
#ifdef CONFIG_JOYSTICK_IFORCE_232
@@ -303,7 +290,7 @@ int iforce_init_device(struct iforce *iforce)
input_dev->name = "Unknown I-Force device";
input_dev->open = iforce_open;
- input_dev->close = iforce_release;
+ input_dev->close = iforce_close;
/*
* On-device memory allocation.
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 9f289d8..b41303d 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -109,6 +109,7 @@ static void iforce_usb_out(struct urb *urb)
struct iforce *iforce = urb->context;
if (urb->status) {
+ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dbg("urb->status %d, exiting", urb->status);
return;
}
@@ -186,33 +187,19 @@ fail:
return err;
}
-/* Called by iforce_delete() */
-void iforce_usb_delete(struct iforce* iforce)
-{
- usb_kill_urb(iforce->irq);
- usb_kill_urb(iforce->out);
- usb_kill_urb(iforce->ctrl);
-
- usb_free_urb(iforce->irq);
- usb_free_urb(iforce->out);
- usb_free_urb(iforce->ctrl);
-}
-
static void iforce_usb_disconnect(struct usb_interface *intf)
{
struct iforce *iforce = usb_get_intfdata(intf);
- int open = 0; /* FIXME! iforce->dev.handle->open; */
usb_set_intfdata(intf, NULL);
- if (iforce) {
- iforce->usbdev = NULL;
- input_unregister_device(iforce->dev);
- if (!open) {
- iforce_delete_device(iforce);
- kfree(iforce);
- }
- }
+ input_unregister_device(iforce->dev);
+
+ usb_free_urb(iforce->irq);
+ usb_free_urb(iforce->out);
+ usb_free_urb(iforce->ctrl);
+
+ kfree(iforce);
}
static struct usb_device_id iforce_usb_ids [] = {
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index f2d91f4..9f494b7 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -150,11 +150,9 @@ void iforce_serial_xmit(struct iforce *iforce);
/* iforce-usb.c */
void iforce_usb_xmit(struct iforce *iforce);
-void iforce_usb_delete(struct iforce *iforce);
/* iforce-main.c */
int iforce_init_device(struct iforce *iforce);
-void iforce_delete_device(struct iforce *iforce);
/* iforce-packets.c */
int iforce_control_playback(struct iforce*, u16 id, unsigned int);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 482cb12..8a28fb7 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -446,7 +446,7 @@ static void xpad_irq_in(struct urb *urb)
}
exit:
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
err ("%s - usb_submit_urb failed with result %d",
__func__, retval);
@@ -571,7 +571,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data,
xpad->odata[6] = 0x00;
xpad->odata[7] = 0x00;
xpad->irq_out->transfer_buffer_length = 8;
- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+ usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
}
return 0;
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index a357357..7b40562 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -134,7 +134,8 @@ static const unsigned short atkbd_unxlate_table[128] = {
#define ATKBD_CMD_GETID 0x02f2
#define ATKBD_CMD_SETREP 0x10f3
#define ATKBD_CMD_ENABLE 0x00f4
-#define ATKBD_CMD_RESET_DIS 0x00f5
+#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */
+#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */
#define ATKBD_CMD_SETALL_MBR 0x00fa
#define ATKBD_CMD_RESET_BAT 0x02ff
#define ATKBD_CMD_RESEND 0x00fe
@@ -224,8 +225,10 @@ struct atkbd {
struct delayed_work event_work;
unsigned long event_jiffies;
- struct mutex event_mutex;
unsigned long event_mask;
+
+ /* Serializes reconnect(), attr->set() and event work */
+ struct mutex mutex;
};
/*
@@ -576,7 +579,7 @@ static void atkbd_event_work(struct work_struct *work)
{
struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work);
- mutex_lock(&atkbd->event_mutex);
+ mutex_lock(&atkbd->mutex);
if (!atkbd->enabled) {
/*
@@ -595,7 +598,7 @@ static void atkbd_event_work(struct work_struct *work)
atkbd_set_repeat_rate(atkbd);
}
- mutex_unlock(&atkbd->event_mutex);
+ mutex_unlock(&atkbd->mutex);
}
/*
@@ -611,7 +614,7 @@ static void atkbd_schedule_event_work(struct atkbd *atkbd, int event_bit)
atkbd->event_jiffies = jiffies;
set_bit(event_bit, &atkbd->event_mask);
- wmb();
+ mb();
schedule_delayed_work(&atkbd->event_work, delay);
}
@@ -836,7 +839,7 @@ static void atkbd_cleanup(struct serio *serio)
struct atkbd *atkbd = serio_get_drvdata(serio);
atkbd_disable(atkbd);
- ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_BAT);
+ ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_DEF);
}
@@ -848,13 +851,20 @@ static void atkbd_disconnect(struct serio *serio)
{
struct atkbd *atkbd = serio_get_drvdata(serio);
+ sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
+
atkbd_disable(atkbd);
- /* make sure we don't have a command in flight */
+ input_unregister_device(atkbd->dev);
+
+ /*
+ * Make sure we don't have a command in flight.
+ * Note that since atkbd->enabled is false the event work will keep
+ * rescheduling itself until it gets canceled and will not try
+ * to access the freed input device or serio port.
+ */
cancel_delayed_work_sync(&atkbd->event_work);
- sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
- input_unregister_device(atkbd->dev);
serio_close(serio);
serio_set_drvdata(serio, NULL);
kfree(atkbd);
@@ -1086,7 +1096,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->dev = dev;
ps2_init(&atkbd->ps2dev, serio);
INIT_DELAYED_WORK(&atkbd->event_work, atkbd_event_work);
- mutex_init(&atkbd->event_mutex);
+ mutex_init(&atkbd->mutex);
switch (serio->id.type) {
@@ -1159,19 +1169,23 @@ static int atkbd_reconnect(struct serio *serio)
{
struct atkbd *atkbd = serio_get_drvdata(serio);
struct serio_driver *drv = serio->drv;
+ int retval = -1;
if (!atkbd || !drv) {
printk(KERN_DEBUG "atkbd: reconnect request, but serio is disconnected, ignoring...\n");
return -1;
}
+ mutex_lock(&atkbd->mutex);
+
atkbd_disable(atkbd);
if (atkbd->write) {
if (atkbd_probe(atkbd))
- return -1;
+ goto out;
+
if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
- return -1;
+ goto out;
atkbd_activate(atkbd);
@@ -1189,8 +1203,11 @@ static int atkbd_reconnect(struct serio *serio)
}
atkbd_enable(atkbd);
+ retval = 0;
- return 0;
+ out:
+ mutex_unlock(&atkbd->mutex);
+ return retval;
}
static struct serio_device_id atkbd_serio_ids[] = {
@@ -1234,47 +1251,28 @@ static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
ssize_t (*handler)(struct atkbd *, char *))
{
struct serio *serio = to_serio_port(dev);
- int retval;
-
- retval = serio_pin_driver(serio);
- if (retval)
- return retval;
-
- if (serio->drv != &atkbd_drv) {
- retval = -ENODEV;
- goto out;
- }
-
- retval = handler((struct atkbd *)serio_get_drvdata(serio), buf);
+ struct atkbd *atkbd = serio_get_drvdata(serio);
-out:
- serio_unpin_driver(serio);
- return retval;
+ return handler(atkbd, buf);
}
static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
ssize_t (*handler)(struct atkbd *, const char *, size_t))
{
struct serio *serio = to_serio_port(dev);
- struct atkbd *atkbd;
+ struct atkbd *atkbd = serio_get_drvdata(serio);
int retval;
- retval = serio_pin_driver(serio);
+ retval = mutex_lock_interruptible(&atkbd->mutex);
if (retval)
return retval;
- if (serio->drv != &atkbd_drv) {
- retval = -ENODEV;
- goto out;
- }
-
- atkbd = serio_get_drvdata(serio);
atkbd_disable(atkbd);
retval = handler(atkbd, buf, count);
atkbd_enable(atkbd);
-out:
- serio_unpin_driver(serio);
+ mutex_unlock(&atkbd->mutex);
+
return retval;
}
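
The atkbd hunks above replace the narrow event_mutex with a single per-device mutex that serializes reconnect(), the sysfs set handlers and the deferred event work. A minimal sketch of that locking pattern follows; the structure and function names are hypothetical stand-ins, not the driver's own code.

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Sketch only: one mutex covering reconnect, sysfs writes and deferred work. */
struct example_kbd {
	struct mutex mutex;	/* serializes reconnect(), attr->set() and event work */
	bool enabled;
};

static int example_reconnect(struct example_kbd *kbd)
{
	int retval = -1;

	mutex_lock(&kbd->mutex);
	/* ... re-probe the hardware; on failure just fall through to unlock ... */
	retval = 0;

	mutex_unlock(&kbd->mutex);
	return retval;
}

static ssize_t example_attr_store(struct example_kbd *kbd,
				  const char *buf, size_t count)
{
	int retval = mutex_lock_interruptible(&kbd->mutex);

	if (retval)
		return retval;

	/* ... apply the new setting while nothing else touches the device ... */

	mutex_unlock(&kbd->mutex);
	return count;
}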
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index 6e52d85..d410d7a 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -174,6 +174,14 @@ static int __init davinci_ks_probe(struct platform_device *pdev)
struct davinci_ks_platform_data *pdata = pdev->dev.platform_data;
int error, i;
+ if (pdata->device_enable) {
+ error = pdata->device_enable(dev);
+ if (error < 0) {
+ dev_dbg(dev, "device enable function failed\n");
+ return error;
+ }
+ }
+
if (!pdata->keymap) {
dev_dbg(dev, "no keymap from pdata\n");
return -EINVAL;
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 181d30e..e457404 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -22,11 +22,11 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/input.h>
#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/input/matrix_keypad.h>
#include <mach/hardware.h>
-#include <mach/gpio.h>
#include <mach/ep93xx_keypad.h>
/*
@@ -60,38 +60,37 @@
#define KEY_REG_KEY1_MASK (0x0000003f)
#define KEY_REG_KEY1_SHIFT (0)
-#define keypad_readl(off) __raw_readl(keypad->mmio_base + (off))
-#define keypad_writel(v, off) __raw_writel((v), keypad->mmio_base + (off))
-
-#define MAX_MATRIX_KEY_NUM (MAX_MATRIX_KEY_ROWS * MAX_MATRIX_KEY_COLS)
+#define EP93XX_MATRIX_SIZE (EP93XX_MATRIX_ROWS * EP93XX_MATRIX_COLS)
struct ep93xx_keypad {
struct ep93xx_keypad_platform_data *pdata;
-
- struct clk *clk;
struct input_dev *input_dev;
+ struct clk *clk;
+
void __iomem *mmio_base;
- int irq;
- int enabled;
+ unsigned int matrix_keycodes[EP93XX_MATRIX_SIZE];
int key1;
int key2;
- unsigned int matrix_keycodes[MAX_MATRIX_KEY_NUM];
+ int irq;
+
+ bool enabled;
};
static void ep93xx_keypad_build_keycode(struct ep93xx_keypad *keypad)
{
struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
struct input_dev *input_dev = keypad->input_dev;
+ unsigned int *key;
int i;
- for (i = 0; i < pdata->matrix_key_map_size; i++) {
- unsigned int key = pdata->matrix_key_map[i];
- int row = (key >> 28) & 0xf;
- int col = (key >> 24) & 0xf;
- int code = key & 0xffffff;
+ key = &pdata->matrix_key_map[0];
+ for (i = 0; i < pdata->matrix_key_map_size; i++, key++) {
+ int row = KEY_ROW(*key);
+ int col = KEY_COL(*key);
+ int code = KEY_VAL(*key);
keypad->matrix_keycodes[(row << 3) + col] = code;
__set_bit(code, input_dev->keybit);
@@ -102,9 +101,11 @@ static irqreturn_t ep93xx_keypad_irq_handler(int irq, void *dev_id)
{
struct ep93xx_keypad *keypad = dev_id;
struct input_dev *input_dev = keypad->input_dev;
- unsigned int status = keypad_readl(KEY_REG);
+ unsigned int status;
int keycode, key1, key2;
+ status = __raw_readl(keypad->mmio_base + KEY_REG);
+
keycode = (status & KEY_REG_KEY1_MASK) >> KEY_REG_KEY1_SHIFT;
key1 = keypad->matrix_keycodes[keycode];
@@ -152,7 +153,10 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
unsigned int val = 0;
- clk_set_rate(keypad->clk, pdata->flags & EP93XX_KEYPAD_KDIV);
+ if (pdata->flags & EP93XX_KEYPAD_KDIV)
+ clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV4);
+ else
+ clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV16);
if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY)
val |= KEY_INIT_DIS3KY;
@@ -167,7 +171,7 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
val |= ((pdata->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK);
- keypad_writel(val, KEY_INIT);
+ __raw_writel(val, keypad->mmio_base + KEY_INIT);
}
static int ep93xx_keypad_open(struct input_dev *pdev)
@@ -177,7 +181,7 @@ static int ep93xx_keypad_open(struct input_dev *pdev)
if (!keypad->enabled) {
ep93xx_keypad_config(keypad);
clk_enable(keypad->clk);
- keypad->enabled = 1;
+ keypad->enabled = true;
}
return 0;
@@ -189,7 +193,7 @@ static void ep93xx_keypad_close(struct input_dev *pdev)
if (keypad->enabled) {
clk_disable(keypad->clk);
- keypad->enabled = 0;
+ keypad->enabled = false;
}
}
@@ -211,7 +215,7 @@ static int ep93xx_keypad_suspend(struct platform_device *pdev,
if (keypad->enabled) {
clk_disable(keypad->clk);
- keypad->enabled = 0;
+ keypad->enabled = false;
}
mutex_unlock(&input_dev->mutex);
@@ -236,7 +240,7 @@ static int ep93xx_keypad_resume(struct platform_device *pdev)
if (!keypad->enabled) {
ep93xx_keypad_config(keypad);
clk_enable(keypad->clk);
- keypad->enabled = 1;
+ keypad->enabled = true;
}
}
@@ -252,88 +256,56 @@ static int ep93xx_keypad_resume(struct platform_device *pdev)
static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad;
- struct ep93xx_keypad_platform_data *pdata = pdev->dev.platform_data;
struct input_dev *input_dev;
struct resource *res;
- int irq, err, i, gpio;
-
- if (!pdata ||
- !pdata->matrix_key_rows ||
- pdata->matrix_key_rows > MAX_MATRIX_KEY_ROWS ||
- !pdata->matrix_key_cols ||
- pdata->matrix_key_cols > MAX_MATRIX_KEY_COLS) {
- dev_err(&pdev->dev, "invalid or missing platform data\n");
- return -EINVAL;
- }
+ int err;
keypad = kzalloc(sizeof(struct ep93xx_keypad), GFP_KERNEL);
- if (!keypad) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
+ if (!keypad)
return -ENOMEM;
- }
- keypad->pdata = pdata;
+ keypad->pdata = pdev->dev.platform_data;
+ if (!keypad->pdata) {
+ err = -EINVAL;
+ goto failed_free;
+ }
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get keypad irq\n");
+ keypad->irq = platform_get_irq(pdev, 0);
+ if (!keypad->irq) {
err = -ENXIO;
goto failed_free;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
err = -ENXIO;
goto failed_free;
}
res = request_mem_region(res->start, resource_size(res), pdev->name);
if (!res) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
err = -EBUSY;
goto failed_free;
}
keypad->mmio_base = ioremap(res->start, resource_size(res));
if (keypad->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
err = -ENXIO;
goto failed_free_mem;
}
- /* Request the needed GPIO's */
- gpio = EP93XX_GPIO_LINE_ROW0;
- for (i = 0; i < keypad->pdata->matrix_key_rows; i++, gpio++) {
- err = gpio_request(gpio, pdev->name);
- if (err) {
- dev_err(&pdev->dev, "failed to request gpio-%d\n",
- gpio);
- goto failed_free_rows;
- }
- }
-
- gpio = EP93XX_GPIO_LINE_COL0;
- for (i = 0; i < keypad->pdata->matrix_key_cols; i++, gpio++) {
- err = gpio_request(gpio, pdev->name);
- if (err) {
- dev_err(&pdev->dev, "failed to request gpio-%d\n",
- gpio);
- goto failed_free_cols;
- }
- }
+ err = ep93xx_keypad_acquire_gpio(pdev);
+ if (err)
+ goto failed_free_io;
- keypad->clk = clk_get(&pdev->dev, "key_clk");
+ keypad->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk)) {
- dev_err(&pdev->dev, "failed to get keypad clock\n");
err = PTR_ERR(keypad->clk);
- goto failed_free_io;
+ goto failed_free_gpio;
}
- /* Create and register the input driver */
input_dev = input_allocate_device();
if (!input_dev) {
- dev_err(&pdev->dev, "failed to allocate input device\n");
err = -ENOMEM;
goto failed_put_clk;
}
@@ -358,44 +330,29 @@ static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
ep93xx_keypad_build_keycode(keypad);
platform_set_drvdata(pdev, keypad);
- err = request_irq(irq, ep93xx_keypad_irq_handler, IRQF_DISABLED,
- pdev->name, keypad);
- if (err) {
- dev_err(&pdev->dev, "failed to request IRQ\n");
+ err = request_irq(keypad->irq, ep93xx_keypad_irq_handler,
+ IRQF_DISABLED, pdev->name, keypad);
+ if (err)
goto failed_free_dev;
- }
-
- keypad->irq = irq;
- /* Register the input device */
err = input_register_device(input_dev);
- if (err) {
- dev_err(&pdev->dev, "failed to register input device\n");
+ if (err)
goto failed_free_irq;
- }
device_init_wakeup(&pdev->dev, 1);
return 0;
failed_free_irq:
- free_irq(irq, pdev);
+ free_irq(keypad->irq, pdev);
platform_set_drvdata(pdev, NULL);
failed_free_dev:
input_free_device(input_dev);
failed_put_clk:
clk_put(keypad->clk);
+failed_free_gpio:
+ ep93xx_keypad_release_gpio(pdev);
failed_free_io:
- i = keypad->pdata->matrix_key_cols - 1;
- gpio = EP93XX_GPIO_LINE_COL0 + i;
-failed_free_cols:
- for ( ; i >= 0; i--, gpio--)
- gpio_free(gpio);
- i = keypad->pdata->matrix_key_rows - 1;
- gpio = EP93XX_GPIO_LINE_ROW0 + i;
-failed_free_rows:
- for ( ; i >= 0; i--, gpio--)
- gpio_free(gpio);
iounmap(keypad->mmio_base);
failed_free_mem:
release_mem_region(res->start, resource_size(res));
@@ -408,7 +365,6 @@ static int __devexit ep93xx_keypad_remove(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
- int i, gpio;
free_irq(keypad->irq, pdev);
@@ -420,15 +376,7 @@ static int __devexit ep93xx_keypad_remove(struct platform_device *pdev)
input_unregister_device(keypad->input_dev);
- i = keypad->pdata->matrix_key_cols - 1;
- gpio = EP93XX_GPIO_LINE_COL0 + i;
- for ( ; i >= 0; i--, gpio--)
- gpio_free(gpio);
-
- i = keypad->pdata->matrix_key_rows - 1;
- gpio = EP93XX_GPIO_LINE_ROW0 + i;
- for ( ; i >= 0; i--, gpio--)
- gpio_free(gpio);
+ ep93xx_keypad_release_gpio(pdev);
iounmap(keypad->mmio_base);
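
The rewritten ep93xx_keypad_build_keycode() above switches to the generic KEY_ROW()/KEY_COL()/KEY_VAL() accessors from <linux/input/matrix_keypad.h>. A hedged sketch of how a board keymap packs such entries with KEY() and how they unpack again; the table contents and lookup helper are purely illustrative, not the ep93xx platform data.

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

/* Sketch only: a hypothetical keymap using the matrix_keypad packing macros. */
static const unsigned int example_keymap[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
	KEY(1, 0, KEY_ENTER),
};

static unsigned short example_lookup(const unsigned int *map, int n,
				     int row, int col)
{
	int i;

	for (i = 0; i < n; i++)
		if (KEY_ROW(map[i]) == row && KEY_COL(map[i]) == col)
			return KEY_VAL(map[i]);

	return KEY_RESERVED;	/* no entry for this row/column */
}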
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 34f4a29..d3c8b61 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -29,11 +29,13 @@ struct matrix_keypad {
unsigned short *keycodes;
unsigned int row_shift;
+ DECLARE_BITMAP(disabled_gpios, MATRIX_MAX_ROWS);
+
uint32_t last_key_state[MATRIX_MAX_COLS];
struct delayed_work work;
+ spinlock_t lock;
bool scan_pending;
bool stopped;
- spinlock_t lock;
};
/*
@@ -222,9 +224,16 @@ static int matrix_keypad_suspend(struct device *dev)
matrix_keypad_stop(keypad->input_dev);
- if (device_may_wakeup(&pdev->dev))
- for (i = 0; i < pdata->num_row_gpios; i++)
- enable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
+ if (device_may_wakeup(&pdev->dev)) {
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ if (!test_bit(i, keypad->disabled_gpios)) {
+ unsigned int gpio = pdata->row_gpios[i];
+
+ if (enable_irq_wake(gpio_to_irq(gpio)) == 0)
+ __set_bit(i, keypad->disabled_gpios);
+ }
+ }
+ }
return 0;
}
@@ -236,9 +245,15 @@ static int matrix_keypad_resume(struct device *dev)
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
int i;
- if (device_may_wakeup(&pdev->dev))
- for (i = 0; i < pdata->num_row_gpios; i++)
- disable_irq_wake(gpio_to_irq(pdata->row_gpios[i]));
+ if (device_may_wakeup(&pdev->dev)) {
+ for (i = 0; i < pdata->num_row_gpios; i++) {
+ if (test_and_clear_bit(i, keypad->disabled_gpios)) {
+ unsigned int gpio = pdata->row_gpios[i];
+
+ disable_irq_wake(gpio_to_irq(gpio));
+ }
+ }
+ }
matrix_keypad_start(keypad->input_dev);
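
The matrix_keypad suspend/resume hunks above arm each row IRQ for wakeup individually and record the successful ones in the new disabled_gpios bitmap, so resume only disarms IRQs that were actually armed. A small sketch of that pairing, with a hypothetical structure standing in for struct matrix_keypad:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>

#define EXAMPLE_MAX_ROWS	16

/* Sketch only: per-row bookkeeping of which wake IRQs were armed. */
struct example_keypad {
	DECLARE_BITMAP(wake_armed, EXAMPLE_MAX_ROWS);
	unsigned int row_gpios[EXAMPLE_MAX_ROWS];
	unsigned int num_rows;
};

static void example_arm_wakeup(struct example_keypad *kp)
{
	unsigned int i;

	for (i = 0; i < kp->num_rows; i++)
		if (enable_irq_wake(gpio_to_irq(kp->row_gpios[i])) == 0)
			__set_bit(i, kp->wake_armed);
}

static void example_disarm_wakeup(struct example_keypad *kp)
{
	unsigned int i;

	for (i = 0; i < kp->num_rows; i++)
		if (test_and_clear_bit(i, kp->wake_armed))
			disable_irq_wake(gpio_to_irq(kp->row_gpios[i]));
}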
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index eeaa7ac..21d6184 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -253,14 +253,6 @@ static irqreturn_t do_kp_irq(int irq, void *_kp)
u8 reg;
int ret;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate. Although it might be
- * friendlier not to borrow this thread context...
- */
- local_irq_enable();
-#endif
-
/* Read & Clear TWL4030 pending interrupt */
ret = twl4030_kpread(kp, &reg, KEYP_ISR1, 1);
@@ -403,7 +395,8 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev)
*
* NOTE: we assume this host is wired to TWL4040 INT1, not INT2 ...
*/
- error = request_irq(kp->irq, do_kp_irq, 0, pdev->name, kp);
+ error = request_threaded_irq(kp->irq, NULL, do_kp_irq,
+ 0, pdev->name, kp);
if (error) {
dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n",
kp->irq);
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index bdde5c8..e9069b8 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -39,18 +39,8 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
int err;
u8 value;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate since this is a threaded
- * IRQ and can sleep due to the i2c reads it has to issue.
- * Although it might be friendlier not to borrow this thread
- * context...
- */
- local_irq_enable();
-#endif
-
err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
- STS_HW_CONDITIONS);
+ STS_HW_CONDITIONS);
if (!err) {
input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
input_sync(pwr);
@@ -80,7 +70,7 @@ static int __devinit twl4030_pwrbutton_probe(struct platform_device *pdev)
pwr->phys = "twl4030_pwrbutton/input0";
pwr->dev.parent = &pdev->dev;
- err = request_irq(irq, powerbutton_irq,
+ err = request_threaded_irq(irq, NULL, powerbutton_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"twl4030_pwrbutton", pwr);
if (err < 0) {
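
Both TWL4030 hunks above drop the CONFIG_LOCKDEP local_irq_enable() workaround by registering the handler with request_threaded_irq() and a NULL primary handler: the handler then runs in its own kernel thread and may sleep for its I2C traffic. A minimal sketch of that registration, with hypothetical names:

#include <linux/interrupt.h>

/* Sketch only: a purely threaded handler, free to sleep (e.g. for I2C reads). */
static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* slow bus traffic is fine here, we run in process context */
	return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, NULL, example_thread_fn,
				    IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
				    "example", dev);
}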
diff --git a/drivers/input/misc/winbond-cir.c b/drivers/input/misc/winbond-cir.c
index 33309fe..c8f5a9a 100644
--- a/drivers/input/misc/winbond-cir.c
+++ b/drivers/input/misc/winbond-cir.c
@@ -768,7 +768,7 @@ wbcir_parse_rc6(struct device *dev, struct wbcir_data *data)
return;
}
- dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
+ dev_dbg(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X "
"toggle %u mode %u scan 0x%08X\n",
address,
command,
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 38da6ab..c0afb71 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -1328,7 +1328,7 @@ static struct platform_driver wistron_driver = {
.driver = {
.name = "wistron-bios",
.owner = THIS_MODULE,
-#if CONFIG_PM
+#ifdef CONFIG_PM
.pm = &wistron_pm_ops,
#endif
},
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 3feeb3a..c714ca2 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -70,7 +70,7 @@ config MOUSE_PS2_SYNAPTICS
config MOUSE_PS2_LIFEBOOK
bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED
default y
- depends on MOUSE_PS2 && X86
+ depends on MOUSE_PS2 && X86 && DMI
help
Say Y here if you have a Fujitsu B-series Lifebook PS/2
TouchScreen connected to your system.
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index a3f492a..f93c2c0 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -5,6 +5,7 @@
* Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com>
* Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru>
* Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2009 Sebastian Kapfer <sebastian_kapfer@gmx.net>
*
* ALPS detection, tap switching and status querying info is taken from
* tpconfig utility (by C. Scott Ananian and Bruce Kall).
@@ -28,7 +29,6 @@
#define dbg(format, arg...) do {} while (0)
#endif
-
#define ALPS_OLDPROTO 0x01 /* old style input */
#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -37,7 +37,8 @@
#define ALPS_FW_BK_1 0x10 /* front & back buttons present */
#define ALPS_FW_BK_2 0x20 /* front & back buttons present */
#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
-
+#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
+ 6-byte ALPS packet */
static const struct alps_model_info alps_model_data[] = {
{ { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
@@ -58,7 +59,9 @@ static const struct alps_model_info alps_model_data[] = {
{ { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
{ { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
{ { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
- { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude E6500 */
+ /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
+ { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
+ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
{ { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
};
@@ -69,20 +72,88 @@ static const struct alps_model_info alps_model_data[] = {
*/
/*
- * ALPS abolute Mode - new format
+ * PS/2 packet format
+ *
+ * byte 0: 0 0 YSGN XSGN 1 M R L
+ * byte 1: X7 X6 X5 X4 X3 X2 X1 X0
+ * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+ *
+ * Note that the device never signals an overflow condition.
+ *
+ * ALPS absolute Mode - new format
*
* byte 0: 1 ? ? ? 1 ? ? ?
* byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 ? fin ges
+ * byte 2: 0 x10 x9 x8 x7 ? fin ges
* byte 3: 0 y9 y8 y7 1 M R L
* byte 4: 0 y6 y5 y4 y3 y2 y1 y0
* byte 5: 0 z6 z5 z4 z3 z2 z1 z0
*
+ * Dualpoint device -- interleaved packet format
+ *
+ * byte 0: 1 1 0 0 1 1 1 1
+ * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
+ * byte 2: 0 x10 x9 x8 x7 0 fin ges
+ * byte 3: 0 0 YSGN XSGN 1 1 1 1
+ * byte 4: X7 X6 X5 X4 X3 X2 X1 X0
+ * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+ * byte 6: 0 y9 y8 y7 1 m r l
+ * byte 7: 0 y6 y5 y4 y3 y2 y1 y0
+ * byte 8: 0 z6 z5 z4 z3 z2 z1 z0
+ *
+ * CAPITALS = stick, minuscules = touchpad
+ *
* ?'s can have different meanings on different models,
* such as wheel rotation, extra buttons, stick buttons
* on a dualpoint, etc.
*/
+static bool alps_is_valid_first_byte(const struct alps_model_info *model,
+ unsigned char data)
+{
+ return (data & model->mask0) == model->byte0;
+}
+
+static void alps_report_buttons(struct psmouse *psmouse,
+ struct input_dev *dev1, struct input_dev *dev2,
+ int left, int right, int middle)
+{
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+
+ if (model->flags & ALPS_PS2_INTERLEAVED) {
+ struct input_dev *dev;
+
+ /*
+ * If the shared button has already been reported on the
+ * other device (dev2) then this event should also be
+ * sent through that device.
+ */
+ dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
+ input_report_key(dev, BTN_LEFT, left);
+
+ dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
+ input_report_key(dev, BTN_RIGHT, right);
+
+ dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
+ input_report_key(dev, BTN_MIDDLE, middle);
+
+ /*
+ * Sync the _other_ device now, we'll do the first
+ * device later once we report the rest of the events.
+ */
+ input_sync(dev2);
+ } else {
+ /*
+ * For devices with non-interleaved packets we know which
+ * device the buttons belong to, so we can simply report them.
+ */
+ input_report_key(dev1, BTN_LEFT, left);
+ input_report_key(dev1, BTN_RIGHT, right);
+ input_report_key(dev1, BTN_MIDDLE, middle);
+ }
+}
+
static void alps_process_packet(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
@@ -93,18 +164,6 @@ static void alps_process_packet(struct psmouse *psmouse)
int x, y, z, ges, fin, left, right, middle;
int back = 0, forward = 0;
- if ((packet[0] & 0xc8) == 0x08) { /* 3-byte PS/2 packet */
- input_report_key(dev2, BTN_LEFT, packet[0] & 1);
- input_report_key(dev2, BTN_RIGHT, packet[0] & 2);
- input_report_key(dev2, BTN_MIDDLE, packet[0] & 4);
- input_report_rel(dev2, REL_X,
- packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
- input_report_rel(dev2, REL_Y,
- packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
- input_sync(dev2);
- return;
- }
-
if (model->flags & ALPS_OLDPROTO) {
left = packet[2] & 0x10;
right = packet[2] & 0x08;
@@ -140,18 +199,13 @@ static void alps_process_packet(struct psmouse *psmouse)
input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
- input_report_key(dev2, BTN_LEFT, left);
- input_report_key(dev2, BTN_RIGHT, right);
- input_report_key(dev2, BTN_MIDDLE, middle);
+ alps_report_buttons(psmouse, dev2, dev, left, right, middle);
- input_sync(dev);
input_sync(dev2);
return;
}
- input_report_key(dev, BTN_LEFT, left);
- input_report_key(dev, BTN_RIGHT, right);
- input_report_key(dev, BTN_MIDDLE, middle);
+ alps_report_buttons(psmouse, dev, dev2, left, right, middle);
/* Convert hardware tap to a reasonable Z value */
if (ges && !fin)
@@ -202,25 +256,168 @@ static void alps_process_packet(struct psmouse *psmouse)
input_sync(dev);
}
+static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+ unsigned char packet[],
+ bool report_buttons)
+{
+ struct alps_data *priv = psmouse->private;
+ struct input_dev *dev2 = priv->dev2;
+
+ if (report_buttons)
+ alps_report_buttons(psmouse, dev2, psmouse->dev,
+ packet[0] & 1, packet[0] & 2, packet[0] & 4);
+
+ input_report_rel(dev2, REL_X,
+ packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
+ input_report_rel(dev2, REL_Y,
+ packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
+
+ input_sync(dev2);
+}
+
+static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+
+ if (psmouse->pktcnt < 6)
+ return PSMOUSE_GOOD_DATA;
+
+ if (psmouse->pktcnt == 6) {
+ /*
+ * Start a timer to flush the packet if it ends up being the
+ * last 6-byte packet in the stream. The timer needs to fire
+ * before the psmouse core times out itself. 20 ms should be enough
+ * to decide if we are getting more data or not.
+ */
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20));
+ return PSMOUSE_GOOD_DATA;
+ }
+
+ del_timer(&priv->timer);
+
+ if (psmouse->packet[6] & 0x80) {
+
+ /*
+ * Highest bit is set - that means we either had a
+ * complete ALPS packet and this is the start of the
+ * next packet, or we got garbage.
+ */
+
+ if (((psmouse->packet[3] |
+ psmouse->packet[4] |
+ psmouse->packet[5]) & 0x80) ||
+ (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
+ dbg("refusing packet %x %x %x %x "
+ "(suspected interleaved ps/2)\n",
+ psmouse->packet[3], psmouse->packet[4],
+ psmouse->packet[5], psmouse->packet[6]);
+ return PSMOUSE_BAD_DATA;
+ }
+
+ alps_process_packet(psmouse);
+
+ /* Continue with the next packet */
+ psmouse->packet[0] = psmouse->packet[6];
+ psmouse->pktcnt = 1;
+
+ } else {
+
+ /*
+ * High bit is 0 - that means that we indeed got a PS/2
+ * packet in the middle of an ALPS packet.
+ *
+ * There is also a possibility that we got a 6-byte ALPS
+ * packet followed by a 3-byte packet from the trackpoint.
+ * We cannot distinguish between these 2 scenarios but
+ * because the latter is unlikely to happen in the course
+ * of normal operation (the user would need to press all
+ * buttons on the pad and start moving the trackpoint
+ * without touching the pad surface) we assume the former.
+ * Even if we are wrong, the worst thing that would happen
+ * is that the cursor would jump, but we should not get
+ * protocol desynchronization.
+ */
+
+ alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
+ false);
+
+ /*
+ * Continue with the standard ALPS protocol handling,
+ * but make sure we won't process it as an interleaved
+ * packet again, which may happen if all buttons are
+ * pressed. To avoid this let's reset the 4th bit which
+ * is normally 1.
+ */
+ psmouse->packet[3] = psmouse->packet[6] & 0xf7;
+ psmouse->pktcnt = 4;
+ }
+
+ return PSMOUSE_GOOD_DATA;
+}
+
+static void alps_flush_packet(unsigned long data)
+{
+ struct psmouse *psmouse = (struct psmouse *)data;
+
+ serio_pause_rx(psmouse->ps2dev.serio);
+
+ if (psmouse->pktcnt == 6) {
+
+ /*
+ * We did not get any more data in a reasonable amount of time.
+ * Validate the last 3 bytes and process as a standard
+ * ALPS packet.
+ */
+ if ((psmouse->packet[3] |
+ psmouse->packet[4] |
+ psmouse->packet[5]) & 0x80) {
+ dbg("refusing packet %x %x %x "
+ "(suspected interleaved ps/2)\n",
+ psmouse->packet[3], psmouse->packet[4],
+ psmouse->packet[5]);
+ } else {
+ alps_process_packet(psmouse);
+ }
+ psmouse->pktcnt = 0;
+ }
+
+ serio_continue_rx(psmouse->ps2dev.serio);
+}
+
static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
if (psmouse->pktcnt == 3) {
- alps_process_packet(psmouse);
+ alps_report_bare_ps2_packet(psmouse, psmouse->packet,
+ true);
return PSMOUSE_FULL_PACKET;
}
return PSMOUSE_GOOD_DATA;
}
- if ((psmouse->packet[0] & priv->i->mask0) != priv->i->byte0)
+ /* Check for PS/2 packet stuffed in the middle of ALPS packet. */
+
+ if ((model->flags & ALPS_PS2_INTERLEAVED) &&
+ psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
+ return alps_handle_interleaved_ps2(psmouse);
+ }
+
+ if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
+ dbg("refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
+ psmouse->packet[0], model->mask0, model->byte0);
return PSMOUSE_BAD_DATA;
+ }
/* Bytes 2 - 6 should have 0 in the highest bit */
if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
- (psmouse->packet[psmouse->pktcnt - 1] & 0x80))
+ (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
+ dbg("refusing packet[%i] = %x\n",
+ psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]);
return PSMOUSE_BAD_DATA;
+ }
if (psmouse->pktcnt == 6) {
alps_process_packet(psmouse);
@@ -459,6 +656,7 @@ static void alps_disconnect(struct psmouse *psmouse)
struct alps_data *priv = psmouse->private;
psmouse_reset(psmouse);
+ del_timer_sync(&priv->timer);
input_unregister_device(priv->dev2);
kfree(priv);
}
@@ -476,6 +674,8 @@ int alps_init(struct psmouse *psmouse)
goto init_fail;
priv->dev2 = dev2;
+ setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
+
psmouse->private = priv;
model = alps_get_model(psmouse, &version);
@@ -487,6 +687,17 @@ int alps_init(struct psmouse *psmouse)
if (alps_hw_init(psmouse))
goto init_fail;
+ /*
+ * Undo part of setup done for us by psmouse core since touchpad
+ * is not a relative device.
+ */
+ __clear_bit(EV_REL, dev1->evbit);
+ __clear_bit(REL_X, dev1->relbit);
+ __clear_bit(REL_Y, dev1->relbit);
+
+ /*
+ * Now set up our capabilities.
+ */
dev1->evbit[BIT_WORD(EV_KEY)] |= BIT_MASK(EV_KEY);
dev1->keybit[BIT_WORD(BTN_TOUCH)] |= BIT_MASK(BTN_TOUCH);
dev1->keybit[BIT_WORD(BTN_TOOL_FINGER)] |= BIT_MASK(BTN_TOOL_FINGER);
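
The packet-format comment and alps_report_bare_ps2_packet() above show how a bare 3-byte PS/2 packet carries 9-bit signed deltas, with the sign bits parked in byte 0 (bit 4 = XSGN, bit 5 = YSGN). A standalone sketch of that sign reconstruction, compilable as plain user-space C; the sample packet is chosen only for illustration.

#include <stdio.h>

/*
 * Sketch only: the same delta math as alps_report_bare_ps2_packet().
 * Y is negated, as in the helper above, because PS/2 mice count upward
 * movement as positive while the input layer reports the opposite.
 */
static int ps2_rel_x(const unsigned char p[3])
{
	return p[1] ? p[1] - ((p[0] << 4) & 0x100) : 0;
}

static int ps2_rel_y(const unsigned char p[3])
{
	return p[2] ? ((p[0] << 3) & 0x100) - p[2] : 0;
}

int main(void)
{
	/* byte 0 = 0x18: the always-set bit 3 plus XSGN; deltas 0xfe and 0x02 */
	unsigned char pkt[3] = { 0x18, 0xfe, 0x02 };

	printf("REL_X=%d REL_Y=%d\n", ps2_rel_x(pkt), ps2_rel_y(pkt));	/* -2 -2 */
	return 0;
}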
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index bc87936..904ed8b 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -23,6 +23,7 @@ struct alps_data {
char phys[32]; /* Phys */
const struct alps_model_info *i;/* Info */
int prev_fin; /* Finger bit from previous packet */
+ struct timer_list timer;
};
#ifdef CONFIG_MOUSE_PS2_ALPS
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 0d1d334..4f8fe08 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -139,6 +139,7 @@ struct tp_finger {
/* trackpad finger data size, empirically at least ten fingers */
#define SIZEOF_FINGER sizeof(struct tp_finger)
#define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER)
+#define MAX_FINGER_ORIENTATION 16384
/* device-specific parameters */
struct bcm5974_param {
@@ -284,6 +285,26 @@ static void setup_events_to_report(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_Y,
0, cfg->y.dim, cfg->y.fuzz, 0);
+ /* finger touch area */
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ /* finger approach area */
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ /* finger orientation */
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
+ -MAX_FINGER_ORIENTATION,
+ MAX_FINGER_ORIENTATION, 0, 0);
+ /* finger position */
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ cfg->x.devmin, cfg->x.devmax, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ cfg->y.devmin, cfg->y.devmax, 0, 0);
+
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
@@ -310,13 +331,29 @@ static int report_bt_state(struct bcm5974 *dev, int size)
return 0;
}
+static void report_finger_data(struct input_dev *input,
+ const struct bcm5974_config *cfg,
+ const struct tp_finger *f)
+{
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
+ input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
+ input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+ input_report_abs(input, ABS_MT_ORIENTATION,
+ MAX_FINGER_ORIENTATION - raw2int(f->orientation));
+ input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ cfg->y.devmin + cfg->y.devmax - raw2int(f->abs_y));
+ input_mt_sync(input);
+}
+
/* report trackpad data as logical trackpad state */
static int report_tp_state(struct bcm5974 *dev, int size)
{
const struct bcm5974_config *c = &dev->cfg;
const struct tp_finger *f;
struct input_dev *input = dev->input;
- int raw_p, raw_w, raw_x, raw_y, raw_n;
+ int raw_p, raw_w, raw_x, raw_y, raw_n, i;
int ptest, origin, ibt = 0, nmin = 0, nmax = 0;
int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0;
@@ -329,6 +366,11 @@ static int report_tp_state(struct bcm5974 *dev, int size)
/* always track the first finger; when detached, start over */
if (raw_n) {
+
+ /* report raw trackpad data */
+ for (i = 0; i < raw_n; i++)
+ report_finger_data(input, c, &f[i]);
+
raw_p = raw2int(f->force_major);
raw_w = raw2int(f->size_major);
raw_x = raw2int(f->abs_x);
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index b146237..90be30e 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -427,7 +427,6 @@ static void hgpk_recalib_work(struct work_struct *work)
static int hgpk_register(struct psmouse *psmouse)
{
- struct input_dev *dev = psmouse->dev;
int err;
/* register handlers */
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index 2e6bdfe..7c1d7d4 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -44,7 +44,6 @@ static int lifebook_set_6byte_proto(const struct dmi_system_id *d)
}
static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
-#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
/* FLORA-ie 55mi */
.matches = {
@@ -54,6 +53,12 @@ static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
{
/* LifeBook B */
.matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lifebook B Series"),
+ },
+ },
+ {
+ /* LifeBook B */
+ .matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B Series"),
},
},
@@ -118,7 +123,6 @@ static const struct dmi_system_id __initconst lifebook_dmi_table[] = {
},
},
{ }
-#endif
};
void __init lifebook_module_init(void)
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index fd0bc09..d8c0c8d 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -627,8 +627,15 @@ static int psmouse_extensions(struct psmouse *psmouse,
synaptics_hardware = true;
if (max_proto > PSMOUSE_IMEX) {
- if (!set_properties || synaptics_init(psmouse) == 0)
+/*
+ * Try activating the protocol, but check whether support is enabled first,
+ * since we try detecting Synaptics even when the protocol is disabled.
+ */
+ if (synaptics_supported() &&
+ (!set_properties || synaptics_init(psmouse) == 0)) {
return PSMOUSE_SYNAPTICS;
+ }
+
/*
* Some Synaptics touchpads can emulate extended protocols (like IMPS/2).
* Unfortunately Logitech/Genius probes confuse some firmware versions so
@@ -683,19 +690,6 @@ static int psmouse_extensions(struct psmouse *psmouse,
max_proto = PSMOUSE_IMEX;
}
-/*
- * Try Finger Sensing Pad
- */
- if (max_proto > PSMOUSE_IMEX) {
- if (fsp_detect(psmouse, set_properties) == 0) {
- if (!set_properties || fsp_init(psmouse) == 0)
- return PSMOUSE_FSP;
-/*
- * Init failed, try basic relative protocols
- */
- max_proto = PSMOUSE_IMEX;
- }
- }
if (max_proto > PSMOUSE_IMEX) {
if (genius_detect(psmouse, set_properties) == 0)
@@ -712,6 +706,21 @@ static int psmouse_extensions(struct psmouse *psmouse,
}
/*
+ * Try Finger Sensing Pad. We do it here because its probe upsets
+ * Trackpoint devices (causing the TP_READ_ID command to time out).
+ */
+ if (max_proto > PSMOUSE_IMEX) {
+ if (fsp_detect(psmouse, set_properties) == 0) {
+ if (!set_properties || fsp_init(psmouse) == 0)
+ return PSMOUSE_FSP;
+/*
+ * Init failed, try basic relative protocols
+ */
+ max_proto = PSMOUSE_IMEX;
+ }
+ }
+
+/*
* Reset to defaults in case the device got confused by extended
* protocol probes. Note that we follow up with full reset because
* some mice put themselves to sleep when they see PSMOUSE_RESET_DIS.
@@ -1132,12 +1141,22 @@ static void psmouse_cleanup(struct serio *serio)
psmouse_deactivate(parent);
}
- psmouse_deactivate(psmouse);
+ psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
+
+ /*
+ * Disable stream mode so cleanup routine can proceed undisturbed.
+ */
+ if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
+ printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n",
+ psmouse->ps2dev.serio->phys);
if (psmouse->cleanup)
psmouse->cleanup(psmouse);
- psmouse_reset(psmouse);
+/*
+ * Reset the mouse to defaults (bare PS/2 protocol).
+ */
+ ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
/*
* Some boxes, such as HP nx7400, get terribly confused if mouse
@@ -1447,24 +1466,10 @@ ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *de
struct serio *serio = to_serio_port(dev);
struct psmouse_attribute *attr = to_psmouse_attr(devattr);
struct psmouse *psmouse;
- int retval;
-
- retval = serio_pin_driver(serio);
- if (retval)
- return retval;
-
- if (serio->drv != &psmouse_drv) {
- retval = -ENODEV;
- goto out;
- }
psmouse = serio_get_drvdata(serio);
- retval = attr->show(psmouse, attr->data, buf);
-
-out:
- serio_unpin_driver(serio);
- return retval;
+ return attr->show(psmouse, attr->data, buf);
}
ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr,
@@ -1475,18 +1480,9 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
struct psmouse *psmouse, *parent = NULL;
int retval;
- retval = serio_pin_driver(serio);
- if (retval)
- return retval;
-
- if (serio->drv != &psmouse_drv) {
- retval = -ENODEV;
- goto out_unpin;
- }
-
retval = mutex_lock_interruptible(&psmouse_mutex);
if (retval)
- goto out_unpin;
+ goto out;
psmouse = serio_get_drvdata(serio);
@@ -1516,8 +1512,7 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
out_unlock:
mutex_unlock(&psmouse_mutex);
- out_unpin:
- serio_unpin_driver(serio);
+ out:
return retval;
}
@@ -1579,9 +1574,7 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
}
mutex_unlock(&psmouse_mutex);
- serio_unpin_driver(serio);
serio_unregister_child_port(serio);
- serio_pin_driver_uninterruptible(serio);
mutex_lock(&psmouse_mutex);
if (serio->drv != &psmouse_drv) {
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 77b9fd0..81a6b81 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -658,9 +658,9 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
if (packet[3] & BIT(1))
button_status |= 0x0f; /* wheel up */
if (packet[3] & BIT(2))
- button_status |= BIT(5);/* horizontal left */
+ button_status |= BIT(4);/* horizontal left */
if (packet[3] & BIT(3))
- button_status |= BIT(4);/* horizontal right */
+ button_status |= BIT(5);/* horizontal right */
/* push back to packet queue */
if (button_status != 0)
packet[3] = button_status;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 05689e7..d3f5243 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -743,6 +743,11 @@ int synaptics_init(struct psmouse *psmouse)
return -1;
}
+bool synaptics_supported(void)
+{
+ return true;
+}
+
#else /* CONFIG_MOUSE_PS2_SYNAPTICS */
void __init synaptics_module_init(void)
@@ -754,5 +759,10 @@ int synaptics_init(struct psmouse *psmouse)
return -ENOSYS;
}
+bool synaptics_supported(void)
+{
+ return false;
+}
+
#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 838e7f2..f0f40a3 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -109,5 +109,6 @@ void synaptics_module_init(void);
int synaptics_detect(struct psmouse *psmouse, bool set_properties);
int synaptics_init(struct psmouse *psmouse);
void synaptics_reset(struct psmouse *psmouse);
+bool synaptics_supported(void);
#endif /* _SYNAPTICS_H */
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index f479ea5..320b7ca 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -79,11 +79,11 @@ static void altera_ps2_close(struct serio *io)
/*
* Add one device to this driver.
*/
-static int altera_ps2_probe(struct platform_device *pdev)
+static int __devinit altera_ps2_probe(struct platform_device *pdev)
{
struct ps2if *ps2if;
struct serio *serio;
- int error;
+ int error, irq;
ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
@@ -108,11 +108,13 @@ static int altera_ps2_probe(struct platform_device *pdev)
goto err_free_mem;
}
- ps2if->irq = platform_get_irq(pdev, 0);
- if (ps2if->irq < 0) {
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
error = -ENXIO;
goto err_free_mem;
}
+ ps2if->irq = irq;
if (!request_mem_region(ps2if->iomem_res->start,
resource_size(ps2if->iomem_res), pdev->name)) {
@@ -155,7 +157,7 @@ static int altera_ps2_probe(struct platform_device *pdev)
/*
* Remove one device from this driver.
*/
-static int altera_ps2_remove(struct platform_device *pdev)
+static int __devexit altera_ps2_remove(struct platform_device *pdev)
{
struct ps2if *ps2if = platform_get_drvdata(pdev);
@@ -175,9 +177,10 @@ static int altera_ps2_remove(struct platform_device *pdev)
*/
static struct platform_driver altera_ps2_driver = {
.probe = altera_ps2_probe,
- .remove = altera_ps2_remove,
+ .remove = __devexit_p(altera_ps2_remove),
.driver = {
.name = DRV_NAME,
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 89b3941..92563a6 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io)
clk_disable(kmi->clk);
}
-static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
{
struct amba_kmi_port *kmi;
struct serio *io;
@@ -134,7 +134,7 @@ static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
io->port_data = kmi;
io->dev.parent = &dev->dev;
- kmi->io = io;
+ kmi->io = io;
kmi->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!kmi->base) {
ret = -ENOMEM;
@@ -162,7 +162,7 @@ static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
return ret;
}
-static int amba_kmi_remove(struct amba_device *dev)
+static int __devexit amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
@@ -197,10 +197,11 @@ static struct amba_id amba_kmi_idtable[] = {
static struct amba_driver ambakmi_driver = {
.drv = {
.name = "kmi-pl050",
+ .owner = THIS_MODULE,
},
.id_table = amba_kmi_idtable,
.probe = amba_kmi_probe,
- .remove = amba_kmi_remove,
+ .remove = __devexit_p(amba_kmi_remove),
.resume = amba_kmi_resume,
};
diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c
index a6fb7a3..b54452a 100644
--- a/drivers/input/serio/at32psif.c
+++ b/drivers/input/serio/at32psif.c
@@ -137,7 +137,7 @@ static int psif_write(struct serio *io, unsigned char val)
spin_lock_irqsave(&psif->lock, flags);
while (!(psif_readl(psif, SR) & PSIF_BIT(TXEMPTY)) && timeout--)
- msleep(10);
+ udelay(50);
if (timeout >= 0) {
psif_writel(psif, THR, val);
@@ -352,6 +352,7 @@ static struct platform_driver psif_driver = {
.remove = __exit_p(psif_remove),
.driver = {
.name = "atmel_psif",
+ .owner = THIS_MODULE,
},
.suspend = psif_suspend,
.resume = psif_resume,
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index bd0f92d..06addfa 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -6,7 +6,7 @@
* Copyright (c) 2002 Thibaut Varene <varenet@parisc-linux.org>
*
* Pieces of code based on linux-2.4's hp_mouse.c & hp_keyb.c
- * Copyright (c) 1999 Alex deVries <alex@onefishtwo.ca>
+ * Copyright (c) 1999 Alex deVries <alex@onefishtwo.ca>
* Copyright (c) 1999-2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (c) 2000 Xavier Debacker <debackex@esiee.fr>
* Copyright (c) 2000-2001 Thomas Marteau <marteaut@esiee.fr>
@@ -326,7 +326,7 @@ static void gscps2_close(struct serio *port)
* @return: success/error report
*/
-static int __init gscps2_probe(struct parisc_device *dev)
+static int __devinit gscps2_probe(struct parisc_device *dev)
{
struct gscps2port *ps2port;
struct serio *serio;
@@ -443,7 +443,7 @@ static struct parisc_driver parisc_ps2_driver = {
.name = "gsc_ps2",
.id_table = gscps2_device_tbl,
.probe = gscps2_probe,
- .remove = gscps2_remove,
+ .remove = __devexit_p(gscps2_remove),
};
static int __init gscps2_init(void)
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index 7ba9f2b..6cd03eb 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -993,10 +993,8 @@ int hil_mlc_unregister(hil_mlc *mlc)
static int __init hil_mlc_init(void)
{
- init_timer(&hil_mlcs_kicker);
- hil_mlcs_kicker.expires = jiffies + HZ;
- hil_mlcs_kicker.function = &hil_mlcs_timer;
- add_timer(&hil_mlcs_kicker);
+ setup_timer(&hil_mlcs_kicker, &hil_mlcs_timer, 0);
+ mod_timer(&hil_mlcs_kicker, jiffies + HZ);
tasklet_enable(&hil_mlcs_tasklet);
@@ -1005,7 +1003,7 @@ static int __init hil_mlc_init(void)
static void __exit hil_mlc_exit(void)
{
- del_timer(&hil_mlcs_kicker);
+ del_timer_sync(&hil_mlcs_kicker);
tasklet_disable(&hil_mlcs_tasklet);
tasklet_kill(&hil_mlcs_tasklet);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 7fbffe4..2a5982e 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -158,6 +158,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Gigabyte M1022M netbook */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ },
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
@@ -516,6 +524,13 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
*/
static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{
+ /* Acer Aspire 5610 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
+ },
+ },
+ {
/* Acer Aspire 5630 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 1df02d25..b54aee7 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -126,6 +126,8 @@ static unsigned char i8042_suppress_kbd_ack;
static struct platform_device *i8042_platform_device;
static irqreturn_t i8042_interrupt(int irq, void *dev_id);
+static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
+ struct serio *serio);
void i8042_lock_chip(void)
{
@@ -139,6 +141,48 @@ void i8042_unlock_chip(void)
}
EXPORT_SYMBOL(i8042_unlock_chip);
+int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&i8042_lock, flags);
+
+ if (i8042_platform_filter) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ i8042_platform_filter = filter;
+
+out:
+ spin_unlock_irqrestore(&i8042_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(i8042_install_filter);
+
+int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *port))
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&i8042_lock, flags);
+
+ if (i8042_platform_filter != filter) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ i8042_platform_filter = NULL;
+
+out:
+ spin_unlock_irqrestore(&i8042_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(i8042_remove_filter);
+
/*
* The i8042_wait_read() and i8042_wait_write functions wait for the i8042 to
* be ready for reading values from it / writing values to it.
@@ -369,6 +413,31 @@ static void i8042_stop(struct serio *serio)
}
/*
+ * i8042_filter() filters out unwanted bytes from the input data stream.
+ * It is called from i8042_interrupt and thus is running with interrupts
+ * off and i8042_lock held.
+ */
+static bool i8042_filter(unsigned char data, unsigned char str,
+ struct serio *serio)
+{
+ if (unlikely(i8042_suppress_kbd_ack)) {
+ if ((~str & I8042_STR_AUXDATA) &&
+ (data == 0xfa || data == 0xfe)) {
+ i8042_suppress_kbd_ack--;
+ dbg("Extra keyboard ACK - filtered out\n");
+ return true;
+ }
+ }
+
+ if (i8042_platform_filter && i8042_platform_filter(data, str, serio)) {
+ dbg("Filtered out by platfrom filter\n");
+ return true;
+ }
+
+ return false;
+}
+
+/*
* i8042_interrupt() is the most important function in this driver -
* it handles the interrupts from the i8042, and sends incoming bytes
* to the upper layers.
@@ -377,13 +446,16 @@ static void i8042_stop(struct serio *serio)
static irqreturn_t i8042_interrupt(int irq, void *dev_id)
{
struct i8042_port *port;
+ struct serio *serio;
unsigned long flags;
unsigned char str, data;
unsigned int dfl;
unsigned int port_no;
+ bool filtered;
int ret = 1;
spin_lock_irqsave(&i8042_lock, flags);
+
str = i8042_read_status();
if (unlikely(~str & I8042_STR_OBF)) {
spin_unlock_irqrestore(&i8042_lock, flags);
@@ -391,8 +463,8 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
ret = 0;
goto out;
}
+
data = i8042_read_data();
- spin_unlock_irqrestore(&i8042_lock, flags);
if (i8042_mux_present && (str & I8042_STR_AUXDATA)) {
static unsigned long last_transmit;
@@ -441,21 +513,19 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
}
port = &i8042_ports[port_no];
+ serio = port->exists ? port->serio : NULL;
dbg("%02x <- i8042 (interrupt, %d, %d%s%s)",
data, port_no, irq,
dfl & SERIO_PARITY ? ", bad parity" : "",
dfl & SERIO_TIMEOUT ? ", timeout" : "");
- if (unlikely(i8042_suppress_kbd_ack))
- if (port_no == I8042_KBD_PORT_NO &&
- (data == 0xfa || data == 0xfe)) {
- i8042_suppress_kbd_ack--;
- goto out;
- }
+ filtered = i8042_filter(data, str, serio);
+
+ spin_unlock_irqrestore(&i8042_lock, flags);
- if (likely(port->exists))
- serio_interrupt(port->serio, data, dfl);
+ if (likely(port->exists && !filtered))
+ serio_interrupt(serio, data, dfl);
out:
return IRQ_RETVAL(ret);
@@ -1091,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev)
return 0;
}
+static int i8042_pm_thaw(struct device *dev)
+{
+ i8042_interrupt(0, NULL);
+
+ return 0;
+}
+
static const struct dev_pm_ops i8042_pm_ops = {
.suspend = i8042_pm_reset,
.resume = i8042_pm_restore,
+ .thaw = i8042_pm_thaw,
.poweroff = i8042_pm_reset,
.restore = i8042_pm_restore,
};
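
The new i8042_install_filter()/i8042_remove_filter() pair lets a single platform driver veto bytes before serio sees them; the callback is invoked from i8042_interrupt() with i8042_lock held, so it must not sleep. A hedged sketch of a consumer, assuming the prototypes are exposed through <linux/i8042.h>; the scancode policy is invented for illustration.

#include <linux/types.h>
#include <linux/i8042.h>
#include <linux/serio.h>

/* Sketch only: a hypothetical filter that swallows one keyboard scancode. */
static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *serio)
{
	/* str is the raw i8042 status byte; bit 0x20 marks AUX (mouse) data */
	if (!(str & 0x20) && data == 0xd4)
		return true;	/* drop the byte before serio_interrupt() */

	return false;
}

static int example_attach(void)
{
	return i8042_install_filter(example_i8042_filter);
}

static void example_detach(void)
{
	i8042_remove_filter(example_i8042_filter);
}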
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index f412c69..d55874e 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -180,8 +180,8 @@ static void __devinit ps2_clear_input(struct ps2if *ps2if)
}
}
-static inline unsigned int
-ps2_test_one(struct ps2if *ps2if, unsigned int mask)
+static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
+ unsigned int mask)
{
unsigned int val;
@@ -197,7 +197,7 @@ ps2_test_one(struct ps2if *ps2if, unsigned int mask)
* Test the keyboard interface. We basically check to make sure that
* we can drive each line to the keyboard independently of each other.
*/
-static int __init ps2_test(struct ps2if *ps2if)
+static int __devinit ps2_test(struct ps2if *ps2if)
{
unsigned int stat;
int ret = 0;
@@ -312,7 +312,7 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
/*
* Remove one device from this driver.
*/
-static int ps2_remove(struct sa1111_dev *dev)
+static int __devexit ps2_remove(struct sa1111_dev *dev)
{
struct ps2if *ps2if = sa1111_get_drvdata(dev);
@@ -335,7 +335,7 @@ static struct sa1111_driver ps2_driver = {
},
.devid = SA1111_DEVID_PS2,
.probe = ps2_probe,
- .remove = ps2_remove,
+ .remove = __devexit_p(ps2_remove),
};
static int __init ps2_init(void)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 0236f0d..e0f3018 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -284,13 +284,7 @@ static void serio_handle_event(void)
mutex_lock(&serio_mutex);
- /*
- * Note that we handle only one event here to give swsusp
- * a chance to freeze kseriod thread. Serio events should
- * be pretty rare so we are not concerned about taking
- * performance hit.
- */
- if ((event = serio_get_event())) {
+ while ((event = serio_get_event())) {
switch (event->type) {
case SERIO_REGISTER_PORT:
@@ -380,10 +374,9 @@ static struct serio *serio_get_pending_child(struct serio *parent)
static int serio_thread(void *nothing)
{
- set_freezable();
do {
serio_handle_event();
- wait_event_freezable(serio_wait,
+ wait_event_interruptible(serio_wait,
kthread_should_stop() || !list_empty(&serio_event_list));
} while (!kthread_should_stop());
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 9114ae1..16310f3 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -1,7 +1,7 @@
/*
* drivers/input/tablet/wacom.h
*
- * USB Wacom Graphire and Wacom Intuos tablet support
+ * USB Wacom tablet support
*
* Copyright (c) 2000-2004 Vojtech Pavlik <vojtech@ucw.cz>
* Copyright (c) 2000 Andreas Bach Aaen <abach@stofanet.dk>
@@ -69,6 +69,9 @@
* v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A)
* v1.50 (pc) - Fixed a TabletPC touch bug in 2.6.28
* v1.51 (pc) - Added support for Intuos4
+ * v1.52 (pc) - Query Wacom data upon system resume
+ * - add defines for features->type
+ * - add new devices (0x9F, 0xE2, and 0XE3)
*/
/*
@@ -89,9 +92,9 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.51"
+#define DRIVER_VERSION "v1.52"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
-#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver"
+#define DRIVER_DESC "USB Wacom tablet driver"
#define DRIVER_LICENSE "GPL"
MODULE_AUTHOR(DRIVER_AUTHOR);
@@ -133,6 +136,8 @@ extern void input_dev_i4s(struct input_dev *input_dev, struct wacom_wac *wacom_w
extern void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
+extern void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
+extern void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern __u16 wacom_le16_to_cpu(unsigned char *data);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index ea30c98..072f33b 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -1,7 +1,7 @@
/*
* drivers/input/tablet/wacom_sys.c
*
- * USB Wacom Graphire and Wacom Intuos tablet support - system specific code
+ * USB Wacom tablet support - system specific code
*/
/*
@@ -209,6 +209,7 @@ void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
+ BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_STYLUS2);
input_set_abs_params(input_dev, ABS_DISTANCE, 0, wacom_wac->features->distance_max, 0, 0);
}
@@ -256,6 +257,7 @@ void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE) |
BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA);
input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
+ BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_TOOL_BRUSH) |
BIT_MASK(BTN_TOOL_PENCIL) | BIT_MASK(BTN_TOOL_AIRBRUSH) |
BIT_MASK(BTN_TOOL_LENS) | BIT_MASK(BTN_STYLUS2);
@@ -269,7 +271,8 @@ void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_STYLUS2);
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
+ BIT_MASK(BTN_STYLUS) | BIT_MASK(BTN_STYLUS2);
}
void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
@@ -277,12 +280,32 @@ void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER);
}
+void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
+{
+ if (wacom_wac->features->device_type == BTN_TOOL_DOUBLETAP ||
+ wacom_wac->features->device_type == BTN_TOOL_TRIPLETAP) {
+ input_set_abs_params(input_dev, ABS_RX, 0, wacom_wac->features->x_phy, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, wacom_wac->features->y_phy, 0, 0);
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_DOUBLETAP);
+ }
+}
+
+void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
+{
+ if (wacom_wac->features->device_type == BTN_TOOL_TRIPLETAP) {
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_TRIPLETAP);
+ input_dev->evbit[0] |= BIT_MASK(EV_MSC);
+ input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
+ }
+}
+
static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
- struct wacom_wac *wacom_wac)
+ struct wacom_features *features)
{
struct usb_device *dev = interface_to_usbdev(intf);
- struct wacom_features *features = wacom_wac->features;
- char limit = 0, result = 0;
+ char limit = 0;
+ /* result has to be defined as int for some devices */
+ int result = 0;
int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
unsigned char *report;
@@ -328,13 +351,24 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
case HID_USAGE_X:
if (usage == WCM_DESKTOP) {
if (finger) {
- features->touch_x_max =
- features->touch_y_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ features->device_type = BTN_TOOL_DOUBLETAP;
+ if (features->type == TABLETPC2FG) {
+ /* need to reset back */
+ features->pktlen = WACOM_PKGLEN_TPC2FG;
+ features->device_type = BTN_TOOL_TRIPLETAP;
+ }
features->x_max =
+ wacom_le16_to_cpu(&report[i + 3]);
+ features->x_phy =
wacom_le16_to_cpu(&report[i + 6]);
- i += 7;
+ features->unit = report[i + 9];
+ features->unitExpo = report[i + 11];
+ i += 12;
} else if (pen) {
+ /* penabled only accepts exact bytes of data */
+ if (features->type == TABLETPC2FG)
+ features->pktlen = WACOM_PKGLEN_PENABLED;
+ features->device_type = BTN_TOOL_PEN;
features->x_max =
wacom_le16_to_cpu(&report[i + 3]);
i += 4;
@@ -350,10 +384,35 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
break;
case HID_USAGE_Y:
- if (usage == WCM_DESKTOP)
- features->y_max =
- wacom_le16_to_cpu(&report[i + 3]);
- i += 4;
+ if (usage == WCM_DESKTOP) {
+ if (finger) {
+ features->device_type = BTN_TOOL_DOUBLETAP;
+ if (features->type == TABLETPC2FG) {
+ /* need to reset back */
+ features->pktlen = WACOM_PKGLEN_TPC2FG;
+ features->device_type = BTN_TOOL_TRIPLETAP;
+ features->y_max =
+ wacom_le16_to_cpu(&report[i + 3]);
+ features->y_phy =
+ wacom_le16_to_cpu(&report[i + 6]);
+ i += 7;
+ } else {
+ features->y_max =
+ features->x_max;
+ features->y_phy =
+ wacom_le16_to_cpu(&report[i + 3]);
+ i += 4;
+ }
+ } else if (pen) {
+ /* penabled only accepts exact bytes of data */
+ if (features->type == TABLETPC2FG)
+ features->pktlen = WACOM_PKGLEN_PENABLED;
+ features->device_type = BTN_TOOL_PEN;
+ features->y_max =
+ wacom_le16_to_cpu(&report[i + 3]);
+ i += 4;
+ }
+ }
break;
case HID_USAGE_FINGER:
@@ -376,7 +435,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
break;
case HID_COLLECTION:
- /* reset UsagePage ans Finger */
+ /* reset UsagePage and Finger */
finger = usage = 0;
break;
}
@@ -388,43 +447,92 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
return result;
}
-static int wacom_query_tablet_data(struct usb_interface *intf)
+static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_features *features)
{
unsigned char *rep_data;
- int limit = 0;
- int error;
+ int limit = 0, report_id = 2;
+ int error = -ENOMEM;
rep_data = kmalloc(2, GFP_KERNEL);
if (!rep_data)
- return -ENOMEM;
-
- do {
- rep_data[0] = 2;
- rep_data[1] = 2;
- error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
- 2, rep_data, 2);
- if (error >= 0)
- error = usb_get_report(intf,
- WAC_HID_FEATURE_REPORT, 2,
- rep_data, 2);
- } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
+ return error;
+
+ /* ask to report tablet data if it is 2FGT or not a Tablet PC */
+ if (features->device_type == BTN_TOOL_TRIPLETAP) {
+ do {
+ rep_data[0] = 3;
+ rep_data[1] = 4;
+ report_id = 3;
+ error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
+ report_id, rep_data, 2);
+ if (error >= 0)
+ error = usb_get_report(intf,
+ WAC_HID_FEATURE_REPORT, report_id,
+ rep_data, 3);
+ } while ((error < 0 || rep_data[1] != 4) && limit++ < 5);
+ } else if (features->type != TABLETPC && features->type != TABLETPC2FG) {
+ do {
+ rep_data[0] = 2;
+ rep_data[1] = 2;
+ error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
+ report_id, rep_data, 2);
+ if (error >= 0)
+ error = usb_get_report(intf,
+ WAC_HID_FEATURE_REPORT, report_id,
+ rep_data, 2);
+ } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
+ }
kfree(rep_data);
return error < 0 ? error : 0;
}
+static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
+ struct wacom_features *features)
+{
+ int error = 0;
+ struct usb_host_interface *interface = intf->cur_altsetting;
+ struct hid_descriptor *hid_desc;
+
+ /* default device to penabled */
+ features->device_type = BTN_TOOL_PEN;
+
+ /* only Tablet PCs need to retrieve the info */
+ if ((features->type != TABLETPC) && (features->type != TABLETPC2FG))
+ goto out;
+
+ if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
+ if (usb_get_extra_descriptor(&interface->endpoint[0],
+ HID_DEVICET_REPORT, &hid_desc)) {
+ printk("wacom: can not retrieve extra class descriptor\n");
+ error = 1;
+ goto out;
+ }
+ }
+ error = wacom_parse_hid(intf, hid_desc, features);
+ if (error)
+ goto out;
+
+ /* touch device found but size is not defined. use default */
+ if (features->device_type == BTN_TOOL_DOUBLETAP && !features->x_max) {
+ features->x_max = 1023;
+ features->y_max = 1023;
+ }
+
+ out:
+ return error;
+}
+
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
- struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
struct wacom *wacom;
struct wacom_wac *wacom_wac;
struct wacom_features *features;
struct input_dev *input_dev;
int error = -ENOMEM;
- struct hid_descriptor *hid_desc;
wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL);
@@ -432,7 +540,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
if (!wacom || !input_dev || !wacom_wac)
goto fail1;
- wacom_wac->data = usb_buffer_alloc(dev, 10, GFP_KERNEL, &wacom->data_dma);
+ wacom_wac->data = usb_buffer_alloc(dev, WACOM_PKGLEN_MAX, GFP_KERNEL, &wacom->data_dma);
if (!wacom_wac->data)
goto fail1;
@@ -448,7 +556,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
wacom_wac->features = features = get_wacom_feature(id);
- BUG_ON(features->pktlen > 10);
+ BUG_ON(features->pktlen > WACOM_PKGLEN_MAX);
input_dev->name = wacom_wac->features->name;
wacom->wacom_wac = wacom_wac;
@@ -463,47 +571,24 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
endpoint = &intf->cur_altsetting->endpoint[0].desc;
- /* Initialize touch_x_max and touch_y_max in case it is not defined */
- if (wacom_wac->features->type == TABLETPC) {
- features->touch_x_max = 1023;
- features->touch_y_max = 1023;
- } else {
- features->touch_x_max = 0;
- features->touch_y_max = 0;
- }
-
- /* TabletPC need to retrieve the physical and logical maximum from report descriptor */
- if (wacom_wac->features->type == TABLETPC) {
- if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
- if (usb_get_extra_descriptor(&interface->endpoint[0],
- HID_DEVICET_REPORT, &hid_desc)) {
- printk("wacom: can not retrive extra class descriptor\n");
- goto fail2;
- }
- }
- error = wacom_parse_hid(intf, hid_desc, wacom_wac);
- if (error)
- goto fail2;
- }
+ /* Retrieve the physical and logical size for OEM devices */
+ error = wacom_retrieve_hid_descriptor(intf, features);
+ if (error)
+ goto fail2;
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
- BIT_MASK(BTN_TOUCH) | BIT_MASK(BTN_STYLUS);
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOUCH);
+
input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
- if (features->type == TABLETPC) {
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_DOUBLETAP);
- input_set_abs_params(input_dev, ABS_RX, 0, features->touch_x_max, 4, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, features->touch_y_max, 4, 0);
- }
input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
wacom_init_input_dev(input_dev, wacom_wac);
usb_fill_int_urb(wacom->irq, dev,
usb_rcvintpipe(dev, endpoint->bEndpointAddress),
- wacom_wac->data, wacom_wac->features->pktlen,
+ wacom_wac->data, features->pktlen,
wacom_sys_irq, wacom, endpoint->bInterval);
wacom->irq->transfer_dma = wacom->data_dma;
wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -512,18 +597,14 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
if (error)
goto fail3;
- /*
- * Ask the tablet to report tablet data if it is not a Tablet PC.
- * Note that if query fails it is not a hard failure.
- */
- if (wacom_wac->features->type != TABLETPC)
- wacom_query_tablet_data(intf);
+ /* Note that if query fails it is not a hard failure */
+ wacom_query_tablet_data(intf, features);
usb_set_intfdata(intf, wacom);
return 0;
fail3: usb_free_urb(wacom->irq);
- fail2: usb_buffer_free(dev, 10, wacom_wac->data, wacom->data_dma);
+ fail2: usb_buffer_free(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
fail1: input_free_device(input_dev);
kfree(wacom);
kfree(wacom_wac);
@@ -539,7 +620,7 @@ static void wacom_disconnect(struct usb_interface *intf)
usb_kill_urb(wacom->irq);
input_unregister_device(wacom->dev);
usb_free_urb(wacom->irq);
- usb_buffer_free(interface_to_usbdev(intf), 10,
+ usb_buffer_free(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
wacom->wacom_wac->data, wacom->data_dma);
kfree(wacom->wacom_wac);
kfree(wacom);
@@ -559,12 +640,16 @@ static int wacom_suspend(struct usb_interface *intf, pm_message_t message)
static int wacom_resume(struct usb_interface *intf)
{
struct wacom *wacom = usb_get_intfdata(intf);
+ struct wacom_features *features = wacom->wacom_wac->features;
int rv;
mutex_lock(&wacom->lock);
- if (wacom->open)
+ if (wacom->open) {
rv = usb_submit_urb(wacom->irq, GFP_NOIO);
- else
+ /* switch to wacom mode if needed */
+ if (!wacom_retrieve_hid_descriptor(intf, features))
+ wacom_query_tablet_data(intf, features);
+ } else
rv = 0;
mutex_unlock(&wacom->lock);
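To keep the two mode-switch paths straight, here is a summary sketch of what wacom_query_tablet_data() now sends; this restates the hunk above and is not part of the patch:

/*
 * device class                          feature report  payload  verified by
 * 2FG touch (BTN_TOOL_TRIPLETAP)              3         {3, 4}   rep_data[1] == 4
 * other non-TabletPC devices                  2         {2, 2}   rep_data[1] == 2
 *
 * Each usb_set_report()/usb_get_report() pair is retried up to five
 * times, and a failing query is deliberately not a hard probe failure.
 */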
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index c896d6a..1056f14 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1,7 +1,7 @@
/*
* drivers/input/tablet/wacom_wac.c
*
- * USB Wacom Graphire and Wacom Intuos tablet support - Wacom specific code
+ * USB Wacom tablet support - Wacom specific code
*
*/
@@ -58,16 +58,15 @@ static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
unsigned char *data = wacom->data;
int prox, pressure;
- if (data[0] != 2) {
+ if (data[0] != WACOM_REPORT_PENABLED) {
dbg("wacom_pl_irq: received unknown report #%d", data[0]);
return 0;
}
prox = data[1] & 0x40;
- wacom->id[0] = ERASER_DEVICE_ID;
if (prox) {
-
+ wacom->id[0] = ERASER_DEVICE_ID;
pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
if (wacom->features->pressure_max > 255)
pressure = (pressure << 1) | ((data[4] >> 6) & 1);
@@ -128,7 +127,7 @@ static int wacom_ptu_irq(struct wacom_wac *wacom, void *wcombo)
{
unsigned char *data = wacom->data;
- if (data[0] != 2) {
+ if (data[0] != WACOM_REPORT_PENABLED) {
printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]);
return 0;
}
@@ -155,14 +154,16 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
{
unsigned char *data = wacom->data;
int x, y, rw;
+ static int penData = 0;
- if (data[0] != 2) {
+ if (data[0] != WACOM_REPORT_PENABLED) {
dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
return 0;
}
if (data[1] & 0x80) {
/* in prox and not pad data */
+ penData = 1;
switch ((data[1] >> 5) & 3) {
@@ -232,7 +233,11 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
switch (wacom->features->type) {
case WACOM_G4:
if (data[7] & 0xf8) {
- wacom_input_sync(wcombo); /* sync last event */
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = PAD_DEVICE_ID;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
@@ -242,10 +247,15 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
} else if (wacom->id[1]) {
- wacom_input_sync(wcombo); /* sync last event */
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = 0;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
+ wacom_report_rel(wcombo, REL_WHEEL, 0);
wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
wacom_report_abs(wcombo, ABS_MISC, 0);
wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
@@ -253,7 +263,11 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
break;
case WACOM_MO:
if ((data[7] & 0xf8) || (data[8] & 0xff)) {
- wacom_input_sync(wcombo); /* sync last event */
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = PAD_DEVICE_ID;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -264,7 +278,11 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
} else if (wacom->id[1]) {
- wacom_input_sync(wcombo); /* sync last event */
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = 0;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -432,7 +450,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
unsigned int t;
int idx = 0, result;
- if (data[0] != 2 && data[0] != 5 && data[0] != 6 && data[0] != 12) {
+ if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_INTUOSREAD
+ && data[0] != WACOM_REPORT_INTUOSWRITE && data[0] != WACOM_REPORT_INTUOSPAD) {
dbg("wacom_intuos_irq: received unknown report #%d", data[0]);
return 0;
}
@@ -442,7 +461,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
idx = data[1] & 0x01;
/* pad packets. Works as a second tool and is always in prox */
- if (data[0] == 12) {
+ if (data[0] == WACOM_REPORT_INTUOSPAD) {
/* initiate the pad as a device */
if (wacom->tool[1] != BTN_TOOL_FINGER)
wacom->tool[1] = BTN_TOOL_FINGER;
@@ -608,95 +627,163 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
return 1;
}
+
+static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
+{
+ wacom_report_abs(wcombo, ABS_X,
+ (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
+ wacom_report_abs(wcombo, ABS_Y,
+ (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
+ wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
+ wacom_report_key(wcombo, wacom->tool[idx], 1);
+ if (idx)
+ wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ else
+ wacom_report_key(wcombo, BTN_TOUCH, 1);
+}
+
+static void wacom_tpc_touch_out(struct wacom_wac *wacom, void *wcombo, int idx)
+{
+ wacom_report_abs(wcombo, ABS_X, 0);
+ wacom_report_abs(wcombo, ABS_Y, 0);
+ wacom_report_abs(wcombo, ABS_MISC, 0);
+ wacom_report_key(wcombo, wacom->tool[idx], 0);
+ if (idx)
+ wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ else
+ wacom_report_key(wcombo, BTN_TOUCH, 0);
+ return;
+}
+
+static void wacom_tpc_touch_in(struct wacom_wac *wacom, void *wcombo)
+{
+ char *data = wacom->data;
+ struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
+ static int firstFinger = 0;
+ static int secondFinger = 0;
+
+ wacom->tool[0] = BTN_TOOL_DOUBLETAP;
+ wacom->id[0] = TOUCH_DEVICE_ID;
+ wacom->tool[1] = BTN_TOOL_TRIPLETAP;
+
+ if (urb->actual_length != WACOM_PKGLEN_TPC1FG) {
+ switch (data[0]) {
+ case WACOM_REPORT_TPC1FG:
+ wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
+ wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
+ wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
+ wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
+ wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
+ wacom_report_key(wcombo, wacom->tool[0], 1);
+ break;
+ case WACOM_REPORT_TPC2FG:
+ /* keep this byte to send proper out-prox event */
+ wacom->id[1] = data[1] & 0x03;
+
+ if (data[1] & 0x01) {
+ wacom_tpc_finger_in(wacom, wcombo, data, 0);
+ firstFinger = 1;
+ } else if (firstFinger) {
+ wacom_tpc_touch_out(wacom, wcombo, 0);
+ }
+
+ if (data[1] & 0x02) {
+ /* sync first finger data */
+ if (firstFinger)
+ wacom_input_sync(wcombo);
+
+ wacom_tpc_finger_in(wacom, wcombo, data, 1);
+ secondFinger = 1;
+ } else if (secondFinger) {
+ /* sync first finger data */
+ if (firstFinger)
+ wacom_input_sync(wcombo);
+
+ wacom_tpc_touch_out(wacom, wcombo, 1);
+ secondFinger = 0;
+ }
+ if (!(data[1] & 0x01))
+ firstFinger = 0;
+ break;
+ }
+ } else {
+ wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
+ wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
+ wacom_report_key(wcombo, BTN_TOUCH, 1);
+ wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
+ wacom_report_key(wcombo, wacom->tool[0], 1);
+ }
+ return;
+}
+
static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
{
char *data = wacom->data;
- int prox = 0, pressure;
+ int prox = 0, pressure, idx = -1;
static int stylusInProx, touchInProx = 1, touchOut;
struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
dbg("wacom_tpc_irq: received report #%d", data[0]);
- if (urb->actual_length == 5 || data[0] == 6) { /* Touch data */
- if (urb->actual_length == 5) { /* with touch */
- prox = data[0] & 0x03;
+ if (urb->actual_length == WACOM_PKGLEN_TPC1FG || /* single touch */
+ data[0] == WACOM_REPORT_TPC1FG || /* single touch */
+ data[0] == WACOM_REPORT_TPC2FG) { /* 2FG touch */
+ if (urb->actual_length == WACOM_PKGLEN_TPC1FG) { /* with touch */
+ prox = data[0] & 0x01;
} else { /* with capacity */
- prox = data[1] & 0x03;
+ if (data[0] == WACOM_REPORT_TPC1FG)
+ /* single touch */
+ prox = data[1] & 0x01;
+ else
+ /* 2FG touch data */
+ prox = data[1] & 0x03;
}
if (!stylusInProx) { /* stylus not in prox */
if (prox) {
if (touchInProx) {
- wacom->tool[1] = BTN_TOOL_DOUBLETAP;
- wacom->id[0] = TOUCH_DEVICE_ID;
- if (urb->actual_length != 5) {
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
- wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
- } else {
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_key(wcombo, BTN_TOUCH, 1);
- }
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[1], prox & 0x01);
+ wacom_tpc_touch_in(wacom, wcombo);
touchOut = 1;
return 1;
}
} else {
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[1], prox & 0x01);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
+ /* 2FGT out-prox */
+ if (data[0] == WACOM_REPORT_TPC2FG) {
+ idx = (wacom->id[1] & 0x01) - 1;
+ if (idx == 0) {
+ wacom_tpc_touch_out(wacom, wcombo, idx);
+ /* sync first finger event */
+ if (wacom->id[1] & 0x02)
+ wacom_input_sync(wcombo);
+ }
+ idx = (wacom->id[1] & 0x02) - 1;
+ if (idx == 1)
+ wacom_tpc_touch_out(wacom, wcombo, idx);
+ } else /* one finger touch */
+ wacom_tpc_touch_out(wacom, wcombo, 0);
touchOut = 0;
touchInProx = 1;
return 1;
}
} else if (touchOut || !prox) { /* force touch out-prox */
- wacom_report_abs(wcombo, ABS_MISC, TOUCH_DEVICE_ID);
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
+ wacom_tpc_touch_out(wacom, wcombo, 0);
touchOut = 0;
touchInProx = 1;
return 1;
}
- } else if (data[0] == 2) { /* Penabled */
+ } else if (data[0] == WACOM_REPORT_PENABLED) { /* Penabled */
prox = data[1] & 0x20;
touchInProx = 0;
- wacom->id[0] = ERASER_DEVICE_ID;
-
- /*
- * if going from out of proximity into proximity select between the eraser
- * and the pen based on the state of the stylus2 button, choose eraser if
- * pressed else choose pen. if not a proximity change from out to in, send
- * an out of proximity for previous tool then a in for new tool.
- */
if (prox) { /* in prox */
- if (!wacom->tool[0]) {
+ if (!wacom->id[0]) {
/* Going into proximity select tool */
- wacom->tool[1] = (data[1] & 0x08) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- if (wacom->tool[1] == BTN_TOOL_PEN)
+ wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom->tool[0] == BTN_TOOL_PEN)
wacom->id[0] = STYLUS_DEVICE_ID;
- } else if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[1] & 0x08)) {
- /*
- * was entered with stylus2 pressed
- * report out proximity for previous tool
- */
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_input_sync(wcombo);
-
- /* set new tool */
- wacom->tool[1] = BTN_TOOL_PEN;
- wacom->id[0] = STYLUS_DEVICE_ID;
- return 0;
- }
- if (wacom->tool[1] != BTN_TOOL_RUBBER) {
- /* Unknown tool selected default to pen tool */
- wacom->tool[1] = BTN_TOOL_PEN;
- wacom->id[0] = STYLUS_DEVICE_ID;
+ else
+ wacom->id[0] = ERASER_DEVICE_ID;
}
wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
@@ -706,17 +793,21 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
if (pressure < 0)
pressure = wacom->features->pressure_max + pressure + 1;
wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
- wacom_report_key(wcombo, BTN_TOUCH, pressure);
+ wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
} else {
+ wacom_report_abs(wcombo, ABS_X, 0);
+ wacom_report_abs(wcombo, ABS_Y, 0);
wacom_report_abs(wcombo, ABS_PRESSURE, 0);
wacom_report_key(wcombo, BTN_STYLUS, 0);
wacom_report_key(wcombo, BTN_STYLUS2, 0);
wacom_report_key(wcombo, BTN_TOUCH, 0);
+ wacom->id[0] = 0;
+ /* pen is out so touch can be enabled now */
+ touchInProx = 1;
}
- wacom_report_key(wcombo, wacom->tool[1], prox);
+ wacom_report_key(wcombo, wacom->tool[0], prox);
wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
stylusInProx = prox;
- wacom->tool[0] = prox;
return 1;
}
return 0;
@@ -751,6 +842,7 @@ int wacom_wac_irq(struct wacom_wac *wacom_wac, void *wcombo)
return wacom_intuos_irq(wacom_wac, wcombo);
case TABLETPC:
+ case TABLETPC2FG:
return wacom_tpc_irq(wacom_wac, wcombo);
default:
@@ -791,9 +883,17 @@ void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_w
input_dev_i4s(input_dev, wacom_wac);
input_dev_i(input_dev, wacom_wac);
break;
+ case TABLETPC2FG:
+ input_dev_tpc2fg(input_dev, wacom_wac);
+ /* fall through */
+ case TABLETPC:
+ input_dev_tpc(input_dev, wacom_wac);
+ if (wacom_wac->features->device_type != BTN_TOOL_PEN)
+ break; /* no need to process stylus stuff */
+
+ /* fall through */
case PL:
case PTU:
- case TABLETPC:
input_dev_pl(input_dev, wacom_wac);
/* fall through */
case PENPARTNER:
@@ -804,66 +904,69 @@ void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_w
}
static struct wacom_features wacom_features[] = {
- { "Wacom Penpartner", 7, 5040, 3780, 255, 0, PENPARTNER },
- { "Wacom Graphire", 8, 10206, 7422, 511, 63, GRAPHIRE },
- { "Wacom Graphire2 4x5", 8, 10206, 7422, 511, 63, GRAPHIRE },
- { "Wacom Graphire2 5x7", 8, 13918, 10206, 511, 63, GRAPHIRE },
- { "Wacom Graphire3", 8, 10208, 7424, 511, 63, GRAPHIRE },
- { "Wacom Graphire3 6x8", 8, 16704, 12064, 511, 63, GRAPHIRE },
- { "Wacom Graphire4 4x5", 8, 10208, 7424, 511, 63, WACOM_G4 },
- { "Wacom Graphire4 6x8", 8, 16704, 12064, 511, 63, WACOM_G4 },
- { "Wacom BambooFun 4x5", 9, 14760, 9225, 511, 63, WACOM_MO },
- { "Wacom BambooFun 6x8", 9, 21648, 13530, 511, 63, WACOM_MO },
- { "Wacom Bamboo1 Medium",8, 16704, 12064, 511, 63, GRAPHIRE },
- { "Wacom Volito", 8, 5104, 3712, 511, 63, GRAPHIRE },
- { "Wacom PenStation2", 8, 3250, 2320, 255, 63, GRAPHIRE },
- { "Wacom Volito2 4x5", 8, 5104, 3712, 511, 63, GRAPHIRE },
- { "Wacom Volito2 2x3", 8, 3248, 2320, 511, 63, GRAPHIRE },
- { "Wacom PenPartner2", 8, 3250, 2320, 511, 63, GRAPHIRE },
- { "Wacom Bamboo", 9, 14760, 9225, 511, 63, WACOM_MO },
- { "Wacom Bamboo1", 8, 5104, 3712, 511, 63, GRAPHIRE },
- { "Wacom Intuos 4x5", 10, 12700, 10600, 1023, 31, INTUOS },
- { "Wacom Intuos 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
- { "Wacom Intuos 9x12", 10, 30480, 24060, 1023, 31, INTUOS },
- { "Wacom Intuos 12x12", 10, 30480, 31680, 1023, 31, INTUOS },
- { "Wacom Intuos 12x18", 10, 45720, 31680, 1023, 31, INTUOS },
- { "Wacom PL400", 8, 5408, 4056, 255, 0, PL },
- { "Wacom PL500", 8, 6144, 4608, 255, 0, PL },
- { "Wacom PL600", 8, 6126, 4604, 255, 0, PL },
- { "Wacom PL600SX", 8, 6260, 5016, 255, 0, PL },
- { "Wacom PL550", 8, 6144, 4608, 511, 0, PL },
- { "Wacom PL800", 8, 7220, 5780, 511, 0, PL },
- { "Wacom PL700", 8, 6758, 5406, 511, 0, PL },
- { "Wacom PL510", 8, 6282, 4762, 511, 0, PL },
- { "Wacom DTU710", 8, 34080, 27660, 511, 0, PL },
- { "Wacom DTF521", 8, 6282, 4762, 511, 0, PL },
- { "Wacom DTF720", 8, 6858, 5506, 511, 0, PL },
- { "Wacom DTF720a", 8, 6858, 5506, 511, 0, PL },
- { "Wacom Cintiq Partner",8, 20480, 15360, 511, 0, PTU },
- { "Wacom Intuos2 4x5", 10, 12700, 10600, 1023, 31, INTUOS },
- { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
- { "Wacom Intuos2 9x12", 10, 30480, 24060, 1023, 31, INTUOS },
- { "Wacom Intuos2 12x12", 10, 30480, 31680, 1023, 31, INTUOS },
- { "Wacom Intuos2 12x18", 10, 45720, 31680, 1023, 31, INTUOS },
- { "Wacom Intuos3 4x5", 10, 25400, 20320, 1023, 63, INTUOS3S },
- { "Wacom Intuos3 6x8", 10, 40640, 30480, 1023, 63, INTUOS3 },
- { "Wacom Intuos3 9x12", 10, 60960, 45720, 1023, 63, INTUOS3 },
- { "Wacom Intuos3 12x12", 10, 60960, 60960, 1023, 63, INTUOS3L },
- { "Wacom Intuos3 12x19", 10, 97536, 60960, 1023, 63, INTUOS3L },
- { "Wacom Intuos3 6x11", 10, 54204, 31750, 1023, 63, INTUOS3 },
- { "Wacom Intuos3 4x6", 10, 31496, 19685, 1023, 63, INTUOS3S },
- { "Wacom Intuos4 4x6", 10, 31496, 19685, 2047, 63, INTUOS4S },
- { "Wacom Intuos4 6x9", 10, 44704, 27940, 2047, 63, INTUOS4 },
- { "Wacom Intuos4 8x13", 10, 65024, 40640, 2047, 63, INTUOS4L },
- { "Wacom Intuos4 12x19", 10, 97536, 60960, 2047, 63, INTUOS4L },
- { "Wacom Cintiq 21UX", 10, 87200, 65600, 1023, 63, CINTIQ },
- { "Wacom Cintiq 20WSX", 10, 86680, 54180, 1023, 63, WACOM_BEE },
- { "Wacom Cintiq 12WX", 10, 53020, 33440, 1023, 63, WACOM_BEE },
- { "Wacom DTU1931", 8, 37832, 30305, 511, 0, PL },
- { "Wacom ISDv4 90", 8, 26202, 16325, 255, 0, TABLETPC },
- { "Wacom ISDv4 93", 8, 26202, 16325, 255, 0, TABLETPC },
- { "Wacom ISDv4 9A", 8, 26202, 16325, 255, 0, TABLETPC },
- { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
+ { "Wacom Penpartner", WACOM_PKGLEN_PENPRTN, 5040, 3780, 255, 0, PENPARTNER },
+ { "Wacom Graphire", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, 63, GRAPHIRE },
+ { "Wacom Graphire2 4x5", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, 63, GRAPHIRE },
+ { "Wacom Graphire2 5x7", WACOM_PKGLEN_GRAPHIRE, 13918, 10206, 511, 63, GRAPHIRE },
+ { "Wacom Graphire3", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, 63, GRAPHIRE },
+ { "Wacom Graphire3 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, GRAPHIRE },
+ { "Wacom Graphire4 4x5", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, 63, WACOM_G4 },
+ { "Wacom Graphire4 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, WACOM_G4 },
+ { "Wacom BambooFun 4x5", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, 63, WACOM_MO },
+ { "Wacom BambooFun 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 511, 63, WACOM_MO },
+ { "Wacom Bamboo1 Medium", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, GRAPHIRE },
+ { "Wacom Volito", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE },
+ { "Wacom PenStation2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 255, 63, GRAPHIRE },
+ { "Wacom Volito2 4x5", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE },
+ { "Wacom Volito2 2x3", WACOM_PKGLEN_GRAPHIRE, 3248, 2320, 511, 63, GRAPHIRE },
+ { "Wacom PenPartner2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 511, 63, GRAPHIRE },
+ { "Wacom Bamboo", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, 63, WACOM_MO },
+ { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE },
+ { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 31, INTUOS },
+ { "Wacom Intuos 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS },
+ { "Wacom Intuos 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, 31, INTUOS },
+ { "Wacom Intuos 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, 31, INTUOS },
+ { "Wacom Intuos 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, 31, INTUOS },
+ { "Wacom PL400", WACOM_PKGLEN_GRAPHIRE, 5408, 4056, 255, 0, PL },
+ { "Wacom PL500", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 255, 0, PL },
+ { "Wacom PL600", WACOM_PKGLEN_GRAPHIRE, 6126, 4604, 255, 0, PL },
+ { "Wacom PL600SX", WACOM_PKGLEN_GRAPHIRE, 6260, 5016, 255, 0, PL },
+ { "Wacom PL550", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 511, 0, PL },
+ { "Wacom PL800", WACOM_PKGLEN_GRAPHIRE, 7220, 5780, 511, 0, PL },
+ { "Wacom PL700", WACOM_PKGLEN_GRAPHIRE, 6758, 5406, 511, 0, PL },
+ { "Wacom PL510", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, 0, PL },
+ { "Wacom DTU710", WACOM_PKGLEN_GRAPHIRE, 34080, 27660, 511, 0, PL },
+ { "Wacom DTF521", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, 0, PL },
+ { "Wacom DTF720", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, 0, PL },
+ { "Wacom DTF720a", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, 0, PL },
+ { "Wacom Cintiq Partner", WACOM_PKGLEN_GRAPHIRE, 20480, 15360, 511, 0, PTU },
+ { "Wacom Intuos2 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 31, INTUOS },
+ { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS },
+ { "Wacom Intuos2 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, 31, INTUOS },
+ { "Wacom Intuos2 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, 31, INTUOS },
+ { "Wacom Intuos2 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, 31, INTUOS },
+ { "Wacom Intuos3 4x5", WACOM_PKGLEN_INTUOS, 25400, 20320, 1023, 63, INTUOS3S },
+ { "Wacom Intuos3 6x8", WACOM_PKGLEN_INTUOS, 40640, 30480, 1023, 63, INTUOS3 },
+ { "Wacom Intuos3 9x12", WACOM_PKGLEN_INTUOS, 60960, 45720, 1023, 63, INTUOS3 },
+ { "Wacom Intuos3 12x12", WACOM_PKGLEN_INTUOS, 60960, 60960, 1023, 63, INTUOS3L },
+ { "Wacom Intuos3 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 1023, 63, INTUOS3L },
+ { "Wacom Intuos3 6x11", WACOM_PKGLEN_INTUOS, 54204, 31750, 1023, 63, INTUOS3 },
+ { "Wacom Intuos3 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 1023, 63, INTUOS3S },
+ { "Wacom Intuos4 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, 63, INTUOS4S },
+ { "Wacom Intuos4 6x9", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 63, INTUOS4 },
+ { "Wacom Intuos4 8x13", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047, 63, INTUOS4L },
+ { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, 63, INTUOS4L },
+ { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, 63, CINTIQ },
+ { "Wacom Cintiq 20WSX", WACOM_PKGLEN_INTUOS, 86680, 54180, 1023, 63, WACOM_BEE },
+ { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, 63, WACOM_BEE },
+ { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, 0, PL },
+ { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC },
+ { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC },
+ { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC },
+ { "Wacom ISDv4 9F", WACOM_PKGLEN_PENABLED, 26202, 16325, 255, 0, TABLETPC },
+ { "Wacom ISDv4 E2", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, 0, TABLETPC2FG },
+ { "Wacom ISDv4 E3", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, 0, TABLETPC2FG },
+ { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS },
{ }
};
@@ -927,6 +1030,9 @@ static struct usb_device_id wacom_ids[] = {
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x90) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x93) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x9A) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x9F) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xE2) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xE3) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x47) },
{ }
};
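To make the two-finger packet handling easier to follow, below is a small stand-alone sketch of the byte layout that wacom_tpc_finger_in() decodes; the offsets come from the hunk above, while the sample packet bytes are invented:

#include <stdio.h>

/*
 * Decode one finger from a WACOM_REPORT_TPC2FG packet the way
 * wacom_tpc_finger_in() does: X lives at data[2 + idx * 2],
 * Y at data[6 + idx * 2], with the high byte masked to 7 bits.
 * data[1] bits 0x01/0x02 flag which fingers are in proximity.
 */
static void decode_finger(const unsigned char *data, int idx)
{
	int x = (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8);
	int y = (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8);

	printf("finger %d: present=%d x=%d y=%d\n",
	       idx, !!(data[1] & (1 << idx)), x, y);
}

int main(void)
{
	/* hypothetical 14-byte (WACOM_PKGLEN_TPC2FG) packet, report ID 13 */
	unsigned char pkt[14] = { 13, 0x03, 0x34, 0x12, 0x78, 0x56,
				  0xcd, 0x0a, 0x21, 0x43 };

	decode_finger(pkt, 0);	/* prints x=4660 (0x1234), y=2765 (0x0acd) */
	decode_finger(pkt, 1);	/* prints x=22136 (0x5678), y=17185 (0x4321) */
	return 0;
}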
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index c10235a..ee01e19 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -9,12 +9,33 @@
#ifndef WACOM_WAC_H
#define WACOM_WAC_H
+/* maximum packet length for USB devices */
+#define WACOM_PKGLEN_MAX 32
+
+/* packet length for individual models */
+#define WACOM_PKGLEN_PENPRTN 7
+#define WACOM_PKGLEN_GRAPHIRE 8
+#define WACOM_PKGLEN_BBFUN 9
+#define WACOM_PKGLEN_INTUOS 10
+#define WACOM_PKGLEN_PENABLED 8
+#define WACOM_PKGLEN_TPC1FG 5
+#define WACOM_PKGLEN_TPC2FG 14
+
+/* device IDs */
#define STYLUS_DEVICE_ID 0x02
#define TOUCH_DEVICE_ID 0x03
#define CURSOR_DEVICE_ID 0x06
#define ERASER_DEVICE_ID 0x0A
#define PAD_DEVICE_ID 0x0F
+/* wacom data packet report IDs */
+#define WACOM_REPORT_PENABLED 2
+#define WACOM_REPORT_INTUOSREAD 5
+#define WACOM_REPORT_INTUOSWRITE 6
+#define WACOM_REPORT_INTUOSPAD 12
+#define WACOM_REPORT_TPC1FG 6
+#define WACOM_REPORT_TPC2FG 13
+
enum {
PENPARTNER = 0,
GRAPHIRE,
@@ -32,6 +53,7 @@ enum {
WACOM_BEE,
WACOM_MO,
TABLETPC,
+ TABLETPC2FG,
MAX_TYPE
};
@@ -43,8 +65,11 @@ struct wacom_features {
int pressure_max;
int distance_max;
int type;
- int touch_x_max;
- int touch_y_max;
+ int device_type;
+ int x_phy;
+ int y_phy;
+ unsigned char unit;
+ unsigned char unitExpo;
};
struct wacom_wac {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 32fc8ba..dfafc76 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -450,6 +450,18 @@ config TOUCHSCREEN_USB_COMPOSITE
To compile this driver as a module, choose M here: the
module will be called usbtouchscreen.
+config TOUCHSCREEN_MC13783
+ tristate "Freescale MC13783 touchscreen input driver"
+ depends on MFD_MC13783
+ help
+ Say Y here if you have a Freescale MC13783 PMIC on your
+ board and want to use its touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mc13783_ts.
+
config TOUCHSCREEN_USB_EGALAX
default y
bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f1f59c9..d61a3b4 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
+obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index c21e6d3..794d070 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -47,6 +47,7 @@
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
+#include <linux/gpio.h>
#include <linux/spi/ad7879.h>
@@ -132,7 +133,9 @@ struct ad7879 {
struct input_dev *input;
struct work_struct work;
struct timer_list timer;
-
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gc;
+#endif
struct mutex mutex;
unsigned disabled:1; /* P: mutex */
@@ -150,11 +153,9 @@ struct ad7879 {
u8 median;
u16 x_plate_ohms;
u16 pressure_max;
- u16 gpio_init;
u16 cmd_crtl1;
u16 cmd_crtl2;
u16 cmd_crtl3;
- unsigned gpio:1;
};
static int ad7879_read(bus_device *, u8);
@@ -237,24 +238,6 @@ static irqreturn_t ad7879_irq(int irq, void *handle)
static void ad7879_setup(struct ad7879 *ts)
{
- ts->cmd_crtl3 = AD7879_YPLUS_BIT |
- AD7879_XPLUS_BIT |
- AD7879_Z2_BIT |
- AD7879_Z1_BIT |
- AD7879_TEMPMASK_BIT |
- AD7879_AUXVBATMASK_BIT |
- AD7879_GPIOALERTMASK_BIT;
-
- ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR |
- AD7879_AVG(ts->averaging) |
- AD7879_MFS(ts->median) |
- AD7879_FCD(ts->first_conversion_delay) |
- ts->gpio_init;
-
- ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 |
- AD7879_ACQ(ts->acquisition_time) |
- AD7879_TMR(ts->pen_down_acc_interval);
-
ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
ad7879_write(ts->bus, AD7879_REG_CTRL3, ts->cmd_crtl3);
ad7879_write(ts->bus, AD7879_REG_CTRL1, ts->cmd_crtl1);
@@ -324,48 +307,132 @@ static ssize_t ad7879_disable_store(struct device *dev,
static DEVICE_ATTR(disable, 0664, ad7879_disable_show, ad7879_disable_store);
-static ssize_t ad7879_gpio_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static struct attribute *ad7879_attributes[] = {
+ &dev_attr_disable.attr,
+ NULL
+};
+
+static const struct attribute_group ad7879_attr_group = {
+ .attrs = ad7879_attributes,
+};
+
+#ifdef CONFIG_GPIOLIB
+static int ad7879_gpio_direction_input(struct gpio_chip *chip,
+ unsigned gpio)
{
- struct ad7879 *ts = dev_get_drvdata(dev);
+ struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+ int err;
- return sprintf(buf, "%u\n", ts->gpio);
+ mutex_lock(&ts->mutex);
+ ts->cmd_crtl2 |= AD7879_GPIO_EN | AD7879_GPIODIR | AD7879_GPIOPOL;
+ err = ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
+ mutex_unlock(&ts->mutex);
+
+ return err;
}
-static ssize_t ad7879_gpio_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int ad7879_gpio_direction_output(struct gpio_chip *chip,
+ unsigned gpio, int level)
{
- struct ad7879 *ts = dev_get_drvdata(dev);
- unsigned long val;
- int error;
+ struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+ int err;
- error = strict_strtoul(buf, 10, &val);
- if (error)
- return error;
+ mutex_lock(&ts->mutex);
+ ts->cmd_crtl2 &= ~AD7879_GPIODIR;
+ ts->cmd_crtl2 |= AD7879_GPIO_EN | AD7879_GPIOPOL;
+ if (level)
+ ts->cmd_crtl2 |= AD7879_GPIO_DATA;
+ else
+ ts->cmd_crtl2 &= ~AD7879_GPIO_DATA;
+
+ err = ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
+ mutex_unlock(&ts->mutex);
+
+ return err;
+}
+
+static int ad7879_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+ u16 val;
mutex_lock(&ts->mutex);
- ts->gpio = !!val;
- error = ad7879_write(ts->bus, AD7879_REG_CTRL2,
- ts->gpio ?
- ts->cmd_crtl2 & ~AD7879_GPIO_DATA :
- ts->cmd_crtl2 | AD7879_GPIO_DATA);
+ val = ad7879_read(ts->bus, AD7879_REG_CTRL2);
mutex_unlock(&ts->mutex);
- return error ? : count;
+ return !!(val & AD7879_GPIO_DATA);
}
-static DEVICE_ATTR(gpio, 0664, ad7879_gpio_show, ad7879_gpio_store);
+static void ad7879_gpio_set_value(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct ad7879 *ts = container_of(chip, struct ad7879, gc);
-static struct attribute *ad7879_attributes[] = {
- &dev_attr_disable.attr,
- &dev_attr_gpio.attr,
- NULL
-};
+ mutex_lock(&ts->mutex);
+ if (value)
+ ts->cmd_crtl2 |= AD7879_GPIO_DATA;
+ else
+ ts->cmd_crtl2 &= ~AD7879_GPIO_DATA;
-static const struct attribute_group ad7879_attr_group = {
- .attrs = ad7879_attributes,
-};
+ ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2);
+ mutex_unlock(&ts->mutex);
+}
+
+static int __devinit ad7879_gpio_add(struct device *dev)
+{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+ struct ad7879_platform_data *pdata = dev->platform_data;
+ int ret = 0;
+
+ if (pdata->gpio_export) {
+ ts->gc.direction_input = ad7879_gpio_direction_input;
+ ts->gc.direction_output = ad7879_gpio_direction_output;
+ ts->gc.get = ad7879_gpio_get_value;
+ ts->gc.set = ad7879_gpio_set_value;
+ ts->gc.can_sleep = 1;
+ ts->gc.base = pdata->gpio_base;
+ ts->gc.ngpio = 1;
+ ts->gc.label = "AD7879-GPIO";
+ ts->gc.owner = THIS_MODULE;
+ ts->gc.dev = dev;
+
+ ret = gpiochip_add(&ts->gc);
+ if (ret)
+ dev_err(dev, "failed to register gpio %d\n",
+ ts->gc.base);
+ }
+
+ return ret;
+}
+
+/*
+ * We mark ad7879_gpio_remove inline so there is a chance the code
+ * gets discarded when not needed. We can't do __devinit/__devexit
+ * markup since it is used in both probe and remove methods.
+ */
+static inline void ad7879_gpio_remove(struct device *dev)
+{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+ struct ad7879_platform_data *pdata = dev->platform_data;
+ int ret;
+
+ if (pdata->gpio_export) {
+ ret = gpiochip_remove(&ts->gc);
+ if (ret)
+ dev_err(dev, "failed to remove gpio %d\n",
+ ts->gc.base);
+ }
+}
+#else
+static inline int ad7879_gpio_add(struct device *dev)
+{
+ return 0;
+}
+
+static inline void ad7879_gpio_remove(struct device *dev)
+{
+}
+#endif
static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
{
@@ -403,12 +470,6 @@ static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
ts->median = pdata->median;
- if (pdata->gpio_output)
- ts->gpio_init = AD7879_GPIO_EN |
- (pdata->gpio_default ? 0 : AD7879_GPIO_DATA);
- else
- ts->gpio_init = AD7879_GPIO_EN | AD7879_GPIODIR;
-
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev));
input_dev->name = "AD7879 Touchscreen";
@@ -446,6 +507,23 @@ static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
goto err_free_mem;
}
+ ts->cmd_crtl3 = AD7879_YPLUS_BIT |
+ AD7879_XPLUS_BIT |
+ AD7879_Z2_BIT |
+ AD7879_Z1_BIT |
+ AD7879_TEMPMASK_BIT |
+ AD7879_AUXVBATMASK_BIT |
+ AD7879_GPIOALERTMASK_BIT;
+
+ ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR |
+ AD7879_AVG(ts->averaging) |
+ AD7879_MFS(ts->median) |
+ AD7879_FCD(ts->first_conversion_delay);
+
+ ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 |
+ AD7879_ACQ(ts->acquisition_time) |
+ AD7879_TMR(ts->pen_down_acc_interval);
+
ad7879_setup(ts);
err = request_irq(bus->irq, ad7879_irq,
@@ -460,15 +538,21 @@ static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts)
if (err)
goto err_free_irq;
- err = input_register_device(input_dev);
+ err = ad7879_gpio_add(&bus->dev);
if (err)
goto err_remove_attr;
+ err = input_register_device(input_dev);
+ if (err)
+ goto err_remove_gpio;
+
dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n",
revid >> 8, bus->irq);
return 0;
+err_remove_gpio:
+ ad7879_gpio_remove(&bus->dev);
err_remove_attr:
sysfs_remove_group(&bus->dev.kobj, &ad7879_attr_group);
err_free_irq:
@@ -481,6 +565,7 @@ err_free_mem:
static int __devexit ad7879_destroy(bus_device *bus, struct ad7879 *ts)
{
+ ad7879_gpio_remove(&bus->dev);
ad7879_disable(ts);
sysfs_remove_group(&ts->bus->dev.kobj, &ad7879_attr_group);
free_irq(ts->bus->irq, ts);
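For context, a board enables the new AD7879 GPIO support through its platform data. A minimal sketch follows; the gpio_export and gpio_base fields appear in the hunk above, while the base number itself is just a placeholder:

#include <linux/spi/ad7879.h>

static struct ad7879_platform_data board_ad7879_pdata = {
	.gpio_export = 1,	/* expose the single on-chip GPIO via gpiolib */
	.gpio_base   = 200,	/* placeholder GPIO number for this board */
	/* the usual touchscreen tuning fields go here as before */
};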
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
new file mode 100644
index 0000000..be115b3
--- /dev/null
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -0,0 +1,258 @@
+/*
+ * Driver for the Freescale Semiconductor MC13783 touchscreen.
+ *
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2009 Sascha Hauer, Pengutronix
+ *
+ * Initial development of this code was funded by
+ * Phytec Messtechnik GmbH, http://www.phytec.de/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/platform_device.h>
+#include <linux/mfd/mc13783.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#define MC13783_TS_NAME "mc13783-ts"
+
+#define DEFAULT_SAMPLE_TOLERANCE 300
+
+static unsigned int sample_tolerance = DEFAULT_SAMPLE_TOLERANCE;
+module_param(sample_tolerance, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sample_tolerance,
+ "If the minimum and maximum values read out for one axis (out "
+ "of three) differ by this value (default: "
+ __stringify(DEFAULT_SAMPLE_TOLERANCE) ") or more, the reading "
+ "is assumed to be wrong and is discarded. Set to 0 to "
+ "disable this check.");
+
+struct mc13783_ts_priv {
+ struct input_dev *idev;
+ struct mc13783 *mc13783;
+ struct delayed_work work;
+ struct workqueue_struct *workq;
+ unsigned int sample[4];
+};
+
+static irqreturn_t mc13783_ts_handler(int irq, void *data)
+{
+ struct mc13783_ts_priv *priv = data;
+
+ mc13783_ackirq(priv->mc13783, irq);
+
+ /*
+ * Kick off reading coordinates. Note that if the work item already
+ * happens to be queued for future execution (it rearms itself), it
+ * will not be rescheduled for immediate execution here. However, the
+ * rearm delay is HZ / 50, which is acceptable.
+ */
+ queue_delayed_work(priv->workq, &priv->work, 0);
+
+ return IRQ_HANDLED;
+}
+
+#define sort3(a0, a1, a2) ({ \
+ if (a0 > a1) \
+ swap(a0, a1); \
+ if (a1 > a2) \
+ swap(a1, a2); \
+ if (a0 > a1) \
+ swap(a0, a1); \
+ })
+
+static void mc13783_ts_report_sample(struct mc13783_ts_priv *priv)
+{
+ struct input_dev *idev = priv->idev;
+ int x0, x1, x2, y0, y1, y2;
+ int cr0, cr1;
+
+ /*
+ * the values are only 10 bits wide, but the two least significant
+ * bits are reserved for future 12-bit use and read back as 0
+ */
+ x0 = priv->sample[0] & 0xfff;
+ x1 = priv->sample[1] & 0xfff;
+ x2 = priv->sample[2] & 0xfff;
+ y0 = priv->sample[3] & 0xfff;
+ y1 = (priv->sample[0] >> 12) & 0xfff;
+ y2 = (priv->sample[1] >> 12) & 0xfff;
+ cr0 = (priv->sample[2] >> 12) & 0xfff;
+ cr1 = (priv->sample[3] >> 12) & 0xfff;
+
+ dev_dbg(&idev->dev,
+ "x: (% 4d,% 4d,% 4d) y: (% 4d, % 4d,% 4d) cr: (% 4d, % 4d)\n",
+ x0, x1, x2, y0, y1, y2, cr0, cr1);
+
+ sort3(x0, x1, x2);
+ sort3(y0, y1, y2);
+
+ cr0 = (cr0 + cr1) / 2;
+
+ if (!cr0 || !sample_tolerance ||
+ (x2 - x0 < sample_tolerance &&
+ y2 - y0 < sample_tolerance)) {
+ /* report the median coordinate and average pressure */
+ if (cr0) {
+ input_report_abs(idev, ABS_X, x1);
+ input_report_abs(idev, ABS_Y, y1);
+
+ dev_dbg(&idev->dev, "report (%d, %d, %d)\n",
+ x1, y1, 0x1000 - cr0);
+ queue_delayed_work(priv->workq, &priv->work, HZ / 50);
+ } else
+ dev_dbg(&idev->dev, "report release\n");
+
+ input_report_abs(idev, ABS_PRESSURE,
+ cr0 ? 0x1000 - cr0 : cr0);
+ input_report_key(idev, BTN_TOUCH, cr0);
+ input_sync(idev);
+ } else
+ dev_dbg(&idev->dev, "discard event\n");
+}
+
+static void mc13783_ts_work(struct work_struct *work)
+{
+ struct mc13783_ts_priv *priv =
+ container_of(work, struct mc13783_ts_priv, work.work);
+ unsigned int mode = MC13783_ADC_MODE_TS;
+ unsigned int channel = 12;
+
+ if (mc13783_adc_do_conversion(priv->mc13783,
+ mode, channel, priv->sample) == 0)
+ mc13783_ts_report_sample(priv);
+}
+
+static int mc13783_ts_open(struct input_dev *dev)
+{
+ struct mc13783_ts_priv *priv = input_get_drvdata(dev);
+ int ret;
+
+ mc13783_lock(priv->mc13783);
+
+ mc13783_ackirq(priv->mc13783, MC13783_IRQ_TS);
+
+ ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_TS,
+ mc13783_ts_handler, MC13783_TS_NAME, priv);
+ if (ret)
+ goto out;
+
+ ret = mc13783_reg_rmw(priv->mc13783, MC13783_ADC0,
+ MC13783_ADC0_TSMOD_MASK, MC13783_ADC0_TSMOD0);
+ if (ret)
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_TS, priv);
+out:
+ mc13783_unlock(priv->mc13783);
+ return ret;
+}
+
+static void mc13783_ts_close(struct input_dev *dev)
+{
+ struct mc13783_ts_priv *priv = input_get_drvdata(dev);
+
+ mc13783_lock(priv->mc13783);
+ mc13783_reg_rmw(priv->mc13783, MC13783_ADC0,
+ MC13783_ADC0_TSMOD_MASK, 0);
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_TS, priv);
+ mc13783_unlock(priv->mc13783);
+
+ cancel_delayed_work_sync(&priv->work);
+}
+
+static int __init mc13783_ts_probe(struct platform_device *pdev)
+{
+ struct mc13783_ts_priv *priv;
+ struct input_dev *idev;
+ int ret = -ENOMEM;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ idev = input_allocate_device();
+ if (!priv || !idev)
+ goto err_free_mem;
+
+ INIT_DELAYED_WORK(&priv->work, mc13783_ts_work);
+ priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
+ priv->idev = idev;
+
+ /*
+ * We need a separate workqueue because mc13783_adc_do_conversion
+ * uses keventd and thus would deadlock.
+ */
+ priv->workq = create_singlethread_workqueue("mc13783_ts");
+ if (!priv->workq)
+ goto err_free_mem;
+
+ idev->name = MC13783_TS_NAME;
+ idev->dev.parent = &pdev->dev;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(idev, ABS_X, 0, 0xfff, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, 0xfff, 0, 0);
+ input_set_abs_params(idev, ABS_PRESSURE, 0, 0xfff, 0, 0);
+
+ idev->open = mc13783_ts_open;
+ idev->close = mc13783_ts_close;
+
+ input_set_drvdata(idev, priv);
+
+ ret = input_register_device(priv->idev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "register input device failed with %d\n", ret);
+ goto err_destroy_wq;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(priv->workq);
+err_free_mem:
+ input_free_device(idev);
+ kfree(priv);
+ return ret;
+}
+
+static int __devexit mc13783_ts_remove(struct platform_device *pdev)
+{
+ struct mc13783_ts_priv *priv = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ destroy_workqueue(priv->workq);
+ input_unregister_device(priv->idev);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct platform_driver mc13783_ts_driver = {
+ .remove = __devexit_p(mc13783_ts_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = MC13783_TS_NAME,
+ },
+};
+
+static int __init mc13783_ts_init(void)
+{
+ return platform_driver_probe(&mc13783_ts_driver, &mc13783_ts_probe);
+}
+module_init(mc13783_ts_init);
+
+static void __exit mc13783_ts_exit(void)
+{
+ platform_driver_unregister(&mc13783_ts_driver);
+}
+module_exit(mc13783_ts_exit);
+
+MODULE_DESCRIPTION("MC13783 input touchscreen driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" MC13783_TS_NAME);
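A quick worked example of the sample filtering above, with invented readings; the driver applies sort3() to the X and Y triples and discards the whole reading if either spread reaches sample_tolerance, otherwise it reports the median:

#include <stdio.h>

#define swap(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)

/* same ordering as the driver's sort3() macro */
#define sort3(a0, a1, a2) do { \
	if (a0 > a1) swap(a0, a1); \
	if (a1 > a2) swap(a1, a2); \
	if (a0 > a1) swap(a0, a1); \
} while (0)

int main(void)
{
	int sample_tolerance = 300;		/* the driver's default */
	int x0 = 812, x1 = 790, x2 = 3900;	/* one reading is an outlier */

	sort3(x0, x1, x2);			/* -> 790, 812, 3900 */
	if (x2 - x0 < sample_tolerance)
		printf("report median X = %d\n", x1);
	else
		printf("spread %d >= %d, reading discarded\n",
		       x2 - x0, sample_tolerance);
	return 0;
}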
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 09a5e73..5256123 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -618,8 +618,8 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
- dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ;
- dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ;
+ dev->x = (pkt[2] << 8) | pkt[1];
+ dev->y = (pkt[4] << 8) | pkt[3];
dev->press = pkt[5] & 0xff;
dev->touch = pkt[0] & 0x01;
@@ -809,9 +809,9 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
[DEVTYPE_GENERAL_TOUCH] = {
.min_xc = 0x0,
- .max_xc = 0x0500,
+ .max_xc = 0x7fff,
.min_yc = 0x0,
- .max_yc = 0x0500,
+ .max_yc = 0x7fff,
.rept_size = 7,
.read_data = general_touch_read_data,
},
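The practical effect of the GeneralTouch change above, shown with made-up report bytes:

/*
 * pkt[1] = 0x34, pkt[2] = 0x52 (hypothetical)
 *   old: dev->x = ((0x52 & 0x0F) << 8) | 0x34 = 0x0234   (high nibble lost)
 *   new: dev->x =  (0x52 << 8)         | 0x34 = 0x5234   (full range, which is
 *                                                          why max_xc/max_yc
 *                                                          become 0x7fff)
 */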
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index a6624ad..1a1420d 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -3152,7 +3152,7 @@ static void
hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx,
int slot_rx, int bank_rx)
{
- if (slot_rx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) {
+ if (slot_tx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) {
/* disable PCM */
mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0);
return;
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 7e5f30d..f1e8af5 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -661,7 +661,7 @@ l1oip_socket_thread(void *data)
size_t recvbuf_size = 1500;
int recvlen;
struct socket *socket = NULL;
- DECLARE_COMPLETION(wait);
+ DECLARE_COMPLETION_ONSTACK(wait);
/* allocate buffer memory */
recvbuf = kmalloc(recvbuf_size, GFP_KERNEL);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index e4f599f..8a0e1ec 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -229,6 +229,12 @@ config LEDS_PWM
help
This option enables support for pwm driven LEDs
+config LEDS_REGULATOR
+ tristate "REGULATOR driven LED support"
+ depends on LEDS_CLASS && REGULATOR
+ help
+ This option enables support for regulator driven LEDs.
+
config LEDS_BD2802
tristate "LED driver for BD2802 RGB LED"
depends on LEDS_CLASS && I2C
@@ -236,6 +242,33 @@ config LEDS_BD2802
This option enables support for BD2802GU RGB LED driver chips
accessed via the I2C bus.
+config LEDS_INTEL_SS4200
+ tristate "LED driver for Intel NAS SS4200 series"
+ depends on LEDS_CLASS && PCI && DMI
+ help
+ This option enables support for the Intel SS4200 series of
+ Network Attached Storage servers. You may control the hard
+ drive or power LEDs on the front panel. Using this driver
+ can stop the front LED from blinking after startup.
+
+config LEDS_LT3593
+ tristate "LED driver for LT3593 controllers"
+ depends on LEDS_CLASS && GENERIC_GPIO
+ help
+ This option enables support for LEDs driven by a Linear Technology
+ LT3593 controller. This controller uses a special one-wire pulse
+ coding protocol to set the brightness.
+
+config LEDS_ADP5520
+ tristate "LED Support for ADP5520/ADP5501 PMIC"
+ depends on LEDS_CLASS && PMIC_ADP5520
+ help
+ This option enables support for on-chip LED drivers found
+ on Analog Devices ADP5520/ADP5501 PMICs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called leds-adp5520.
+
comment "LED Triggers"
config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 46d7270..9e63869 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -29,6 +29,10 @@ obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
+obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
+obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
+obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
+obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
new file mode 100644
index 0000000..a8f3159
--- /dev/null
+++ b/drivers/leds/leds-adp5520.c
@@ -0,0 +1,230 @@
+/*
+ * LEDs driver for Analog Devices ADP5520/ADP5501 MFD PMICs
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Loosely derived from leds-da903x:
+ * Copyright (C) 2008 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * Eric Miao <eric.miao@marvell.com>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/adp5520.h>
+
+struct adp5520_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct device *master;
+ enum led_brightness new_brightness;
+ int id;
+ int flags;
+};
+
+static void adp5520_led_work(struct work_struct *work)
+{
+ struct adp5520_led *led = container_of(work, struct adp5520_led, work);
+ adp5520_write(led->master, ADP5520_LED1_CURRENT + led->id - 1,
+ led->new_brightness >> 2);
+}
+
+static void adp5520_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct adp5520_led *led;
+
+ led = container_of(led_cdev, struct adp5520_led, cdev);
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static int adp5520_led_setup(struct adp5520_led *led)
+{
+ struct device *dev = led->master;
+ int flags = led->flags;
+ int ret = 0;
+
+ switch (led->id) {
+ case FLAG_ID_ADP5520_LED1_ADP5501_LED0:
+ ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
+ (flags >> ADP5520_FLAG_OFFT_SHIFT) &
+ ADP5520_FLAG_OFFT_MASK);
+ ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_LED1_EN);
+ break;
+ case FLAG_ID_ADP5520_LED2_ADP5501_LED1:
+ ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
+ ((flags >> ADP5520_FLAG_OFFT_SHIFT) &
+ ADP5520_FLAG_OFFT_MASK) << 2);
+ ret |= adp5520_clr_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_R3_MODE);
+ ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_LED2_EN);
+ break;
+ case FLAG_ID_ADP5520_LED3_ADP5501_LED2:
+ ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
+ ((flags >> ADP5520_FLAG_OFFT_SHIFT) &
+ ADP5520_FLAG_OFFT_MASK) << 4);
+ ret |= adp5520_clr_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_C3_MODE);
+ ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_LED3_EN);
+ break;
+ }
+
+ return ret;
+}
+
+static int __devinit adp5520_led_prepare(struct platform_device *pdev)
+{
+ struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = pdev->dev.parent;
+ int ret = 0;
+
+ ret |= adp5520_write(dev, ADP5520_LED1_CURRENT, 0);
+ ret |= adp5520_write(dev, ADP5520_LED2_CURRENT, 0);
+ ret |= adp5520_write(dev, ADP5520_LED3_CURRENT, 0);
+ ret |= adp5520_write(dev, ADP5520_LED_TIME, pdata->led_on_time << 6);
+ ret |= adp5520_write(dev, ADP5520_LED_FADE, FADE_VAL(pdata->fade_in,
+ pdata->fade_out));
+
+ return ret;
+}
+
+static int __devinit adp5520_led_probe(struct platform_device *pdev)
+{
+ struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
+ struct adp5520_led *led, *led_dat;
+ struct led_info *cur_led;
+ int ret, i;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ if (pdata->num_leds > ADP5520_01_MAXLEDS) {
+ dev_err(&pdev->dev, "can't handle more than %d LEDS\n",
+ ADP5520_01_MAXLEDS);
+ return -EFAULT;
+ }
+
+ led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ ret = adp5520_led_prepare(pdev);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to write\n");
+ goto err_free;
+ }
+
+ for (i = 0; i < pdata->num_leds; ++i) {
+ cur_led = &pdata->leds[i];
+ led_dat = &led[i];
+
+ led_dat->cdev.name = cur_led->name;
+ led_dat->cdev.default_trigger = cur_led->default_trigger;
+ led_dat->cdev.brightness_set = adp5520_led_set;
+ led_dat->cdev.brightness = LED_OFF;
+
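+ /* use the board-supplied LED assignment if one is given, otherwise default to position order */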
+ if (cur_led->flags & ADP5520_FLAG_LED_MASK)
+ led_dat->flags = cur_led->flags;
+ else
+ led_dat->flags = i + 1;
+
+ led_dat->id = led_dat->flags & ADP5520_FLAG_LED_MASK;
+
+ led_dat->master = pdev->dev.parent;
+ led_dat->new_brightness = LED_OFF;
+
+ INIT_WORK(&led_dat->work, adp5520_led_work);
+
+ ret = led_classdev_register(led_dat->master, &led_dat->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register LED %d\n",
+ led_dat->id);
+ goto err;
+ }
+
+ ret = adp5520_led_setup(led_dat);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to write\n");
+ i++;
+ goto err;
+ }
+ }
+
+ platform_set_drvdata(pdev, led);
+ return 0;
+
+err:
+ if (i > 0) {
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+ }
+
+err_free:
+ kfree(led);
+ return ret;
+}
+
+static int __devexit adp5520_led_remove(struct platform_device *pdev)
+{
+ struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
+ struct adp5520_led *led;
+ int i;
+
+ led = platform_get_drvdata(pdev);
+
+ adp5520_clr_bits(led->master, ADP5520_LED_CONTROL,
+ ADP5520_LED1_EN | ADP5520_LED2_EN | ADP5520_LED3_EN);
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ kfree(led);
+ return 0;
+}
+
+static struct platform_driver adp5520_led_driver = {
+ .driver = {
+ .name = "adp5520-led",
+ .owner = THIS_MODULE,
+ },
+ .probe = adp5520_led_probe,
+ .remove = __devexit_p(adp5520_led_remove),
+};
+
+static int __init adp5520_led_init(void)
+{
+ return platform_driver_register(&adp5520_led_driver);
+}
+module_init(adp5520_led_init);
+
+static void __exit adp5520_led_exit(void)
+{
+ platform_driver_unregister(&adp5520_led_driver);
+}
+module_exit(adp5520_led_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("LEDS ADP5520(01) Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:adp5520-led");
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index 731d4ee..f59ffad 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -11,11 +11,24 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/pci.h>
static int force = 0;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
+#define MSR_LBAR_GPIO 0x5140000C
+#define CS5535_GPIO_SIZE 256
+
+static u32 gpio_base;
+
+static struct pci_device_id divil_pci[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+ { } /* NULL entry */
+};
+MODULE_DEVICE_TABLE(pci, divil_pci);
+
struct alix_led {
struct led_classdev cdev;
unsigned short port;
@@ -30,9 +43,9 @@ static void alix_led_set(struct led_classdev *led_cdev,
container_of(led_cdev, struct alix_led, cdev);
if (brightness)
- outl(led_dev->on_value, led_dev->port);
+ outl(led_dev->on_value, gpio_base + led_dev->port);
else
- outl(led_dev->off_value, led_dev->port);
+ outl(led_dev->off_value, gpio_base + led_dev->port);
}
static struct alix_led alix_leds[] = {
@@ -41,7 +54,7 @@ static struct alix_led alix_leds[] = {
.name = "alix:1",
.brightness_set = alix_led_set,
},
- .port = 0x6100,
+ .port = 0x00,
.on_value = 1 << 22,
.off_value = 1 << 6,
},
@@ -50,7 +63,7 @@ static struct alix_led alix_leds[] = {
.name = "alix:2",
.brightness_set = alix_led_set,
},
- .port = 0x6180,
+ .port = 0x80,
.on_value = 1 << 25,
.off_value = 1 << 9,
},
@@ -59,7 +72,7 @@ static struct alix_led alix_leds[] = {
.name = "alix:3",
.brightness_set = alix_led_set,
},
- .port = 0x6180,
+ .port = 0x80,
.on_value = 1 << 27,
.off_value = 1 << 11,
},
@@ -101,64 +114,104 @@ static struct platform_driver alix_led_driver = {
},
};
-static int __init alix_present(void)
+static int __init alix_present(unsigned long bios_phys,
+ const char *alix_sig,
+ size_t alix_sig_len)
{
- const unsigned long bios_phys = 0x000f0000;
const size_t bios_len = 0x00010000;
- const char alix_sig[] = "PC Engines ALIX.";
- const size_t alix_sig_len = sizeof(alix_sig) - 1;
-
const char *bios_virt;
const char *scan_end;
const char *p;
- int ret = 0;
+ char name[64];
if (force) {
printk(KERN_NOTICE "%s: forced to skip BIOS test, "
"assume system has ALIX.2 style LEDs\n",
KBUILD_MODNAME);
- ret = 1;
- goto out;
+ return 1;
}
bios_virt = phys_to_virt(bios_phys);
scan_end = bios_virt + bios_len - (alix_sig_len + 2);
for (p = bios_virt; p < scan_end; p++) {
const char *tail;
+ char *a;
- if (memcmp(p, alix_sig, alix_sig_len) != 0) {
+ if (memcmp(p, alix_sig, alix_sig_len) != 0)
continue;
- }
+
+ memcpy(name, p, sizeof(name));
+
+ /* replace the first \0 character in the string with a space */
+ a = strchr(name, '\0');
+ if (a)
+ *a = ' ';
+
+ /* cut the string at a carriage return */
+ a = strchr(name, '\r');
+ if (a)
+ *a = '\0';
tail = p + alix_sig_len;
- if ((tail[0] == '2' || tail[0] == '3') && tail[1] == '\0') {
+ if ((tail[0] == '2' || tail[0] == '3')) {
printk(KERN_INFO
"%s: system is recognized as \"%s\"\n",
- KBUILD_MODNAME, p);
- ret = 1;
- break;
+ KBUILD_MODNAME, name);
+ return 1;
}
}
-out:
- return ret;
+ return 0;
}
static struct platform_device *pdev;
-static int __init alix_led_init(void)
+static int __init alix_pci_led_init(void)
{
- int ret;
+ u32 low, hi;
- if (!alix_present()) {
- ret = -ENODEV;
- goto out;
+ if (pci_dev_present(divil_pci) == 0) {
+ printk(KERN_WARNING KBUILD_MODNAME": DIVIL not found\n");
+ return -ENODEV;
}
- /* enable output on GPIO for LED 1,2,3 */
- outl(1 << 6, 0x6104);
- outl(1 << 9, 0x6184);
- outl(1 << 11, 0x6184);
+ /* Grab the GPIO I/O range */
+ rdmsr(MSR_LBAR_GPIO, low, hi);
+
+ /* Check the mask and whether GPIO is enabled (sanity check) */
+ if (hi != 0x0000f001) {
+ printk(KERN_WARNING KBUILD_MODNAME": GPIO not enabled\n");
+ return -ENODEV;
+ }
+
+ /* Mask off the IO base address */
+ gpio_base = low & 0x0000ff00;
+
+ if (!request_region(gpio_base, CS5535_GPIO_SIZE, KBUILD_MODNAME)) {
+ printk(KERN_ERR KBUILD_MODNAME": can't allocate I/O for GPIO\n");
+ return -ENODEV;
+ }
+
+ /* Set GPIO function to output */
+ outl(1 << 6, gpio_base + 0x04);
+ outl(1 << 9, gpio_base + 0x84);
+ outl(1 << 11, gpio_base + 0x84);
+
+ return 0;
+}
+
+static int __init alix_led_init(void)
+{
+ int ret = -ENODEV;
+ const char tinybios_sig[] = "PC Engines ALIX.";
+ const char coreboot_sig[] = "PC Engines\0ALIX.";
+
+ if (alix_present(0xf0000, tinybios_sig, sizeof(tinybios_sig) - 1) ||
+ alix_present(0x500, coreboot_sig, sizeof(coreboot_sig) - 1))
+ ret = alix_pci_led_init();
+
+ if (ret < 0)
+ return ret;
pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
if (!IS_ERR(pdev)) {
@@ -168,7 +221,6 @@ static int __init alix_led_init(void)
} else
ret = PTR_ERR(pdev);
-out:
return ret;
}
@@ -176,6 +228,7 @@ static void __exit alix_led_exit(void)
{
platform_device_unregister(pdev);
platform_driver_unregister(&alix_led_driver);
+ release_region(gpio_base, CS5535_GPIO_SIZE);
}
module_init(alix_led_init);
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 8816806..da5fb01 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -31,7 +31,7 @@ static struct led_classdev qube_front_led = {
.name = "qube::front",
.brightness = LED_FULL,
.brightness_set = qube_front_led_set,
- .default_trigger = "ide-disk",
+ .default_trigger = "default-on",
};
static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
@@ -43,7 +43,7 @@ static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, res->end - res->start + 1);
+ led_port = ioremap(res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index defc212..438d483 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -84,7 +84,7 @@ static int __devinit cobalt_raq_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, res->end - res->start + 1);
+ led_port = ioremap(res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
new file mode 100644
index 0000000..fee40a8
--- /dev/null
+++ b/drivers/leds/leds-lt3593.c
@@ -0,0 +1,217 @@
+/*
+ * LEDs driver for LT3593 controllers
+ *
+ * See the datasheet at http://cds.linear.com/docs/Datasheet/3593f.pdf
+ *
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * Based on leds-gpio.c,
+ *
+ * Copyright (C) 2007 8D Technologies inc.
+ * Raphael Assenat <raph@8d.com>
+ * Copyright (C) 2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+struct lt3593_led_data {
+ struct led_classdev cdev;
+ unsigned gpio;
+ struct work_struct work;
+ u8 new_level;
+};
+
+static void lt3593_led_work(struct work_struct *work)
+{
+ int pulses;
+ struct lt3593_led_data *led_dat =
+ container_of(work, struct lt3593_led_data, work);
+
+ /*
+ * The LT3593 resets its internal current level register to the maximum
+ * level on the first falling edge on the control pin. Each following
+ * falling edge decreases the current level by 625uA. Up to 32 pulses
+ * can be sent, so the maximum power reduction is 20mA.
+ * After a timeout of 128us, the value is taken from the register and
+ * applied to the output driver.
+ */
+
+ if (led_dat->new_level == 0) {
+ gpio_set_value_cansleep(led_dat->gpio, 0);
+ return;
+ }
+
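+ /* map brightness 1..255 to the number of current-reduction pulses (0 pulses = maximum current) */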
+ pulses = 32 - (led_dat->new_level * 32) / 255;
+
+ if (pulses == 0) {
+ gpio_set_value_cansleep(led_dat->gpio, 0);
+ mdelay(1);
+ gpio_set_value_cansleep(led_dat->gpio, 1);
+ return;
+ }
+
+ gpio_set_value_cansleep(led_dat->gpio, 1);
+
+ while (pulses--) {
+ gpio_set_value_cansleep(led_dat->gpio, 0);
+ udelay(1);
+ gpio_set_value_cansleep(led_dat->gpio, 1);
+ udelay(1);
+ }
+}
+
+static void lt3593_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct lt3593_led_data *led_dat =
+ container_of(led_cdev, struct lt3593_led_data, cdev);
+
+ led_dat->new_level = value;
+ schedule_work(&led_dat->work);
+}
+
+static int __devinit create_lt3593_led(const struct gpio_led *template,
+ struct lt3593_led_data *led_dat, struct device *parent)
+{
+ int ret, state;
+
+ /* skip leds on GPIOs that aren't available */
+ if (!gpio_is_valid(template->gpio)) {
+ printk(KERN_INFO "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
+ KBUILD_MODNAME, template->gpio, template->name);
+ return 0;
+ }
+
+ ret = gpio_request(template->gpio, template->name);
+ if (ret < 0)
+ return ret;
+
+ led_dat->cdev.name = template->name;
+ led_dat->cdev.default_trigger = template->default_trigger;
+ led_dat->gpio = template->gpio;
+
+ led_dat->cdev.brightness_set = lt3593_led_set;
+
+ state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
+ led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
+
+ if (!template->retain_state_suspended)
+ led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+
+ ret = gpio_direction_output(led_dat->gpio, state);
+ if (ret < 0)
+ goto err;
+
+ INIT_WORK(&led_dat->work, lt3593_led_work);
+
+ ret = led_classdev_register(parent, &led_dat->cdev);
+ if (ret < 0)
+ goto err;
+
+ printk(KERN_INFO "%s: registered LT3593 LED '%s' at GPIO %d\n",
+ KBUILD_MODNAME, template->name, template->gpio);
+
+ return 0;
+
+err:
+ gpio_free(led_dat->gpio);
+ return ret;
+}
+
+static void delete_lt3593_led(struct lt3593_led_data *led)
+{
+ if (!gpio_is_valid(led->gpio))
+ return;
+
+ led_classdev_unregister(&led->cdev);
+ cancel_work_sync(&led->work);
+ gpio_free(led->gpio);
+}
+
+static int __devinit lt3593_led_probe(struct platform_device *pdev)
+{
+ struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct lt3593_led_data *leds_data;
+ int i, ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ leds_data = kzalloc(sizeof(struct lt3593_led_data) * pdata->num_leds,
+ GFP_KERNEL);
+ if (!leds_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ ret = create_lt3593_led(&pdata->leds[i], &leds_data[i],
+ &pdev->dev);
+ if (ret < 0)
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, leds_data);
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--)
+ delete_lt3593_led(&leds_data[i]);
+
+ kfree(leds_data);
+
+ return ret;
+}
+
+static int __devexit lt3593_led_remove(struct platform_device *pdev)
+{
+ int i;
+ struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct lt3593_led_data *leds_data;
+
+ leds_data = platform_get_drvdata(pdev);
+
+ for (i = 0; i < pdata->num_leds; i++)
+ delete_lt3593_led(&leds_data[i]);
+
+ kfree(leds_data);
+
+ return 0;
+}
+
+static struct platform_driver lt3593_led_driver = {
+ .probe = lt3593_led_probe,
+ .remove = __devexit_p(lt3593_led_remove),
+ .driver = {
+ .name = "leds-lt3593",
+ .owner = THIS_MODULE,
+ },
+};
+
+MODULE_ALIAS("platform:leds-lt3593");
+
+static int __init lt3593_led_init(void)
+{
+ return platform_driver_register(&lt3593_led_driver);
+}
+
+static void __exit lt3593_led_exit(void)
+{
+ platform_driver_unregister(&lt3593_led_driver);
+}
+
+module_init(lt3593_led_init);
+module_exit(lt3593_led_exit);
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("LED driver for LT3593 controllers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index cdfdc87..88b1dd0 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -27,7 +27,6 @@ struct led_pwm_data {
struct pwm_device *pwm;
unsigned int active_low;
unsigned int period;
- unsigned int max_brightness;
};
static void led_pwm_set(struct led_classdev *led_cdev,
@@ -35,7 +34,7 @@ static void led_pwm_set(struct led_classdev *led_cdev,
{
struct led_pwm_data *led_dat =
container_of(led_cdev, struct led_pwm_data, cdev);
- unsigned int max = led_dat->max_brightness;
+ unsigned int max = led_dat->cdev.max_brightness;
unsigned int period = led_dat->period;
if (brightness == 0) {
@@ -77,10 +76,10 @@ static int led_pwm_probe(struct platform_device *pdev)
led_dat->cdev.name = cur_led->name;
led_dat->cdev.default_trigger = cur_led->default_trigger;
led_dat->active_low = cur_led->active_low;
- led_dat->max_brightness = cur_led->max_brightness;
led_dat->period = cur_led->pwm_period_ns;
led_dat->cdev.brightness_set = led_pwm_set;
led_dat->cdev.brightness = LED_OFF;
+ led_dat->cdev.max_brightness = cur_led->max_brightness;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
new file mode 100644
index 0000000..7f00de3
--- /dev/null
+++ b/drivers/leds/leds-regulator.c
@@ -0,0 +1,242 @@
+/*
+ * leds-regulator.c - LED class driver for regulator driven LEDs.
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * Inspired by leds-wm8350 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+#include <linux/leds-regulator.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define to_regulator_led(led_cdev) \
+ container_of(led_cdev, struct regulator_led, cdev)
+
+struct regulator_led {
+ struct led_classdev cdev;
+ enum led_brightness value;
+ int enabled;
+ struct mutex mutex;
+ struct work_struct work;
+
+ struct regulator *vcc;
+};
+
+static inline int led_regulator_get_max_brightness(struct regulator *supply)
+{
+ int ret;
+ int voltage = regulator_list_voltage(supply, 0);
+
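+ /* a regulator that can't report voltages only supports on/off */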
+ if (voltage <= 0)
+ return 1;
+
+ /* even if the regulator can't change voltages,
+ * we still assume it can change its status,
+ * so the LED can at least be turned on and off.
+ */
+ ret = regulator_set_voltage(supply, voltage, voltage);
+ if (ret < 0)
+ return 1;
+
+ return regulator_count_voltages(supply);
+}
+
+static int led_regulator_get_voltage(struct regulator *supply,
+ enum led_brightness brightness)
+{
+ if (brightness == 0)
+ return -EINVAL;
+
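+ /* brightness n selects the regulator's (n - 1)th voltage step */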
+ return regulator_list_voltage(supply, brightness - 1);
+}
+
+
+static void regulator_led_enable(struct regulator_led *led)
+{
+ int ret;
+
+ if (led->enabled)
+ return;
+
+ ret = regulator_enable(led->vcc);
+ if (ret != 0) {
+ dev_err(led->cdev.dev, "Failed to enable vcc: %d\n", ret);
+ return;
+ }
+
+ led->enabled = 1;
+}
+
+static void regulator_led_disable(struct regulator_led *led)
+{
+ int ret;
+
+ if (!led->enabled)
+ return;
+
+ ret = regulator_disable(led->vcc);
+ if (ret != 0) {
+ dev_err(led->cdev.dev, "Failed to disable vcc: %d\n", ret);
+ return;
+ }
+
+ led->enabled = 0;
+}
+
+static void regulator_led_set_value(struct regulator_led *led)
+{
+ int voltage;
+ int ret;
+
+ mutex_lock(&led->mutex);
+
+ if (led->value == LED_OFF) {
+ regulator_led_disable(led);
+ goto out;
+ }
+
+ if (led->cdev.max_brightness > 1) {
+ voltage = led_regulator_get_voltage(led->vcc, led->value);
+ dev_dbg(led->cdev.dev, "brightness: %d voltage: %d\n",
+ led->value, voltage);
+
+ ret = regulator_set_voltage(led->vcc, voltage, voltage);
+ if (ret != 0)
+ dev_err(led->cdev.dev, "Failed to set voltage %d: %d\n",
+ voltage, ret);
+ }
+
+ regulator_led_enable(led);
+
+out:
+ mutex_unlock(&led->mutex);
+}
+
+static void led_work(struct work_struct *work)
+{
+ struct regulator_led *led;
+
+ led = container_of(work, struct regulator_led, work);
+ regulator_led_set_value(led);
+}
+
+static void regulator_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct regulator_led *led = to_regulator_led(led_cdev);
+
+ led->value = value;
+ schedule_work(&led->work);
+}
+
+static int __devinit regulator_led_probe(struct platform_device *pdev)
+{
+ struct led_regulator_platform_data *pdata = pdev->dev.platform_data;
+ struct regulator_led *led;
+ struct regulator *vcc;
+ int ret = 0;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENODEV;
+ }
+
+ vcc = regulator_get_exclusive(&pdev->dev, "vled");
+ if (IS_ERR(vcc)) {
+ dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
+ return PTR_ERR(vcc);
+ }
+
+ led = kzalloc(sizeof(*led), GFP_KERNEL);
+ if (led == NULL) {
+ ret = -ENOMEM;
+ goto err_vcc;
+ }
+
+ led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
+ if (pdata->brightness > led->cdev.max_brightness) {
+ dev_err(&pdev->dev, "Invalid default brightness %d\n",
+ pdata->brightness);
+ ret = -EINVAL;
+ goto err_led;
+ }
+ led->value = pdata->brightness;
+
+ led->cdev.brightness_set = regulator_led_brightness_set;
+ led->cdev.name = pdata->name;
+ led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led->vcc = vcc;
+
+ mutex_init(&led->mutex);
+ INIT_WORK(&led->work, led_work);
+
+ platform_set_drvdata(pdev, led);
+
+ ret = led_classdev_register(&pdev->dev, &led->cdev);
+ if (ret < 0) {
+ cancel_work_sync(&led->work);
+ goto err_led;
+ }
+
+ /* to expose the default value to userspace */
+ led->cdev.brightness = led->value;
+
+ /* Set the default led status */
+ regulator_led_set_value(led);
+
+ return 0;
+
+err_led:
+ kfree(led);
+err_vcc:
+ regulator_put(vcc);
+ return ret;
+}
+
+static int __devexit regulator_led_remove(struct platform_device *pdev)
+{
+ struct regulator_led *led = platform_get_drvdata(pdev);
+
+ led_classdev_unregister(&led->cdev);
+ cancel_work_sync(&led->work);
+ regulator_led_disable(led);
+ regulator_put(led->vcc);
+ kfree(led);
+ return 0;
+}
+
+static struct platform_driver regulator_led_driver = {
+ .driver = {
+ .name = "leds-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = regulator_led_probe,
+ .remove = __devexit_p(regulator_led_remove),
+};
+
+static int __init regulator_led_init(void)
+{
+ return platform_driver_register(&regulator_led_driver);
+}
+module_init(regulator_led_init);
+
+static void __exit regulator_led_exit(void)
+{
+ platform_driver_unregister(&regulator_led_driver);
+}
+module_exit(regulator_led_exit);
+
+MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
+MODULE_DESCRIPTION("Regulator driven LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-regulator");
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
new file mode 100644
index 0000000..97f0498
--- /dev/null
+++ b/drivers/leds/leds-ss4200.c
@@ -0,0 +1,556 @@
+/*
+ * SS4200-E Hardware API
+ * Copyright (c) 2009, Intel Corporation.
+ * Copyright IBM Corporation, 2009
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Author: Dave Hansen <dave@sr71.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+MODULE_AUTHOR("Rodney Girod <rgirod@confocus.com>, Dave Hansen <dave@sr71.net>");
+MODULE_DESCRIPTION("Intel NAS/Home Server ICH7 GPIO Driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * ICH7 LPC/GPIO PCI Config register offsets
+ */
+#define PMBASE 0x040
+#define GPIO_BASE 0x048
+#define GPIO_CTRL 0x04c
+#define GPIO_EN 0x010
+
+/*
+ * The ICH7 GPIO register block is 64 bytes in size.
+ */
+#define ICH7_GPIO_SIZE 64
+
+/*
+ * Define register offsets within the ICH7 register block.
+ */
+#define GPIO_USE_SEL 0x000
+#define GP_IO_SEL 0x004
+#define GP_LVL 0x00c
+#define GPO_BLINK 0x018
+#define GPI_INV 0x030
+#define GPIO_USE_SEL2 0x034
+#define GP_IO_SEL2 0x038
+#define GP_LVL2 0x03c
+
+/*
+ * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
+ */
+static struct pci_device_id ich7_lpc_pci_id[] =
+{
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
+ { } /* NULL entry */
+};
+
+MODULE_DEVICE_TABLE(pci, ich7_lpc_pci_id);
+
+static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id)
+{
+ pr_info("detected '%s'\n", id->ident);
+ return 1;
+}
+
+static unsigned int __initdata nodetect;
+module_param_named(nodetect, nodetect, bool, 0);
+MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
+
+/*
+ * struct nas_led_whitelist - List of known good models
+ *
+ * Contains the known good models this driver is compatible with.
+ * When adding a new model try to be as strict as possible. This
+ * makes it possible to keep the false positives (the model is
+ * detected as working, but in reality it is not) as low as
+ * possible.
+ */
+static struct dmi_system_id __initdata nas_led_whitelist[] = {
+ {
+ .callback = ss4200_led_dmi_callback,
+ .ident = "Intel SS4200-E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SS4200-E"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
+ }
+ },
+};
+
+/*
+ * Base I/O address assigned to the Power Management register block
+ */
+static u32 g_pm_io_base;
+
+/*
+ * Base I/O address assigned to the ICH7 GPIO register block
+ */
+static u32 nas_gpio_io_base;
+
+/*
+ * When we successfully register a region, we are returned a resource.
+ * We use these to identify which regions we need to release on our way
+ * back out.
+ */
+static struct resource *gp_gpio_resource;
+
+struct nasgpio_led {
+ char *name;
+ u32 gpio_bit;
+ struct led_classdev led_cdev;
+};
+
+/*
+ * gpio_bit(s) are the ICH7 GPIO bit assignments
+ */
+static struct nasgpio_led nasgpio_leds[] = {
+ { .name = "hdd1:blue:sata", .gpio_bit = 0 },
+ { .name = "hdd1:amber:sata", .gpio_bit = 1 },
+ { .name = "hdd2:blue:sata", .gpio_bit = 2 },
+ { .name = "hdd2:amber:sata", .gpio_bit = 3 },
+ { .name = "hdd3:blue:sata", .gpio_bit = 4 },
+ { .name = "hdd3:amber:sata", .gpio_bit = 5 },
+ { .name = "hdd4:blue:sata", .gpio_bit = 6 },
+ { .name = "hdd4:amber:sata", .gpio_bit = 7 },
+ { .name = "power:blue:power", .gpio_bit = 27},
+ { .name = "power:amber:power", .gpio_bit = 28},
+};
+
+#define NAS_RECOVERY 0x00000400 /* GPIO10 */
+
+static struct nasgpio_led *
+led_classdev_to_nasgpio_led(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct nasgpio_led, led_cdev);
+}
+
+static struct nasgpio_led *get_led_named(char *name)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) {
+ if (strcmp(nasgpio_leds[i].name, name))
+ continue;
+ return &nasgpio_leds[i];
+ }
+ return NULL;
+}
+
+/*
+ * This protects access to the gpio ports.
+ */
+static DEFINE_SPINLOCK(nasgpio_gpio_lock);
+
+/*
+ * There are two gpio ports, one for blinking and the other
+ * for power. @port tells us if we're doing blinking or
+ * power control.
+ *
+ * Caller must hold nasgpio_gpio_lock
+ */
+static void __nasgpio_led_set_attr(struct led_classdev *led_cdev,
+ u32 port, u32 value)
+{
+ struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev);
+ u32 gpio_out;
+
+ gpio_out = inl(nas_gpio_io_base + port);
+ if (value)
+ gpio_out |= (1<<led->gpio_bit);
+ else
+ gpio_out &= ~(1<<led->gpio_bit);
+
+ outl(gpio_out, nas_gpio_io_base + port);
+}
+
+static void nasgpio_led_set_attr(struct led_classdev *led_cdev,
+ u32 port, u32 value)
+{
+ spin_lock(&nasgpio_gpio_lock);
+ __nasgpio_led_set_attr(led_cdev, port, value);
+ spin_unlock(&nasgpio_gpio_lock);
+}
+
+u32 nasgpio_led_get_attr(struct led_classdev *led_cdev, u32 port)
+{
+ struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev);
+ u32 gpio_in;
+
+ spin_lock(&nasgpio_gpio_lock);
+ gpio_in = inl(nas_gpio_io_base + port);
+ spin_unlock(&nasgpio_gpio_lock);
+ if (gpio_in & (1<<led->gpio_bit))
+ return 1;
+ return 0;
+}
+
+/*
+ * There is actual brightness control in the hardware,
+ * but it is via smbus commands and not implemented
+ * in this driver.
+ */
+static void nasgpio_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ u32 setting = 0;
+ if (brightness >= LED_HALF)
+ setting = 1;
+ /*
+ * Hold the lock across both operations. This ensures
+ * consistency so that both the "turn off blinking"
+ * and "turn light off" operations complete as a set.
+ */
+ spin_lock(&nasgpio_gpio_lock);
+ /*
+ * LED class documentation asks that past blink state
+ * be disabled when brightness is turned to zero.
+ */
+ if (brightness == 0)
+ __nasgpio_led_set_attr(led_cdev, GPO_BLINK, 0);
+ __nasgpio_led_set_attr(led_cdev, GP_LVL, setting);
+ spin_unlock(&nasgpio_gpio_lock);
+}
+
+static int nasgpio_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u32 setting = 1;
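+ /* the hardware only supports one fixed ~1 Hz blink rate; reject anything else */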
+ if (!(*delay_on == 0 && *delay_off == 0) &&
+ !(*delay_on == 500 && *delay_off == 500))
+ return -EINVAL;
+ /*
+ * These are very approximate.
+ */
+ *delay_on = 500;
+ *delay_off = 500;
+
+ nasgpio_led_set_attr(led_cdev, GPO_BLINK, setting);
+
+ return 0;
+}
+
+
+/*
+ * Initialize the ICH7 GPIO registers for NAS usage. The BIOS should have
+ * already taken care of this, but we will do so in a non-destructive manner
+ * so that we have what we need whether the BIOS did it or not.
+ */
+static int __devinit ich7_gpio_init(struct device *dev)
+{
+ int i;
+ u32 config_data = 0;
+ u32 all_nas_led = 0;
+
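+ /* build a mask covering every GPIO bit used by the front-panel LEDs */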
+ for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++)
+ all_nas_led |= (1<<nasgpio_leds[i].gpio_bit);
+
+ spin_lock(&nasgpio_gpio_lock);
+ /*
+ * We need to enable all of the GPIO lines used by the NAS box,
+ * so we will read the current Use Selection and add our usage
+ * to it. This should be benign with regard to the original
+ * BIOS configuration.
+ */
+ config_data = inl(nas_gpio_io_base + GPIO_USE_SEL);
+ dev_dbg(dev, ": Data read from GPIO_USE_SEL = 0x%08x\n", config_data);
+ config_data |= all_nas_led + NAS_RECOVERY;
+ outl(config_data, nas_gpio_io_base + GPIO_USE_SEL);
+ config_data = inl(nas_gpio_io_base + GPIO_USE_SEL);
+ dev_dbg(dev, ": GPIO_USE_SEL = 0x%08x\n\n", config_data);
+
+ /*
+ * The LED GPIO outputs need to be configured for output, so we
+ * will ensure that all LED lines are cleared for output and the
+ * RECOVERY line ready for input. This too should be benign with
+ * regard to BIOS configuration.
+ */
+ config_data = inl(nas_gpio_io_base + GP_IO_SEL);
+ dev_dbg(dev, ": Data read from GP_IO_SEL = 0x%08x\n",
+ config_data);
+ config_data &= ~all_nas_led;
+ config_data |= NAS_RECOVERY;
+ outl(config_data, nas_gpio_io_base + GP_IO_SEL);
+ config_data = inl(nas_gpio_io_base + GP_IO_SEL);
+ dev_dbg(dev, ": GP_IO_SEL = 0x%08x\n", config_data);
+
+ /*
+ * In our final system, the BIOS will initialize the state of all
+ * of the LEDs. For now, we turn them all off (or Low).
+ */
+ config_data = inl(nas_gpio_io_base + GP_LVL);
+ dev_dbg(dev, ": Data read from GP_LVL = 0x%08x\n", config_data);
+ /*
+ * In our final system, the BIOS will initialize the blink state of all
+ * of the LEDs. For now, we turn blink off for all of them.
+ */
+ config_data = inl(nas_gpio_io_base + GPO_BLINK);
+ dev_dbg(dev, ": Data read from GPO_BLINK = 0x%08x\n", config_data);
+
+ /*
+ * At this moment, I am unsure if anything needs to happen with GPI_INV
+ */
+ config_data = inl(nas_gpio_io_base + GPI_INV);
+ dev_dbg(dev, ": Data read from GPI_INV = 0x%08x\n", config_data);
+
+ spin_unlock(&nasgpio_gpio_lock);
+ return 0;
+}
+
+static void ich7_lpc_cleanup(struct device *dev)
+{
+ /*
+ * If we were given exclusive use of the GPIO
+ * I/O Address range, we must return it.
+ */
+ if (gp_gpio_resource) {
+ dev_dbg(dev, ": Releasing GPIO I/O addresses\n");
+ release_region(nas_gpio_io_base, ICH7_GPIO_SIZE);
+ gp_gpio_resource = NULL;
+ }
+}
+
+/*
+ * The OS has determined that the LPC of the Intel ICH7 Southbridge is present
+ * so we can retrieve the required operational information and prepare the GPIO.
+ */
+static struct pci_dev *nas_gpio_pci_dev;
+static int __devinit ich7_lpc_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int status;
+ u32 gc = 0;
+
+ status = pci_enable_device(dev);
+ if (status) {
+ dev_err(&dev->dev, "pci_enable_device failed\n");
+ return -EIO;
+ }
+
+ nas_gpio_pci_dev = dev;
+ status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base);
+ if (status)
+ goto out;
+ g_pm_io_base &= 0x00000ff80;
+
+ status = pci_read_config_dword(dev, GPIO_CTRL, &gc);
+ if (!(GPIO_EN & gc)) {
+ status = -EEXIST;
+ dev_info(&dev->dev,
+ "ERROR: The LPC GPIO Block has not been enabled.\n");
+ goto out;
+ }
+
+ status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base);
+ if (0 > status) {
+ dev_info(&dev->dev, "Unable to read GPIOBASE.\n");
+ goto out;
+ }
+ dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base);
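+ /* keep only the base address bits of the 64-byte-aligned GPIO I/O block */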
+ nas_gpio_io_base &= 0x00000ffc0;
+
+ /*
+ * Ensure that we have exclusive access to the GPIO I/O address range.
+ */
+ gp_gpio_resource = request_region(nas_gpio_io_base, ICH7_GPIO_SIZE,
+ KBUILD_MODNAME);
+ if (NULL == gp_gpio_resource) {
+ dev_info(&dev->dev,
+ "ERROR Unable to register GPIO I/O addresses.\n");
+ status = -1;
+ goto out;
+ }
+
+ /*
+ * Initialize the GPIO for NAS/Home Server Use
+ */
+ ich7_gpio_init(&dev->dev);
+
+out:
+ if (status) {
+ ich7_lpc_cleanup(&dev->dev);
+ pci_disable_device(dev);
+ }
+ return status;
+}
+
+static void ich7_lpc_remove(struct pci_dev *dev)
+{
+ ich7_lpc_cleanup(&dev->dev);
+ pci_disable_device(dev);
+}
+
+/*
+ * pci_driver structure passed to the PCI modules
+ */
+static struct pci_driver nas_gpio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ich7_lpc_pci_id,
+ .probe = ich7_lpc_probe,
+ .remove = ich7_lpc_remove,
+};
+
+static struct led_classdev *get_classdev_for_led_nr(int nr)
+{
+ struct nasgpio_led *nas_led = &nasgpio_leds[nr];
+ struct led_classdev *led = &nas_led->led_cdev;
+ return led;
+}
+
+
+static void set_power_light_amber_noblink(void)
+{
+ struct nasgpio_led *amber = get_led_named("power:amber:power");
+ struct nasgpio_led *blue = get_led_named("power:blue:power");
+
+ if (!amber || !blue)
+ return;
+ /*
+ * LED_OFF implies disabling future blinking
+ */
+ pr_debug("setting blue off and amber on\n");
+
+ nasgpio_led_set_brightness(&blue->led_cdev, LED_OFF);
+ nasgpio_led_set_brightness(&amber->led_cdev, LED_FULL);
+}
+
+static ssize_t nas_led_blink_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led = dev_get_drvdata(dev);
+ int blinking = 0;
+ if (nasgpio_led_get_attr(led, GPO_BLINK))
+ blinking = 1;
+ return sprintf(buf, "%u\n", blinking);
+}
+
+static ssize_t nas_led_blink_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ struct led_classdev *led = dev_get_drvdata(dev);
+ unsigned long blink_state;
+
+ ret = strict_strtoul(buf, 10, &blink_state);
+ if (ret)
+ return ret;
+
+ nasgpio_led_set_attr(led, GPO_BLINK, blink_state);
+
+ return size;
+}
+
+static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store);
+
+static int register_nasgpio_led(int led_nr)
+{
+ int ret;
+ struct nasgpio_led *nas_led = &nasgpio_leds[led_nr];
+ struct led_classdev *led = get_classdev_for_led_nr(led_nr);
+
+ led->name = nas_led->name;
+ led->brightness = LED_OFF;
+ if (nasgpio_led_get_attr(led, GP_LVL))
+ led->brightness = LED_FULL;
+ led->brightness_set = nasgpio_led_set_brightness;
+ led->blink_set = nasgpio_led_set_blink;
+ ret = led_classdev_register(&nas_gpio_pci_dev->dev, led);
+ if (ret)
+ return ret;
+ ret = device_create_file(led->dev, &dev_attr_blink);
+ if (ret)
+ led_classdev_unregister(led);
+ return ret;
+}
+
+static void unregister_nasgpio_led(int led_nr)
+{
+ struct led_classdev *led = get_classdev_for_led_nr(led_nr);
+ led_classdev_unregister(led);
+ device_remove_file(led->dev, &dev_attr_blink);
+}
+/*
+ * module load/initialization
+ */
+static int __init nas_gpio_init(void)
+{
+ int i;
+ int ret = 0;
+ int nr_devices = 0;
+
+ nr_devices = dmi_check_system(nas_led_whitelist);
+ if (nodetect) {
+ pr_info("skipping hardware autodetection\n");
+ pr_info("Please send 'dmidecode' output to dave@sr71.net\n");
+ nr_devices++;
+ }
+
+ if (nr_devices <= 0) {
+ pr_info("no LED devices found\n");
+ return -ENODEV;
+ }
+
+ pr_info("registering PCI driver\n");
+ ret = pci_register_driver(&nas_gpio_pci_driver);
+ if (ret)
+ return ret;
+ for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) {
+ ret = register_nasgpio_led(i);
+ if (ret)
+ goto out_err;
+ }
+ /*
+ * When the system powers on, the BIOS leaves the power
+ * light blue and blinking. This will turn it solid
+ * amber once the driver is loaded.
+ */
+ set_power_light_amber_noblink();
+ return 0;
+out_err:
+ for (; i >= 0; i--)
+ unregister_nasgpio_led(i);
+ pci_unregister_driver(&nas_gpio_pci_driver);
+ return ret;
+}
+
+/*
+ * module unload
+ */
+static void __exit nas_gpio_exit(void)
+{
+ int i;
+ pr_info("Unregistering driver\n");
+ for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++)
+ unregister_nasgpio_led(i);
+ pci_unregister_driver(&nas_gpio_pci_driver);
+}
+
+module_init(nas_gpio_init);
+module_exit(nas_gpio_exit);
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 951c57b..ede4658 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -179,8 +179,10 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
* We assume the Guest has the same number of GDT entries as the
* Host, otherwise we'd have to dynamically allocate the Guest GDT.
*/
- if (num >= ARRAY_SIZE(cpu->arch.gdt))
+ if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
kill_guest(cpu, "too many gdt entries %i", num);
+ return;
+ }
/* Set it up, then fix it. */
cpu->arch.gdt[num].a = lo;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 96faa79..f96feeb 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -660,7 +660,7 @@ static int smu_platform_probe(struct of_device* dev,
return 0;
}
-static struct of_device_id smu_platform_match[] =
+static const struct of_device_id smu_platform_match[] =
{
{
.type = "smu",
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index ea32c7e..454bc50 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2211,7 +2211,7 @@ static int fcu_of_remove(struct of_device* dev)
return 0;
}
-static struct of_device_id fcu_match[] =
+static const struct of_device_id fcu_match[] =
{
{
.type = "fcu",
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 3fbe41b..ba48fd7 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -457,7 +457,7 @@ therm_of_remove( struct of_device *dev )
return 0;
}
-static struct of_device_id therm_of_match[] = {{
+static const struct of_device_id therm_of_match[] = {{
.name = "fan",
.compatible = "adm1030"
}, {}
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 54abf9e..f1c8cae 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
{
int r = 0;
size_t dummy = 0;
- int overhead_size =
- sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
+ int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
struct dm_ulog_request *tfr = prealloced_ulog_tfr;
struct receiving_pkg pkg;
+ /*
+ * Given the space needed to hold the 'struct cn_msg' and
+ * 'struct dm_ulog_request' - do we have enough payload
+ * space remaining?
+ */
if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
DMINFO("Size of tfr exceeds preallocated size");
return -EINVAL;
@@ -191,7 +195,7 @@ resend:
*/
mutex_lock(&dm_ulog_lock);
- memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
+ memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
memcpy(tfr->uuid, uuid, DM_UUID_LEN);
tfr->luid = luid;
tfr->seq = dm_ulog_seq++;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ad779bd..6c1046d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
/*
* Dispatch io.
*/
- if (unlikely(ms->log_failure)) {
+ if (unlikely(ms->log_failure) && errors_handled(ms)) {
spin_lock_irq(&ms->lock);
bio_list_merge(&ms->failures, &sync);
spin_unlock_irq(&ms->lock);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 5f19ceb..168bd38 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
spin_lock_irq(&rh->region_lock);
if (success)
list_add(&reg->list, &reg->rh->recovered_regions);
- else {
- reg->state = DM_RH_NOSYNC;
+ else
list_add(&reg->list, &reg->rh->failed_recovered_regions);
- }
+
spin_unlock_irq(&rh->region_lock);
rh->wakeup_workers(rh->context);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 7d08879..c097d8a 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
* Issue the synchronous I/O from a different thread
* to avoid generic_make_request recursion.
*/
- INIT_WORK(&req.work, do_metadata);
+ INIT_WORK_ON_STACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
flush_workqueue(ps->metadata_wq);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e0efc1a..bd58703 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
stripes = simple_strtoul(argv[0], &end, 10);
- if (*end) {
+ if (!stripes || *end) {
ti->error = "Invalid stripe count";
return -EINVAL;
}
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index f53392d..f91b409 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = {
};
/*
- * The sysfs structure is embedded in md struct, nothing to do here
- */
-static void dm_sysfs_release(struct kobject *kobj)
-{
-}
-
-/*
* dm kobject is embedded in mapped_device structure
* no need to define release function here
*/
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
- .release = dm_sysfs_release
};
/*
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index be62547..4b22feb 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -503,16 +503,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
- if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
- DMWARN("%s: target device %s is misaligned: "
+ if (bdev_stack_limits(limits, bdev, start) < 0)
+ DMWARN("%s: adding target device %s caused an alignment inconsistency: "
"physical_block_size=%u, logical_block_size=%u, "
"alignment_offset=%u, start=%llu",
dm_device_name(ti->table->md), bdevname(bdev, b),
q->limits.physical_block_size,
q->limits.logical_block_size,
q->limits.alignment_offset,
- (unsigned long long) start << 9);
-
+ (unsigned long long) start << SECTOR_SHIFT);
/*
* Check if merge fn is supported.
@@ -1026,9 +1025,9 @@ combine_limits:
* for the table.
*/
if (blk_stack_limits(limits, &ti_limits, 0) < 0)
- DMWARN("%s: target device "
+ DMWARN("%s: adding target device "
"(start sect %llu len %llu) "
- "is misaligned",
+ "caused an alignment inconsistency",
dm_device_name(table->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
@@ -1080,15 +1079,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
/*
- * Each target device in the table has a data area that should normally
- * be aligned such that the DM device's alignment_offset is 0.
- * FIXME: Propagate alignment_offsets up the stack and warn of
- * sub-optimal or inconsistent settings.
- */
- limits->alignment_offset = 0;
- limits->misaligned = 0;
-
- /*
* Copy table's limits to the DM device's request_queue
*/
q->limits = *limits;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3167480..aa4e2aa 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_OK;
}
-static void map_request(struct dm_target *ti, struct request *clone,
- struct mapped_device *md)
+/*
+ * Returns:
+ * 0 : the request has been processed (not requeued)
+ * !0 : the request has been requeued
+ */
+static int map_request(struct dm_target *ti, struct request *clone,
+ struct mapped_device *md)
{
- int r;
+ int r, requeued = 0;
struct dm_rq_target_io *tio = clone->end_io_data;
/*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
dm_requeue_unmapped_request(clone);
+ requeued = 1;
break;
default:
if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
dm_kill_unmapped_request(clone, r);
break;
}
+
+ return requeued;
}
/*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
atomic_inc(&md->pending[rq_data_dir(clone)]);
spin_unlock(q->queue_lock);
- map_request(ti, clone, md);
+ if (map_request(ti, clone, md))
+ goto requeued;
+
spin_lock_irq(q->queue_lock);
}
goto out;
+requeued:
+ spin_lock_irq(q->queue_lock);
+
plug_and_out:
if (!elv_queue_empty(q))
/* Some requests still remain, retry later */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f4f5f82..a20a71e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -386,7 +386,9 @@ static void mddev_put(mddev_t *mddev)
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
if (!mddev->raid_disks && list_empty(&mddev->disks) &&
- !mddev->hold_active) {
+ mddev->ctime == 0 && !mddev->hold_active) {
+ /* Array is not configured at all, and not held active,
+ * so destroy it */
list_del(&mddev->all_mddevs);
if (mddev->gendisk) {
/* we did a probe so need to clean up.
@@ -4073,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, del_work);
- if (mddev->private == &md_redundancy_group) {
+ if (mddev->private) {
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->private != (void*)1)
+ sysfs_remove_group(&mddev->kobj, mddev->private);
if (mddev->sysfs_action)
sysfs_put(mddev->sysfs_action);
mddev->sysfs_action = NULL;
@@ -4285,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
sysfs_notify_dirent(rdev->sysfs_state);
}
- md_probe(mddev->unit, NULL, NULL);
disk = mddev->gendisk;
- if (!disk)
- return -ENOMEM;
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -4355,7 +4356,7 @@ static int do_md_run(mddev_t * mddev)
mddev->barriers_work = 1;
mddev->ok_start_degraded = start_dirty_degraded;
- if (start_readonly)
+ if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
err = mddev->pers->run(mddev);
@@ -4419,33 +4420,6 @@ static int do_md_run(mddev_t * mddev)
set_capacity(disk, mddev->array_sectors);
- /* If there is a partially-recovered drive we need to
- * start recovery here. If we leave it to md_check_recovery,
- * it will remove the drives and not do the right thing
- */
- if (mddev->degraded && !mddev->sync_thread) {
- int spares = 0;
- list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0 &&
- !test_bit(In_sync, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags))
- /* complete an interrupted recovery */
- spares++;
- if (spares && mddev->pers->sync_request) {
- mddev->recovery = 0;
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- mddev->sync_thread = md_register_thread(md_do_sync,
- mddev,
- "resync");
- if (!mddev->sync_thread) {
- printk(KERN_ERR "%s: could not start resync"
- " thread...\n",
- mdname(mddev));
- /* leave the spares where they are, it shouldn't hurt */
- mddev->recovery = 0;
- }
- }
- }
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
@@ -4555,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
module_put(mddev->pers->owner);
- if (mddev->pers->sync_request)
- mddev->private = &md_redundancy_group;
+ if (mddev->pers->sync_request && mddev->private == NULL)
+ mddev->private = (void*)1;
mddev->pers = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent(mddev->sysfs_state);
@@ -4603,9 +4577,6 @@ out:
}
mddev->bitmap_info.offset = 0;
- /* make sure all md_delayed_delete calls have finished */
- flush_scheduled_work();
-
export_array(mddev);
mddev->array_sectors = 0;
@@ -5262,6 +5233,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
mddev->minor_version = info->minor_version;
mddev->patch_version = info->patch_version;
mddev->persistent = !info->not_persistent;
+ /* ensure mddev_put doesn't delete this now that there
+ * is some minimal configuration.
+ */
+ mddev->ctime = get_seconds();
return 0;
}
mddev->major_version = MD_MAJOR_VERSION;
@@ -6494,10 +6469,11 @@ void md_do_sync(mddev_t *mddev)
mddev->curr_resync = 2;
try_again:
- if (kthread_should_stop()) {
+ if (kthread_should_stop())
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
goto skip;
- }
for_each_mddev(mddev2, tmp) {
if (mddev2 == mddev)
continue;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e84204e..ceb24af 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev)
mddev->thread = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
- sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
free_conf(conf);
- mddev->private = NULL;
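+ /* the raid5 sysfs group is removed later by mddev_delayed_delete() via ->private */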
+ mddev->private = &raid5_attrs_group;
return 0;
}
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev)
!test_bit(Faulty, &rdev->flags)) {
if (raid5_add_disk(mddev, rdev) == 0) {
char nm[20];
- if (rdev->raid_disk >= conf->previous_raid_disks)
+ if (rdev->raid_disk >= conf->previous_raid_disks) {
set_bit(In_sync, &rdev->flags);
- else
+ added_devices++;
+ } else
rdev->recovery_offset = 0;
- added_devices++;
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev)
break;
}
+ /* When a reshape changes the number of devices, ->degraded
+ * is measured against the larger of the pre- and post-reshape number of
+ * devices. */
if (mddev->delta_disks > 0) {
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
+ mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
- added_devices;
spin_unlock_irqrestore(&conf->device_lock, flags);
}
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index bff7a53..b521ed9 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -13,7 +13,7 @@
*/
-#include <linux/usb/input.h>
+#include <linux/input.h>
#include <media/ir-common.h>
#define IR_TAB_MIN_SIZE 32
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index becbaad..5ed7526 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1333,9 +1333,9 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
DEB_CAP(("vbuf:%p\n",vb));
- release_all_pagetables(dev, buf);
-
saa7146_dma_free(dev,q,buf);
+
+ release_all_pagetables(dev, buf);
}
static struct videobuf_queue_ops video_qops = {
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index c190b0d..2833137 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -144,7 +144,8 @@ static void set_audio(struct dvb_frontend *fe,
}
if (params->mode == V4L2_TUNER_RADIO) {
- priv->tda8290_easy_mode = 0x01; /* Start with MN values */
+ /* Set TDA8295 to FM radio; Start TDA8290 with MN values */
+ priv->tda8290_easy_mode = (priv->ver & TDA8295) ? 0x80 : 0x01;
tuner_dbg("setting to radio FM\n");
} else {
tuner_dbg("setting tda829x to system %s\n", mode);
@@ -672,16 +673,19 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props)
static int tda8295_probe(struct tuner_i2c_props *i2c_props)
{
#define TDA8295_ID 0x8a
+#define TDA8295C2_ID 0x8b
unsigned char tda8295_id[] = { 0x2f, 0x00 };
/* detect tda8295 */
tuner_i2c_xfer_send(i2c_props, &tda8295_id[0], 1);
tuner_i2c_xfer_recv(i2c_props, &tda8295_id[1], 1);
- if (tda8295_id[1] == TDA8295_ID) {
+ if ((tda8295_id[1] & 0xfe) == TDA8295_ID) {
if (debug)
- printk(KERN_DEBUG "%s: tda8295 detected @ %d-%04x\n",
- __func__, i2c_adapter_id(i2c_props->adap),
+ printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n",
+ __func__, (tda8295_id[1] == TDA8295_ID) ?
+ "tda8295c1" : "tda8295c2",
+ i2c_adapter_id(i2c_props->adap),
i2c_props->addr);
return 0;
}
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index 35d0817..cf8f65f 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -72,6 +72,10 @@ comment "Supported Earthsoft PT1 Adapters"
depends on DVB_CORE && PCI && I2C
source "drivers/media/dvb/pt1/Kconfig"
+comment "Supported Mantis Adapters"
+ depends on DVB_CORE && PCI && I2C
+ source "drivers/media/dvb/mantis/Kconfig"
+
comment "Supported DVB Frontends"
depends on DVB_CORE
source "drivers/media/dvb/frontends/Kconfig"
diff --git a/drivers/media/dvb/Makefile b/drivers/media/dvb/Makefile
index 16d262d..c12922c 100644
--- a/drivers/media/dvb/Makefile
+++ b/drivers/media/dvb/Makefile
@@ -2,6 +2,18 @@
# Makefile for the kernel multimedia device drivers.
#
-obj-y := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ dvb-usb/ pluto2/ siano/ dm1105/ pt1/
+obj-y := dvb-core/ \
+ frontends/ \
+ ttpci/ \
+ ttusb-dec/ \
+ ttusb-budget/ \
+ b2c2/ \
+ bt8xx/ \
+ dvb-usb/ \
+ pluto2/ \
+ siano/ \
+ dm1105/ \
+ pt1/ \
+ mantis/
obj-$(CONFIG_DVB_FIREDTV) += firewire/
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index c37790a..9ddc579 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
- INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
init_timer(&dmxdevfilter->timer);
dvbdev->users++;
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
dmxdevfilter->type = DMXDEV_TYPE_PES;
memcpy(&dmxdevfilter->params, params,
sizeof(struct dmx_pes_filter_params));
+ INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index b78cfb7..67f189b 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
};
};
- if (dvb_demux_tscheck) {
- if (!demux->cnt_storage)
- demux->cnt_storage = vmalloc(MAX_PID + 1);
-
- if (!demux->cnt_storage) {
- printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
- dvb_demux_tscheck = 0;
- goto no_dvb_demux_tscheck;
- }
-
+ if (demux->cnt_storage) {
/* check pkt counter */
if (pid < MAX_PID) {
if (buf[1] & 0x80)
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
};
/* end check */
};
-no_dvb_demux_tscheck:
list_for_each_entry(feed, &demux->feed_list, list_head) {
if ((feed->pid != pid) && (feed->pid != 0x2000))
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
if (!dvbdemux->feed) {
vfree(dvbdemux->filter);
+ dvbdemux->filter = NULL;
return -ENOMEM;
}
for (i = 0; i < dvbdemux->filternum; i++) {
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->feed[i].index = i;
}
+ if (dvb_demux_tscheck) {
+ dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
+
+ if (!dvbdemux->cnt_storage)
+ printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
+ }
+
INIT_LIST_HEAD(&dvbdemux->frontend_list);
for (i = 0; i < DMX_TS_PES_OTHER; i++) {
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 1b24989..465295b 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -112,11 +112,13 @@ config DVB_USB_CXUSB
select DVB_MT352 if !DVB_FE_CUSTOMISE
select DVB_ZL10353 if !DVB_FE_CUSTOMISE
select DVB_DIB7000P if !DVB_FE_CUSTOMISE
- select DVB_LGS8GL5 if !DVB_FE_CUSTOMISE
select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE
+ select DVB_ATBM8830 if !DVB_FE_CUSTOMISE
+ select DVB_LGS8GXX if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_MAX2165 if !MEDIA_TUNER_CUSTOMISE
help
Say Y here to support the Conexant USB2.0 hybrid reference design.
Currently, only DVB and ATSC modes are supported, analog mode
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index fe44789..6223bf0 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -202,14 +202,8 @@ static void handle_fcp(struct fw_card *card, struct fw_request *request,
unsigned long flags;
int su;
- if ((tcode != TCODE_WRITE_QUADLET_REQUEST &&
- tcode != TCODE_WRITE_BLOCK_REQUEST) ||
- offset != CSR_REGISTER_BASE + CSR_FCP_RESPONSE ||
- length == 0 ||
- (((u8 *)payload)[0] & 0xf0) != 0) {
- fw_send_response(card, request, RCODE_TYPE_ERROR);
+ if (length < 2 || (((u8 *)payload)[0] & 0xf0) != 0)
return;
- }
su = ((u8 *)payload)[1] & 0x7;
@@ -230,10 +224,8 @@ static void handle_fcp(struct fw_card *card, struct fw_request *request,
}
spin_unlock_irqrestore(&node_list_lock, flags);
- if (fdtv) {
+ if (fdtv)
avc_recv(fdtv, payload, length);
- fw_send_response(card, request, RCODE_COMPLETE);
- }
}
static struct fw_address_handler fcp_handler = {
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index a3b8b697..cd7f9b7 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -208,6 +208,14 @@ config DVB_DS3000
help
A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+config DVB_MB86A16
+ tristate "Fujitsu MB86A16 based"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+	  A DVB-S/DSS Direct Conversion receiver.
+ Say Y when you want to support this frontend.
+
comment "DVB-T (terrestrial) frontends"
depends on DVB_CORE
@@ -587,6 +595,17 @@ config DVB_ATBM8830
help
A DMB-TH tuner module. Say Y when you want to support this frontend.
+config DVB_TDA665x
+ tristate "TDA665x tuner"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ Support for tuner modules based on Philips TDA6650/TDA6651 chips.
+ Say Y when you want to support this chip.
+
+ Currently supported tuners:
+ * Panasonic ENV57H12D5 (ET-50DT)
+
comment "Tools to develop new frontends"
config DVB_DUMMY_FE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 47575cc..874e8ad 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_DVB_TDA10048) += tda10048.o
obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o
obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
obj-$(CONFIG_DVB_LGS8GL5) += lgs8gl5.o
+obj-$(CONFIG_DVB_TDA665x) += tda665x.o
obj-$(CONFIG_DVB_LGS8GXX) += lgs8gxx.o
obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o
obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o
@@ -80,3 +81,4 @@ obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_DS3000) += ds3000.o
+obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index d99619a..b1ee207 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -100,7 +100,7 @@ static inline int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_
static inline enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return CT_SHUTDOWN,
+ return CT_SHUTDOWN;
}
static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
{
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c
index 3051b64..445fa10 100644
--- a/drivers/media/dvb/frontends/l64781.c
+++ b/drivers/media/dvb/frontends/l64781.c
@@ -192,8 +192,8 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
spi_bias *= qam_tab[p->constellation];
spi_bias /= p->code_rate_HP + 1;
spi_bias /= (guard_tab[p->guard_interval] + 32);
- spi_bias *= 1000ULL;
- spi_bias /= 1000ULL + ppm/1000;
+ spi_bias *= 1000;
+ spi_bias /= 1000 + ppm/1000;
spi_bias *= p->code_rate_HP;
val0x04 = (p->transmission_mode << 2) | p->guard_interval;
diff --git a/drivers/media/dvb/frontends/lgdt3305.h b/drivers/media/dvb/frontends/lgdt3305.h
index 4fa6e52..9cb11c9 100644
--- a/drivers/media/dvb/frontends/lgdt3305.h
+++ b/drivers/media/dvb/frontends/lgdt3305.h
@@ -54,13 +54,13 @@ struct lgdt3305_config {
u16 usref_qam256; /* default: 0x2a80 */
/* disable i2c repeater - 0:repeater enabled 1:repeater disabled */
- int deny_i2c_rptr:1;
+ unsigned int deny_i2c_rptr:1;
/* spectral inversion - 0:disabled 1:enabled */
- int spectral_inversion:1;
+ unsigned int spectral_inversion:1;
/* use RF AGC loop - 0:disabled 1:enabled */
- int rf_agc_loop:1;
+ unsigned int rf_agc_loop:1;
enum lgdt3305_mpeg_mode mpeg_mode;
enum lgdt3305_tp_clock_edge tpclk_edge;
diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c
new file mode 100644
index 0000000..d05f750
--- /dev/null
+++ b/drivers/media/dvb/frontends/mb86a16.c
@@ -0,0 +1,1878 @@
+/*
+ Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "dvb_frontend.h"
+#include "mb86a16.h"
+#include "mb86a16_priv.h"
+
+static unsigned int verbose = 5;
+module_param(verbose, uint, 0644);
+
+#define ABS(x) ((x) < 0 ? (-(x)) : (x))
+
+struct mb86a16_state {
+ struct i2c_adapter *i2c_adap;
+ const struct mb86a16_config *config;
+ struct dvb_frontend frontend;
+
+ /* tuning parameters */
+ int frequency;
+ int srate;
+
+ /* Internal stuff */
+ int master_clk;
+ int deci;
+ int csel;
+ int rsel;
+};
+
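+/* message severity levels understood by dprintk() below */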
+#define MB86A16_ERROR 0
+#define MB86A16_NOTICE 1
+#define MB86A16_INFO 2
+#define MB86A16_DEBUG 3
+
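+/*
+ * dprintk(x, y, z, fmt, ...): print only when the verbosity threshold x is
+ * greater than the message level y; when z is non-zero the output is
+ * prefixed with a kernel log level and the calling function's name.
+ */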
+#define dprintk(x, y, z, format, arg...) do { \
+ if (z) { \
+ if ((x > MB86A16_ERROR) && (x > y)) \
+ printk(KERN_ERR "%s: " format "\n", __func__, ##arg); \
+ else if ((x > MB86A16_NOTICE) && (x > y)) \
+ printk(KERN_NOTICE "%s: " format "\n", __func__, ##arg); \
+ else if ((x > MB86A16_INFO) && (x > y)) \
+ printk(KERN_INFO "%s: " format "\n", __func__, ##arg); \
+ else if ((x > MB86A16_DEBUG) && (x > y)) \
+ printk(KERN_DEBUG "%s: " format "\n", __func__, ##arg); \
+ } else { \
+ if (x > y) \
+ printk(format, ##arg); \
+ } \
+} while (0)
+
+#define TRACE_IN dprintk(verbose, MB86A16_DEBUG, 1, "-->()")
+#define TRACE_OUT dprintk(verbose, MB86A16_DEBUG, 1, "()-->")
+
+static int mb86a16_write(struct mb86a16_state *state, u8 reg, u8 val)
+{
+ int ret;
+ u8 buf[] = { reg, val };
+
+ struct i2c_msg msg = {
+ .addr = state->config->demod_address,
+ .flags = 0,
+ .buf = buf,
+ .len = 2
+ };
+
+ dprintk(verbose, MB86A16_DEBUG, 1,
+ "writing to [0x%02x],Reg[0x%02x],Data[0x%02x]",
+ state->config->demod_address, buf[0], buf[1]);
+
+ ret = i2c_transfer(state->i2c_adap, &msg, 1);
+
+ return (ret != 1) ? -EREMOTEIO : 0;
+}
+
+static int mb86a16_read(struct mb86a16_state *state, u8 reg, u8 *val)
+{
+ int ret;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+
+ struct i2c_msg msg[] = {
+ {
+ .addr = state->config->demod_address,
+ .flags = 0,
+ .buf = b0,
+ .len = 1
+ }, {
+ .addr = state->config->demod_address,
+ .flags = I2C_M_RD,
+ .buf = b1,
+ .len = 1
+ }
+ };
+ ret = i2c_transfer(state->i2c_adap, msg, 2);
+ if (ret != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "read error(reg=0x%02x, ret=0x%i)",
+ reg, ret);
+
+ return -EREMOTEIO;
+ }
+ *val = b1[0];
+
+ return ret;
+}
+
+static int CNTM_set(struct mb86a16_state *state,
+ unsigned char timint1,
+ unsigned char timint2,
+ unsigned char cnext)
+{
+ unsigned char val;
+
+ val = (timint1 << 4) | (timint2 << 2) | cnext;
+ if (mb86a16_write(state, MB86A16_CNTMR, val) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int smrt_set(struct mb86a16_state *state, int rate)
+{
+ int tmp ;
+ int m ;
+ unsigned char STOFS0, STOFS1;
+
+ m = 1 << state->deci;
+ tmp = (8192 * state->master_clk - 2 * m * rate * 8192 + state->master_clk / 2) / state->master_clk;
+
+ STOFS0 = tmp & 0x0ff;
+ STOFS1 = (tmp & 0xf00) >> 8;
+
+ if (mb86a16_write(state, MB86A16_SRATE1, (state->deci << 2) |
+ (state->csel << 1) |
+ state->rsel) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_SRATE2, STOFS0) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_SRATE3, STOFS1) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -1;
+}
+
+static int srst(struct mb86a16_state *state)
+{
+ if (mb86a16_write(state, MB86A16_RESET, 0x04) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+
+}
+
+static int afcex_data_set(struct mb86a16_state *state,
+ unsigned char AFCEX_L,
+ unsigned char AFCEX_H)
+{
+ if (mb86a16_write(state, MB86A16_AFCEXL, AFCEX_L) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_AFCEXH, AFCEX_H) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+
+ return -1;
+}
+
+static int afcofs_data_set(struct mb86a16_state *state,
+ unsigned char AFCEX_L,
+ unsigned char AFCEX_H)
+{
+ if (mb86a16_write(state, 0x58, AFCEX_L) < 0)
+ goto err;
+ if (mb86a16_write(state, 0x59, AFCEX_H) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int stlp_set(struct mb86a16_state *state,
+ unsigned char STRAS,
+ unsigned char STRBS)
+{
+ if (mb86a16_write(state, MB86A16_STRFILTCOEF1, (STRBS << 3) | (STRAS)) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int Vi_set(struct mb86a16_state *state, unsigned char ETH, unsigned char VIA)
+{
+ if (mb86a16_write(state, MB86A16_VISET2, 0x04) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_VISET3, 0xf5) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int initial_set(struct mb86a16_state *state)
+{
+ if (stlp_set(state, 5, 7))
+ goto err;
+
+ udelay(100);
+ if (afcex_data_set(state, 0, 0))
+ goto err;
+
+ udelay(100);
+ if (afcofs_data_set(state, 0, 0))
+ goto err;
+
+ udelay(100);
+ if (mb86a16_write(state, MB86A16_CRLFILTCOEF1, 0x16) < 0)
+ goto err;
+ if (mb86a16_write(state, 0x2f, 0x21) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_VIMAG, 0x38) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS1, 0x00) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS2, 0x1c) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS3, 0x20) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS4, 0x1e) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS5, 0x23) < 0)
+ goto err;
+ if (mb86a16_write(state, 0x54, 0xff) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_TSOUT, 0x00) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int S01T_set(struct mb86a16_state *state,
+ unsigned char s1t,
+ unsigned s0t)
+{
+ if (mb86a16_write(state, 0x33, (s1t << 3) | s0t) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+
+static int EN_set(struct mb86a16_state *state,
+ int cren,
+ int afcen)
+{
+ unsigned char val;
+
+ val = 0x7a | (cren << 7) | (afcen << 2);
+ if (mb86a16_write(state, 0x49, val) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int AFCEXEN_set(struct mb86a16_state *state,
+ int afcexen,
+ int smrt)
+{
+ unsigned char AFCA ;
+
+ if (smrt > 18875)
+ AFCA = 4;
+ else if (smrt > 9375)
+ AFCA = 3;
+ else if (smrt > 2250)
+ AFCA = 2;
+ else
+ AFCA = 1;
+
+ if (mb86a16_write(state, 0x2a, 0x02 | (afcexen << 5) | (AFCA << 2)) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int DAGC_data_set(struct mb86a16_state *state,
+ unsigned char DAGCA,
+ unsigned char DAGCW)
+{
+ if (mb86a16_write(state, 0x2d, (DAGCA << 3) | DAGCW) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
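+/* derive decimation ratio, clock select and rate select from the symbol rate (ksps) */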
+static void smrt_info_get(struct mb86a16_state *state, int rate)
+{
+ if (rate >= 37501) {
+ state->deci = 0; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 30001) {
+ state->deci = 0; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 26251) {
+ state->deci = 0; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 22501) {
+ state->deci = 0; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 18751) {
+ state->deci = 1; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 15001) {
+ state->deci = 1; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 13126) {
+ state->deci = 1; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 11251) {
+ state->deci = 1; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 9376) {
+ state->deci = 2; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 7501) {
+ state->deci = 2; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 6563) {
+ state->deci = 2; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 5626) {
+ state->deci = 2; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 4688) {
+ state->deci = 3; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 3751) {
+ state->deci = 3; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 3282) {
+ state->deci = 3; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 2814) {
+ state->deci = 3; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 2344) {
+ state->deci = 4; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 1876) {
+ state->deci = 4; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 1641) {
+ state->deci = 4; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 1407) {
+ state->deci = 4; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 1172) {
+ state->deci = 5; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 939) {
+ state->deci = 5; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 821) {
+ state->deci = 5; state->csel = 1; state->rsel = 0;
+ } else {
+ state->deci = 5; state->csel = 1; state->rsel = 1;
+ }
+
+ if (state->csel == 0)
+ state->master_clk = 92000;
+ else
+ state->master_clk = 61333;
+
+}
+
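+/*
+ * Probe the signal level at 98%, 100% and 102% of the nominal symbol rate
+ * and return 1 when the nominal rate shows a clear peak, 0 otherwise.
+ */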
+static int signal_det(struct mb86a16_state *state,
+ int smrt,
+ unsigned char *SIG)
+{
+
+ int ret ;
+ int smrtd ;
+ int wait_sym ;
+
+ u32 wait_t;
+ unsigned char S[3] ;
+ int i ;
+
+ if (*SIG > 45) {
+ if (CNTM_set(state, 2, 1, 2) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
+ return -1;
+ }
+ wait_sym = 40000;
+ } else {
+ if (CNTM_set(state, 3, 1, 2) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
+ return -1;
+ }
+ wait_sym = 80000;
+ }
+ for (i = 0; i < 3; i++) {
+ if (i == 0)
+ smrtd = smrt * 98 / 100;
+ else if (i == 1)
+ smrtd = smrt;
+ else
+ smrtd = smrt * 102 / 100;
+ smrt_info_get(state, smrtd);
+ smrt_set(state, smrtd);
+ srst(state);
+ wait_t = (wait_sym + 99 * smrtd / 100) / smrtd;
+ if (wait_t == 0)
+ wait_t = 1;
+ msleep_interruptible(10);
+ if (mb86a16_read(state, 0x37, &(S[i])) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+ }
+ if ((S[1] > S[0] * 112 / 100) &&
+ (S[1] > S[2] * 112 / 100)) {
+
+ ret = 1;
+ } else {
+ ret = 0;
+ }
+ *SIG = S[1];
+
+ if (CNTM_set(state, 0, 1, 2) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
+ return -1;
+ }
+
+ return ret;
+}
+
+static int rf_val_set(struct mb86a16_state *state,
+ int f,
+ int smrt,
+ unsigned char R)
+{
+ unsigned char C, F, B;
+ int M;
+ unsigned char rf_val[5];
+ int ack = -1;
+
+ if (smrt > 37750)
+ C = 1;
+ else if (smrt > 18875)
+ C = 2;
+ else if (smrt > 5500)
+ C = 3;
+ else
+ C = 4;
+
+ if (smrt > 30500)
+ F = 3;
+ else if (smrt > 9375)
+ F = 1;
+ else if (smrt > 4625)
+ F = 0;
+ else
+ F = 2;
+
+ if (f < 1060)
+ B = 0;
+ else if (f < 1175)
+ B = 1;
+ else if (f < 1305)
+ B = 2;
+ else if (f < 1435)
+ B = 3;
+ else if (f < 1570)
+ B = 4;
+ else if (f < 1715)
+ B = 5;
+ else if (f < 1845)
+ B = 6;
+ else if (f < 1980)
+ B = 7;
+ else if (f < 2080)
+ B = 8;
+ else
+ B = 9;
+
+ M = f * (1 << R) / 2;
+
+ rf_val[0] = 0x01 | (C << 3) | (F << 1);
+ rf_val[1] = (R << 5) | ((M & 0x1f000) >> 12);
+ rf_val[2] = (M & 0x00ff0) >> 4;
+ rf_val[3] = ((M & 0x0000f) << 4) | B;
+
+ /* Frequency Set */
+ if (mb86a16_write(state, 0x21, rf_val[0]) < 0)
+ ack = 0;
+ if (mb86a16_write(state, 0x22, rf_val[1]) < 0)
+ ack = 0;
+ if (mb86a16_write(state, 0x23, rf_val[2]) < 0)
+ ack = 0;
+ if (mb86a16_write(state, 0x24, rf_val[3]) < 0)
+ ack = 0;
+ if (mb86a16_write(state, 0x25, 0x01) < 0)
+ ack = 0;
+ if (ack == 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "RF Setup - I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
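+/* read the AFC monitor and convert it to a signed frequency error in kHz */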
+static int afcerr_chk(struct mb86a16_state *state)
+{
+ unsigned char AFCM_L, AFCM_H ;
+ int AFCM ;
+ int afcm, afcerr ;
+
+ if (mb86a16_read(state, 0x0e, &AFCM_L) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x0f, &AFCM_H) != 2)
+ goto err;
+
+ AFCM = (AFCM_H << 8) + AFCM_L;
+
+ if (AFCM > 2048)
+ afcm = AFCM - 4096;
+ else
+ afcm = AFCM;
+ afcerr = afcm * state->master_clk / 8192;
+
+ return afcerr;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int dagcm_val_get(struct mb86a16_state *state)
+{
+ int DAGCM;
+ unsigned char DAGCM_H, DAGCM_L;
+
+ if (mb86a16_read(state, 0x45, &DAGCM_L) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x46, &DAGCM_H) != 2)
+ goto err;
+
+ DAGCM = (DAGCM_H << 8) + DAGCM_L;
+
+ return DAGCM;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int mb86a16_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ u8 stat, stat2;
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ *status = 0;
+
+ if (mb86a16_read(state, MB86A16_SIG1, &stat) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_SIG2, &stat2) != 2)
+ goto err;
+ if ((stat > 25) && (stat2 > 25))
+ *status |= FE_HAS_SIGNAL;
+ if ((stat > 45) && (stat2 > 45))
+ *status |= FE_HAS_CARRIER;
+
+ if (mb86a16_read(state, MB86A16_STATUS, &stat) != 2)
+ goto err;
+
+ if (stat & 0x01)
+ *status |= FE_HAS_SYNC;
+ if (stat & 0x01)
+ *status |= FE_HAS_VITERBI;
+
+ if (mb86a16_read(state, MB86A16_FRAMESYNC, &stat) != 2)
+ goto err;
+
+ if ((stat & 0x0f) && (*status & FE_HAS_VITERBI))
+ *status |= FE_HAS_LOCK;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int sync_chk(struct mb86a16_state *state,
+ unsigned char *VIRM)
+{
+ unsigned char val;
+ int sync;
+
+ if (mb86a16_read(state, 0x0d, &val) != 2)
+ goto err;
+
+ dprintk(verbose, MB86A16_INFO, 1, "Status = %02x,", val);
+ sync = val & 0x01;
+ *VIRM = (val & 0x1c) >> 2;
+
+ return sync;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+
+}
+
+static int freqerr_chk(struct mb86a16_state *state,
+ int fTP,
+ int smrt,
+ int unit)
+{
+ unsigned char CRM, AFCML, AFCMH;
+ unsigned char temp1, temp2, temp3;
+ int crm, afcm, AFCM;
+ int crrerr, afcerr; /* kHz */
+ int frqerr; /* MHz */
+ int afcen, afcexen = 0;
+ int R, M, fOSC, fOSC_OFS;
+
+ if (mb86a16_read(state, 0x43, &CRM) != 2)
+ goto err;
+
+ if (CRM > 127)
+ crm = CRM - 256;
+ else
+ crm = CRM;
+
+ crrerr = smrt * crm / 256;
+ if (mb86a16_read(state, 0x49, &temp1) != 2)
+ goto err;
+
+ afcen = (temp1 & 0x04) >> 2;
+ if (afcen == 0) {
+ if (mb86a16_read(state, 0x2a, &temp1) != 2)
+ goto err;
+ afcexen = (temp1 & 0x20) >> 5;
+ }
+
+ if (afcen == 1) {
+ if (mb86a16_read(state, 0x0e, &AFCML) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x0f, &AFCMH) != 2)
+ goto err;
+ } else if (afcexen == 1) {
+ if (mb86a16_read(state, 0x2b, &AFCML) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x2c, &AFCMH) != 2)
+ goto err;
+ }
+ if ((afcen == 1) || (afcexen == 1)) {
+ smrt_info_get(state, smrt);
+ AFCM = ((AFCMH & 0x01) << 8) + AFCML;
+ if (AFCM > 255)
+ afcm = AFCM - 512;
+ else
+ afcm = AFCM;
+
+ afcerr = afcm * state->master_clk / 8192;
+ } else
+ afcerr = 0;
+
+ if (mb86a16_read(state, 0x22, &temp1) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x23, &temp2) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x24, &temp3) != 2)
+ goto err;
+
+ R = (temp1 & 0xe0) >> 5;
+ M = ((temp1 & 0x1f) << 12) + (temp2 << 4) + (temp3 >> 4);
+ if (R == 0)
+ fOSC = 2 * M;
+ else
+ fOSC = M;
+
+ fOSC_OFS = fOSC - fTP;
+
+ if (unit == 0) { /* MHz */
+ if (crrerr + afcerr + fOSC_OFS * 1000 >= 0)
+ frqerr = (crrerr + afcerr + fOSC_OFS * 1000 + 500) / 1000;
+ else
+ frqerr = (crrerr + afcerr + fOSC_OFS * 1000 - 500) / 1000;
+ } else { /* kHz */
+ frqerr = crrerr + afcerr + fOSC_OFS * 1000;
+ }
+
+ return frqerr;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static unsigned char vco_dev_get(struct mb86a16_state *state, int smrt)
+{
+ unsigned char R;
+
+ if (smrt > 9375)
+ R = 0;
+ else
+ R = 1;
+
+ return R;
+}
+
+static void swp_info_get(struct mb86a16_state *state,
+ int fOSC_start,
+ int smrt,
+ int v, int R,
+ int swp_ofs,
+ int *fOSC,
+ int *afcex_freq,
+ unsigned char *AFCEX_L,
+ unsigned char *AFCEX_H)
+{
+ int AFCEX ;
+ int crnt_swp_freq ;
+
+ crnt_swp_freq = fOSC_start * 1000 + v * swp_ofs;
+
+ if (R == 0)
+ *fOSC = (crnt_swp_freq + 1000) / 2000 * 2;
+ else
+ *fOSC = (crnt_swp_freq + 500) / 1000;
+
+ if (*fOSC >= crnt_swp_freq)
+ *afcex_freq = *fOSC * 1000 - crnt_swp_freq;
+ else
+ *afcex_freq = crnt_swp_freq - *fOSC * 1000;
+
+ AFCEX = *afcex_freq * 8192 / state->master_clk;
+ *AFCEX_L = AFCEX & 0x00ff;
+ *AFCEX_H = (AFCEX & 0x0f00) >> 8;
+}
+
+
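+/*
+ * Evaluate the sweep signal-level history in V[] and return the estimated
+ * carrier frequency in kHz for the current step, or -1 if nothing was found.
+ */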
+static int swp_freq_calcuation(struct mb86a16_state *state, int i, int v, int *V, int vmax, int vmin,
+ int SIGMIN, int fOSC, int afcex_freq, int swp_ofs, unsigned char *SIG1)
+{
+ int swp_freq ;
+
+ if ((i % 2 == 1) && (v <= vmax)) {
+ /* positive v (case 1) */
+ if ((v - 1 == vmin) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v - 1) >= 0) &&
+ (*(V + 30 + v - 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v - 1) > SIGMIN)) {
+
+ swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
+ *SIG1 = *(V + 30 + v - 1);
+ } else if ((v == vmax) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v - 1) >= 0) &&
+ (*(V + 30 + v) > *(V + 30 + v - 1)) &&
+ (*(V + 30 + v) > SIGMIN)) {
+ /* (case 2) */
+ swp_freq = fOSC * 1000 + afcex_freq;
+ *SIG1 = *(V + 30 + v);
+ } else if ((*(V + 30 + v) > 0) &&
+ (*(V + 30 + v - 1) > 0) &&
+ (*(V + 30 + v - 2) > 0) &&
+ (*(V + 30 + v - 3) > 0) &&
+ (*(V + 30 + v - 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v - 2) > *(V + 30 + v - 3)) &&
+ ((*(V + 30 + v - 1) > SIGMIN) ||
+ (*(V + 30 + v - 2) > SIGMIN))) {
+ /* (case 3) */
+ if (*(V + 30 + v - 1) >= *(V + 30 + v - 2)) {
+ swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
+ *SIG1 = *(V + 30 + v - 1);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq - swp_ofs * 2;
+ *SIG1 = *(V + 30 + v - 2);
+ }
+ } else if ((v == vmax) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v - 1) >= 0) &&
+ (*(V + 30 + v - 2) >= 0) &&
+ (*(V + 30 + v) > *(V + 30 + v - 2)) &&
+ (*(V + 30 + v - 1) > *(V + 30 + v - 2)) &&
+ ((*(V + 30 + v) > SIGMIN) ||
+ (*(V + 30 + v - 1) > SIGMIN))) {
+ /* (case 4) */
+ if (*(V + 30 + v) >= *(V + 30 + v - 1)) {
+ swp_freq = fOSC * 1000 + afcex_freq;
+ *SIG1 = *(V + 30 + v);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
+ *SIG1 = *(V + 30 + v - 1);
+ }
+ } else {
+ swp_freq = -1 ;
+ }
+ } else if ((i % 2 == 0) && (v >= vmin)) {
+ /* Negative v (case 1) */
+ if ((*(V + 30 + v) > 0) &&
+ (*(V + 30 + v + 1) > 0) &&
+ (*(V + 30 + v + 2) > 0) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v + 2)) &&
+ (*(V + 30 + v + 1) > SIGMIN)) {
+
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v + 1);
+ } else if ((v + 1 == vmax) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v + 1) >= 0) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v + 1) > SIGMIN)) {
+ /* (case 2) */
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v);
+ } else if ((v == vmin) &&
+ (*(V + 30 + v) > 0) &&
+ (*(V + 30 + v + 1) > 0) &&
+ (*(V + 30 + v + 2) > 0) &&
+ (*(V + 30 + v) > *(V + 30 + v + 1)) &&
+ (*(V + 30 + v) > *(V + 30 + v + 2)) &&
+ (*(V + 30 + v) > SIGMIN)) {
+ /* (case 3) */
+ swp_freq = fOSC * 1000 + afcex_freq;
+ *SIG1 = *(V + 30 + v);
+ } else if ((*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v + 1) >= 0) &&
+ (*(V + 30 + v + 2) >= 0) &&
+ (*(V + 30 + v + 3) >= 0) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v + 2) > *(V + 30 + v + 3)) &&
+ ((*(V + 30 + v + 1) > SIGMIN) ||
+ (*(V + 30 + v + 2) > SIGMIN))) {
+ /* (case 4) */
+ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v + 1);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2;
+ *SIG1 = *(V + 30 + v + 2);
+ }
+ } else if ((*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v + 1) >= 0) &&
+ (*(V + 30 + v + 2) >= 0) &&
+ (*(V + 30 + v + 3) >= 0) &&
+ (*(V + 30 + v) > *(V + 30 + v + 2)) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v + 2)) &&
+ (*(V + 30 + v) > *(V + 30 + v + 3)) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v + 3)) &&
+ ((*(V + 30 + v) > SIGMIN) ||
+ (*(V + 30 + v + 1) > SIGMIN))) {
+ /* (case 5) */
+ if (*(V + 30 + v) >= *(V + 30 + v + 1)) {
+ swp_freq = fOSC * 1000 + afcex_freq;
+ *SIG1 = *(V + 30 + v);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v + 1);
+ }
+ } else if ((v + 2 == vmin) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v + 1) >= 0) &&
+ (*(V + 30 + v + 2) >= 0) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v + 2) > *(V + 30 + v)) &&
+ ((*(V + 30 + v + 1) > SIGMIN) ||
+ (*(V + 30 + v + 2) > SIGMIN))) {
+ /* (case 6) */
+ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v + 1);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2;
+ *SIG1 = *(V + 30 + v + 2);
+ }
+ } else if ((vmax == 0) && (vmin == 0) && (*(V + 30 + v) > SIGMIN)) {
+ swp_freq = fOSC * 1000;
+ *SIG1 = *(V + 30 + v);
+ } else
+ swp_freq = -1;
+ } else
+ swp_freq = -1;
+
+ return swp_freq;
+}
+
+static void swp_info_get2(struct mb86a16_state *state,
+ int smrt,
+ int R,
+ int swp_freq,
+ int *afcex_freq,
+ int *fOSC,
+ unsigned char *AFCEX_L,
+ unsigned char *AFCEX_H)
+{
+ int AFCEX ;
+
+ if (R == 0)
+ *fOSC = (swp_freq + 1000) / 2000 * 2;
+ else
+ *fOSC = (swp_freq + 500) / 1000;
+
+ if (*fOSC >= swp_freq)
+ *afcex_freq = *fOSC * 1000 - swp_freq;
+ else
+ *afcex_freq = swp_freq - *fOSC * 1000;
+
+ AFCEX = *afcex_freq * 8192 / state->master_clk;
+ *AFCEX_L = AFCEX & 0x00ff;
+ *AFCEX_H = (AFCEX & 0x0f00) >> 8;
+}
+
+static void afcex_info_get(struct mb86a16_state *state,
+ int afcex_freq,
+ unsigned char *AFCEX_L,
+ unsigned char *AFCEX_H)
+{
+ int AFCEX ;
+
+ AFCEX = afcex_freq * 8192 / state->master_clk;
+ *AFCEX_L = AFCEX & 0x00ff;
+ *AFCEX_H = (AFCEX & 0x0f00) >> 8;
+}
+
+static int SEQ_set(struct mb86a16_state *state, unsigned char loop)
+{
+ /* SLOCK0 = 0 */
+ if (mb86a16_write(state, 0x32, 0x02 | (loop << 2)) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int iq_vt_set(struct mb86a16_state *state, unsigned char IQINV)
+{
+ /* Viterbi Rate, IQ Settings */
+ if (mb86a16_write(state, 0x06, 0xdf | (IQINV << 5)) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int FEC_srst(struct mb86a16_state *state)
+{
+ if (mb86a16_write(state, MB86A16_RESET, 0x02) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int S2T_set(struct mb86a16_state *state, unsigned char S2T)
+{
+ if (mb86a16_write(state, 0x34, 0x70 | S2T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int S45T_set(struct mb86a16_state *state, unsigned char S4T, unsigned char S5T)
+{
+ if (mb86a16_write(state, 0x35, 0x00 | (S5T << 4) | S4T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+
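+/*
+ * Sweep the tuner around the requested frequency looking for a carrier,
+ * refine the frequency estimate via AFC/DAGC and wait for Viterbi and
+ * frame sync.  Returns 0 on lock, -1 otherwise.
+ */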
+static int mb86a16_set_fe(struct mb86a16_state *state)
+{
+ u8 agcval, cnmval;
+
+ int i, j;
+ int fOSC = 0;
+ int fOSC_start = 0;
+ int wait_t;
+ int fcp;
+ int swp_ofs;
+ int V[60];
+ u8 SIG1MIN;
+
+ unsigned char CREN, AFCEN, AFCEXEN;
+ unsigned char SIG1;
+ unsigned char TIMINT1, TIMINT2, TIMEXT;
+ unsigned char S0T, S1T;
+ unsigned char S2T;
+/* unsigned char S2T, S3T; */
+ unsigned char S4T, S5T;
+ unsigned char AFCEX_L, AFCEX_H;
+ unsigned char R;
+ unsigned char VIRM;
+ unsigned char ETH, VIA;
+ unsigned char junk;
+
+ int loop;
+ int ftemp;
+ int v, vmax, vmin;
+ int vmax_his, vmin_his;
+ int swp_freq, prev_swp_freq[20];
+ int prev_freq_num;
+ int signal_dupl;
+ int afcex_freq;
+ int signal;
+ int afcerr;
+ int temp_freq, delta_freq;
+ int dagcm[4];
+ int smrt_d;
+/* int freq_err; */
+ int n;
+ int ret = -1;
+ int sync;
+
+ dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
+
+ fcp = 3000;
+ swp_ofs = state->srate / 4;
+
+ for (i = 0; i < 60; i++)
+ V[i] = -1;
+
+ for (i = 0; i < 20; i++)
+ prev_swp_freq[i] = 0;
+
+ SIG1MIN = 25;
+
+ for (n = 0; ((n < 3) && (ret == -1)); n++) {
+ SEQ_set(state, 0);
+ iq_vt_set(state, 0);
+
+ CREN = 0;
+ AFCEN = 0;
+ AFCEXEN = 1;
+ TIMINT1 = 0;
+ TIMINT2 = 1;
+ TIMEXT = 2;
+ S1T = 0;
+ S0T = 0;
+
+ if (initial_set(state) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "initial set failed");
+ return -1;
+ }
+ if (DAGC_data_set(state, 3, 2) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
+ return -1;
+ }
+ if (EN_set(state, CREN, AFCEN) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
+ return -1; /* (0, 0) */
+ }
+ if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
+ return -1; /* (1, smrt) = (1, symbolrate) */
+ }
+ if (CNTM_set(state, TIMINT1, TIMINT2, TIMEXT) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "CNTM set error");
+ return -1; /* (0, 1, 2) */
+ }
+ if (S01T_set(state, S1T, S0T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
+ return -1; /* (0, 0) */
+ }
+ smrt_info_get(state, state->srate);
+ if (smrt_set(state, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt info get error");
+ return -1;
+ }
+
+ R = vco_dev_get(state, state->srate);
+ if (R == 1)
+ fOSC_start = state->frequency;
+
+ else if (R == 0) {
+ if (state->frequency % 2 == 0) {
+ fOSC_start = state->frequency;
+ } else {
+ fOSC_start = state->frequency + 1;
+ if (fOSC_start > 2150)
+ fOSC_start = state->frequency - 1;
+ }
+ }
+ loop = 1;
+ ftemp = fOSC_start * 1000;
+ vmax = 0 ;
+ while (loop == 1) {
+ ftemp = ftemp + swp_ofs;
+ vmax++;
+
+ /* Upper bound */
+ if (ftemp > 2150000) {
+ loop = 0;
+ vmax--;
+ } else {
+ if ((ftemp == 2150000) ||
+ (ftemp - state->frequency * 1000 >= fcp + state->srate / 4))
+ loop = 0;
+ }
+ }
+
+ loop = 1;
+ ftemp = fOSC_start * 1000;
+ vmin = 0 ;
+ while (loop == 1) {
+ ftemp = ftemp - swp_ofs;
+ vmin--;
+
+ /* Lower bound */
+ if (ftemp < 950000) {
+ loop = 0;
+ vmin++;
+ } else {
+ if ((ftemp == 950000) ||
+ (state->frequency * 1000 - ftemp >= fcp + state->srate / 4))
+ loop = 0;
+ }
+ }
+
+ wait_t = (8000 + state->srate / 2) / state->srate;
+ if (wait_t == 0)
+ wait_t = 1;
+
+ i = 0;
+ j = 0;
+ prev_freq_num = 0;
+ loop = 1;
+ signal = 0;
+ vmax_his = 0;
+ vmin_his = 0;
+ v = 0;
+
+ while (loop == 1) {
+ swp_info_get(state, fOSC_start, state->srate,
+ v, R, swp_ofs, &fOSC,
+ &afcex_freq, &AFCEX_L, &AFCEX_H);
+
+ udelay(100);
+ if (rf_val_set(state, fOSC, state->srate, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ udelay(100);
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
+ return -1;
+ }
+ if (srst(state) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "srst error");
+ return -1;
+ }
+ msleep_interruptible(wait_t);
+
+ if (mb86a16_read(state, 0x37, &SIG1) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -1;
+ }
+ V[30 + v] = SIG1 ;
+ swp_freq = swp_freq_calcuation(state, i, v, V, vmax, vmin,
+ SIG1MIN, fOSC, afcex_freq,
+ swp_ofs, &SIG1); /* changed */
+
+ signal_dupl = 0;
+ for (j = 0; j < prev_freq_num; j++) {
+ if ((ABS(prev_swp_freq[j] - swp_freq)) < (swp_ofs * 3 / 2)) {
+ signal_dupl = 1;
+ dprintk(verbose, MB86A16_INFO, 1, "Probably Duplicate Signal, j = %d", j);
+ }
+ }
+ if ((signal_dupl == 0) && (swp_freq > 0) && (ABS(swp_freq - state->frequency * 1000) < fcp + state->srate / 6)) {
+ dprintk(verbose, MB86A16_DEBUG, 1, "------ Signal detect ------ [swp_freq=[%07d, srate=%05d]]", swp_freq, state->srate);
+ prev_swp_freq[prev_freq_num] = swp_freq;
+ prev_freq_num++;
+ swp_info_get2(state, state->srate, R, swp_freq,
+ &afcex_freq, &fOSC,
+ &AFCEX_L, &AFCEX_H);
+
+ if (rf_val_set(state, fOSC, state->srate, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
+ return -1;
+ }
+ signal = signal_det(state, state->srate, &SIG1);
+ if (signal == 1) {
+ dprintk(verbose, MB86A16_ERROR, 1, "***** Signal Found *****");
+ loop = 0;
+ } else {
+ dprintk(verbose, MB86A16_ERROR, 1, "!!!!! No signal !!!!!, try again...");
+ smrt_info_get(state, state->srate);
+ if (smrt_set(state, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
+ return -1;
+ }
+ }
+ }
+ if (v > vmax)
+ vmax_his = 1 ;
+ if (v < vmin)
+ vmin_his = 1 ;
+ i++;
+
+ if ((i % 2 == 1) && (vmax_his == 1))
+ i++;
+ if ((i % 2 == 0) && (vmin_his == 1))
+ i++;
+
+ if (i % 2 == 1)
+ v = (i + 1) / 2;
+ else
+ v = -i / 2;
+
+ if ((vmax_his == 1) && (vmin_his == 1))
+ loop = 0 ;
+ }
+
+ if (signal == 1) {
+ dprintk(verbose, MB86A16_INFO, 1, " Start Freq Error Check");
+ S1T = 7 ;
+ S0T = 1 ;
+ CREN = 0 ;
+ AFCEN = 1 ;
+ AFCEXEN = 0 ;
+
+ if (S01T_set(state, S1T, S0T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
+ return -1;
+ }
+ smrt_info_get(state, state->srate);
+ if (smrt_set(state, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
+ return -1;
+ }
+ if (EN_set(state, CREN, AFCEN) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
+ return -1;
+ }
+ if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
+ return -1;
+ }
+ afcex_info_get(state, afcex_freq, &AFCEX_L, &AFCEX_H);
+ if (afcofs_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCOFS data set error");
+ return -1;
+ }
+ if (srst(state) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "srst error");
+ return -1;
+ }
+ /* delay 4~200 */
+ wait_t = 200000 / state->master_clk + 200000 / state->srate;
+ msleep(wait_t);
+ afcerr = afcerr_chk(state);
+ if (afcerr == -1)
+ return -1;
+
+ swp_freq = fOSC * 1000 + afcerr ;
+ AFCEXEN = 1 ;
+ if (state->srate >= 1500)
+ smrt_d = state->srate / 3;
+ else
+ smrt_d = state->srate / 2;
+ smrt_info_get(state, smrt_d);
+ if (smrt_set(state, smrt_d) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
+ return -1;
+ }
+ if (AFCEXEN_set(state, AFCEXEN, smrt_d) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
+ return -1;
+ }
+ R = vco_dev_get(state, smrt_d);
+ if (DAGC_data_set(state, 2, 0) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
+ return -1;
+ }
+ for (i = 0; i < 3; i++) {
+ temp_freq = swp_freq + (i - 1) * state->srate / 8;
+ swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
+ if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
+ return -1;
+ }
+ wait_t = 200000 / state->master_clk + 40000 / smrt_d;
+ msleep(wait_t);
+ dagcm[i] = dagcm_val_get(state);
+ }
+ if ((dagcm[0] > dagcm[1]) &&
+ (dagcm[0] > dagcm[2]) &&
+ (dagcm[0] - dagcm[1] > 2 * (dagcm[2] - dagcm[1]))) {
+
+ temp_freq = swp_freq - 2 * state->srate / 8;
+ swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
+ if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set");
+ return -1;
+ }
+ wait_t = 200000 / state->master_clk + 40000 / smrt_d;
+ msleep(wait_t);
+ dagcm[3] = dagcm_val_get(state);
+ if (dagcm[3] > dagcm[1])
+ delta_freq = (dagcm[2] - dagcm[0] + dagcm[1] - dagcm[3]) * state->srate / 300;
+ else
+ delta_freq = 0;
+ } else if ((dagcm[2] > dagcm[1]) &&
+ (dagcm[2] > dagcm[0]) &&
+ (dagcm[2] - dagcm[1] > 2 * (dagcm[0] - dagcm[1]))) {
+
+ temp_freq = swp_freq + 2 * state->srate / 8;
+ swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
+ if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set");
+ return -1;
+ }
+ wait_t = 200000 / state->master_clk + 40000 / smrt_d;
+ msleep(wait_t);
+ dagcm[3] = dagcm_val_get(state);
+ if (dagcm[3] > dagcm[1])
+ delta_freq = (dagcm[2] - dagcm[0] + dagcm[3] - dagcm[1]) * state->srate / 300;
+ else
+ delta_freq = 0 ;
+
+ } else {
+ delta_freq = 0 ;
+ }
+ dprintk(verbose, MB86A16_INFO, 1, "SWEEP Frequency = %d", swp_freq);
+ swp_freq += delta_freq;
+ dprintk(verbose, MB86A16_INFO, 1, "Adjusting .., DELTA Freq = %d, SWEEP Freq=%d", delta_freq, swp_freq);
+ if (ABS(state->frequency * 1000 - swp_freq) > 3800) {
+ dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL !");
+ } else {
+
+ S1T = 0;
+ S0T = 3;
+ CREN = 1;
+ AFCEN = 0;
+ AFCEXEN = 1;
+
+ if (S01T_set(state, S1T, S0T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
+ return -1;
+ }
+ if (DAGC_data_set(state, 0, 0) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
+ return -1;
+ }
+ R = vco_dev_get(state, state->srate);
+ smrt_info_get(state, state->srate);
+ if (smrt_set(state, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
+ return -1;
+ }
+ if (EN_set(state, CREN, AFCEN) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
+ return -1;
+ }
+ if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
+ return -1;
+ }
+ swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
+ if (rf_val_set(state, fOSC, state->srate, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
+ return -1;
+ }
+ if (srst(state) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "srst error");
+ return -1;
+ }
+ wait_t = 7 + (10000 + state->srate / 2) / state->srate;
+ if (wait_t == 0)
+ wait_t = 1;
+ msleep_interruptible(wait_t);
+ if (mb86a16_read(state, 0x37, &SIG1) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ if (SIG1 > 110) {
+ S2T = 4; S4T = 1; S5T = 6; ETH = 4; VIA = 6;
+ wait_t = 7 + (917504 + state->srate / 2) / state->srate;
+ } else if (SIG1 > 105) {
+ S2T = 4; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
+ wait_t = 7 + (1048576 + state->srate / 2) / state->srate;
+ } else if (SIG1 > 85) {
+ S2T = 5; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
+ wait_t = 7 + (1310720 + state->srate / 2) / state->srate;
+ } else if (SIG1 > 65) {
+ S2T = 6; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
+ wait_t = 7 + (1572864 + state->srate / 2) / state->srate;
+ } else {
+ S2T = 7; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
+ wait_t = 7 + (2097152 + state->srate / 2) / state->srate;
+ }
+ wait_t *= 2; /* FOS */
+ S2T_set(state, S2T);
+ S45T_set(state, S4T, S5T);
+ Vi_set(state, ETH, VIA);
+ srst(state);
+ msleep_interruptible(wait_t);
+ sync = sync_chk(state, &VIRM);
+ dprintk(verbose, MB86A16_INFO, 1, "-------- Viterbi=[%d] SYNC=[%d] ---------", VIRM, sync);
+ if (VIRM) {
+ if (VIRM == 4) {
+ /* 5/6 */
+ if (SIG1 > 110)
+ wait_t = (786432 + state->srate / 2) / state->srate;
+ else
+ wait_t = (1572864 + state->srate / 2) / state->srate;
+ if (state->srate < 5000)
+ /* FIXME ! , should be a long wait ! */
+ msleep_interruptible(wait_t);
+ else
+ msleep_interruptible(wait_t);
+
+ if (sync_chk(state, &junk) == 0) {
+ iq_vt_set(state, 1);
+ FEC_srst(state);
+ }
+ }
+ /* 1/2, 2/3, 3/4, 7/8 */
+ if (SIG1 > 110)
+ wait_t = (786432 + state->srate / 2) / state->srate;
+ else
+ wait_t = (1572864 + state->srate / 2) / state->srate;
+ msleep_interruptible(wait_t);
+ SEQ_set(state, 1);
+ } else {
+ dprintk(verbose, MB86A16_INFO, 1, "NO -- SYNC");
+ SEQ_set(state, 1);
+ ret = -1;
+ }
+ }
+ } else {
+ dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL");
+ ret = -1;
+ }
+
+ sync = sync_chk(state, &junk);
+ if (sync) {
+ dprintk(verbose, MB86A16_INFO, 1, "******* SYNC *******");
+ freqerr_chk(state, state->frequency, state->srate, 1);
+ ret = 0;
+ break;
+ }
+ }
+
+ mb86a16_read(state, 0x15, &agcval);
+ mb86a16_read(state, 0x26, &cnmval);
+ dprintk(verbose, MB86A16_INFO, 1, "AGC = %02x CNM = %02x", agcval, cnmval);
+
+ return ret;
+}
+
+static int mb86a16_send_diseqc_msg(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+ int i;
+ u8 regs;
+
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0)
+ goto err;
+
+ regs = 0x18;
+
+ if (cmd->msg_len > 5 || cmd->msg_len < 4)
+ return -EINVAL;
+
+ for (i = 0; i < cmd->msg_len; i++) {
+ if (mb86a16_write(state, regs, cmd->msg[i]) < 0)
+ goto err;
+
+ regs++;
+ }
+ i += 0x90;
+
+ msleep_interruptible(10);
+
+ if (mb86a16_write(state, MB86A16_DCC1, i) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int mb86a16_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ switch (burst) {
+ case SEC_MINI_A:
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
+ MB86A16_DCC1_TBEN |
+ MB86A16_DCC1_TBO) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
+ goto err;
+ break;
+ case SEC_MINI_B:
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
+ MB86A16_DCC1_TBEN) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
+ goto err;
+ break;
+ }
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int mb86a16_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ switch (tone) {
+ case SEC_TONE_ON:
+ if (mb86a16_write(state, MB86A16_TONEOUT2, 0x00) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
+ MB86A16_DCC1_CTOE) < 0)
+
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
+ goto err;
+ break;
+ case SEC_TONE_OFF:
+ if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0)
+ goto err;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static enum dvbfe_search mb86a16_search(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ state->frequency = p->frequency / 1000;
+ state->srate = p->u.qpsk.symbol_rate / 1000;
+
+ if (!mb86a16_set_fe(state)) {
+ dprintk(verbose, MB86A16_ERROR, 1, "Succesfully acquired LOCK");
+ return DVBFE_ALGO_SEARCH_SUCCESS;
+ }
+
+ dprintk(verbose, MB86A16_ERROR, 1, "Lock acquisition failed!");
+ return DVBFE_ALGO_SEARCH_FAILED;
+}
+
+static void mb86a16_release(struct dvb_frontend *fe)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+ kfree(state);
+}
+
+static int mb86a16_init(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int mb86a16_sleep(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int mb86a16_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ u8 ber_mon, ber_tab, ber_lsb, ber_mid, ber_msb, ber_tim, ber_rst;
+ u32 timer;
+
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ *ber = 0;
+ if (mb86a16_read(state, MB86A16_BERMON, &ber_mon) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_BERTAB, &ber_tab) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_BERLSB, &ber_lsb) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_BERMID, &ber_mid) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_BERMSB, &ber_msb) != 2)
+ goto err;
+ /* BER monitor invalid when BER_EN = 0 */
+ if (ber_mon & 0x04) {
+ /* coarse, fast calculation */
+ *ber = ber_tab & 0x1f;
+ dprintk(verbose, MB86A16_DEBUG, 1, "BER coarse=[0x%02x]", *ber);
+ if (ber_mon & 0x01) {
+ /*
+ * BER_SEL = 1, The monitored BER is the estimated
+ * value with a Reed-Solomon decoder error amount at
+ * the deinterleaver output.
+ * monitored BER is expressed as a 20 bit output in total
+ */
+ ber_rst = ber_mon >> 3;
+ *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
+ if (ber_rst == 0)
+ timer = 12500000;
+ if (ber_rst == 1)
+ timer = 25000000;
+ if (ber_rst == 2)
+ timer = 50000000;
+ if (ber_rst == 3)
+ timer = 100000000;
+
+ *ber /= timer;
+ dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
+ } else {
+ /*
+ * BER_SEL = 0, The monitored BER is the estimated
+ * value with a Viterbi decoder error amount at the
+ * QPSK demodulator output.
+ * monitored BER is expressed as a 24 bit output in total
+ */
+ ber_tim = ber_mon >> 1;
+ *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
+ if (ber_tim == 0)
+ timer = 16;
+ if (ber_tim == 1)
+ timer = 24;
+
+			/* measurement window is 2^timer bits; note '^' is XOR in C */
+			*ber /= 1 << timer;
+ dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
+ }
+ }
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int mb86a16_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ u8 agcm = 0;
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ *strength = 0;
+ if (mb86a16_read(state, MB86A16_AGCM, &agcm) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ *strength = ((0xff - agcm) * 100) / 256;
+ dprintk(verbose, MB86A16_DEBUG, 1, "Signal strength=[%d %%]", (u8) *strength);
+ *strength = (0xffff - 0xff) + agcm;
+
+ return 0;
+}
+
+struct cnr {
+ u8 cn_reg;
+ u8 cn_val;
+};
+
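+/* mapping of C/N monitor register readings to an approximate C/N in dB */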
+static const struct cnr cnr_tab[] = {
+ { 35, 2 },
+ { 40, 3 },
+ { 50, 4 },
+ { 60, 5 },
+ { 70, 6 },
+ { 80, 7 },
+ { 92, 8 },
+ { 103, 9 },
+ { 115, 10 },
+ { 138, 12 },
+ { 162, 15 },
+ { 180, 18 },
+ { 185, 19 },
+ { 189, 20 },
+ { 195, 22 },
+ { 199, 24 },
+ { 201, 25 },
+ { 202, 26 },
+ { 203, 27 },
+ { 205, 28 },
+ { 208, 30 }
+};
+
+static int mb86a16_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+ int i = 0;
+ int low_tide = 2, high_tide = 30, q_level;
+ u8 cn;
+
+ *snr = 0;
+ if (mb86a16_read(state, 0x26, &cn) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cnr_tab); i++) {
+ if (cn < cnr_tab[i].cn_reg) {
+ *snr = cnr_tab[i].cn_val;
+ break;
+ }
+ }
+ q_level = (*snr * 100) / (high_tide - low_tide);
+ dprintk(verbose, MB86A16_ERROR, 1, "SNR (Quality) = [%d dB], Level=%d %%", *snr, q_level);
+ *snr = (0xffff - 0xff) + *snr;
+
+ return 0;
+}
+
+static int mb86a16_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ u8 dist;
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ if (mb86a16_read(state, MB86A16_DISTMON, &dist) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+ *ucblocks = dist;
+
+ return 0;
+}
+
+static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_CUSTOM;
+}
+
+static struct dvb_frontend_ops mb86a16_ops = {
+ .info = {
+ .name = "Fujitsu MB86A16 DVB-S",
+ .type = FE_QPSK,
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_stepsize = 3000,
+ .frequency_tolerance = 0,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .symbol_rate_tolerance = 500,
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 | FE_CAN_QPSK |
+ FE_CAN_FEC_AUTO
+ },
+ .release = mb86a16_release,
+
+ .get_frontend_algo = mb86a16_frontend_algo,
+ .search = mb86a16_search,
+ .read_status = mb86a16_read_status,
+ .init = mb86a16_init,
+ .sleep = mb86a16_sleep,
+
+ .read_ber = mb86a16_read_ber,
+ .read_signal_strength = mb86a16_read_signal_strength,
+ .read_snr = mb86a16_read_snr,
+ .read_ucblocks = mb86a16_read_ucblocks,
+
+ .diseqc_send_master_cmd = mb86a16_send_diseqc_msg,
+ .diseqc_send_burst = mb86a16_send_diseqc_burst,
+ .set_tone = mb86a16_set_tone,
+};
+
+struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ u8 dev_id = 0;
+ struct mb86a16_state *state = NULL;
+
+ state = kmalloc(sizeof(struct mb86a16_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ state->config = config;
+ state->i2c_adap = i2c_adap;
+
+ mb86a16_read(state, 0x7f, &dev_id);
+ if (dev_id != 0xfe)
+ goto error;
+
+ memcpy(&state->frontend.ops, &mb86a16_ops, sizeof(struct dvb_frontend_ops));
+ state->frontend.demodulator_priv = state;
+ state->frontend.ops.set_voltage = state->config->set_voltage;
+
+ return &state->frontend;
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(mb86a16_attach);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb/frontends/mb86a16.h b/drivers/media/dvb/frontends/mb86a16.h
new file mode 100644
index 0000000..6ea8c37
--- /dev/null
+++ b/drivers/media/dvb/frontends/mb86a16.h
@@ -0,0 +1,52 @@
+/*
+ Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MB86A16_H
+#define __MB86A16_H
+
+#include <linux/dvb/frontend.h>
+#include "dvb_frontend.h"
+
+
+struct mb86a16_config {
+ u8 demod_address;
+
+ int (*set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
+};
+
+
+
+#if defined(CONFIG_DVB_MB86A16) || (defined(CONFIG_DVB_MB86A16_MODULE) && defined(MODULE))
+
+extern struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
+ struct i2c_adapter *i2c_adap);
+
+#else
+
+static inline struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_MB86A16 */
+
+#endif /* __MB86A16_H */
diff --git a/drivers/media/dvb/frontends/mb86a16_priv.h b/drivers/media/dvb/frontends/mb86a16_priv.h
new file mode 100644
index 0000000..360a35a
--- /dev/null
+++ b/drivers/media/dvb/frontends/mb86a16_priv.h
@@ -0,0 +1,151 @@
+/*
+ Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MB86A16_PRIV_H
+#define __MB86A16_PRIV_H
+
+#define MB86A16_TSOUT 0x00
+#define MB86A16_TSOUT_HIZSEL (0x01 << 5)
+#define MB86A16_TSOUT_HIZCNTI (0x01 << 4)
+#define MB86A16_TSOUT_MODE (0x01 << 3)
+#define MB86A16_TSOUT_ORDER (0x01 << 2)
+#define MB86A16_TSOUT_ERROR (0x01 << 1)
+#define Mb86A16_TSOUT_EDGE (0x01 << 0)
+
+#define MB86A16_FEC 0x01
+#define MB86A16_FEC_FSYNC (0x01 << 5)
+#define MB86A16_FEC_PCKB8 (0x01 << 4)
+#define MB86A16_FEC_DVDS (0x01 << 3)
+#define MB86A16_FEC_EREN (0x01 << 2)
+#define Mb86A16_FEC_RSEN (0x01 << 1)
+#define MB86A16_FEC_DIEN (0x01 << 0)
+
+#define MB86A16_AGC 0x02
+#define MB86A16_AGC_AGMD (0x01 << 6)
+#define MB86A16_AGC_AGCW (0x0f << 2)
+#define MB86A16_AGC_AGCP (0x01 << 1)
+#define MB86A16_AGC_AGCR (0x01 << 0)
+
+#define MB86A16_SRATE1 0x03
+#define MB86A16_SRATE1_DECI (0x07 << 2)
+#define MB86A16_SRATE1_CSEL (0x01 << 1)
+#define MB86A16_SRATE1_RSEL (0x01 << 0)
+
+#define MB86A16_SRATE2 0x04
+#define MB86A16_SRATE2_STOFSL (0xff << 0)
+
+#define MB86A16_SRATE3 0x05
+#define MB86A16_SRATE2_STOFSH (0xff << 0)
+
+#define MB86A16_VITERBI 0x06
+#define MB86A16_FRAMESYNC 0x07
+#define MB86A16_CRLFILTCOEF1 0x08
+#define MB86A16_CRLFILTCOEF2 0x09
+#define MB86A16_STRFILTCOEF1 0x0a
+#define MB86A16_STRFILTCOEF2 0x0b
+#define MB86A16_RESET 0x0c
+#define MB86A16_STATUS 0x0d
+#define MB86A16_AFCML 0x0e
+#define MB86A16_AFCMH 0x0f
+#define MB86A16_BERMON 0x10
+#define MB86A16_BERTAB 0x11
+#define MB86A16_BERLSB 0x12
+#define MB86A16_BERMID 0x13
+#define MB86A16_BERMSB 0x14
+#define MB86A16_AGCM 0x15
+
+#define MB86A16_DCC1 0x16
+#define MB86A16_DCC1_DISTA (0x01 << 7)
+#define MB86A16_DCC1_PRTY (0x01 << 6)
+#define MB86A16_DCC1_CTOE (0x01 << 5)
+#define MB86A16_DCC1_TBEN (0x01 << 4)
+#define MB86A16_DCC1_TBO (0x01 << 3)
+#define MB86A16_DCC1_NUM (0x07 << 0)
+
+#define MB86A16_DCC2 0x17
+#define MB86A16_DCC2_DCBST (0x01 << 0)
+
+#define MB86A16_DCC3 0x18
+#define MB86A16_DCC3_CODE0 (0xff << 0)
+
+#define MB86A16_DCC4 0x19
+#define MB86A16_DCC4_CODE1 (0xff << 0)
+
+#define MB86A16_DCC5 0x1a
+#define MB86A16_DCC5_CODE2 (0xff << 0)
+
+#define MB86A16_DCC6 0x1b
+#define MB86A16_DCC6_CODE3 (0xff << 0)
+
+#define MB86A16_DCC7 0x1c
+#define MB86A16_DCC7_CODE4 (0xff << 0)
+
+#define MB86A16_DCC8 0x1d
+#define MB86A16_DCC8_CODE5 (0xff << 0)
+
+#define MB86A16_DCCOUT 0x1e
+#define MB86A16_DCCOUT_DISEN (0x01 << 0)
+
+#define MB86A16_TONEOUT1 0x1f
+#define MB86A16_TONE_TDIVL (0xff << 0)
+
+#define MB86A16_TONEOUT2 0x20
+#define MB86A16_TONE_TMD (0x03 << 2)
+#define MB86A16_TONE_TDIVH (0x03 << 0)
+
+#define MB86A16_FREQ1 0x21
+#define MB86A16_FREQ2 0x22
+#define MB86A16_FREQ3 0x23
+#define MB86A16_FREQ4 0x24
+#define MB86A16_FREQSET 0x25
+#define MB86A16_CNM 0x26
+#define MB86A16_PORT0 0x27
+#define MB86A16_PORT1 0x28
+#define MB86A16_DRCFILT 0x29
+#define MB86A16_AFC 0x2a
+#define MB86A16_AFCEXL 0x2b
+#define MB86A16_AFCEXH 0x2c
+#define MB86A16_DAGC 0x2d
+#define MB86A16_SEQMODE 0x32
+#define MB86A16_S0S1T 0x33
+#define MB86A16_S2S3T 0x34
+#define MB86A16_S4S5T 0x35
+#define MB86A16_CNTMR 0x36
+#define MB86A16_SIG1 0x37
+#define MB86A16_SIG2 0x38
+#define MB86A16_VIMAG 0x39
+#define MB86A16_VISET1 0x3a
+#define MB86A16_VISET2 0x3b
+#define MB86A16_VISET3 0x3c
+#define MB86A16_FAGCS1 0x3d
+#define MB86A16_FAGCS2 0x3e
+#define MB86A16_FAGCS3 0x3f
+#define MB86A16_FAGCS4 0x40
+#define MB86A16_FAGCS5 0x41
+#define MB86A16_FAGCS6 0x42
+#define MB86A16_CRM 0x43
+#define MB86A16_STRM 0x44
+#define MB86A16_DAGCML 0x45
+#define MB86A16_DAGCMH 0x46
+#define MB86A16_QPSKTST 0x49
+#define MB86A16_DISTMON 0x52
+#define MB86A16_VERSION 0x7f
+
+#endif /* __MB86A16_PRIV_H */
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 6c1dbf9..6ca533e 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -426,6 +426,10 @@ struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
id = tda10021_readreg(state, 0x1a);
if ((id & 0xf0) != 0x70) goto error;
+ /* Don't claim TDA10023 */
+ if (id == 0x7d)
+ goto error;
+
printk("TDA10021: i2c-addr = 0x%02x, id = 0x%02x\n",
state->config->demod_address, id);
diff --git a/drivers/media/dvb/frontends/tda665x.c b/drivers/media/dvb/frontends/tda665x.c
new file mode 100644
index 0000000..87d5273
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda665x.c
@@ -0,0 +1,257 @@
+/*
+ TDA665x tuner driver
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "dvb_frontend.h"
+#include "tda665x.h"
+
+struct tda665x_state {
+ struct dvb_frontend *fe;
+ struct i2c_adapter *i2c;
+ const struct tda665x_config *config;
+
+ u32 frequency;
+ u32 bandwidth;
+};
+
+static int tda665x_read(struct tda665x_state *state, u8 *buf)
+{
+ const struct tda665x_config *config = state->config;
+ int err = 0;
+ struct i2c_msg msg = { .addr = config->addr, .flags = I2C_M_RD, .buf = buf, .len = 2 };
+
+ err = i2c_transfer(state->i2c, &msg, 1);
+ if (err != 1)
+ goto exit;
+
+ return err;
+exit:
+ printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err);
+ return err;
+}
+
+static int tda665x_write(struct tda665x_state *state, u8 *buf, u8 length)
+{
+ const struct tda665x_config *config = state->config;
+ int err = 0;
+ struct i2c_msg msg = { .addr = config->addr, .flags = 0, .buf = buf, .len = length };
+
+ err = i2c_transfer(state->i2c, &msg, 1);
+ if (err != 1)
+ goto exit;
+
+ return err;
+exit:
+ printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err);
+ return err;
+}
+
+static int tda665x_get_state(struct dvb_frontend *fe,
+ enum tuner_param param,
+ struct tuner_state *tstate)
+{
+ struct tda665x_state *state = fe->tuner_priv;
+ int err = 0;
+
+ switch (param) {
+ case DVBFE_TUNER_FREQUENCY:
+ tstate->frequency = state->frequency;
+ break;
+ case DVBFE_TUNER_BANDWIDTH:
+ break;
+ default:
+ printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int tda665x_get_status(struct dvb_frontend *fe, u32 *status)
+{
+ struct tda665x_state *state = fe->tuner_priv;
+	u8 result[2] = { 0 };	/* tda665x_read() always transfers two bytes */
+ int err = 0;
+
+ *status = 0;
+
+	err = tda665x_read(state, result);
+ if (err < 0)
+ goto exit;
+
+	if ((result[0] >> 6) & 0x01) {
+ printk(KERN_DEBUG "%s: Tuner Phase Locked\n", __func__);
+ *status = 1;
+ }
+
+ return err;
+exit:
+ printk(KERN_ERR "%s: I/O Error\n", __func__);
+ return err;
+}
+
+static int tda665x_set_state(struct dvb_frontend *fe,
+ enum tuner_param param,
+ struct tuner_state *tstate)
+{
+ struct tda665x_state *state = fe->tuner_priv;
+ const struct tda665x_config *config = state->config;
+ u32 frequency, status = 0;
+ u8 buf[4];
+ int err = 0;
+
+ if (param & DVBFE_TUNER_FREQUENCY) {
+
+ frequency = tstate->frequency;
+		if ((frequency < config->frequency_min) || (frequency > config->frequency_max)) {
+ printk(KERN_ERR "%s: Frequency beyond limits, frequency=%d\n", __func__, frequency);
+ return -EINVAL;
+ }
+
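+		/*
+		 * Turn the requested frequency into the PLL divider word:
+		 * apply the configured offset, scale by the reference
+		 * multiplier and round to the nearest step by adding half
+		 * the reference divider before dividing.
+		 */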
+ frequency += config->frequency_offst;
+ frequency *= config->ref_multiplier;
+ frequency += config->ref_divider >> 1;
+ frequency /= config->ref_divider;
+
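+		/*
+		 * buf[0]/buf[1]: divider word, buf[2]: control byte,
+		 * buf[3]: band-switch/charge-pump byte, filled in below.
+		 */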
+		buf[0] = (u8) ((frequency & 0x7f00) >> 8);
+		buf[1] = (u8) ((frequency & 0x00ff) >> 0);
+ buf[2] = 0x80 | 0x40 | 0x02;
+ buf[3] = 0x00;
+
+ /* restore frequency */
+ frequency = tstate->frequency;
+
+ if (frequency < 153000000) {
+ /* VHF-L */
+ buf[3] |= 0x01; /* fc, Low Band, 47 - 153 MHz */
+			if (frequency < 68000000)
+				buf[3] |= 0x40; /* 83uA */
+			else if (frequency < 104000000)
+				buf[3] |= 0x60; /* 122uA */
+			else if (frequency < 125000000)
+				buf[3] |= 0x80; /* 163uA */
+			else
+				buf[3] |= 0xa0; /* 254uA */
+ } else if (frequency < 438000000) {
+ /* VHF-H */
+ buf[3] |= 0x02; /* fc, Mid Band, 153 - 438 MHz */
+			if (frequency < 230000000)
+				buf[3] |= 0x40;
+			else if (frequency < 300000000)
+				buf[3] |= 0x60;
+			else
+				buf[3] |= 0x80;
+ } else {
+ /* UHF */
+ buf[3] |= 0x04; /* fc, High Band, 438 - 862 MHz */
+			if (frequency < 470000000)
+				buf[3] |= 0x60;
+			else if (frequency < 526000000)
+				buf[3] |= 0x80;
+			else
+				buf[3] |= 0xa0;
+ }
+
+ /* Set params */
+		err = tda665x_write(state, buf, sizeof(buf));
+ if (err < 0)
+ goto exit;
+
+ /* sleep for some time */
+ printk(KERN_DEBUG "%s: Waiting to Phase LOCK\n", __func__);
+ msleep(20);
+ /* check status */
+ err = tda665x_get_status(fe, &status);
+ if (err < 0)
+ goto exit;
+
+ if (status == 1) {
+ printk(KERN_DEBUG "%s: Tuner Phase locked: status=%d\n", __func__, status);
+ state->frequency = frequency; /* cache successful state */
+ } else {
+ printk(KERN_ERR "%s: No Phase lock: status=%d\n", __func__, status);
+ }
+ } else {
+ printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param);
+ return -EINVAL;
+ }
+
+ return 0;
+exit:
+ printk(KERN_ERR "%s: I/O Error\n", __func__);
+ return err;
+}
+
+static int tda665x_release(struct dvb_frontend *fe)
+{
+ struct tda665x_state *state = fe->tuner_priv;
+
+ fe->tuner_priv = NULL;
+ kfree(state);
+ return 0;
+}
+
+static struct dvb_tuner_ops tda665x_ops = {
+
+ .set_state = tda665x_set_state,
+ .get_state = tda665x_get_state,
+ .get_status = tda665x_get_status,
+ .release = tda665x_release
+};
+
+struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
+ const struct tda665x_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct tda665x_state *state = NULL;
+ struct dvb_tuner_info *info;
+
+ state = kzalloc(sizeof(struct tda665x_state), GFP_KERNEL);
+ if (state == NULL)
+ goto exit;
+
+ state->config = config;
+ state->i2c = i2c;
+ state->fe = fe;
+ fe->tuner_priv = state;
+ fe->ops.tuner_ops = tda665x_ops;
+ info = &fe->ops.tuner_ops.info;
+
+ memcpy(info->name, config->name, sizeof(config->name));
+ info->frequency_min = config->frequency_min;
+ info->frequency_max = config->frequency_max;
+ info->frequency_step = config->frequency_offst;
+
+ printk(KERN_DEBUG "%s: Attaching TDA665x (%s) tuner\n", __func__, info->name);
+
+ return fe;
+
+exit:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(tda665x_attach);
+
+MODULE_DESCRIPTION("TDA665x driver");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/tda665x.h b/drivers/media/dvb/frontends/tda665x.h
new file mode 100644
index 0000000..ec7927a
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda665x.h
@@ -0,0 +1,52 @@
+/*
+ TDA665x tuner driver
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __TDA665x_H
+#define __TDA665x_H
+
+struct tda665x_config {
+ char name[128];
+
+ u8 addr;
+ u32 frequency_min;
+ u32 frequency_max;
+ u32 frequency_offst;
+ u32 ref_multiplier;
+ u32 ref_divider;
+};
+
+#if defined(CONFIG_DVB_TDA665x) || (defined(CONFIG_DVB_TDA665x_MODULE) && defined(MODULE))
+
+extern struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
+ const struct tda665x_config *config,
+ struct i2c_adapter *i2c);
+
+#else
+
+static inline struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
+ const struct tda665x_config *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_TDA665x */
+
+#endif /* __TDA665x_H */
diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig
new file mode 100644
index 0000000..f7b72a3
--- /dev/null
+++ b/drivers/media/dvb/mantis/Kconfig
@@ -0,0 +1,32 @@
+config MANTIS_CORE
+ tristate "Mantis/Hopper PCI bridge based devices"
+ depends on PCI && I2C && INPUT
+
+ help
+	  Support for PCI cards based on the Mantis and Hopper PCI bridges.
+
+ Say Y if you own such a device and want to use it.
+
+config DVB_MANTIS
+ tristate "MANTIS based cards"
+ depends on MANTIS_CORE && DVB_CORE && PCI && I2C
+ select DVB_MB86A16
+ select DVB_ZL10353
+ select DVB_STV0299
+ select DVB_PLL
+ help
+ Support for PCI cards based on the Mantis PCI bridge.
+ Say Y when you have a Mantis based DVB card and want to use it.
+
+ If unsure say N.
+
+config DVB_HOPPER
+ tristate "HOPPER based cards"
+ depends on MANTIS_CORE && DVB_CORE && PCI && I2C
+ select DVB_ZL10353
+ select DVB_PLL
+ help
+ Support for PCI cards based on the Hopper PCI bridge.
+ Say Y when you have a Hopper based DVB card and want to use it.
+
+	  If unsure say N.
diff --git a/drivers/media/dvb/mantis/Makefile b/drivers/media/dvb/mantis/Makefile
new file mode 100644
index 0000000..98dc5cd
--- /dev/null
+++ b/drivers/media/dvb/mantis/Makefile
@@ -0,0 +1,28 @@
+mantis_core-objs := mantis_ioc.o \
+ mantis_uart.o \
+ mantis_dma.o \
+ mantis_pci.o \
+ mantis_i2c.o \
+ mantis_dvb.o \
+ mantis_evm.o \
+ mantis_hif.o \
+ mantis_ca.o \
+ mantis_pcmcia.o \
+ mantis_input.o
+
+mantis-objs := mantis_cards.o \
+ mantis_vp1033.o \
+ mantis_vp1034.o \
+ mantis_vp1041.o \
+ mantis_vp2033.o \
+ mantis_vp2040.o \
+ mantis_vp3030.o
+
+hopper-objs := hopper_cards.o \
+ hopper_vp3028.o
+
+obj-$(CONFIG_MANTIS_CORE) += mantis_core.o
+obj-$(CONFIG_DVB_MANTIS) += mantis.o
+obj-$(CONFIG_DVB_HOPPER) += hopper.o
+
+EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/mantis/hopper_cards.c b/drivers/media/dvb/mantis/hopper_cards.c
new file mode 100644
index 0000000..d073c61
--- /dev/null
+++ b/drivers/media/dvb/mantis/hopper_cards.c
@@ -0,0 +1,275 @@
+/*
+ Hopper PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "hopper_vp3028.h"
+#include "mantis_dma.h"
+#include "mantis_dvb.h"
+#include "mantis_uart.h"
+#include "mantis_ioc.h"
+#include "mantis_pci.h"
+#include "mantis_i2c.h"
+#include "mantis_reg.h"
+
+static unsigned int verbose;
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, "verbose startup messages, default is 0 (no)");
+
+#define DRIVER_NAME "Hopper"
+
+static char *label[10] = {
+ "DMA",
+ "IRQ-0",
+ "IRQ-1",
+ "OCERR",
+ "PABRT",
+ "RIPRR",
+ "PPERR",
+ "FTRGT",
+ "RISCI",
+ "RACK"
+};
+
+static int devs;
+
+static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
+{
+ u32 stat = 0, mask = 0, lstat = 0, mstat = 0;
+ u32 rst_stat = 0, rst_mask = 0;
+
+ struct mantis_pci *mantis;
+ struct mantis_ca *ca;
+
+ mantis = (struct mantis_pci *) dev_id;
+ if (unlikely(mantis == NULL)) {
+ dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ return IRQ_NONE;
+ }
+ ca = mantis->mantis_ca;
+
+ stat = mmread(MANTIS_INT_STAT);
+ mask = mmread(MANTIS_INT_MASK);
+ mstat = lstat = stat & ~MANTIS_INT_RISCSTAT;
+ if (!(stat & mask))
+ return IRQ_NONE;
+
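+	/*
+	 * Latch the pending GPIF events (write ack, errors, external IRQ)
+	 * and write them back to the status register to acknowledge them.
+	 */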
+ rst_mask = MANTIS_GPIF_WRACK |
+ MANTIS_GPIF_OTHERR |
+ MANTIS_SBUF_WSTO |
+ MANTIS_GPIF_EXTIRQ;
+
+ rst_stat = mmread(MANTIS_GPIF_STATUS);
+ rst_stat &= rst_mask;
+ mmwrite(rst_stat, MANTIS_GPIF_STATUS);
+
+ mantis->mantis_int_stat = stat;
+ mantis->mantis_int_mask = mask;
+ dprintk(MANTIS_DEBUG, 0, "\n-- Stat=<%02x> Mask=<%02x> --", stat, mask);
+ if (stat & MANTIS_INT_RISCEN) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]);
+ }
+ if (stat & MANTIS_INT_IRQ0) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[1]);
+ mantis->gpif_status = rst_stat;
+ wake_up(&ca->hif_write_wq);
+ schedule_work(&ca->hif_evm_work);
+ }
+ if (stat & MANTIS_INT_IRQ1) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[2]);
+ schedule_work(&mantis->uart_work);
+ }
+ if (stat & MANTIS_INT_OCERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[3]);
+ }
+ if (stat & MANTIS_INT_PABORT) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[4]);
+ }
+ if (stat & MANTIS_INT_RIPERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[5]);
+ }
+ if (stat & MANTIS_INT_PPERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[6]);
+ }
+ if (stat & MANTIS_INT_FTRGT) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[7]);
+ }
+ if (stat & MANTIS_INT_RISCI) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
+ mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
+ tasklet_schedule(&mantis->tasklet);
+ }
+ if (stat & MANTIS_INT_I2CDONE) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[9]);
+ wake_up(&mantis->i2c_wq);
+ }
+ mmwrite(stat, MANTIS_INT_STAT);
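+	/*
+	 * Strip the bits handled above; anything still set in stat is
+	 * unexpected and gets reported below.
+	 */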
+ stat &= ~(MANTIS_INT_RISCEN | MANTIS_INT_I2CDONE |
+ MANTIS_INT_I2CRACK | MANTIS_INT_PCMCIA7 |
+ MANTIS_INT_PCMCIA6 | MANTIS_INT_PCMCIA5 |
+ MANTIS_INT_PCMCIA4 | MANTIS_INT_PCMCIA3 |
+ MANTIS_INT_PCMCIA2 | MANTIS_INT_PCMCIA1 |
+ MANTIS_INT_PCMCIA0 | MANTIS_INT_IRQ1 |
+ MANTIS_INT_IRQ0 | MANTIS_INT_OCERR |
+ MANTIS_INT_PABORT | MANTIS_INT_RIPERR |
+ MANTIS_INT_PPERR | MANTIS_INT_FTRGT |
+ MANTIS_INT_RISCI);
+
+ if (stat)
+ dprintk(MANTIS_DEBUG, 0, "<Unknown> Stat=<%02x> Mask=<%02x>", stat, mask);
+
+ dprintk(MANTIS_DEBUG, 0, "\n");
+ return IRQ_HANDLED;
+}
+
+static int __devinit hopper_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+{
+ struct mantis_pci *mantis;
+ struct mantis_hwconfig *config;
+ int err = 0;
+
+ mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
+ if (mantis == NULL) {
+ printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ err = -ENOMEM;
+ goto fail0;
+ }
+
+ mantis->num = devs;
+ mantis->verbose = verbose;
+ mantis->pdev = pdev;
+ config = (struct mantis_hwconfig *) pci_id->driver_data;
+ config->irq_handler = &hopper_irq_handler;
+ mantis->hwconfig = config;
+
+ err = mantis_pci_init(mantis);
+ if (err) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI initialization failed <%d>", err);
+ goto fail1;
+ }
+
+ err = mantis_stream_control(mantis, STREAM_TO_HIF);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis stream control failed <%d>", err);
+ goto fail1;
+ }
+
+ err = mantis_i2c_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C initialization failed <%d>", err);
+ goto fail2;
+ }
+
+ err = mantis_get_mac(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis MAC address read failed <%d>", err);
+ goto fail2;
+ }
+
+ err = mantis_dma_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA initialization failed <%d>", err);
+ goto fail3;
+ }
+
+ err = mantis_dvb_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB initialization failed <%d>", err);
+ goto fail4;
+ }
+ devs++;
+
+ return err;
+
+fail4:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA exit! <%d>", err);
+ mantis_dma_exit(mantis);
+
+fail3:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C exit! <%d>", err);
+ mantis_i2c_exit(mantis);
+
+fail2:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI exit! <%d>", err);
+ mantis_pci_exit(mantis);
+
+fail1:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis free! <%d>", err);
+ kfree(mantis);
+
+fail0:
+ return err;
+}
+
+static void __devexit hopper_pci_remove(struct pci_dev *pdev)
+{
+ struct mantis_pci *mantis = pci_get_drvdata(pdev);
+
+ if (mantis) {
+ mantis_dvb_exit(mantis);
+ mantis_dma_exit(mantis);
+ mantis_i2c_exit(mantis);
+ mantis_pci_exit(mantis);
+ kfree(mantis);
+ }
+}
+
+static struct pci_device_id hopper_pci_table[] = {
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3028_DVB_T, &vp3028_config),
+ { }
+};
+
+static struct pci_driver hopper_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = hopper_pci_table,
+ .probe = hopper_pci_probe,
+ .remove = hopper_pci_remove,
+};
+
+static int __init hopper_init(void)
+{
+	return pci_register_driver(&hopper_pci_driver);
+}
+
+static void __exit hopper_exit(void)
+{
+	pci_unregister_driver(&hopper_pci_driver);
+}
+
+module_init(hopper_init);
+module_exit(hopper_exit);
+
+MODULE_DESCRIPTION("HOPPER driver");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/mantis/hopper_vp3028.c b/drivers/media/dvb/mantis/hopper_vp3028.c
new file mode 100644
index 0000000..96674c7
--- /dev/null
+++ b/drivers/media/dvb/mantis/hopper_vp3028.c
@@ -0,0 +1,88 @@
+/*
+ Hopper VP-3028 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "zl10353.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "hopper_vp3028.h"
+
+struct zl10353_config hopper_vp3028_config = {
+ .demod_address = 0x0f,
+};
+
+#define MANTIS_MODEL_NAME "VP-3028"
+#define MANTIS_DEV_TYPE "DVB-T"
+
+static int vp3028_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ int err = 0;
+
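+	/* drive the demod reset GPIO low across frontend power-up, then raise it */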
+ gpio_set_bits(mantis, config->reset, 0);
+ msleep(100);
+ err = mantis_frontend_power(mantis, POWER_ON);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 1);
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ msleep(250);
+ dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
+ fe = zl10353_attach(&hopper_vp3028_config, adapter);
+
+ if (!fe)
+			return -ENODEV;
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp3028_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_188,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp3028_frontend_init,
+ .power = GPIF_A00,
+ .reset = GPIF_A03,
+};
diff --git a/drivers/media/dvb/mantis/hopper_vp3028.h b/drivers/media/dvb/mantis/hopper_vp3028.h
new file mode 100644
index 0000000..5723949
--- /dev/null
+++ b/drivers/media/dvb/mantis/hopper_vp3028.h
@@ -0,0 +1,30 @@
+/*
+ Hopper VP-3028 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP3028_H
+#define __MANTIS_VP3028_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_3028_DVB_T 0x0028
+
+extern struct mantis_hwconfig vp3028_config;
+
+#endif /* __MANTIS_VP3028_H */
diff --git a/drivers/media/dvb/mantis/mantis_ca.c b/drivers/media/dvb/mantis/mantis_ca.c
new file mode 100644
index 0000000..403ce04
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ca.c
@@ -0,0 +1,207 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_link.h"
+#include "mantis_hif.h"
+#include "mantis_reg.h"
+
+#include "mantis_ca.h"
+
+static int mantis_ca_read_attr_mem(struct dvb_ca_en50221 *en50221, int slot, int addr)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request Attribute Mem Read", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ return mantis_hif_read_mem(ca, addr);
+}
+
+static int mantis_ca_write_attr_mem(struct dvb_ca_en50221 *en50221, int slot, int addr, u8 data)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request Attribute Mem Write", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ return mantis_hif_write_mem(ca, addr, data);
+}
+
+static int mantis_ca_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request CAM control Read", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ return mantis_hif_read_iom(ca, addr);
+}
+
+static int mantis_ca_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr, u8 data)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request CAM control Write", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ return mantis_hif_write_iom(ca, addr, data);
+}
+
+static int mantis_ca_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Slot RESET", slot);
+ udelay(500); /* Wait.. */
+ mmwrite(0xda, MANTIS_PCMCIA_RESET); /* Leading edge assert */
+ udelay(500);
+ mmwrite(0x00, MANTIS_PCMCIA_RESET); /* Trailing edge deassert */
+ msleep(1000);
+ dvb_ca_en50221_camready_irq(&ca->en50221, 0);
+
+ return 0;
+}
+
+static int mantis_ca_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Slot shutdown", slot);
+
+ return 0;
+}
+
+static int mantis_ts_control(struct dvb_ca_en50221 *en50221, int slot)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): TS control", slot);
+/* mantis_set_direction(mantis, 1); */ /* Enable TS through CAM */
+
+ return 0;
+}
+
+static int mantis_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Poll Slot status", slot);
+
+ if (ca->slot_state == MODULE_INSERTED) {
+ dprintk(MANTIS_DEBUG, 1, "CA Module present and ready");
+ return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
+ } else {
+ dprintk(MANTIS_DEBUG, 1, "CA Module not present or not ready");
+ }
+
+ return 0;
+}
+
+int mantis_ca_init(struct mantis_pci *mantis)
+{
+ struct dvb_adapter *dvb_adapter = &mantis->dvb_adapter;
+ struct mantis_ca *ca;
+ int ca_flags = 0, result;
+
+ dprintk(MANTIS_DEBUG, 1, "Initializing Mantis CA");
+ ca = kzalloc(sizeof(struct mantis_ca), GFP_KERNEL);
+ if (!ca) {
+ dprintk(MANTIS_ERROR, 1, "Out of memory!, exiting ..");
+ result = -ENOMEM;
+ goto err;
+ }
+
+ ca->ca_priv = mantis;
+ mantis->mantis_ca = ca;
+ ca_flags = DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE;
+ /* register CA interface */
+ ca->en50221.owner = THIS_MODULE;
+ ca->en50221.read_attribute_mem = mantis_ca_read_attr_mem;
+ ca->en50221.write_attribute_mem = mantis_ca_write_attr_mem;
+ ca->en50221.read_cam_control = mantis_ca_read_cam_ctl;
+ ca->en50221.write_cam_control = mantis_ca_write_cam_ctl;
+ ca->en50221.slot_reset = mantis_ca_slot_reset;
+ ca->en50221.slot_shutdown = mantis_ca_slot_shutdown;
+ ca->en50221.slot_ts_enable = mantis_ts_control;
+ ca->en50221.poll_slot_status = mantis_slot_status;
+ ca->en50221.data = ca;
+
+ mutex_init(&ca->ca_lock);
+
+ init_waitqueue_head(&ca->hif_data_wq);
+ init_waitqueue_head(&ca->hif_opdone_wq);
+ init_waitqueue_head(&ca->hif_write_wq);
+
+ dprintk(MANTIS_ERROR, 1, "Registering EN50221 device");
+ result = dvb_ca_en50221_init(dvb_adapter, &ca->en50221, ca_flags, 1);
+ if (result != 0) {
+ dprintk(MANTIS_ERROR, 1, "EN50221: Initialization failed <%d>", result);
+ goto err;
+ }
+ dprintk(MANTIS_ERROR, 1, "Registered EN50221 device");
+ mantis_evmgr_init(ca);
+ return 0;
+err:
+ kfree(ca);
+ return result;
+}
+EXPORT_SYMBOL_GPL(mantis_ca_init);
+
+void mantis_ca_exit(struct mantis_pci *mantis)
+{
+ struct mantis_ca *ca = mantis->mantis_ca;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis CA exit");
+
+ mantis_evmgr_exit(ca);
+ dprintk(MANTIS_ERROR, 1, "Unregistering EN50221 device");
+ if (ca)
+ dvb_ca_en50221_release(&ca->en50221);
+
+ kfree(ca);
+}
+EXPORT_SYMBOL_GPL(mantis_ca_exit);
diff --git a/drivers/media/dvb/mantis/mantis_ca.h b/drivers/media/dvb/mantis/mantis_ca.h
new file mode 100644
index 0000000..dc63e55
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ca.h
@@ -0,0 +1,27 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_CA_H
+#define __MANTIS_CA_H
+
+extern int mantis_ca_init(struct mantis_pci *mantis);
+extern void mantis_ca_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_CA_H */
diff --git a/drivers/media/dvb/mantis/mantis_cards.c b/drivers/media/dvb/mantis/mantis_cards.c
new file mode 100644
index 0000000..16f1708
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_cards.c
@@ -0,0 +1,305 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+
+#include "mantis_vp1033.h"
+#include "mantis_vp1034.h"
+#include "mantis_vp1041.h"
+#include "mantis_vp2033.h"
+#include "mantis_vp2040.h"
+#include "mantis_vp3030.h"
+
+#include "mantis_dma.h"
+#include "mantis_ca.h"
+#include "mantis_dvb.h"
+#include "mantis_uart.h"
+#include "mantis_ioc.h"
+#include "mantis_pci.h"
+#include "mantis_i2c.h"
+#include "mantis_reg.h"
+
+static unsigned int verbose;
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, "verbose startup messages, default is 0 (no)");
+
+static int devs;
+
+#define DRIVER_NAME "Mantis"
+
+static char *label[10] = {
+ "DMA",
+ "IRQ-0",
+ "IRQ-1",
+ "OCERR",
+ "PABRT",
+ "RIPRR",
+ "PPERR",
+ "FTRGT",
+ "RISCI",
+ "RACK"
+};
+
+static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
+{
+ u32 stat = 0, mask = 0, lstat = 0, mstat = 0;
+ u32 rst_stat = 0, rst_mask = 0;
+
+ struct mantis_pci *mantis;
+ struct mantis_ca *ca;
+
+ mantis = (struct mantis_pci *) dev_id;
+ if (unlikely(mantis == NULL)) {
+ dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ return IRQ_NONE;
+ }
+ ca = mantis->mantis_ca;
+
+ stat = mmread(MANTIS_INT_STAT);
+ mask = mmread(MANTIS_INT_MASK);
+ mstat = lstat = stat & ~MANTIS_INT_RISCSTAT;
+ if (!(stat & mask))
+ return IRQ_NONE;
+
+ rst_mask = MANTIS_GPIF_WRACK |
+ MANTIS_GPIF_OTHERR |
+ MANTIS_SBUF_WSTO |
+ MANTIS_GPIF_EXTIRQ;
+
+ rst_stat = mmread(MANTIS_GPIF_STATUS);
+ rst_stat &= rst_mask;
+ mmwrite(rst_stat, MANTIS_GPIF_STATUS);
+
+ mantis->mantis_int_stat = stat;
+ mantis->mantis_int_mask = mask;
+ dprintk(MANTIS_DEBUG, 0, "\n-- Stat=<%02x> Mask=<%02x> --", stat, mask);
+ if (stat & MANTIS_INT_RISCEN) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]);
+ }
+ if (stat & MANTIS_INT_IRQ0) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[1]);
+ mantis->gpif_status = rst_stat;
+ wake_up(&ca->hif_write_wq);
+ schedule_work(&ca->hif_evm_work);
+ }
+ if (stat & MANTIS_INT_IRQ1) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[2]);
+ schedule_work(&mantis->uart_work);
+ }
+ if (stat & MANTIS_INT_OCERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[3]);
+ }
+ if (stat & MANTIS_INT_PABORT) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[4]);
+ }
+ if (stat & MANTIS_INT_RIPERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[5]);
+ }
+ if (stat & MANTIS_INT_PPERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[6]);
+ }
+ if (stat & MANTIS_INT_FTRGT) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[7]);
+ }
+ if (stat & MANTIS_INT_RISCI) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
+ mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
+ tasklet_schedule(&mantis->tasklet);
+ }
+ if (stat & MANTIS_INT_I2CDONE) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[9]);
+ wake_up(&mantis->i2c_wq);
+ }
+ mmwrite(stat, MANTIS_INT_STAT);
+ stat &= ~(MANTIS_INT_RISCEN | MANTIS_INT_I2CDONE |
+ MANTIS_INT_I2CRACK | MANTIS_INT_PCMCIA7 |
+ MANTIS_INT_PCMCIA6 | MANTIS_INT_PCMCIA5 |
+ MANTIS_INT_PCMCIA4 | MANTIS_INT_PCMCIA3 |
+ MANTIS_INT_PCMCIA2 | MANTIS_INT_PCMCIA1 |
+ MANTIS_INT_PCMCIA0 | MANTIS_INT_IRQ1 |
+ MANTIS_INT_IRQ0 | MANTIS_INT_OCERR |
+ MANTIS_INT_PABORT | MANTIS_INT_RIPERR |
+ MANTIS_INT_PPERR | MANTIS_INT_FTRGT |
+ MANTIS_INT_RISCI);
+
+ if (stat)
+ dprintk(MANTIS_DEBUG, 0, "<Unknown> Stat=<%02x> Mask=<%02x>", stat, mask);
+
+ dprintk(MANTIS_DEBUG, 0, "\n");
+ return IRQ_HANDLED;
+}
+
+static int __devinit mantis_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+{
+ struct mantis_pci *mantis;
+ struct mantis_hwconfig *config;
+ int err = 0;
+
+ mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
+ if (mantis == NULL) {
+ printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ err = -ENOMEM;
+ goto fail0;
+ }
+
+ mantis->num = devs;
+ mantis->verbose = verbose;
+ mantis->pdev = pdev;
+ config = (struct mantis_hwconfig *) pci_id->driver_data;
+ config->irq_handler = &mantis_irq_handler;
+ mantis->hwconfig = config;
+
+ err = mantis_pci_init(mantis);
+ if (err) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI initialization failed <%d>", err);
+ goto fail1;
+ }
+
+ err = mantis_stream_control(mantis, STREAM_TO_HIF);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis stream control failed <%d>", err);
+ goto fail1;
+ }
+
+ err = mantis_i2c_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C initialization failed <%d>", err);
+ goto fail2;
+ }
+
+ err = mantis_get_mac(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis MAC address read failed <%d>", err);
+ goto fail2;
+ }
+
+ err = mantis_dma_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA initialization failed <%d>", err);
+ goto fail3;
+ }
+
+ err = mantis_dvb_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB initialization failed <%d>", err);
+ goto fail4;
+ }
+ err = mantis_uart_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis UART initialization failed <%d>", err);
+ goto fail6;
+ }
+
+ devs++;
+
+ return err;
+
+fail6:
+	dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB exit! <%d>", err);
+	mantis_dvb_exit(mantis);
+
+fail4:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA exit! <%d>", err);
+ mantis_dma_exit(mantis);
+
+fail3:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C exit! <%d>", err);
+ mantis_i2c_exit(mantis);
+
+fail2:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI exit! <%d>", err);
+ mantis_pci_exit(mantis);
+
+fail1:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis free! <%d>", err);
+ kfree(mantis);
+
+fail0:
+ return err;
+}
+
+static void __devexit mantis_pci_remove(struct pci_dev *pdev)
+{
+ struct mantis_pci *mantis = pci_get_drvdata(pdev);
+
+ if (mantis) {
+
+ mantis_uart_exit(mantis);
+ mantis_dvb_exit(mantis);
+ mantis_dma_exit(mantis);
+ mantis_i2c_exit(mantis);
+ mantis_pci_exit(mantis);
+ kfree(mantis);
+ }
+}
+
+static struct pci_device_id mantis_pci_table[] = {
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_1033_DVB_S, &vp1033_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_1034_DVB_S, &vp1034_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_1041_DVB_S2, &vp1041_config),
+ MAKE_ENTRY(TECHNISAT, SKYSTAR_HD2_10, &vp1041_config),
+ MAKE_ENTRY(TECHNISAT, SKYSTAR_HD2_20, &vp1041_config),
+ MAKE_ENTRY(TERRATEC, CINERGY_S2_PCI_HD, &vp1041_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_2033_DVB_C, &vp2033_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_2040_DVB_C, &vp2040_config),
+ MAKE_ENTRY(TECHNISAT, CABLESTAR_HD2, &vp2040_config),
+ MAKE_ENTRY(TERRATEC, CINERGY_C, &vp2033_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3030_DVB_T, &vp3030_config),
+ { }
+};
+
+static struct pci_driver mantis_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = mantis_pci_table,
+ .probe = mantis_pci_probe,
+ .remove = mantis_pci_remove,
+};
+
+static int __init mantis_init(void)
+{
+	return pci_register_driver(&mantis_pci_driver);
+}
+
+static void __exit mantis_exit(void)
+{
+	pci_unregister_driver(&mantis_pci_driver);
+}
+
+module_init(mantis_init);
+module_exit(mantis_exit);
+
+MODULE_DESCRIPTION("MANTIS driver");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/mantis/mantis_common.h b/drivers/media/dvb/mantis/mantis_common.h
new file mode 100644
index 0000000..d0b645a
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_common.h
@@ -0,0 +1,179 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_COMMON_H
+#define __MANTIS_COMMON_H
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include "mantis_uart.h"
+
+#include "mantis_link.h"
+
+#define MANTIS_ERROR 0
+#define MANTIS_NOTICE 1
+#define MANTIS_INFO 2
+#define MANTIS_DEBUG 3
+#define MANTIS_TMG 9
+
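+/*
+ * dprintk(level, tagged, fmt, ...): print only when the device verbosity is
+ * above 'level'; with 'tagged' set the message gets a KERN_* prefix and a
+ * "function (device number):" tag, otherwise the format string is printed raw.
+ */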
+#define dprintk(y, z, format, arg...) do { \
+ if (z) { \
+ if ((mantis->verbose > MANTIS_ERROR) && (mantis->verbose > y)) \
+ printk(KERN_ERR "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ else if ((mantis->verbose > MANTIS_NOTICE) && (mantis->verbose > y)) \
+ printk(KERN_NOTICE "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ else if ((mantis->verbose > MANTIS_INFO) && (mantis->verbose > y)) \
+ printk(KERN_INFO "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ else if ((mantis->verbose > MANTIS_DEBUG) && (mantis->verbose > y)) \
+ printk(KERN_DEBUG "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ else if ((mantis->verbose > MANTIS_TMG) && (mantis->verbose > y)) \
+ printk(KERN_DEBUG "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ } else { \
+ if (mantis->verbose > y) \
+ printk(format , ##arg); \
+ } \
+} while (0)
+
+#define mwrite(dat, addr) writel((dat), addr)
+#define mread(addr) readl(addr)
+
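+/* mmwrite()/mmread() access the Mantis registers through the mapped PCI BAR */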
+#define mmwrite(dat, addr) mwrite((dat), (mantis->mmio + (addr)))
+#define mmread(addr) mread(mantis->mmio + (addr))
+
+#define MANTIS_TS_188 0
+#define MANTIS_TS_204 1
+
+#define TWINHAN_TECHNOLOGIES 0x1822
+#define MANTIS 0x4e35
+
+#define TECHNISAT 0x1ae4
+#define TERRATEC 0x153b
+
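+/*
+ * All supported boards share the Twinhan/Mantis PCI vendor and device IDs;
+ * individual cards are distinguished by their PCI subsystem IDs, which is
+ * what MAKE_ENTRY() plugs into the pci_device_id tables.
+ */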
+#define MAKE_ENTRY(__subven, __subdev, __configptr) { \
+ .vendor = TWINHAN_TECHNOLOGIES, \
+ .device = MANTIS, \
+ .subvendor = (__subven), \
+ .subdevice = (__subdev), \
+ .driver_data = (unsigned long) (__configptr) \
+}
+
+enum mantis_i2c_mode {
+ MANTIS_PAGE_MODE = 0,
+ MANTIS_BYTE_MODE,
+};
+
+struct mantis_pci;
+
+struct mantis_hwconfig {
+ char *model_name;
+ char *dev_type;
+ u32 ts_size;
+
+ enum mantis_baud baud_rate;
+ enum mantis_parity parity;
+ u32 bytes;
+
+ irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ int (*frontend_init)(struct mantis_pci *mantis, struct dvb_frontend *fe);
+
+ u8 power;
+ u8 reset;
+
+ enum mantis_i2c_mode i2c_mode;
+};
+
+struct mantis_pci {
+ unsigned int verbose;
+
+ /* PCI stuff */
+ u16 vendor_id;
+ u16 device_id;
+ u16 subsystem_vendor;
+ u16 subsystem_device;
+
+ u8 latency;
+
+ struct pci_dev *pdev;
+
+ unsigned long mantis_addr;
+ void __iomem *mmio;
+
+ u8 irq;
+ u8 revision;
+
+ unsigned int num;
+
+ /* RISC Core */
+ u32 finished_block;
+ u32 last_block;
+ u32 line_bytes;
+ u32 line_count;
+ u32 risc_pos;
+ u8 *buf_cpu;
+ dma_addr_t buf_dma;
+ u32 *risc_cpu;
+ dma_addr_t risc_dma;
+
+ struct tasklet_struct tasklet;
+
+ struct i2c_adapter adapter;
+ int i2c_rc;
+ wait_queue_head_t i2c_wq;
+ struct mutex i2c_lock;
+
+ /* DVB stuff */
+ struct dvb_adapter dvb_adapter;
+ struct dvb_frontend *fe;
+ struct dvb_demux demux;
+ struct dmxdev dmxdev;
+ struct dmx_frontend fe_hw;
+ struct dmx_frontend fe_mem;
+ struct dvb_net dvbnet;
+
+ u8 feeds;
+
+ struct mantis_hwconfig *hwconfig;
+
+ u32 mantis_int_stat;
+ u32 mantis_int_mask;
+
+ /* board specific */
+ u8 mac_address[8];
+ u32 sub_vendor_id;
+ u32 sub_device_id;
+
+ /* A12 A13 A14 */
+ u32 gpio_status;
+
+ u32 gpif_status;
+
+ struct mantis_ca *mantis_ca;
+
+ wait_queue_head_t uart_wq;
+ struct work_struct uart_work;
+ spinlock_t uart_lock;
+
+ struct input_dev *rc;
+};
+
+#define MANTIS_HIF_STATUS (mantis->gpio_status)
+
+#endif /* __MANTIS_COMMON_H */
diff --git a/drivers/media/dvb/mantis/mantis_core.c b/drivers/media/dvb/mantis/mantis_core.c
new file mode 100644
index 0000000..8113b23
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_core.c
@@ -0,0 +1,238 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include "mantis_common.h"
+#include "mantis_core.h"
+#include "mantis_vp1033.h"
+#include "mantis_vp1034.h"
+#include "mantis_vp1041.h"
+#include "mantis_vp2033.h"
+#include "mantis_vp2040.h"
+#include "mantis_vp3030.h"
+
+static int read_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
+{
+ int err;
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .buf = data,
+ .len = 1
+ }, {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .buf = data,
+ .len = length
+ },
+ };
+
+ err = i2c_transfer(&mantis->adapter, msg, 2);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1,
+ "ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >",
+ err, data[0], data[1]);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static int write_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
+{
+ int err;
+
+ struct i2c_msg msg = {
+ .addr = 0x50,
+ .flags = 0,
+ .buf = data,
+ .len = length
+ };
+
+ err = i2c_transfer(&mantis->adapter, &msg, 1);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1,
+ "ERROR: i2c write: < err=%i length=0x%02x d0=0x%02x, d1=0x%02x >",
+ err, length, data[0], data[1]);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static int get_mac_address(struct mantis_pci *mantis)
+{
+ int err;
+
+ mantis->mac_address[0] = 0x08;
+ err = read_eeprom_byte(mantis, &mantis->mac_address[0], 6);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1, "Mantis EEPROM read error");
+
+ return err;
+ }
+ dprintk(verbose, MANTIS_ERROR, 0,
+ " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+ mantis->mac_address[0], mantis->mac_address[1],
+ mantis->mac_address[2], mantis->mac_address[3],
+ mantis->mac_address[4], mantis->mac_address[5]);
+
+ return 0;
+}
+
+#define MANTIS_MODEL_UNKNOWN "UNKNOWN"
+#define MANTIS_DEV_UNKNOWN "UNKNOWN"
+
+struct mantis_hwconfig unknown_device = {
+ .model_name = MANTIS_MODEL_UNKNOWN,
+ .dev_type = MANTIS_DEV_UNKNOWN,
+};
+
+static void mantis_load_config(struct mantis_pci *mantis)
+{
+ switch (mantis->subsystem_device) {
+ case MANTIS_VP_1033_DVB_S: /* VP-1033 */
+ mantis->hwconfig = &vp1033_mantis_config;
+ break;
+ case MANTIS_VP_1034_DVB_S: /* VP-1034 */
+ mantis->hwconfig = &vp1034_mantis_config;
+ break;
+ case MANTIS_VP_1041_DVB_S2: /* VP-1041 */
+ case TECHNISAT_SKYSTAR_HD2:
+ mantis->hwconfig = &vp1041_mantis_config;
+ break;
+ case MANTIS_VP_2033_DVB_C: /* VP-2033 */
+ mantis->hwconfig = &vp2033_mantis_config;
+ break;
+ case MANTIS_VP_2040_DVB_C: /* VP-2040 */
+ case TERRATEC_CINERGY_C_PCI: /* VP-2040 clone */
+ case TECHNISAT_CABLESTAR_HD2:
+ mantis->hwconfig = &vp2040_mantis_config;
+ break;
+ case MANTIS_VP_3030_DVB_T: /* VP-3030 */
+ mantis->hwconfig = &vp3030_mantis_config;
+ break;
+ default:
+ mantis->hwconfig = &unknown_device;
+ break;
+ }
+}
+
+int mantis_core_init(struct mantis_pci *mantis)
+{
+ int err = 0;
+
+ mantis_load_config(mantis);
+ dprintk(verbose, MANTIS_ERROR, 0, "found a %s PCI %s device on (%02x:%02x.%x),\n",
+ mantis->hwconfig->model_name, mantis->hwconfig->dev_type,
+ mantis->pdev->bus->number, PCI_SLOT(mantis->pdev->devfn), PCI_FUNC(mantis->pdev->devfn));
+ dprintk(verbose, MANTIS_ERROR, 0, " Mantis Rev %d [%04x:%04x], ",
+ mantis->revision,
+ mantis->subsystem_vendor, mantis->subsystem_device);
+ dprintk(verbose, MANTIS_ERROR, 0,
+ "irq: %d, latency: %d\n memory: 0x%lx, mmio: 0x%p\n",
+ mantis->pdev->irq, mantis->latency,
+		mantis->mantis_addr, mantis->mmio);
+
+ err = mantis_i2c_init(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1, "Mantis I2C init failed");
+ return err;
+ }
+ err = get_mac_address(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1, "get MAC address failed");
+ return err;
+ }
+ err = mantis_dma_init(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1, "Mantis DMA init failed");
+ return err;
+ }
+ err = mantis_dvb_init(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_DEBUG, 1, "Mantis DVB init failed");
+ return err;
+ }
+ err = mantis_uart_init(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_DEBUG, 1, "Mantis UART init failed");
+ return err;
+ }
+
+ return 0;
+}
+
+int mantis_core_exit(struct mantis_pci *mantis)
+{
+ mantis_dma_stop(mantis);
+ dprintk(verbose, MANTIS_ERROR, 1, "DMA engine stopping");
+
+ mantis_uart_exit(mantis);
+ dprintk(verbose, MANTIS_ERROR, 1, "UART exit failed");
+
+ if (mantis_dma_exit(mantis) < 0)
+ dprintk(verbose, MANTIS_ERROR, 1, "DMA exit failed");
+ if (mantis_dvb_exit(mantis) < 0)
+ dprintk(verbose, MANTIS_ERROR, 1, "DVB exit failed");
+ if (mantis_i2c_exit(mantis) < 0)
+ dprintk(verbose, MANTIS_ERROR, 1, "I2C adapter delete.. failed");
+
+ return 0;
+}
+
+/* Turn the given bit on or off. */
+void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value)
+{
+ u32 cur;
+
+ cur = mmread(MANTIS_GPIF_ADDR);
+ if (value)
+ mantis->gpio_status = cur | (1 << bitpos);
+ else
+ mantis->gpio_status = cur & (~(1 << bitpos));
+
+ mmwrite(mantis->gpio_status, MANTIS_GPIF_ADDR);
+ mmwrite(0x00, MANTIS_GPIF_DOUT);
+ udelay(100);
+}
+
+/* direction = 0 , no CI passthrough ; 1 , CI passthrough */
+void mantis_set_direction(struct mantis_pci *mantis, int direction)
+{
+ u32 reg;
+
+ reg = mmread(0x28);
+ dprintk(verbose, MANTIS_DEBUG, 1, "TS direction setup");
+ if (direction == 0x01) {
+ /* to CI */
+ reg |= 0x04;
+ mmwrite(reg, 0x28);
+ reg &= 0xff - 0x04;
+ mmwrite(reg, 0x28);
+ } else {
+ reg &= 0xff - 0x04;
+ mmwrite(reg, 0x28);
+ reg |= 0x04;
+ mmwrite(reg, 0x28);
+ }
+}
diff --git a/drivers/media/dvb/mantis/mantis_core.h b/drivers/media/dvb/mantis/mantis_core.h
new file mode 100644
index 0000000..833ee42
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_core.h
@@ -0,0 +1,57 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_CORE_H
+#define __MANTIS_CORE_H
+
+#include "mantis_common.h"
+
+
+#define FE_TYPE_SAT 0
+#define FE_TYPE_CAB 1
+#define FE_TYPE_TER 2
+
+#define FE_TYPE_TS204 0
+#define FE_TYPE_TS188 1
+
+
+struct vendorname {
+ u8 *sub_vendor_name;
+ u32 sub_vendor_id;
+};
+
+struct devicetype {
+ u8 *sub_device_name;
+ u32 sub_device_id;
+ u8 device_type;
+ u32 type_flags;
+};
+
+
+extern int mantis_dma_init(struct mantis_pci *mantis);
+extern int mantis_dma_exit(struct mantis_pci *mantis);
+extern void mantis_dma_start(struct mantis_pci *mantis);
+extern void mantis_dma_stop(struct mantis_pci *mantis);
+extern int mantis_i2c_init(struct mantis_pci *mantis);
+extern int mantis_i2c_exit(struct mantis_pci *mantis);
+extern int mantis_core_init(struct mantis_pci *mantis);
+extern int mantis_core_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_CORE_H */
diff --git a/drivers/media/dvb/mantis/mantis_dma.c b/drivers/media/dvb/mantis/mantis_dma.c
new file mode 100644
index 0000000..46202a4
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dma.c
@@ -0,0 +1,256 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <asm/page.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h>
+
+#include <asm/irq.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_dma.h"
+
+#define RISC_WRITE (0x01 << 28)
+#define RISC_JUMP (0x07 << 28)
+#define RISC_IRQ (0x01 << 24)
+
+#define RISC_STATUS(status) ((((~status) & 0x0f) << 20) | ((status & 0x0f) << 16))
+#define RISC_FLUSH() (mantis->risc_pos = 0)
+#define RISC_INSTR(opcode) (mantis->risc_cpu[mantis->risc_pos++] = cpu_to_le32(opcode))
+
+#define MANTIS_BUF_SIZE (64 * 1024)
+#define MANTIS_BLOCK_BYTES (MANTIS_BUF_SIZE >> 4)
+#define MANTIS_BLOCK_COUNT (1 << 4)
+#define MANTIS_RISC_SIZE PAGE_SIZE
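+
+/*
+ * The 64 KiB DMA buffer is used as a ring of 16 blocks; the RISC program
+ * writes one line at a time and raises an interrupt at each block boundary,
+ * with the number of the just-completed block encoded in the status field.
+ */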
+
+int mantis_dma_exit(struct mantis_pci *mantis)
+{
+ if (mantis->buf_cpu) {
+ dprintk(MANTIS_ERROR, 1,
+ "DMA=0x%lx cpu=0x%p size=%d",
+ (unsigned long) mantis->buf_dma,
+ mantis->buf_cpu,
+ MANTIS_BUF_SIZE);
+
+ pci_free_consistent(mantis->pdev, MANTIS_BUF_SIZE,
+ mantis->buf_cpu, mantis->buf_dma);
+
+ mantis->buf_cpu = NULL;
+ }
+ if (mantis->risc_cpu) {
+ dprintk(MANTIS_ERROR, 1,
+ "RISC=0x%lx cpu=0x%p size=%lx",
+ (unsigned long) mantis->risc_dma,
+ mantis->risc_cpu,
+ MANTIS_RISC_SIZE);
+
+ pci_free_consistent(mantis->pdev, MANTIS_RISC_SIZE,
+ mantis->risc_cpu, mantis->risc_dma);
+
+ mantis->risc_cpu = NULL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_dma_exit);
+
+static inline int mantis_alloc_buffers(struct mantis_pci *mantis)
+{
+ if (!mantis->buf_cpu) {
+ mantis->buf_cpu = pci_alloc_consistent(mantis->pdev,
+ MANTIS_BUF_SIZE,
+ &mantis->buf_dma);
+ if (!mantis->buf_cpu) {
+ dprintk(MANTIS_ERROR, 1,
+ "DMA buffer allocation failed");
+
+ goto err;
+ }
+ dprintk(MANTIS_ERROR, 1,
+ "DMA=0x%lx cpu=0x%p size=%d",
+ (unsigned long) mantis->buf_dma,
+ mantis->buf_cpu, MANTIS_BUF_SIZE);
+ }
+ if (!mantis->risc_cpu) {
+ mantis->risc_cpu = pci_alloc_consistent(mantis->pdev,
+ MANTIS_RISC_SIZE,
+ &mantis->risc_dma);
+
+ if (!mantis->risc_cpu) {
+ dprintk(MANTIS_ERROR, 1,
+ "RISC program allocation failed");
+
+ mantis_dma_exit(mantis);
+
+ goto err;
+ }
+ dprintk(MANTIS_ERROR, 1,
+ "RISC=0x%lx cpu=0x%p size=%lx",
+ (unsigned long) mantis->risc_dma,
+ mantis->risc_cpu, MANTIS_RISC_SIZE);
+ }
+
+ return 0;
+err:
+ dprintk(MANTIS_ERROR, 1, "Out of memory (?) .....");
+ return -ENOMEM;
+}
+
+static inline int mantis_calc_lines(struct mantis_pci *mantis)
+{
+ mantis->line_bytes = MANTIS_BLOCK_BYTES;
+ mantis->line_count = MANTIS_BLOCK_COUNT;
+
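+	/*
+	 * Keep each RISC write below 4 KiB by halving the line size and
+	 * doubling the line count until it fits.
+	 */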
+ while (mantis->line_bytes > 4095) {
+ mantis->line_bytes >>= 1;
+ mantis->line_count <<= 1;
+ }
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis RISC block bytes=[%d], line bytes=[%d], line count=[%d]",
+ MANTIS_BLOCK_BYTES, mantis->line_bytes, mantis->line_count);
+
+ if (mantis->line_count > 255) {
+ dprintk(MANTIS_ERROR, 1, "Buffer size error");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mantis_dma_init(struct mantis_pci *mantis)
+{
+ int err = 0;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
+ if (mantis_alloc_buffers(mantis) < 0) {
+ dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
+
+ /* Stop RISC Engine */
+ mmwrite(0, MANTIS_DMA_CTL);
+
+ goto err;
+ }
+ err = mantis_calc_lines(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "Mantis calc lines failed");
+
+ goto err;
+ }
+
+ return 0;
+err:
+ return err;
+}
+EXPORT_SYMBOL_GPL(mantis_dma_init);
+
+static inline void mantis_risc_program(struct mantis_pci *mantis)
+{
+ u32 buf_pos = 0;
+ u32 line;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis create RISC program");
+ RISC_FLUSH();
+
+ dprintk(MANTIS_DEBUG, 1, "risc len lines %u, bytes per line %u",
+ mantis->line_count, mantis->line_bytes);
+
+ for (line = 0; line < mantis->line_count; line++) {
+ dprintk(MANTIS_DEBUG, 1, "RISC PROG line=[%d]", line);
+ if (!(buf_pos % MANTIS_BLOCK_BYTES)) {
+ RISC_INSTR(RISC_WRITE |
+ RISC_IRQ |
+ RISC_STATUS(((buf_pos / MANTIS_BLOCK_BYTES) +
+ (MANTIS_BLOCK_COUNT - 1)) %
+ MANTIS_BLOCK_COUNT) |
+ mantis->line_bytes);
+ } else {
+ RISC_INSTR(RISC_WRITE | mantis->line_bytes);
+ }
+ RISC_INSTR(mantis->buf_dma + buf_pos);
+ buf_pos += mantis->line_bytes;
+ }
+ RISC_INSTR(RISC_JUMP);
+ RISC_INSTR(mantis->risc_dma);
+}
+
+void mantis_dma_start(struct mantis_pci *mantis)
+{
+ dprintk(MANTIS_DEBUG, 1, "Mantis Start DMA engine");
+
+ mantis_risc_program(mantis);
+ mmwrite(mantis->risc_dma, MANTIS_RISC_START);
+ mmwrite(mmread(MANTIS_GPIF_ADDR) | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
+
+ mmwrite(0, MANTIS_DMA_CTL);
+ mantis->last_block = mantis->finished_block = 0;
+
+ mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_RISCI, MANTIS_INT_MASK);
+
+ mmwrite(MANTIS_FIFO_EN | MANTIS_DCAP_EN
+ | MANTIS_RISC_EN, MANTIS_DMA_CTL);
+
+}
+
+void mantis_dma_stop(struct mantis_pci *mantis)
+{
+ u32 stat = 0, mask = 0;
+
+ stat = mmread(MANTIS_INT_STAT);
+ mask = mmread(MANTIS_INT_MASK);
+ dprintk(MANTIS_DEBUG, 1, "Mantis Stop DMA engine");
+
+ mmwrite((mmread(MANTIS_GPIF_ADDR) & (~(MANTIS_GPIF_HIFRDWRN))), MANTIS_GPIF_ADDR);
+
+ mmwrite((mmread(MANTIS_DMA_CTL) & ~(MANTIS_FIFO_EN |
+ MANTIS_DCAP_EN |
+ MANTIS_RISC_EN)), MANTIS_DMA_CTL);
+
+ mmwrite(mmread(MANTIS_INT_STAT), MANTIS_INT_STAT);
+
+ mmwrite(mmread(MANTIS_INT_MASK) & ~(MANTIS_INT_RISCI |
+ MANTIS_INT_RISCEN), MANTIS_INT_MASK);
+}
+
+
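+/*
+ * Tasklet body: hand every block completed since the last run to the
+ * software demux, using the 204-byte packet filter when the board config
+ * sets ts_size and the plain 188-byte TS filter otherwise.
+ */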
+void mantis_dma_xfer(unsigned long data)
+{
+ struct mantis_pci *mantis = (struct mantis_pci *) data;
+ struct mantis_hwconfig *config = mantis->hwconfig;
+
+ while (mantis->last_block != mantis->finished_block) {
+ dprintk(MANTIS_DEBUG, 1, "last block=[%d] finished block=[%d]",
+ mantis->last_block, mantis->finished_block);
+
+ (config->ts_size ? dvb_dmx_swfilter_204 : dvb_dmx_swfilter)
+ (&mantis->demux, &mantis->buf_cpu[mantis->last_block * MANTIS_BLOCK_BYTES], MANTIS_BLOCK_BYTES);
+ mantis->last_block = (mantis->last_block + 1) % MANTIS_BLOCK_COUNT;
+ }
+}
diff --git a/drivers/media/dvb/mantis/mantis_dma.h b/drivers/media/dvb/mantis/mantis_dma.h
new file mode 100644
index 0000000..6be00fa
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dma.h
@@ -0,0 +1,30 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_DMA_H
+#define __MANTIS_DMA_H
+
+extern int mantis_dma_init(struct mantis_pci *mantis);
+extern int mantis_dma_exit(struct mantis_pci *mantis);
+extern void mantis_dma_start(struct mantis_pci *mantis);
+extern void mantis_dma_stop(struct mantis_pci *mantis);
+extern void mantis_dma_xfer(unsigned long data);
+
+#endif /* __MANTIS_DMA_H */
diff --git a/drivers/media/dvb/mantis/mantis_dvb.c b/drivers/media/dvb/mantis/mantis_dvb.c
new file mode 100644
index 0000000..99d82ee
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dvb.c
@@ -0,0 +1,296 @@
+/*
+ Mantis PCI bridge driver
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_dma.h"
+#include "mantis_ca.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+int mantis_frontend_power(struct mantis_pci *mantis, enum mantis_power power)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+
+ switch (power) {
+ case POWER_ON:
+ dprintk(MANTIS_DEBUG, 1, "Power ON");
+ gpio_set_bits(mantis, config->power, POWER_ON);
+ msleep(100);
+ gpio_set_bits(mantis, config->power, POWER_ON);
+ msleep(100);
+ break;
+
+ case POWER_OFF:
+ dprintk(MANTIS_DEBUG, 1, "Power OFF");
+ gpio_set_bits(mantis, config->power, POWER_OFF);
+ msleep(100);
+ break;
+
+ default:
+ dprintk(MANTIS_DEBUG, 1, "Unknown state <%02x>", power);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_frontend_power);
+
+void mantis_frontend_soft_reset(struct mantis_pci *mantis)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+
+ dprintk(MANTIS_DEBUG, 1, "Frontend RESET");
+ gpio_set_bits(mantis, config->reset, 0);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 0);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 1);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 1);
+ msleep(100);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(mantis_frontend_soft_reset);
+
+static int mantis_frontend_shutdown(struct mantis_pci *mantis)
+{
+ int err;
+
+ mantis_frontend_soft_reset(mantis);
+ err = mantis_frontend_power(mantis, POWER_OFF);
+ if (err != 0) {
+ dprintk(MANTIS_ERROR, 1, "Frontend POWER OFF failed! <%d>", err);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mantis_dvb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct mantis_pci *mantis = dvbdmx->priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis DVB Start feed");
+ if (!dvbdmx->dmx.frontend) {
+ dprintk(MANTIS_DEBUG, 1, "no frontend ?");
+ return -EINVAL;
+ }
+
+ mantis->feeds++;
+ dprintk(MANTIS_DEBUG, 1, "mantis start feed, feeds=%d", mantis->feeds);
+
+ if (mantis->feeds == 1) {
+ dprintk(MANTIS_DEBUG, 1, "mantis start feed & dma");
+ mantis_dma_start(mantis);
+ }
+
+ return mantis->feeds;
+}
+
+static int mantis_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct mantis_pci *mantis = dvbdmx->priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis DVB Stop feed");
+ if (!dvbdmx->dmx.frontend) {
+ dprintk(MANTIS_DEBUG, 1, "no frontend ?");
+ return -EINVAL;
+ }
+
+ mantis->feeds--;
+ if (mantis->feeds == 0) {
+ dprintk(MANTIS_DEBUG, 1, "mantis stop feed and dma");
+ mantis_dma_stop(mantis);
+ }
+
+ return 0;
+}
+
+int __devinit mantis_dvb_init(struct mantis_pci *mantis)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ int result = -1;
+
+ dprintk(MANTIS_DEBUG, 1, "dvb_register_adapter");
+
+ result = dvb_register_adapter(&mantis->dvb_adapter,
+ "Mantis DVB adapter",
+ THIS_MODULE,
+ &mantis->pdev->dev,
+ adapter_nr);
+
+ if (result < 0) {
+
+ dprintk(MANTIS_ERROR, 1, "Error registering adapter");
+ return -ENODEV;
+ }
+
+ mantis->dvb_adapter.priv = mantis;
+ mantis->demux.dmx.capabilities = DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING;
+
+ mantis->demux.priv = mantis;
+ mantis->demux.filternum = 256;
+ mantis->demux.feednum = 256;
+ mantis->demux.start_feed = mantis_dvb_start_feed;
+ mantis->demux.stop_feed = mantis_dvb_stop_feed;
+ mantis->demux.write_to_decoder = NULL;
+
+ dprintk(MANTIS_DEBUG, 1, "dvb_dmx_init");
+ result = dvb_dmx_init(&mantis->demux);
+ if (result < 0) {
+ dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
+
+ goto err0;
+ }
+
+ mantis->dmxdev.filternum = 256;
+ mantis->dmxdev.demux = &mantis->demux.dmx;
+ mantis->dmxdev.capabilities = 0;
+ dprintk(MANTIS_DEBUG, 1, "dvb_dmxdev_init");
+
+ result = dvb_dmxdev_init(&mantis->dmxdev, &mantis->dvb_adapter);
+ if (result < 0) {
+
+ dprintk(MANTIS_ERROR, 1, "dvb_dmxdev_init failed, ERROR=%d", result);
+ goto err1;
+ }
+
+ mantis->fe_hw.source = DMX_FRONTEND_0;
+ result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_hw);
+ if (result < 0) {
+
+ dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
+ goto err2;
+ }
+
+ mantis->fe_mem.source = DMX_MEMORY_FE;
+ result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_mem);
+ if (result < 0) {
+ dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
+ goto err3;
+ }
+
+ result = mantis->demux.dmx.connect_frontend(&mantis->demux.dmx, &mantis->fe_hw);
+ if (result < 0) {
+ dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
+ goto err4;
+ }
+
+ dvb_net_init(&mantis->dvb_adapter, &mantis->dvbnet, &mantis->demux.dmx);
+ tasklet_init(&mantis->tasklet, mantis_dma_xfer, (unsigned long) mantis);
+ if (mantis->hwconfig) {
+ result = config->frontend_init(mantis, mantis->fe);
+ if (result < 0) {
+ dprintk(MANTIS_ERROR, 1, "!!! NO Frontends found !!!");
+ goto err5;
+ } else {
+ if (mantis->fe == NULL) {
+ dprintk(MANTIS_ERROR, 1, "FE <NULL>");
+ goto err5;
+ }
+
+ if (dvb_register_frontend(&mantis->dvb_adapter, mantis->fe)) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Frontend registration failed");
+
+ if (mantis->fe->ops.release)
+ mantis->fe->ops.release(mantis->fe);
+
+ mantis->fe = NULL;
+ goto err5;
+ }
+ }
+ }
+
+ return 0;
+
+ /* Error conditions .. */
+err5:
+ tasklet_kill(&mantis->tasklet);
+ dvb_net_release(&mantis->dvbnet);
+ dvb_unregister_frontend(mantis->fe);
+ dvb_frontend_detach(mantis->fe);
+err4:
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
+
+err3:
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_hw);
+
+err2:
+ dvb_dmxdev_release(&mantis->dmxdev);
+
+err1:
+ dvb_dmx_release(&mantis->demux);
+
+err0:
+ dvb_unregister_adapter(&mantis->dvb_adapter);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(mantis_dvb_init);
+
+int __devexit mantis_dvb_exit(struct mantis_pci *mantis)
+{
+ int err;
+
+ if (mantis->fe) {
+ /* mantis_ca_exit(mantis); */
+ err = mantis_frontend_shutdown(mantis);
+ if (err != 0)
+ dprintk(MANTIS_ERROR, 1, "Frontend exit while POWER ON! <%d>", err);
+ dvb_unregister_frontend(mantis->fe);
+ dvb_frontend_detach(mantis->fe);
+ }
+
+ tasklet_kill(&mantis->tasklet);
+ dvb_net_release(&mantis->dvbnet);
+
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_hw);
+
+ dvb_dmxdev_release(&mantis->dmxdev);
+ dvb_dmx_release(&mantis->demux);
+
+ dprintk(MANTIS_DEBUG, 1, "dvb_unregister_adapter");
+ dvb_unregister_adapter(&mantis->dvb_adapter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_dvb_exit);
diff --git a/drivers/media/dvb/mantis/mantis_dvb.h b/drivers/media/dvb/mantis/mantis_dvb.h
new file mode 100644
index 0000000..464199d
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dvb.h
@@ -0,0 +1,35 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_DVB_H
+#define __MANTIS_DVB_H
+
+enum mantis_power {
+ POWER_OFF = 0,
+ POWER_ON = 1
+};
+
+extern int mantis_frontend_power(struct mantis_pci *mantis, enum mantis_power power);
+extern void mantis_frontend_soft_reset(struct mantis_pci *mantis);
+
+extern int mantis_dvb_init(struct mantis_pci *mantis);
+extern int mantis_dvb_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_DVB_H */
diff --git a/drivers/media/dvb/mantis/mantis_evm.c b/drivers/media/dvb/mantis/mantis_evm.c
new file mode 100644
index 0000000..a7b369a
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_evm.c
@@ -0,0 +1,117 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_link.h"
+#include "mantis_hif.h"
+#include "mantis_reg.h"
+
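+/*
+ * Deferred GPIF event handler: decode the latched GPIF status and forward
+ * CAM insert/remove notifications and smart-buffer events to the CA layer.
+ */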
+static void mantis_hifevm_work(struct work_struct *work)
+{
+ struct mantis_ca *ca = container_of(work, struct mantis_ca, hif_evm_work);
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ u32 gpif_stat, gpif_mask;
+
+ gpif_stat = mmread(MANTIS_GPIF_STATUS);
+ gpif_mask = mmread(MANTIS_GPIF_IRQCFG);
+
+ if (gpif_stat & MANTIS_GPIF_DETSTAT) {
+ if (gpif_stat & MANTIS_CARD_PLUGIN) {
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): CAM Plugin", mantis->num);
+ mmwrite(0xdada0000, MANTIS_CARD_RESET);
+ mantis_event_cam_plugin(ca);
+ dvb_ca_en50221_camchange_irq(&ca->en50221,
+ 0,
+ DVB_CA_EN50221_CAMCHANGE_INSERTED);
+ }
+ } else {
+ if (gpif_stat & MANTIS_CARD_PLUGOUT) {
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): CAM Unplug", mantis->num);
+ mmwrite(0xdada0000, MANTIS_CARD_RESET);
+ mantis_event_cam_unplug(ca);
+ dvb_ca_en50221_camchange_irq(&ca->en50221,
+ 0,
+ DVB_CA_EN50221_CAMCHANGE_REMOVED);
+ }
+ }
+
+ if (mantis->gpif_status & MANTIS_GPIF_EXTIRQ)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Ext IRQ", mantis->num);
+
+ if (mantis->gpif_status & MANTIS_SBUF_WSTO)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Timeout", mantis->num);
+
+ if (mantis->gpif_status & MANTIS_GPIF_OTHERR)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Alignment Error", mantis->num);
+
+ if (gpif_stat & MANTIS_SBUF_OVFLW)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Overflow", mantis->num);
+
+ if (gpif_stat & MANTIS_GPIF_BRRDY)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Read Ready", mantis->num);
+
+ if (gpif_stat & MANTIS_GPIF_INTSTAT)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): GPIF IRQ", mantis->num);
+
+ if (gpif_stat & MANTIS_SBUF_EMPTY)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Empty", mantis->num);
+
+ if (gpif_stat & MANTIS_SBUF_OPDONE) {
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer operation complete", mantis->num);
+ ca->sbuf_status = MANTIS_SBUF_DATA_AVAIL;
+ ca->hif_event = MANTIS_SBUF_OPDONE;
+ wake_up(&ca->hif_opdone_wq);
+ }
+}
+
+int mantis_evmgr_init(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Initializing Mantis Host I/F Event manager");
+ INIT_WORK(&ca->hif_evm_work, mantis_hifevm_work);
+ mantis_pcmcia_init(ca);
+ schedule_work(&ca->hif_evm_work);
+ mantis_hif_init(ca);
+ return 0;
+}
+
+void mantis_evmgr_exit(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
+ flush_scheduled_work();
+ mantis_hif_exit(ca);
+ mantis_pcmcia_exit(ca);
+}
diff --git a/drivers/media/dvb/mantis/mantis_hif.c b/drivers/media/dvb/mantis/mantis_hif.c
new file mode 100644
index 0000000..7477dac
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_hif.c
@@ -0,0 +1,240 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+
+#include "mantis_hif.h"
+#include "mantis_link.h" /* temporary due to physical layer stuff */
+
+#include "mantis_reg.h"
+
+
+static int mantis_hif_sbuf_opdone_wait(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ int rc = 0;
+
+ /* wait_event_timeout() returns 0 on timeout, never -ERESTARTSYS */
+ if (wait_event_timeout(ca->hif_opdone_wq,
+ ca->hif_event & MANTIS_SBUF_OPDONE,
+ msecs_to_jiffies(500)) == 0) {
+
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Smart buffer operation timeout !", mantis->num);
+ rc = -EREMOTEIO;
+ }
+ dprintk(MANTIS_DEBUG, 1, "Smart Buffer Operation complete");
+ ca->hif_event &= ~MANTIS_SBUF_OPDONE;
+ return rc;
+}
+
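+/* Wait for the write acknowledge from the GPIF, then poll the status
+   register for smart-buffer OPDONE (100 polls of 500 us, roughly 50 ms). */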
+static int mantis_hif_write_wait(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 opdone = 0, timeout = 0;
+ int rc = 0;
+
+ /* wait_event_timeout() returns 0 on timeout, never -ERESTARTSYS */
+ if (wait_event_timeout(ca->hif_write_wq,
+ mantis->gpif_status & MANTIS_GPIF_WRACK,
+ msecs_to_jiffies(500)) == 0) {
+
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Write ACK timed out !", mantis->num);
+ rc = -EREMOTEIO;
+ }
+ dprintk(MANTIS_DEBUG, 1, "Write Acknowledged");
+ mantis->gpif_status &= ~MANTIS_GPIF_WRACK;
+ while (!opdone) {
+ opdone = (mmread(MANTIS_GPIF_STATUS) & MANTIS_SBUF_OPDONE);
+ udelay(500);
+ timeout++;
+ if (timeout > 100) {
+ dprintk(MANTIS_ERROR, 1, "Adater(%d) Slot(0): Write operation timed out!", mantis->num);
+ rc = -ETIMEDOUT;
+ break;
+ }
+ }
+ dprintk(MANTIS_DEBUG, 1, "HIF Write success");
+ return rc;
+}
+
+
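+/* Read one byte from the CAM's memory space: program the smart-buffer
+   address and length, start a read cycle and wait for completion. */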
+int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 hif_addr = 0, data, count = 4;
+
+ dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF Mem Read", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
+ hif_addr &= ~MANTIS_GPIF_PCMCIAIOM;
+ hif_addr |= MANTIS_HIF_STATUS;
+ hif_addr |= addr;
+
+ mmwrite(hif_addr, MANTIS_GPIF_BRADDR);
+ mmwrite(count, MANTIS_GPIF_BRBYTES);
+ udelay(20);
+ mmwrite(hif_addr | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
+
+ if (mantis_hif_sbuf_opdone_wait(ca) != 0) {
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): GPIF Smart Buffer operation failed", mantis->num);
+ mutex_unlock(&ca->ca_lock);
+ return -EREMOTEIO;
+ }
+ data = mmread(MANTIS_GPIF_DIN);
+ mutex_unlock(&ca->ca_lock);
+ dprintk(MANTIS_DEBUG, 1, "Mem Read: 0x%02x", data);
+ return (data >> 24) & 0xff;
+}
+
+int mantis_hif_write_mem(struct mantis_ca *ca, u32 addr, u8 data)
+{
+ struct mantis_slot *slot = ca->slot;
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 hif_addr = 0;
+
+ dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF Mem Write", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ hif_addr &= ~MANTIS_GPIF_HIFRDWRN;
+ hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
+ hif_addr &= ~MANTIS_GPIF_PCMCIAIOM;
+ hif_addr |= MANTIS_HIF_STATUS;
+ hif_addr |= addr;
+
+ mmwrite(slot->slave_cfg, MANTIS_GPIF_CFGSLA); /* Slot0 alone for now */
+ mmwrite(hif_addr, MANTIS_GPIF_ADDR);
+ mmwrite(data, MANTIS_GPIF_DOUT);
+
+ if (mantis_hif_write_wait(ca) != 0) {
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num);
+ mutex_unlock(&ca->ca_lock);
+ return -EREMOTEIO;
+ }
+ dprintk(MANTIS_DEBUG, 1, "Mem Write: (0x%02x to 0x%02x)", data, addr);
+ mutex_unlock(&ca->ca_lock);
+
+ return 0;
+}
+
+int mantis_hif_read_iom(struct mantis_ca *ca, u32 addr)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 data, hif_addr = 0;
+
+ dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF I/O Read", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
+ hif_addr |= MANTIS_GPIF_PCMCIAIOM;
+ hif_addr |= MANTIS_HIF_STATUS;
+ hif_addr |= addr;
+
+ mmwrite(hif_addr, MANTIS_GPIF_BRADDR);
+ mmwrite(1, MANTIS_GPIF_BRBYTES);
+ udelay(20);
+ mmwrite(hif_addr | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
+
+ if (mantis_hif_sbuf_opdone_wait(ca) != 0) {
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num);
+ mutex_unlock(&ca->ca_lock);
+ return -EREMOTEIO;
+ }
+ data = mmread(MANTIS_GPIF_DIN);
+ dprintk(MANTIS_DEBUG, 1, "I/O Read: 0x%02x", data);
+ udelay(50);
+ mutex_unlock(&ca->ca_lock);
+
+ return (u8) data;
+}
+
+int mantis_hif_write_iom(struct mantis_ca *ca, u32 addr, u8 data)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 hif_addr = 0;
+
+ dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF I/O Write", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
+ hif_addr &= ~MANTIS_GPIF_HIFRDWRN;
+ hif_addr |= MANTIS_GPIF_PCMCIAIOM;
+ hif_addr |= MANTIS_HIF_STATUS;
+ hif_addr |= addr;
+
+ mmwrite(hif_addr, MANTIS_GPIF_ADDR);
+ mmwrite(data, MANTIS_GPIF_DOUT);
+
+ if (mantis_hif_write_wait(ca) != 0) {
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num);
+ mutex_unlock(&ca->ca_lock);
+ return -EREMOTEIO;
+ }
+ dprintk(MANTIS_DEBUG, 1, "I/O Write: (0x%02x to 0x%02x)", data, addr);
+ mutex_unlock(&ca->ca_lock);
+ udelay(50);
+
+ return 0;
+}
+
+int mantis_hif_init(struct mantis_ca *ca)
+{
+ struct mantis_slot *slot = ca->slot;
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 irqcfg;
+
+ slot[0].slave_cfg = 0x70773028;
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Initializing Mantis Host Interface", mantis->num);
+
+ mutex_lock(&ca->ca_lock);
+ irqcfg = mmread(MANTIS_GPIF_IRQCFG);
+ irqcfg = MANTIS_MASK_BRRDY |
+ MANTIS_MASK_WRACK |
+ MANTIS_MASK_EXTIRQ |
+ MANTIS_MASK_WSTO |
+ MANTIS_MASK_OTHERR |
+ MANTIS_MASK_OVFLW;
+
+ mmwrite(irqcfg, MANTIS_GPIF_IRQCFG);
+ mutex_unlock(&ca->ca_lock);
+
+ return 0;
+}
+
+void mantis_hif_exit(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 irqcfg;
+
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Exiting Mantis Host Interface", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ irqcfg = mmread(MANTIS_GPIF_IRQCFG);
+ irqcfg &= ~MANTIS_MASK_BRRDY;
+ mmwrite(irqcfg, MANTIS_GPIF_IRQCFG);
+ mutex_unlock(&ca->ca_lock);
+}
diff --git a/drivers/media/dvb/mantis/mantis_hif.h b/drivers/media/dvb/mantis/mantis_hif.h
new file mode 100644
index 0000000..9094f9e
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_hif.h
@@ -0,0 +1,29 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_HIF_H
+#define __MANTIS_HIF_H
+
+#define MANTIS_HIF_MEMRD 1
+#define MANTIS_HIF_MEMWR 2
+#define MANTIS_HIF_IOMRD 3
+#define MANTIS_HIF_IOMWR 4
+
+#endif /* __MANTIS_HIF_H */
diff --git a/drivers/media/dvb/mantis/mantis_i2c.c b/drivers/media/dvb/mantis/mantis_i2c.c
new file mode 100644
index 0000000..7870bcf
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_i2c.c
@@ -0,0 +1,267 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_i2c.h"
+
+#define TRIALS 10000
+
+static int mantis_i2c_read(struct mantis_pci *mantis, const struct i2c_msg *msg)
+{
+ u32 rxd, i, stat, trials;
+
+ dprintk(MANTIS_INFO, 0, " %s: Address=[0x%02x] <R>[ ",
+ __func__, msg->addr);
+
+ for (i = 0; i < msg->len; i++) {
+ rxd = (msg->addr << 25) | (1 << 24)
+ | MANTIS_I2C_RATE_3
+ | MANTIS_I2C_STOP
+ | MANTIS_I2C_PGMODE;
+
+ if (i == (msg->len - 1))
+ rxd &= ~MANTIS_I2C_STOP;
+
+ mmwrite(MANTIS_INT_I2CDONE, MANTIS_INT_STAT);
+ mmwrite(rxd, MANTIS_I2CDATA_CTL);
+
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CDONE)
+ break;
+ }
+
+ dprintk(MANTIS_TMG, 0, "I2CDONE: trials=%d\n", trials);
+
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CRACK)
+ break;
+ }
+
+ dprintk(MANTIS_TMG, 0, "I2CRACK: trials=%d\n", trials);
+
+ rxd = mmread(MANTIS_I2CDATA_CTL);
+ msg->buf[i] = (u8)((rxd >> 8) & 0xFF);
+ dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]);
+ }
+ dprintk(MANTIS_INFO, 0, "]\n");
+
+ return 0;
+}
+
+static int mantis_i2c_write(struct mantis_pci *mantis, const struct i2c_msg *msg)
+{
+ int i;
+ u32 txd = 0, stat, trials;
+
+ dprintk(MANTIS_INFO, 0, " %s: Address=[0x%02x] <W>[ ",
+ __func__, msg->addr);
+
+ for (i = 0; i < msg->len; i++) {
+ dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]);
+ txd = (msg->addr << 25) | (msg->buf[i] << 8)
+ | MANTIS_I2C_RATE_3
+ | MANTIS_I2C_STOP
+ | MANTIS_I2C_PGMODE;
+
+ if (i == (msg->len - 1))
+ txd &= ~MANTIS_I2C_STOP;
+
+ mmwrite(MANTIS_INT_I2CDONE, MANTIS_INT_STAT);
+ mmwrite(txd, MANTIS_I2CDATA_CTL);
+
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CDONE)
+ break;
+ }
+
+ dprintk(MANTIS_TMG, 0, "I2CDONE: trials=%d\n", trials);
+
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CRACK)
+ break;
+ }
+
+ dprintk(MANTIS_TMG, 0, "I2CRACK: trials=%d\n", trials);
+ }
+ dprintk(MANTIS_INFO, 0, "]\n");
+
+ return 0;
+}
+
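+/*
+ * master_xfer: when the board config allows byte mode, a one-byte write
+ * immediately followed by a one-byte read is folded into a single bus
+ * transaction; everything else goes through the read/write helpers above.
+ */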
+static int mantis_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+{
+ int ret = 0, i = 0, trials;
+ u32 stat, data, txd;
+ struct mantis_pci *mantis;
+ struct mantis_hwconfig *config;
+
+ mantis = i2c_get_adapdata(adapter);
+ BUG_ON(!mantis);
+ config = mantis->hwconfig;
+ BUG_ON(!config);
+
+ dprintk(MANTIS_DEBUG, 1, "Messages:%d", num);
+ mutex_lock(&mantis->i2c_lock);
+
+ while (i < num) {
+ /* Byte MODE */
+ if ((config->i2c_mode & MANTIS_BYTE_MODE) &&
+ ((i + 1) < num) &&
+ (msgs[i].len < 2) &&
+ (msgs[i + 1].len < 2) &&
+ (msgs[i + 1].flags & I2C_M_RD)) {
+
+ dprintk(MANTIS_DEBUG, 0, " Byte MODE:\n");
+
+ /* Read operation */
+ txd = msgs[i].addr << 25 | (0x1 << 24)
+ | (msgs[i].buf[0] << 16)
+ | MANTIS_I2C_RATE_3;
+
+ mmwrite(txd, MANTIS_I2CDATA_CTL);
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CDONE)
+ break;
+ }
+
+ /* check for xfer completion */
+ if (stat & MANTIS_INT_I2CDONE) {
+ /* check xfer was acknowledged */
+ if (stat & MANTIS_INT_I2CRACK) {
+ data = mmread(MANTIS_I2CDATA_CTL);
+ msgs[i + 1].buf[0] = (data >> 8) & 0xff;
+ dprintk(MANTIS_DEBUG, 0, " Byte <%d> RXD=0x%02x [%02x]\n", 0x0, data, msgs[i + 1].buf[0]);
+ } else {
+ /* I/O error */
+ dprintk(MANTIS_ERROR, 1, " I/O error, LINE:%d", __LINE__);
+ ret = -EIO;
+ break;
+ }
+ } else {
+ /* I/O error */
+ dprintk(MANTIS_ERROR, 1, " I/O error, LINE:%d", __LINE__);
+ ret = -EIO;
+ break;
+ }
+ i += 2; /* Write/Read operation in one go */
+ }
+
+ if (i < num) {
+ if (msgs[i].flags & I2C_M_RD)
+ ret = mantis_i2c_read(mantis, &msgs[i]);
+ else
+ ret = mantis_i2c_write(mantis, &msgs[i]);
+
+ i++;
+ if (ret < 0)
+ goto bail_out;
+ }
+
+ }
+
+ mutex_unlock(&mantis->i2c_lock);
+
+ return num;
+
+bail_out:
+ mutex_unlock(&mantis->i2c_lock);
+ return ret;
+}
+
+static u32 mantis_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm mantis_algo = {
+ .master_xfer = mantis_i2c_xfer,
+ .functionality = mantis_i2c_func,
+};
+
+int __devinit mantis_i2c_init(struct mantis_pci *mantis)
+{
+ u32 intstat, intmask;
+ struct i2c_adapter *i2c_adapter = &mantis->adapter;
+ struct pci_dev *pdev = mantis->pdev;
+
+ init_waitqueue_head(&mantis->i2c_wq);
+ mutex_init(&mantis->i2c_lock);
+ strncpy(i2c_adapter->name, "Mantis I2C", sizeof(i2c_adapter->name));
+ i2c_set_adapdata(i2c_adapter, mantis);
+
+ i2c_adapter->owner = THIS_MODULE;
+ i2c_adapter->class = I2C_CLASS_TV_DIGITAL;
+ i2c_adapter->algo = &mantis_algo;
+ i2c_adapter->algo_data = NULL;
+ i2c_adapter->timeout = 500;
+ i2c_adapter->retries = 3;
+ i2c_adapter->dev.parent = &pdev->dev;
+
+ mantis->i2c_rc = i2c_add_adapter(i2c_adapter);
+ if (mantis->i2c_rc < 0)
+ return mantis->i2c_rc;
+
+ dprintk(MANTIS_DEBUG, 1, "Initializing I2C ..");
+
+ intstat = mmread(MANTIS_INT_STAT);
+ intmask = mmread(MANTIS_INT_MASK);
+ mmwrite(intstat, MANTIS_INT_STAT);
+ dprintk(MANTIS_DEBUG, 1, "Disabling I2C interrupt");
+ intmask = mmread(MANTIS_INT_MASK);
+ mmwrite((intmask & ~MANTIS_INT_I2CDONE), MANTIS_INT_MASK);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_i2c_init);
+
+int mantis_i2c_exit(struct mantis_pci *mantis)
+{
+ u32 intmask;
+
+ dprintk(MANTIS_DEBUG, 1, "Disabling I2C interrupt");
+ intmask = mmread(MANTIS_INT_MASK);
+ mmwrite((intmask & ~MANTIS_INT_I2CDONE), MANTIS_INT_MASK);
+
+ dprintk(MANTIS_DEBUG, 1, "Removing I2C adapter");
+ return i2c_del_adapter(&mantis->adapter);
+}
+EXPORT_SYMBOL_GPL(mantis_i2c_exit);
diff --git a/drivers/media/dvb/mantis/mantis_i2c.h b/drivers/media/dvb/mantis/mantis_i2c.h
new file mode 100644
index 0000000..1342df2
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_i2c.h
@@ -0,0 +1,30 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_I2C_H
+#define __MANTIS_I2C_H
+
+#define I2C_STOP (1 << 0)
+#define I2C_READ (1 << 1)
+
+extern int mantis_i2c_init(struct mantis_pci *mantis);
+extern int mantis_i2c_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_I2C_H */
diff --git a/drivers/media/dvb/mantis/mantis_input.c b/drivers/media/dvb/mantis/mantis_input.c
new file mode 100644
index 0000000..6a9df77
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_input.c
@@ -0,0 +1,148 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/input.h>
+#include <media/ir-common.h>
+#include <linux/pci.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_uart.h"
+
+static struct ir_scancode mantis_ir_table[] = {
+ { 0x29, KEY_POWER },
+ { 0x28, KEY_FAVORITES },
+ { 0x30, KEY_TEXT },
+ { 0x17, KEY_INFO }, /* Preview */
+ { 0x23, KEY_EPG },
+ { 0x3b, KEY_F22 }, /* Record List */
+ { 0x3c, KEY_1 },
+ { 0x3e, KEY_2 },
+ { 0x39, KEY_3 },
+ { 0x36, KEY_4 },
+ { 0x22, KEY_5 },
+ { 0x20, KEY_6 },
+ { 0x32, KEY_7 },
+ { 0x26, KEY_8 },
+ { 0x24, KEY_9 },
+ { 0x2a, KEY_0 },
+
+ { 0x33, KEY_CANCEL },
+ { 0x2c, KEY_BACK },
+ { 0x15, KEY_CLEAR },
+ { 0x3f, KEY_TAB },
+ { 0x10, KEY_ENTER },
+ { 0x14, KEY_UP },
+ { 0x0d, KEY_RIGHT },
+ { 0x0e, KEY_DOWN },
+ { 0x11, KEY_LEFT },
+
+ { 0x21, KEY_VOLUMEUP },
+ { 0x35, KEY_VOLUMEDOWN },
+ { 0x3d, KEY_CHANNELDOWN },
+ { 0x3a, KEY_CHANNELUP },
+ { 0x2e, KEY_RECORD },
+ { 0x2b, KEY_PLAY },
+ { 0x13, KEY_PAUSE },
+ { 0x25, KEY_STOP },
+
+ { 0x1f, KEY_REWIND },
+ { 0x2d, KEY_FASTFORWARD },
+ { 0x1e, KEY_PREVIOUS }, /* Replay |< */
+ { 0x1d, KEY_NEXT }, /* Skip >| */
+
+ { 0x0b, KEY_CAMERA }, /* Capture */
+ { 0x0f, KEY_LANGUAGE }, /* SAP */
+ { 0x18, KEY_MODE }, /* PIP */
+ { 0x12, KEY_ZOOM }, /* Full screen */
+ { 0x1c, KEY_SUBTITLE },
+ { 0x2f, KEY_MUTE },
+ { 0x16, KEY_F20 }, /* L/R */
+ { 0x38, KEY_F21 }, /* Hibernate */
+
+ { 0x37, KEY_SWITCHVIDEOMODE }, /* A/V */
+ { 0x31, KEY_AGAIN }, /* Recall */
+ { 0x1a, KEY_KPPLUS }, /* Zoom+ */
+ { 0x19, KEY_KPMINUS }, /* Zoom- */
+ { 0x27, KEY_RED },
+ { 0x0C, KEY_GREEN },
+ { 0x01, KEY_YELLOW },
+ { 0x00, KEY_BLUE },
+};
+
+struct ir_scancode_table ir_mantis = {
+ .scan = mantis_ir_table,
+ .size = ARRAY_SIZE(mantis_ir_table),
+};
+EXPORT_SYMBOL_GPL(ir_mantis);
+
+int mantis_input_init(struct mantis_pci *mantis)
+{
+ struct input_dev *rc;
+ struct ir_input_state rc_state;
+ char name[80], dev[80];
+ int err;
+
+ rc = input_allocate_device();
+ if (!rc) {
+ dprintk(MANTIS_ERROR, 1, "Input device allocate failed");
+ return -ENOMEM;
+ }
+
+ sprintf(name, "Mantis %s IR receiver", mantis->hwconfig->model_name);
+ sprintf(dev, "pci-%s/ir0", pci_name(mantis->pdev));
+
+ rc->name = name;
+ rc->phys = dev;
+
+ ir_input_init(rc, &rc_state, IR_TYPE_OTHER);
+
+ rc->id.bustype = BUS_PCI;
+ rc->id.vendor = mantis->vendor_id;
+ rc->id.product = mantis->device_id;
+ rc->id.version = 1;
+ rc->dev = mantis->pdev->dev;
+
+ err = ir_input_register(rc, &ir_mantis);
+ if (err) {
+ dprintk(MANTIS_ERROR, 1, "IR device registration failed, ret = %d", err);
+ input_free_device(rc);
+ return -ENODEV;
+ }
+
+ mantis->rc = rc;
+
+ return 0;
+}
+
+int mantis_exit(struct mantis_pci *mantis)
+{
+ struct input_dev *rc = mantis->rc;
+
+ ir_input_unregister(rc);
+
+ return 0;
+}
diff --git a/drivers/media/dvb/mantis/mantis_ioc.c b/drivers/media/dvb/mantis/mantis_ioc.c
new file mode 100644
index 0000000..de148de
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ioc.c
@@ -0,0 +1,130 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_ioc.h"
+
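+/* Read 'length' bytes from the configuration EEPROM at I2C address 0x50:
+   write the register offset, then read the data back. */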
+static int read_eeprom_bytes(struct mantis_pci *mantis, u8 reg, u8 *data, u8 length)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+ int err;
+ u8 buf = reg;
+
+ struct i2c_msg msg[] = {
+ { .addr = 0x50, .flags = 0, .buf = &buf, .len = 1 },
+ { .addr = 0x50, .flags = I2C_M_RD, .buf = data, .len = length },
+ };
+
+ err = i2c_transfer(adapter, msg, 2);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >",
+ err, data[0], data[1]);
+
+ return err;
+ }
+
+ return 0;
+}
+int mantis_get_mac(struct mantis_pci *mantis)
+{
+ int err;
+ u8 mac_addr[6] = {0};
+
+ err = read_eeprom_bytes(mantis, 0x08, mac_addr, 6);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis EEPROM read error <%d>", err);
+
+ return err;
+ }
+
+ dprintk(MANTIS_ERROR, 0,
+ " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+ mac_addr[0],
+ mac_addr[1],
+ mac_addr[2],
+ mac_addr[3],
+ mac_addr[4],
+ mac_addr[5]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_get_mac);
+
+/* Turn the given bit on or off. */
+void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value)
+{
+ u32 cur;
+
+ dprintk(MANTIS_DEBUG, 1, "Set Bit <%d> to <%d>", bitpos, value);
+ cur = mmread(MANTIS_GPIF_ADDR);
+ if (value)
+ mantis->gpio_status = cur | (1 << bitpos);
+ else
+ mantis->gpio_status = cur & (~(1 << bitpos));
+
+ dprintk(MANTIS_DEBUG, 1, "GPIO Value <%02x>", mantis->gpio_status);
+ mmwrite(mantis->gpio_status, MANTIS_GPIF_ADDR);
+ mmwrite(0x00, MANTIS_GPIF_DOUT);
+}
+EXPORT_SYMBOL_GPL(gpio_set_bits);
+
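+/* Route the transport stream either straight to the host interface or
+   through the CAM by toggling the BYPASS bit in the control register. */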
+int mantis_stream_control(struct mantis_pci *mantis, enum mantis_stream_control stream_ctl)
+{
+ u32 reg;
+
+ reg = mmread(MANTIS_CONTROL);
+ switch (stream_ctl) {
+ case STREAM_TO_HIF:
+ dprintk(MANTIS_DEBUG, 1, "Set stream to HIF");
+ reg &= 0xff - MANTIS_BYPASS;
+ mmwrite(reg, MANTIS_CONTROL);
+ reg |= MANTIS_BYPASS;
+ mmwrite(reg, MANTIS_CONTROL);
+ break;
+
+ case STREAM_TO_CAM:
+ dprintk(MANTIS_DEBUG, 1, "Set stream to CAM");
+ reg |= MANTIS_BYPASS;
+ mmwrite(reg, MANTIS_CONTROL);
+ reg &= 0xff - MANTIS_BYPASS;
+ mmwrite(reg, MANTIS_CONTROL);
+ break;
+ default:
+ dprintk(MANTIS_ERROR, 1, "Unknown MODE <%02x>", stream_ctl);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_stream_control);
diff --git a/drivers/media/dvb/mantis/mantis_ioc.h b/drivers/media/dvb/mantis/mantis_ioc.h
new file mode 100644
index 0000000..188fe5a
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ioc.h
@@ -0,0 +1,51 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_IOC_H
+#define __MANTIS_IOC_H
+
+#define GPIF_A00 0x00
+#define GPIF_A01 0x01
+#define GPIF_A02 0x02
+#define GPIF_A03 0x03
+#define GPIF_A04 0x04
+#define GPIF_A05 0x05
+#define GPIF_A06 0x06
+#define GPIF_A07 0x07
+#define GPIF_A08 0x08
+#define GPIF_A09 0x09
+#define GPIF_A10 0x0a
+#define GPIF_A11 0x0b
+
+#define GPIF_A12 0x0c
+#define GPIF_A13 0x0d
+#define GPIF_A14 0x0e
+
+enum mantis_stream_control {
+ STREAM_TO_HIF = 0,
+ STREAM_TO_CAM
+};
+
+extern int mantis_get_mac(struct mantis_pci *mantis);
+extern void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value);
+
+extern int mantis_stream_control(struct mantis_pci *mantis, enum mantis_stream_control stream_ctl);
+
+#endif /* __MANTIS_IOC_H */
diff --git a/drivers/media/dvb/mantis/mantis_link.h b/drivers/media/dvb/mantis/mantis_link.h
new file mode 100644
index 0000000..2a81477
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_link.h
@@ -0,0 +1,83 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_LINK_H
+#define __MANTIS_LINK_H
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include "dvb_ca_en50221.h"
+
+enum mantis_sbuf_status {
+ MANTIS_SBUF_DATA_AVAIL = 1,
+ MANTIS_SBUF_DATA_EMPTY = 2,
+ MANTIS_SBUF_DATA_OVFLW = 3
+};
+
+struct mantis_slot {
+ u32 timeout;
+ u32 slave_cfg;
+ u32 bar;
+};
+
+/* Physical layer */
+enum mantis_slot_state {
+ MODULE_INSERTED = 3,
+ MODULE_XTRACTED = 4
+};
+
+struct mantis_ca {
+ struct mantis_slot slot[4];
+
+ struct work_struct hif_evm_work;
+
+ u32 hif_event;
+ wait_queue_head_t hif_opdone_wq;
+ wait_queue_head_t hif_brrdyw_wq;
+ wait_queue_head_t hif_data_wq;
+ wait_queue_head_t hif_write_wq; /* HIF Write op */
+
+ enum mantis_sbuf_status sbuf_status;
+
+ enum mantis_slot_state slot_state;
+
+ void *ca_priv;
+
+ struct dvb_ca_en50221 en50221;
+ struct mutex ca_lock;
+};
+
+/* CA */
+extern void mantis_event_cam_plugin(struct mantis_ca *ca);
+extern void mantis_event_cam_unplug(struct mantis_ca *ca);
+extern int mantis_pcmcia_init(struct mantis_ca *ca);
+extern void mantis_pcmcia_exit(struct mantis_ca *ca);
+extern int mantis_evmgr_init(struct mantis_ca *ca);
+extern void mantis_evmgr_exit(struct mantis_ca *ca);
+
+/* HIF */
+extern int mantis_hif_init(struct mantis_ca *ca);
+extern void mantis_hif_exit(struct mantis_ca *ca);
+extern int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr);
+extern int mantis_hif_write_mem(struct mantis_ca *ca, u32 addr, u8 data);
+extern int mantis_hif_read_iom(struct mantis_ca *ca, u32 addr);
+extern int mantis_hif_write_iom(struct mantis_ca *ca, u32 addr, u8 data);
+
+#endif /* __MANTIS_LINK_H */
diff --git a/drivers/media/dvb/mantis/mantis_pci.c b/drivers/media/dvb/mantis/mantis_pci.c
new file mode 100644
index 0000000..6c7534a
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_pci.c
@@ -0,0 +1,177 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include <asm/irq.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include <asm/irq.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_pci.h"
+
+#define DRIVER_NAME "Mantis Core"
+
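+/*
+ * Common PCI bring-up for all Mantis based cards: enable the device, set a
+ * 32-bit DMA mask, map BAR0 and install the board-specific IRQ handler.
+ */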
+int __devinit mantis_pci_init(struct mantis_pci *mantis)
+{
+ u8 revision, latency;
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ struct pci_dev *pdev = mantis->pdev;
+ int err, ret = 0;
+
+ dprintk(MANTIS_ERROR, 0, "found a %s PCI %s device on (%02x:%02x.%x),\n",
+ config->model_name,
+ config->dev_type,
+ mantis->pdev->bus->number,
+ PCI_SLOT(mantis->pdev->devfn),
+ PCI_FUNC(mantis->pdev->devfn));
+
+ err = pci_enable_device(pdev);
+ if (err != 0) {
+ ret = -ENODEV;
+ dprintk(MANTIS_ERROR, 1, "ERROR: PCI enable failed <%i>", err);
+ goto fail0;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err != 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Unable to obtain 32 bit DMA <%i>", err);
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ pci_set_master(pdev);
+
+ if (!request_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0),
+ DRIVER_NAME)) {
+
+ dprintk(MANTIS_ERROR, 1, "ERROR: BAR0 Request failed !");
+ ret = -ENODEV;
+ goto fail1;
+ }
+
+ mantis->mmio = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+
+ if (!mantis->mmio) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: BAR0 remap failed !");
+ ret = -ENODEV;
+ goto fail2;
+ }
+
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency);
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ mantis->latency = latency;
+ mantis->revision = revision;
+
+ dprintk(MANTIS_ERROR, 0, " Mantis Rev %d [%04x:%04x], ",
+ mantis->revision,
+ mantis->pdev->subsystem_vendor,
+ mantis->pdev->subsystem_device);
+
+ dprintk(MANTIS_ERROR, 0,
+ "irq: %d, latency: %d\n memory: 0x%lx, mmio: 0x%p\n",
+ mantis->pdev->irq,
+ mantis->latency,
+ mantis->mantis_addr,
+ mantis->mmio);
+
+ err = request_irq(pdev->irq,
+ config->irq_handler,
+ IRQF_SHARED,
+ DRIVER_NAME,
+ mantis);
+
+ if (err != 0) {
+
+ dprintk(MANTIS_ERROR, 1, "ERROR: IRQ registration failed ! <%d>", err);
+ ret = -ENODEV;
+ goto fail3;
+ }
+
+ pci_set_drvdata(pdev, mantis);
+ return ret;
+
+ /* Error conditions */
+fail3:
+ dprintk(MANTIS_ERROR, 1, "ERROR: <%d> I/O unmap", ret);
+ if (mantis->mmio)
+ iounmap(mantis->mmio);
+
+fail2:
+ dprintk(MANTIS_ERROR, 1, "ERROR: <%d> releasing regions", ret);
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+
+fail1:
+ dprintk(MANTIS_ERROR, 1, "ERROR: <%d> disabling device", ret);
+ pci_disable_device(pdev);
+
+fail0:
+ dprintk(MANTIS_ERROR, 1, "ERROR: <%d> exiting", ret);
+ pci_set_drvdata(pdev, NULL);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mantis_pci_init);
+
+void mantis_pci_exit(struct mantis_pci *mantis)
+{
+ struct pci_dev *pdev = mantis->pdev;
+
+ dprintk(MANTIS_NOTICE, 1, " mem: 0x%p", mantis->mmio);
+ free_irq(pdev->irq, mantis);
+ if (mantis->mmio) {
+ iounmap(mantis->mmio);
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ }
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+EXPORT_SYMBOL_GPL(mantis_pci_exit);
+
+MODULE_DESCRIPTION("Mantis PCI DTV bridge driver");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/mantis/mantis_pci.h b/drivers/media/dvb/mantis/mantis_pci.h
new file mode 100644
index 0000000..65f0045
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_pci.h
@@ -0,0 +1,27 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_PCI_H
+#define __MANTIS_PCI_H
+
+extern int mantis_pci_init(struct mantis_pci *mantis);
+extern void mantis_pci_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_PCI_H */
diff --git a/drivers/media/dvb/mantis/mantis_pcmcia.c b/drivers/media/dvb/mantis/mantis_pcmcia.c
new file mode 100644
index 0000000..5cb545b
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_pcmcia.c
@@ -0,0 +1,120 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_link.h" /* temporary due to physical layer stuff */
+#include "mantis_reg.h"
+
+/*
+ * If the slot is already marked as inserted and we see another
+ * plug-in event, it is only contact bounce (jitter) and is ignored.
+ */
+void mantis_event_cam_plugin(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ u32 gpif_irqcfg;
+
+ if (ca->slot_state == MODULE_XTRACTED) {
+ dprintk(MANTIS_DEBUG, 1, "Event: CAM Plugged IN: Adapter(%d) Slot(0)", mantis->num);
+ udelay(50);
+ mmwrite(0xda000000, MANTIS_CARD_RESET);
+ gpif_irqcfg = mmread(MANTIS_GPIF_IRQCFG);
+ gpif_irqcfg |= MANTIS_MASK_PLUGOUT;
+ gpif_irqcfg &= ~MANTIS_MASK_PLUGIN;
+ mmwrite(gpif_irqcfg, MANTIS_GPIF_IRQCFG);
+ udelay(500);
+ ca->slot_state = MODULE_INSERTED;
+ }
+ udelay(100);
+}
+
+/*
+ * If the slot is already marked as extracted and we see another
+ * unplug event, it is only contact bounce (jitter) and is ignored.
+ */
+void mantis_event_cam_unplug(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ u32 gpif_irqcfg;
+
+ if (ca->slot_state == MODULE_INSERTED) {
+ dprintk(MANTIS_DEBUG, 1, "Event: CAM Unplugged: Adapter(%d) Slot(0)", mantis->num);
+ udelay(50);
+ mmwrite(0x00da0000, MANTIS_CARD_RESET);
+ gpif_irqcfg = mmread(MANTIS_GPIF_IRQCFG);
+ gpif_irqcfg |= MANTIS_MASK_PLUGIN;
+ gpif_irqcfg &= ~MANTIS_MASK_PLUGOUT;
+ mmwrite(gpif_irqcfg, MANTIS_GPIF_IRQCFG);
+ udelay(500);
+ ca->slot_state = MODULE_XTRACTED;
+ }
+ udelay(100);
+}
+
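+/* Sample the initial CAM detect state, report it to the EN50221 core and
+   arm the matching plug-in/plug-out interrupt. */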
+int mantis_pcmcia_init(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ u32 gpif_stat, card_stat;
+
+ mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_IRQ0, MANTIS_INT_MASK);
+ gpif_stat = mmread(MANTIS_GPIF_STATUS);
+ card_stat = mmread(MANTIS_GPIF_IRQCFG);
+
+ if (gpif_stat & MANTIS_GPIF_DETSTAT) {
+ dprintk(MANTIS_DEBUG, 1, "CAM found on Adapter(%d) Slot(0)", mantis->num);
+ mmwrite(card_stat | MANTIS_MASK_PLUGOUT, MANTIS_GPIF_IRQCFG);
+ ca->slot_state = MODULE_INSERTED;
+ dvb_ca_en50221_camchange_irq(&ca->en50221,
+ 0,
+ DVB_CA_EN50221_CAMCHANGE_INSERTED);
+ } else {
+ dprintk(MANTIS_DEBUG, 1, "Empty Slot on Adapter(%d) Slot(0)", mantis->num);
+ mmwrite(card_stat | MANTIS_MASK_PLUGIN, MANTIS_GPIF_IRQCFG);
+ ca->slot_state = MODULE_XTRACTED;
+ dvb_ca_en50221_camchange_irq(&ca->en50221,
+ 0,
+ DVB_CA_EN50221_CAMCHANGE_REMOVED);
+ }
+
+ return 0;
+}
+
+void mantis_pcmcia_exit(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ mmwrite(mmread(MANTIS_GPIF_STATUS) & ~(MANTIS_CARD_PLUGOUT | MANTIS_CARD_PLUGIN), MANTIS_GPIF_STATUS);
+ mmwrite(mmread(MANTIS_INT_MASK) & ~MANTIS_INT_IRQ0, MANTIS_INT_MASK);
+}
diff --git a/drivers/media/dvb/mantis/mantis_reg.h b/drivers/media/dvb/mantis/mantis_reg.h
new file mode 100644
index 0000000..7761f9d
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_reg.h
@@ -0,0 +1,197 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_REG_H
+#define __MANTIS_REG_H
+
+/* Interrupts */
+#define MANTIS_INT_STAT 0x00
+#define MANTIS_INT_MASK 0x04
+
+#define MANTIS_INT_RISCSTAT (0x0f << 28)
+#define MANTIS_INT_RISCEN (0x01 << 27)
+#define MANTIS_INT_I2CRACK (0x01 << 26)
+
+/* #define MANTIS_INT_GPIF (0xff << 12) */
+
+#define MANTIS_INT_PCMCIA7 (0x01 << 19)
+#define MANTIS_INT_PCMCIA6 (0x01 << 18)
+#define MANTIS_INT_PCMCIA5 (0x01 << 17)
+#define MANTIS_INT_PCMCIA4 (0x01 << 16)
+#define MANTIS_INT_PCMCIA3 (0x01 << 15)
+#define MANTIS_INT_PCMCIA2 (0x01 << 14)
+#define MANTIS_INT_PCMCIA1 (0x01 << 13)
+#define MANTIS_INT_PCMCIA0 (0x01 << 12)
+#define MANTIS_INT_IRQ1 (0x01 << 11)
+#define MANTIS_INT_IRQ0 (0x01 << 10)
+#define MANTIS_INT_OCERR (0x01 << 8)
+#define MANTIS_INT_PABORT (0x01 << 7)
+#define MANTIS_INT_RIPERR (0x01 << 6)
+#define MANTIS_INT_PPERR (0x01 << 5)
+#define MANTIS_INT_FTRGT (0x01 << 3)
+#define MANTIS_INT_RISCI (0x01 << 1)
+#define MANTIS_INT_I2CDONE (0x01 << 0)
+
+/* DMA */
+#define MANTIS_DMA_CTL 0x08
+#define MANTIS_GPIF_RD (0xff << 24)
+#define MANTIS_GPIF_WR (0xff << 16)
+#define MANTIS_CPU_DO (0x01 << 10)
+#define MANTIS_DRV_DO (0x01 << 9)
+#define MANTIS_I2C_RD (0x01 << 7)
+#define MANTIS_I2C_WR (0x01 << 6)
+#define MANTIS_DCAP_MODE (0x01 << 5)
+#define MANTIS_FIFO_TP_4 (0x00 << 3)
+#define MANTIS_FIFO_TP_8 (0x01 << 3)
+#define MANTIS_FIFO_TP_16 (0x02 << 3)
+#define MANTIS_FIFO_EN (0x01 << 2)
+#define MANTIS_DCAP_EN (0x01 << 1)
+#define MANTIS_RISC_EN (0x01 << 0)
+
+/* DEBUG */
+#define MANTIS_DEBUGREG 0x0c
+#define MANTIS_DATINV (0x0e << 7)
+#define MANTIS_TOP_DEBUGSEL (0x07 << 4)
+#define MANTIS_PCMCIA_DEBUGSEL (0x0f << 0)
+
+#define MANTIS_RISC_START 0x10
+#define MANTIS_RISC_PC 0x14
+
+/* I2C */
+#define MANTIS_I2CDATA_CTL 0x18
+#define MANTIS_I2C_RATE_1 (0x00 << 6)
+#define MANTIS_I2C_RATE_2 (0x01 << 6)
+#define MANTIS_I2C_RATE_3 (0x02 << 6)
+#define MANTIS_I2C_RATE_4 (0x03 << 6)
+#define MANTIS_I2C_STOP (0x01 << 5)
+#define MANTIS_I2C_PGMODE (0x01 << 3)
+
+/* DATA */
+#define MANTIS_CMD_DATA_R1 0x20
+#define MANTIS_CMD_DATA_3 (0xff << 24)
+#define MANTIS_CMD_DATA_2 (0xff << 16)
+#define MANTIS_CMD_DATA_1 (0xff << 8)
+#define MANTIS_CMD_DATA_0 (0xff << 0)
+
+#define MANTIS_CMD_DATA_R2 0x24
+#define MANTIS_CMD_DATA_7 (0xff << 24)
+#define MANTIS_CMD_DATA_6 (0xff << 16)
+#define MANTIS_CMD_DATA_5 (0xff << 8)
+#define MANTIS_CMD_DATA_4 (0xff << 0)
+
+#define MANTIS_CONTROL 0x28
+#define MANTIS_DET (0x01 << 7)
+#define MANTIS_DAT_CF_EN (0x01 << 6)
+#define MANTIS_ACS (0x03 << 4)
+#define MANTIS_VCCEN (0x01 << 3)
+#define MANTIS_BYPASS (0x01 << 2)
+#define MANTIS_MRST (0x01 << 1)
+#define MANTIS_CRST_INT (0x01 << 0)
+
+#define MANTIS_GPIF_CFGSLA 0x84
+#define MANTIS_GPIF_WAITSMPL (0x07 << 28)
+#define MANTIS_GPIF_BYTEADDRSUB (0x01 << 25)
+#define MANTIS_GPIF_WAITPOL (0x01 << 24)
+#define MANTIS_GPIF_NCDELAY (0x07 << 20)
+#define MANTIS_GPIF_RW2CSDELAY (0x07 << 16)
+#define MANTIS_GPIF_SLFTIMEDMODE (0x01 << 15)
+#define MANTIS_GPIF_SLFTIMEDDELY (0x7f << 8)
+#define MANTIS_GPIF_DEVTYPE (0x07 << 4)
+#define MANTIS_GPIF_BIGENDIAN (0x01 << 3)
+#define MANTIS_GPIF_FETCHCMD (0x03 << 1)
+#define MANTIS_GPIF_HWORDDEV (0x01 << 0)
+
+#define MANTIS_GPIF_WSTOPER 0x90
+#define MANTIS_GPIF_WSTOPERWREN3 (0x01 << 31)
+#define MANTIS_GPIF_PARBOOTN (0x01 << 29)
+#define MANTIS_GPIF_WSTOPERSLID3 (0x1f << 24)
+#define MANTIS_GPIF_WSTOPERWREN2 (0x01 << 23)
+#define MANTIS_GPIF_WSTOPERSLID2 (0x1f << 16)
+#define MANTIS_GPIF_WSTOPERWREN1 (0x01 << 15)
+#define MANTIS_GPIF_WSTOPERSLID1 (0x1f << 8)
+#define MANTIS_GPIF_WSTOPERWREN0 (0x01 << 7)
+#define MANTIS_GPIF_WSTOPERSLID0 (0x1f << 0)
+
+#define MANTIS_GPIF_CS2RW 0x94
+#define MANTIS_GPIF_CS2RWWREN3 (0x01 << 31)
+#define MANTIS_GPIF_CS2RWDELY3 (0x3f << 24)
+#define MANTIS_GPIF_CS2RWWREN2 (0x01 << 23)
+#define MANTIS_GPIF_CS2RWDELY2 (0x3f << 16)
+#define MANTIS_GPIF_CS2RWWREN1 (0x01 << 15)
+#define MANTIS_GPIF_CS2RWDELY1 (0x3f << 8)
+#define MANTIS_GPIF_CS2RWWREN0 (0x01 << 7)
+#define MANTIS_GPIF_CS2RWDELY0 (0x3f << 0)
+
+#define MANTIS_GPIF_IRQCFG 0x98
+#define MANTIS_GPIF_IRQPOL (0x01 << 8)
+#define MANTIS_MASK_WRACK (0x01 << 7)
+#define MANTIS_MASK_BRRDY (0x01 << 6)
+#define MANTIS_MASK_OVFLW (0x01 << 5)
+#define MANTIS_MASK_OTHERR (0x01 << 4)
+#define MANTIS_MASK_WSTO (0x01 << 3)
+#define MANTIS_MASK_EXTIRQ (0x01 << 2)
+#define MANTIS_MASK_PLUGIN (0x01 << 1)
+#define MANTIS_MASK_PLUGOUT (0x01 << 0)
+
+#define MANTIS_GPIF_STATUS 0x9c
+#define MANTIS_SBUF_KILLOP (0x01 << 15)
+#define MANTIS_SBUF_OPDONE (0x01 << 14)
+#define MANTIS_SBUF_EMPTY (0x01 << 13)
+#define MANTIS_GPIF_DETSTAT (0x01 << 9)
+#define MANTIS_GPIF_INTSTAT (0x01 << 8)
+#define MANTIS_GPIF_WRACK (0x01 << 7)
+#define MANTIS_GPIF_BRRDY (0x01 << 6)
+#define MANTIS_SBUF_OVFLW (0x01 << 5)
+#define MANTIS_GPIF_OTHERR (0x01 << 4)
+#define MANTIS_SBUF_WSTO (0x01 << 3)
+#define MANTIS_GPIF_EXTIRQ (0x01 << 2)
+#define MANTIS_CARD_PLUGIN (0x01 << 1)
+#define MANTIS_CARD_PLUGOUT (0x01 << 0)
+
+#define MANTIS_GPIF_BRADDR 0xa0
+#define MANTIS_GPIF_PCMCIAREG (0x01 << 27)
+#define MANTIS_GPIF_PCMCIAIOM (0x01 << 26)
+#define MANTIS_GPIF_BR_ADDR (0xfffffff << 0)
+
+#define MANTIS_GPIF_BRBYTES 0xa4
+#define MANTIS_GPIF_BRCNT (0xfff << 0)
+
+#define MANTIS_PCMCIA_RESET 0xa8
+#define MANTIS_PCMCIA_RSTVAL (0xff << 0)
+
+#define MANTIS_CARD_RESET 0xac
+
+#define MANTIS_GPIF_ADDR 0xb0
+#define MANTIS_GPIF_HIFRDWRN (0x01 << 31)
+#define MANTIS_GPIF_PCMCIAREG (0x01 << 27)
+#define MANTIS_GPIF_PCMCIAIOM (0x01 << 26)
+#define MANTIS_GPIF_HIFADDR (0xfffffff << 0)
+
+#define MANTIS_GPIF_DOUT 0xb4
+#define MANTIS_GPIF_HIFDOUT (0xfffffff << 0)
+
+#define MANTIS_GPIF_DIN 0xb8
+#define MANTIS_GPIF_HIFDIN (0xfffffff << 0)
+
+#define MANTIS_GPIF_SPARE 0xbc
+#define MANTIS_GPIF_LOGICRD (0xffff << 16)
+#define MANTIS_GPIF_LOGICRW (0xffff << 0)
+
+#endif /* __MANTIS_REG_H */
diff --git a/drivers/media/dvb/mantis/mantis_uart.c b/drivers/media/dvb/mantis/mantis_uart.c
new file mode 100644
index 0000000..7d2f239
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_uart.c
@@ -0,0 +1,186 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_uart.h"
+
+struct mantis_uart_params {
+ enum mantis_baud baud_rate;
+ enum mantis_parity parity;
+};
+
+static struct {
+ char string[7];
+} rates[5] = {
+ { "9600" },
+ { "19200" },
+ { "38400" },
+ { "57600" },
+ { "115200" }
+};
+
+static struct {
+ char string[5];
+} parity[3] = {
+ { "NONE" },
+ { "ODD" },
+ { "EVEN" }
+};
+
+#define UART_MAX_BUF 16
+
+int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ u32 stat = 0, rxd, i;
+
+ /* get data */
+ for (i = 0; i < (config->bytes + 1); i++) {
+
+ stat = mmread(MANTIS_UART_STAT);
+
+ if (stat & MANTIS_UART_RXFIFO_FULL) {
+ dprintk(MANTIS_ERROR, 1, "RX Fifo FULL");
+ }
+ /* check the error flags on the raw word before masking off the data bits */
+ rxd = mmread(MANTIS_UART_RXD);
+ data[i] = rxd & 0x3f;
+
+ dprintk(MANTIS_DEBUG, 1, "Reading ... <%02x>", data[i]);
+
+ if (rxd & (1 << 7)) {
+ dprintk(MANTIS_ERROR, 1, "UART framing error");
+ return -EINVAL;
+ }
+ if (rxd & (1 << 6)) {
+ dprintk(MANTIS_ERROR, 1, "UART parity error");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void mantis_uart_work(struct work_struct *work)
+{
+ struct mantis_pci *mantis = container_of(work, struct mantis_pci, uart_work);
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ u8 buf[16];
+ int i;
+
+ mantis_uart_read(mantis, buf);
+
+ for (i = 0; i < (config->bytes + 1); i++)
+ dprintk(MANTIS_INFO, 1, "UART BUF:%d <%02x> ", i, buf[i]);
+
+ dprintk(MANTIS_DEBUG, 0, "\n");
+}
+
+static int mantis_uart_setup(struct mantis_pci *mantis,
+ struct mantis_uart_params *params)
+{
+ u32 reg;
+
+ mmwrite((mmread(MANTIS_UART_CTL) | (params->parity & 0x3)), MANTIS_UART_CTL);
+
+ reg = mmread(MANTIS_UART_BAUD);
+
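+ /* pick the clock divisor matching the requested baud rate */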
+ switch (params->baud_rate) {
+ case MANTIS_BAUD_9600:
+ reg |= 0xd8;
+ break;
+ case MANTIS_BAUD_19200:
+ reg |= 0x6c;
+ break;
+ case MANTIS_BAUD_38400:
+ reg |= 0x36;
+ break;
+ case MANTIS_BAUD_57600:
+ reg |= 0x23;
+ break;
+ case MANTIS_BAUD_115200:
+ reg |= 0x11;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mmwrite(reg, MANTIS_UART_BAUD);
+
+ return 0;
+}
+
+int mantis_uart_init(struct mantis_pci *mantis)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ struct mantis_uart_params params;
+
+ /* baud rate and parity come from the per-board config */
+ params.baud_rate = config->baud_rate;
+ params.parity = config->parity;
+ dprintk(MANTIS_INFO, 1, "Initializing UART @ %sbps parity:%s",
+ rates[params.baud_rate].string,
+ parity[params.parity].string);
+
+ init_waitqueue_head(&mantis->uart_wq);
+ spin_lock_init(&mantis->uart_lock);
+
+ INIT_WORK(&mantis->uart_work, mantis_uart_work);
+
+ /* disable interrupt */
+ mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
+
+ mantis_uart_setup(mantis, &params);
+
+ /* default 1 byte */
+ mmwrite((mmread(MANTIS_UART_BAUD) | (config->bytes << 8)), MANTIS_UART_BAUD);
+
+ /* flush buffer */
+ mmwrite((mmread(MANTIS_UART_CTL) | MANTIS_UART_RXFLUSH), MANTIS_UART_CTL);
+
+ /* enable interrupt */
+ mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_IRQ1, MANTIS_INT_MASK);
+ mmwrite(mmread(MANTIS_UART_CTL) | MANTIS_UART_RXINT, MANTIS_UART_CTL);
+
+ schedule_work(&mantis->uart_work);
+ dprintk(MANTIS_DEBUG, 1, "UART succesfully initialized");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_uart_init);
+
+void mantis_uart_exit(struct mantis_pci *mantis)
+{
+ /* disable interrupt */
+ mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
+}
+EXPORT_SYMBOL_GPL(mantis_uart_exit);
diff --git a/drivers/media/dvb/mantis/mantis_uart.h b/drivers/media/dvb/mantis/mantis_uart.h
new file mode 100644
index 0000000..ffb62a0
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_uart.h
@@ -0,0 +1,58 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_UART_H
+#define __MANTIS_UART_H
+
+#define MANTIS_UART_CTL 0xe0
+#define MANTIS_UART_RXINT (1 << 4)
+#define MANTIS_UART_RXFLUSH (1 << 2)
+
+#define MANTIS_UART_RXD 0xe8
+#define MANTIS_UART_BAUD 0xec
+
+#define MANTIS_UART_STAT 0xf0
+#define MANTIS_UART_RXFIFO_DATA (1 << 7)
+#define MANTIS_UART_RXFIFO_EMPTY (1 << 6)
+#define MANTIS_UART_RXFIFO_FULL (1 << 3)
+#define MANTIS_UART_FRAME_ERR (1 << 2)
+#define MANTIS_UART_PARITY_ERR (1 << 1)
+#define MANTIS_UART_RXTHRESH_INT (1 << 0)
+
+enum mantis_baud {
+ MANTIS_BAUD_9600 = 0,
+ MANTIS_BAUD_19200,
+ MANTIS_BAUD_38400,
+ MANTIS_BAUD_57600,
+ MANTIS_BAUD_115200
+};
+
+enum mantis_parity {
+ MANTIS_PARITY_NONE = 0,
+ MANTIS_PARITY_EVEN,
+ MANTIS_PARITY_ODD,
+};
+
+struct mantis_pci;
+
+extern int mantis_uart_init(struct mantis_pci *mantis);
+extern void mantis_uart_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_UART_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp1033.c b/drivers/media/dvb/mantis/mantis_vp1033.c
new file mode 100644
index 0000000..4a723bd
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1033.c
@@ -0,0 +1,212 @@
+/*
+ Mantis VP-1033 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "stv0299.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp1033.h"
+#include "mantis_reg.h"
+
+u8 lgtdqcs001f_inittab[] = {
+ 0x01, 0x15,
+ 0x02, 0x00,
+ 0x03, 0x00,
+ 0x04, 0x2a,
+ 0x05, 0x85,
+ 0x06, 0x02,
+ 0x07, 0x00,
+ 0x08, 0x00,
+ 0x0c, 0x01,
+ 0x0d, 0x81,
+ 0x0e, 0x44,
+ 0x0f, 0x94,
+ 0x10, 0x3c,
+ 0x11, 0x84,
+ 0x12, 0xb9,
+ 0x13, 0xb5,
+ 0x14, 0x4f,
+ 0x15, 0xc9,
+ 0x16, 0x80,
+ 0x17, 0x36,
+ 0x18, 0xfb,
+ 0x19, 0xcf,
+ 0x1a, 0xbc,
+ 0x1c, 0x2b,
+ 0x1d, 0x27,
+ 0x1e, 0x00,
+ 0x1f, 0x0b,
+ 0x20, 0xa1,
+ 0x21, 0x60,
+ 0x22, 0x00,
+ 0x23, 0x00,
+ 0x28, 0x00,
+ 0x29, 0x28,
+ 0x2a, 0x14,
+ 0x2b, 0x0f,
+ 0x2c, 0x09,
+ 0x2d, 0x05,
+ 0x31, 0x1f,
+ 0x32, 0x19,
+ 0x33, 0xfc,
+ 0x34, 0x13,
+ 0xff, 0xff,
+};
+
+#define MANTIS_MODEL_NAME "VP-1033"
+#define MANTIS_DEV_TYPE "DVB-S/DSS"
+
+int lgtdqcs001f_tuner_set(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct mantis_pci *mantis = fe->dvb->priv;
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 buf[4];
+ u32 div;
+
+ struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf)};
+
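+ /* PLL divider; the frequency appears to be in kHz, giving a 250 kHz tuning step */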
+ div = params->frequency / 250;
+
+ buf[0] = (div >> 8) & 0x7f;
+ buf[1] = div & 0xff;
+ buf[2] = 0x83;
+ buf[3] = 0xc0;
+
+ if (params->frequency < 1531000)
+ buf[3] |= 0x04;
+ else
+ buf[3] &= ~0x04;
+ if (i2c_transfer(adapter, &msg, 1) < 0) {
+ dprintk(MANTIS_ERROR, 1, "Write: I2C Transfer failed");
+ return -EIO;
+ }
+ msleep_interruptible(100);
+
+ return 0;
+}
+
+int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
+ u32 srate, u32 ratio)
+{
+ u8 aclk = 0;
+ u8 bclk = 0;
+
+ if (srate < 1500000) {
+ aclk = 0xb7;
+ bclk = 0x47;
+ } else if (srate < 3000000) {
+ aclk = 0xb7;
+ bclk = 0x4b;
+ } else if (srate < 7000000) {
+ aclk = 0xb7;
+ bclk = 0x4f;
+ } else if (srate < 14000000) {
+ aclk = 0xb7;
+ bclk = 0x53;
+ } else if (srate < 30000000) {
+ aclk = 0xb6;
+ bclk = 0x53;
+ } else if (srate < 45000000) {
+ aclk = 0xb4;
+ bclk = 0x51;
+ }
+ stv0299_writereg(fe, 0x13, aclk);
+ stv0299_writereg(fe, 0x14, bclk);
+
+ stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
+ stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
+ stv0299_writereg(fe, 0x21, ratio & 0xf0);
+
+ return 0;
+}
+
+struct stv0299_config lgtdqcs001f_config = {
+ .demod_address = 0x68,
+ .inittab = lgtdqcs001f_inittab,
+ .mclk = 88000000UL,
+ .invert = 0,
+ .skip_reinit = 0,
+ .volt13_op0_op1 = STV0299_VOLT13_OP0,
+ .min_delay_ms = 100,
+ .set_symbol_rate = lgtdqcs001f_set_symbol_rate,
+};
+
+static int vp1033_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+
+ dprintk(MANTIS_ERROR, 1, "Probing for STV0299 (DVB-S)");
+ fe = stv0299_attach(&lgtdqcs001f_config, adapter);
+
+ if (fe) {
+ fe->ops.tuner_ops.set_params = lgtdqcs001f_tuner_set;
+ dprintk(MANTIS_ERROR, 1, "found STV0299 DVB-S frontend @ 0x%02x",
+ lgtdqcs001f_config.demod_address);
+
+ dprintk(MANTIS_ERROR, 1, "Mantis DVB-S STV0299 frontend attach success");
+ } else {
+ return -1;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+ mantis->fe = fe;
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp1033_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_204,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp1033_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp1033.h b/drivers/media/dvb/mantis/mantis_vp1033.h
new file mode 100644
index 0000000..7daaa1b
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1033.h
@@ -0,0 +1,30 @@
+/*
+ Mantis VP-1033 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP1033_H
+#define __MANTIS_VP1033_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_1033_DVB_S 0x0016
+
+extern struct mantis_hwconfig vp1033_config;
+
+#endif /* __MANTIS_VP1033_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp1034.c b/drivers/media/dvb/mantis/mantis_vp1034.c
new file mode 100644
index 0000000..8e6ae55
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1034.c
@@ -0,0 +1,119 @@
+/*
+ Mantis VP-1034 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mb86a16.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp1034.h"
+#include "mantis_reg.h"
+
+struct mb86a16_config vp1034_mb86a16_config = {
+ .demod_address = 0x08,
+ .set_voltage = vp1034_set_voltage,
+};
+
+#define MANTIS_MODEL_NAME "VP-1034"
+#define MANTIS_DEV_TYPE "DVB-S/DSS"
+
+int vp1034_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
+{
+ struct mantis_pci *mantis = fe->dvb->priv;
+
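+ /* GPIO 13 appears to enable the LNB supply, GPIO 14 to select between 13V and 18V */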
+ switch (voltage) {
+ case SEC_VOLTAGE_13:
+ dprintk(MANTIS_ERROR, 1, "Polarization=[13V]");
+ gpio_set_bits(mantis, 13, 1);
+ gpio_set_bits(mantis, 14, 0);
+ break;
+ case SEC_VOLTAGE_18:
+ dprintk(MANTIS_ERROR, 1, "Polarization=[18V]");
+ gpio_set_bits(mantis, 13, 1);
+ gpio_set_bits(mantis, 14, 1);
+ break;
+ case SEC_VOLTAGE_OFF:
+ dprintk(MANTIS_ERROR, 1, "Frontend (dummy) POWERDOWN");
+ break;
+ default:
+ dprintk(MANTIS_ERROR, 1, "Invalid = (%d)", (u32) voltage);
+ return -EINVAL;
+ }
+ mmwrite(0x00, MANTIS_GPIF_DOUT);
+
+ return 0;
+}
+
+static int vp1034_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+
+ dprintk(MANTIS_ERROR, 1, "Probing for MB86A16 (DVB-S/DSS)");
+ fe = mb86a16_attach(&vp1034_mb86a16_config, adapter);
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found MB86A16 DVB-S/DSS frontend @0x%02x",
+ vp1034_mb86a16_config.demod_address);
+
+ } else {
+ return -1;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+ mantis->fe = fe;
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp1034_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_204,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp1034_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp1034.h b/drivers/media/dvb/mantis/mantis_vp1034.h
new file mode 100644
index 0000000..323f38e
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1034.h
@@ -0,0 +1,33 @@
+/*
+ Mantis VP-1034 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP1034_H
+#define __MANTIS_VP1034_H
+
+#include "dvb_frontend.h"
+#include "mantis_common.h"
+
+#define MANTIS_VP_1034_DVB_S 0x0014
+
+extern struct mantis_hwconfig vp1034_config;
+extern int vp1034_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
+
+#endif /* __MANTIS_VP1034_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.c b/drivers/media/dvb/mantis/mantis_vp1041.c
new file mode 100644
index 0000000..515346d
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1041.c
@@ -0,0 +1,358 @@
+/*
+ Mantis VP-1041 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp1041.h"
+#include "stb0899_reg.h"
+#include "stb0899_drv.h"
+#include "stb0899_cfg.h"
+#include "stb6100_cfg.h"
+#include "stb6100.h"
+#include "lnbp21.h"
+
+#define MANTIS_MODEL_NAME "VP-1041"
+#define MANTIS_DEV_TYPE "DSS/DVB-S/DVB-S2"
+
+static const struct stb0899_s1_reg vp1041_stb0899_s1_init_1[] = {
+
+ /* 0x0000000b, *//* SYSREG */
+ { STB0899_DEV_ID , 0x30 },
+ { STB0899_DISCNTRL1 , 0x32 },
+ { STB0899_DISCNTRL2 , 0x80 },
+ { STB0899_DISRX_ST0 , 0x04 },
+ { STB0899_DISRX_ST1 , 0x00 },
+ { STB0899_DISPARITY , 0x00 },
+ { STB0899_DISFIFO , 0x00 },
+ { STB0899_DISSTATUS , 0x20 },
+ { STB0899_DISF22 , 0x99 },
+ { STB0899_DISF22RX , 0xa8 },
+ /* SYSREG ? */
+ { STB0899_ACRPRESC , 0x11 },
+ { STB0899_ACRDIV1 , 0x0a },
+ { STB0899_ACRDIV2 , 0x05 },
+ { STB0899_DACR1 , 0x00 },
+ { STB0899_DACR2 , 0x00 },
+ { STB0899_OUTCFG , 0x00 },
+ { STB0899_MODECFG , 0x00 },
+ { STB0899_IRQSTATUS_3 , 0xfe },
+ { STB0899_IRQSTATUS_2 , 0x03 },
+ { STB0899_IRQSTATUS_1 , 0x7c },
+ { STB0899_IRQSTATUS_0 , 0xf4 },
+ { STB0899_IRQMSK_3 , 0xf3 },
+ { STB0899_IRQMSK_2 , 0xfc },
+ { STB0899_IRQMSK_1 , 0xff },
+ { STB0899_IRQMSK_0 , 0xff },
+ { STB0899_IRQCFG , 0x00 },
+ { STB0899_I2CCFG , 0x88 },
+ { STB0899_I2CRPT , 0x58 },
+ { STB0899_IOPVALUE5 , 0x00 },
+ { STB0899_IOPVALUE4 , 0x33 },
+ { STB0899_IOPVALUE3 , 0x6d },
+ { STB0899_IOPVALUE2 , 0x90 },
+ { STB0899_IOPVALUE1 , 0x60 },
+ { STB0899_IOPVALUE0 , 0x00 },
+ { STB0899_GPIO00CFG , 0x82 },
+ { STB0899_GPIO01CFG , 0x82 },
+ { STB0899_GPIO02CFG , 0x82 },
+ { STB0899_GPIO03CFG , 0x82 },
+ { STB0899_GPIO04CFG , 0x82 },
+ { STB0899_GPIO05CFG , 0x82 },
+ { STB0899_GPIO06CFG , 0x82 },
+ { STB0899_GPIO07CFG , 0x82 },
+ { STB0899_GPIO08CFG , 0x82 },
+ { STB0899_GPIO09CFG , 0x82 },
+ { STB0899_GPIO10CFG , 0x82 },
+ { STB0899_GPIO11CFG , 0x82 },
+ { STB0899_GPIO12CFG , 0x82 },
+ { STB0899_GPIO13CFG , 0x82 },
+ { STB0899_GPIO14CFG , 0x82 },
+ { STB0899_GPIO15CFG , 0x82 },
+ { STB0899_GPIO16CFG , 0x82 },
+ { STB0899_GPIO17CFG , 0x82 },
+ { STB0899_GPIO18CFG , 0x82 },
+ { STB0899_GPIO19CFG , 0x82 },
+ { STB0899_GPIO20CFG , 0x82 },
+ { STB0899_SDATCFG , 0xb8 },
+ { STB0899_SCLTCFG , 0xba },
+ { STB0899_AGCRFCFG , 0x1c }, /* 0x11 */
+ { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
+ { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
+ { STB0899_DIRCLKCFG , 0x82 },
+ { STB0899_CLKOUT27CFG , 0x7e },
+ { STB0899_STDBYCFG , 0x82 },
+ { STB0899_CS0CFG , 0x82 },
+ { STB0899_CS1CFG , 0x82 },
+ { STB0899_DISEQCOCFG , 0x20 },
+ { STB0899_GPIO32CFG , 0x82 },
+ { STB0899_GPIO33CFG , 0x82 },
+ { STB0899_GPIO34CFG , 0x82 },
+ { STB0899_GPIO35CFG , 0x82 },
+ { STB0899_GPIO36CFG , 0x82 },
+ { STB0899_GPIO37CFG , 0x82 },
+ { STB0899_GPIO38CFG , 0x82 },
+ { STB0899_GPIO39CFG , 0x82 },
+ { STB0899_NCOARSE , 0x17 }, /* 0x15 = 27 MHz Clock, F/3 = 198MHz, F/6 = 99MHz */
+ { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
+ { STB0899_FILTCTRL , 0x00 },
+ { STB0899_SYSCTRL , 0x01 },
+ { STB0899_STOPCLK1 , 0x20 },
+ { STB0899_STOPCLK2 , 0x00 },
+ { STB0899_INTBUFSTATUS , 0x00 },
+ { STB0899_INTBUFCTRL , 0x0a },
+ { 0xffff , 0xff },
+};
+
+static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
+ { STB0899_DEMOD , 0x00 },
+ { STB0899_RCOMPC , 0xc9 },
+ { STB0899_AGC1CN , 0x01 },
+ { STB0899_AGC1REF , 0x10 },
+ { STB0899_RTC , 0x23 },
+ { STB0899_TMGCFG , 0x4e },
+ { STB0899_AGC2REF , 0x34 },
+ { STB0899_TLSR , 0x84 },
+ { STB0899_CFD , 0xf7 },
+ { STB0899_ACLC , 0x87 },
+ { STB0899_BCLC , 0x94 },
+ { STB0899_EQON , 0x41 },
+ { STB0899_LDT , 0xf1 },
+ { STB0899_LDT2 , 0xe3 },
+ { STB0899_EQUALREF , 0xb4 },
+ { STB0899_TMGRAMP , 0x10 },
+ { STB0899_TMGTHD , 0x30 },
+ { STB0899_IDCCOMP , 0xfd },
+ { STB0899_QDCCOMP , 0xff },
+ { STB0899_POWERI , 0x0c },
+ { STB0899_POWERQ , 0x0f },
+ { STB0899_RCOMP , 0x6c },
+ { STB0899_AGCIQIN , 0x80 },
+ { STB0899_AGC2I1 , 0x06 },
+ { STB0899_AGC2I2 , 0x00 },
+ { STB0899_TLIR , 0x30 },
+ { STB0899_RTF , 0x7f },
+ { STB0899_DSTATUS , 0x00 },
+ { STB0899_LDI , 0xbc },
+ { STB0899_CFRM , 0xea },
+ { STB0899_CFRL , 0x31 },
+ { STB0899_NIRM , 0x2b },
+ { STB0899_NIRL , 0x80 },
+ { STB0899_ISYMB , 0x1d },
+ { STB0899_QSYMB , 0xa6 },
+ { STB0899_SFRH , 0x2f },
+ { STB0899_SFRM , 0x68 },
+ { STB0899_SFRL , 0x40 },
+ { STB0899_SFRUPH , 0x2f },
+ { STB0899_SFRUPM , 0x68 },
+ { STB0899_SFRUPL , 0x40 },
+ { STB0899_EQUAI1 , 0x02 },
+ { STB0899_EQUAQ1 , 0xff },
+ { STB0899_EQUAI2 , 0x04 },
+ { STB0899_EQUAQ2 , 0x05 },
+ { STB0899_EQUAI3 , 0x02 },
+ { STB0899_EQUAQ3 , 0xfd },
+ { STB0899_EQUAI4 , 0x03 },
+ { STB0899_EQUAQ4 , 0x07 },
+ { STB0899_EQUAI5 , 0x08 },
+ { STB0899_EQUAQ5 , 0xf5 },
+ { STB0899_DSTATUS2 , 0x00 },
+ { STB0899_VSTATUS , 0x00 },
+ { STB0899_VERROR , 0x86 },
+ { STB0899_IQSWAP , 0x2a },
+ { STB0899_ECNT1M , 0x00 },
+ { STB0899_ECNT1L , 0x00 },
+ { STB0899_ECNT2M , 0x00 },
+ { STB0899_ECNT2L , 0x00 },
+ { STB0899_ECNT3M , 0x0a },
+ { STB0899_ECNT3L , 0xad },
+ { STB0899_FECAUTO1 , 0x06 },
+ { STB0899_FECM , 0x01 },
+ { STB0899_VTH12 , 0xb0 },
+ { STB0899_VTH23 , 0x7a },
+ { STB0899_VTH34 , 0x58 },
+ { STB0899_VTH56 , 0x38 },
+ { STB0899_VTH67 , 0x34 },
+ { STB0899_VTH78 , 0x24 },
+ { STB0899_PRVIT , 0xff },
+ { STB0899_VITSYNC , 0x19 },
+ { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
+ { STB0899_TSULC , 0x42 },
+ { STB0899_RSLLC , 0x41 },
+ { STB0899_TSLPL , 0x12 },
+ { STB0899_TSCFGH , 0x0c },
+ { STB0899_TSCFGM , 0x00 },
+ { STB0899_TSCFGL , 0x00 },
+ { STB0899_TSOUT , 0x69 }, /* 0x0d for CAM */
+ { STB0899_RSSYNCDEL , 0x00 },
+ { STB0899_TSINHDELH , 0x02 },
+ { STB0899_TSINHDELM , 0x00 },
+ { STB0899_TSINHDELL , 0x00 },
+ { STB0899_TSLLSTKM , 0x1b },
+ { STB0899_TSLLSTKL , 0xb3 },
+ { STB0899_TSULSTKM , 0x00 },
+ { STB0899_TSULSTKL , 0x00 },
+ { STB0899_PCKLENUL , 0xbc },
+ { STB0899_PCKLENLL , 0xcc },
+ { STB0899_RSPCKLEN , 0xbd },
+ { STB0899_TSSTATUS , 0x90 },
+ { STB0899_ERRCTRL1 , 0xb6 },
+ { STB0899_ERRCTRL2 , 0x95 },
+ { STB0899_ERRCTRL3 , 0x8d },
+ { STB0899_DMONMSK1 , 0x27 },
+ { STB0899_DMONMSK0 , 0x03 },
+ { STB0899_DEMAPVIT , 0x5c },
+ { STB0899_PLPARM , 0x19 },
+ { STB0899_PDELCTRL , 0x48 },
+ { STB0899_PDELCTRL2 , 0x00 },
+ { STB0899_BBHCTRL1 , 0x00 },
+ { STB0899_BBHCTRL2 , 0x00 },
+ { STB0899_HYSTTHRESH , 0x77 },
+ { STB0899_MATCSTM , 0x00 },
+ { STB0899_MATCSTL , 0x00 },
+ { STB0899_UPLCSTM , 0x00 },
+ { STB0899_UPLCSTL , 0x00 },
+ { STB0899_DFLCSTM , 0x00 },
+ { STB0899_DFLCSTL , 0x00 },
+ { STB0899_SYNCCST , 0x00 },
+ { STB0899_SYNCDCSTM , 0x00 },
+ { STB0899_SYNCDCSTL , 0x00 },
+ { STB0899_ISI_ENTRY , 0x00 },
+ { STB0899_ISI_BIT_EN , 0x00 },
+ { STB0899_MATSTRM , 0xf0 },
+ { STB0899_MATSTRL , 0x02 },
+ { STB0899_UPLSTRM , 0x45 },
+ { STB0899_UPLSTRL , 0x60 },
+ { STB0899_DFLSTRM , 0xe3 },
+ { STB0899_DFLSTRL , 0x00 },
+ { STB0899_SYNCSTR , 0x47 },
+ { STB0899_SYNCDSTRM , 0x05 },
+ { STB0899_SYNCDSTRL , 0x18 },
+ { STB0899_CFGPDELSTATUS1 , 0x19 },
+ { STB0899_CFGPDELSTATUS2 , 0x2b },
+ { STB0899_BBFERRORM , 0x00 },
+ { STB0899_BBFERRORL , 0x01 },
+ { STB0899_UPKTERRORM , 0x00 },
+ { STB0899_UPKTERRORL , 0x00 },
+ { 0xffff , 0xff },
+};
+
+struct stb0899_config vp1041_stb0899_config = {
+ .init_dev = vp1041_stb0899_s1_init_1,
+ .init_s2_demod = stb0899_s2_init_2,
+ .init_s1_demod = vp1041_stb0899_s1_init_3,
+ .init_s2_fec = stb0899_s2_init_4,
+ .init_tst = stb0899_s1_init_5,
+
+ .demod_address = 0x68, /* 0xd0 >> 1 */
+
+ .xtal_freq = 27000000,
+ .inversion = IQ_SWAP_ON, /* 1 */
+
+ .lo_clk = 76500000,
+ .hi_clk = 99000000,
+
+ .esno_ave = STB0899_DVBS2_ESNO_AVE,
+ .esno_quant = STB0899_DVBS2_ESNO_QUANT,
+ .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
+ .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
+ .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
+ .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
+ .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
+ .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
+ .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
+
+ .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
+ .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
+ .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
+ .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
+
+ .tuner_get_frequency = stb6100_get_frequency,
+ .tuner_set_frequency = stb6100_set_frequency,
+ .tuner_set_bandwidth = stb6100_set_bandwidth,
+ .tuner_get_bandwidth = stb6100_get_bandwidth,
+ .tuner_set_rfsiggain = NULL,
+};
+
+struct stb6100_config vp1041_stb6100_config = {
+ .tuner_address = 0x60,
+ .refclock = 27000000,
+};
+
+static int vp1041_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+ mantis->fe = stb0899_attach(&vp1041_stb0899_config, adapter);
+ if (mantis->fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found STB0899 DVB-S/DVB-S2 frontend @0x%02x",
+ vp1041_stb0899_config.demod_address);
+
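+ /* attach the STB6100 tuner; if that succeeds, also look for an LNBP21 LNB controller */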
+ if (stb6100_attach(mantis->fe, &vp1041_stb6100_config, adapter)) {
+ if (!lnbp21_attach(mantis->fe, adapter, 0, 0))
+ dprintk(MANTIS_ERROR, 1, "No LNBP21 found!");
+ }
+ } else {
+ return -EREMOTEIO;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp1041_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_188,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp1041_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.h b/drivers/media/dvb/mantis/mantis_vp1041.h
new file mode 100644
index 0000000..1ae5b3d
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1041.h
@@ -0,0 +1,33 @@
+/*
+ Mantis VP-1041 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP1041_H
+#define __MANTIS_VP1041_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_1041_DVB_S2 0x0031
+#define SKYSTAR_HD2_10 0x0001
+#define SKYSTAR_HD2_20 0x0003
+#define CINERGY_S2_PCI_HD 0x1179
+
+extern struct mantis_hwconfig vp1041_config;
+
+#endif /* __MANTIS_VP1041_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp2033.c b/drivers/media/dvb/mantis/mantis_vp2033.c
new file mode 100644
index 0000000..10ce817
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2033.c
@@ -0,0 +1,187 @@
+/*
+ Mantis VP-2033 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "tda1002x.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp2033.h"
+
+#define MANTIS_MODEL_NAME "VP-2033"
+#define MANTIS_DEV_TYPE "DVB-C"
+
+struct tda1002x_config vp2033_tda1002x_cu1216_config = {
+ .demod_address = 0x18 >> 1,
+ .invert = 1,
+};
+
+struct tda10023_config vp2033_tda10023_cu1216_config = {
+ .demod_address = 0x18 >> 1,
+ .invert = 1,
+};
+
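+/* read the tuner PWM value (presumably the AGC take-over point) from the EEPROM at 0x50; fall back to 0x48 on failure or 0xff */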
+static u8 read_pwm(struct mantis_pci *mantis)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 b = 0xff;
+ u8 pwm;
+ struct i2c_msg msg[] = {
+ {.addr = 0x50, .flags = 0, .buf = &b, .len = 1},
+ {.addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1}
+ };
+
+ if ((i2c_transfer(adapter, msg, 2) != 2)
+ || (pwm == 0xff))
+ pwm = 0x48;
+
+ return pwm;
+}
+
+static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+{
+ struct mantis_pci *mantis = fe->dvb->priv;
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 buf[6];
+ struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf)};
+ int i;
+
+#define CU1216_IF 36125000
+#define TUNER_MUL 62500
+
+ u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
+
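+ /* PLL bytes: divider high/low, control byte, band-switch byte picked by frequency */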
+ buf[0] = (div >> 8) & 0x7f;
+ buf[1] = div & 0xff;
+ buf[2] = 0xce;
+ buf[3] = (params->frequency < 150000000 ? 0x01 :
+ params->frequency < 445000000 ? 0x02 : 0x04);
+ buf[4] = 0xde;
+ buf[5] = 0x20;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) != 1)
+ return -EIO;
+
+ /* wait for the pll lock */
+ msg.flags = I2C_M_RD;
+ msg.len = 1;
+ for (i = 0; i < 20; i++) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) == 1 && (buf[0] & 0x40))
+ break;
+
+ msleep(10);
+ }
+
+ /* switch the charge pump to the lower current */
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[2];
+ buf[2] &= ~0x40;
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int vp2033_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+
+ dprintk(MANTIS_ERROR, 1, "Probing for CU1216 (DVB-C)");
+ fe = tda10021_attach(&vp2033_tda1002x_cu1216_config,
+ adapter,
+ read_pwm(mantis));
+
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found Philips CU1216 DVB-C frontend (TDA10021) @ 0x%02x",
+ vp2033_tda1002x_cu1216_config.demod_address);
+ } else {
+ fe = tda10023_attach(&vp2033_tda10023_cu1216_config,
+ adapter,
+ read_pwm(mantis));
+
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found Philips CU1216 DVB-C frontend (TDA10023) @ 0x%02x",
+ vp2033_tda1002x_cu1216_config.demod_address);
+ }
+ }
+
+ if (fe) {
+ fe->ops.tuner_ops.set_params = tda1002x_cu1216_tuner_set;
+ dprintk(MANTIS_ERROR, 1, "Mantis DVB-C Philips CU1216 frontend attach success");
+ } else {
+ return -1;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+
+ mantis->fe = fe;
+ dprintk(MANTIS_DEBUG, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp2033_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_204,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp2033_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp2033.h b/drivers/media/dvb/mantis/mantis_vp2033.h
new file mode 100644
index 0000000..c55242b
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2033.h
@@ -0,0 +1,30 @@
+/*
+ Mantis VP-2033 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP2033_H
+#define __MANTIS_VP2033_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_2033_DVB_C 0x0008
+
+extern struct mantis_hwconfig vp2033_config;
+
+#endif /* __MANTIS_VP2033_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp2040.c b/drivers/media/dvb/mantis/mantis_vp2040.c
new file mode 100644
index 0000000..a7ca233e8
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2040.c
@@ -0,0 +1,186 @@
+/*
+ Mantis VP-2040 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "tda1002x.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp2040.h"
+
+#define MANTIS_MODEL_NAME "VP-2040"
+#define MANTIS_DEV_TYPE "DVB-C"
+
+struct tda1002x_config vp2040_tda1002x_cu1216_config = {
+ .demod_address = 0x18 >> 1,
+ .invert = 1,
+};
+
+struct tda10023_config vp2040_tda10023_cu1216_config = {
+ .demod_address = 0x18 >> 1,
+ .invert = 1,
+};
+
+static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+{
+ struct mantis_pci *mantis = fe->dvb->priv;
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 buf[6];
+ struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf)};
+ int i;
+
+#define CU1216_IF 36125000
+#define TUNER_MUL 62500
+
+ u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
+
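+ /* PLL bytes: divider high/low, control byte, band-switch byte picked by frequency */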
+ buf[0] = (div >> 8) & 0x7f;
+ buf[1] = div & 0xff;
+ buf[2] = 0xce;
+ buf[3] = (params->frequency < 150000000 ? 0x01 :
+ params->frequency < 445000000 ? 0x02 : 0x04);
+ buf[4] = 0xde;
+ buf[5] = 0x20;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) != 1)
+ return -EIO;
+
+ /* wait for the pll lock */
+ msg.flags = I2C_M_RD;
+ msg.len = 1;
+ for (i = 0; i < 20; i++) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) == 1 && (buf[0] & 0x40))
+ break;
+
+ msleep(10);
+ }
+
+ /* switch the charge pump to the lower current */
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[2];
+ buf[2] &= ~0x40;
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
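+/* read the tuner PWM value (presumably the AGC take-over point) from the EEPROM at 0x50; fall back to 0x48 on failure or 0xff */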
+static u8 read_pwm(struct mantis_pci *mantis)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 b = 0xff;
+ u8 pwm;
+ struct i2c_msg msg[] = {
+ {.addr = 0x50, .flags = 0, .buf = &b, .len = 1},
+ {.addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1}
+ };
+
+ if ((i2c_transfer(adapter, msg, 2) != 2)
+ || (pwm == 0xff))
+ pwm = 0x48;
+
+ return pwm;
+}
+
+static int vp2040_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+
+ dprintk(MANTIS_ERROR, 1, "Probing for CU1216 (DVB-C)");
+ fe = tda10021_attach(&vp2040_tda1002x_cu1216_config,
+ adapter,
+ read_pwm(mantis));
+
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found Philips CU1216 DVB-C frontend (TDA10021) @ 0x%02x",
+ vp2040_tda1002x_cu1216_config.demod_address);
+ } else {
+ fe = tda10023_attach(&vp2040_tda10023_cu1216_config,
+ adapter,
+ read_pwm(mantis));
+
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found Philips CU1216 DVB-C frontend (TDA10023) @ 0x%02x",
+ vp2040_tda1002x_cu1216_config.demod_address);
+ }
+ }
+
+ if (fe) {
+ fe->ops.tuner_ops.set_params = tda1002x_cu1216_tuner_set;
+ dprintk(MANTIS_ERROR, 1, "Mantis DVB-C Philips CU1216 frontend attach success");
+ } else {
+ return -1;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+ mantis->fe = fe;
+ dprintk(MANTIS_DEBUG, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp2040_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_204,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp2040_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp2040.h b/drivers/media/dvb/mantis/mantis_vp2040.h
new file mode 100644
index 0000000..d125e21
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2040.h
@@ -0,0 +1,32 @@
+/*
+ Mantis VP-2040 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP2040_H
+#define __MANTIS_VP2040_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_2040_DVB_C 0x0043
+#define CINERGY_C 0x1178
+#define CABLESTAR_HD2 0x0002
+
+extern struct mantis_hwconfig vp2040_config;
+
+#endif /* __MANTIS_VP2040_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp3028.c b/drivers/media/dvb/mantis/mantis_vp3028.c
new file mode 100644
index 0000000..4155c83
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3028.c
@@ -0,0 +1,38 @@
+/*
+ Mantis VP-3028 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include "mantis_common.h"
+#include "mantis_vp3028.h"
+
+struct zl10353_config mantis_vp3028_config = {
+ .demod_address = 0x0f,
+};
+
+#define MANTIS_MODEL_NAME "VP-3028"
+#define MANTIS_DEV_TYPE "DVB-T"
+
+struct mantis_hwconfig vp3028_mantis_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_188,
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp3028.h b/drivers/media/dvb/mantis/mantis_vp3028.h
new file mode 100644
index 0000000..b07be6a
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3028.h
@@ -0,0 +1,33 @@
+/*
+ Mantis VP-3028 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP3028_H
+#define __MANTIS_VP3028_H
+
+#include "dvb_frontend.h"
+#include "mantis_common.h"
+#include "zl10353.h"
+
+#define MANTIS_VP_3028_DVB_T 0x0028
+
+extern struct zl10353_config mantis_vp3028_config;
+extern struct mantis_hwconfig vp3028_mantis_config;
+
+#endif /* __MANTIS_VP3028_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp3030.c b/drivers/media/dvb/mantis/mantis_vp3030.c
new file mode 100644
index 0000000..1f43342
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3030.c
@@ -0,0 +1,105 @@
+/*
+ Mantis VP-3030 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "zl10353.h"
+#include "tda665x.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp3030.h"
+
+struct zl10353_config mantis_vp3030_config = {
+ .demod_address = 0x0f,
+};
+
+struct tda665x_config env57h12d5_config = {
+ .name = "ENV57H12D5 (ET-50DT)",
+ .addr = 0x60,
+ .frequency_min = 47000000,
+ .frequency_max = 862000000,
+ .frequency_offst = 3616667,
+ .ref_multiplier = 6, /* 1/6 MHz */
+ .ref_divider = 100000, /* 1/6 MHz */
+};
+
+#define MANTIS_MODEL_NAME "VP-3030"
+#define MANTIS_DEV_TYPE "DVB-T"
+
+static int vp3030_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ int err = 0;
+
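+ /* hold the demodulator in reset across the power-up sequence, then release it */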
+ gpio_set_bits(mantis, config->reset, 0);
+ msleep(100);
+ err = mantis_frontend_power(mantis, POWER_ON);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 1);
+
+ if (err == 0) {
+ msleep(250);
+ dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
+ fe = zl10353_attach(&mantis_vp3030_config, adapter);
+
+ if (!fe)
+ return -1;
+
+ tda665x_attach(fe, &env57h12d5_config, adapter);
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+
+ }
+ mantis->fe = fe;
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp3030_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_188,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp3030_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+
+ .i2c_mode = MANTIS_BYTE_MODE
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp3030.h b/drivers/media/dvb/mantis/mantis_vp3030.h
new file mode 100644
index 0000000..5f12c42
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3030.h
@@ -0,0 +1,30 @@
+/*
+ Mantis VP-3030 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP3030_H
+#define __MANTIS_VP3030_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_3030_DVB_T 0x0024
+
+extern struct mantis_hwconfig vp3030_config;
+
+#endif /* __MANTIS_VP3030_H */
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 3182a40..ae08b07 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4461,6 +4461,7 @@ static int __devinit bttv_probe(struct pci_dev *dev,
request_modules(btv);
}
+ init_bttv_i2c_ir(btv);
bttv_input_init(btv);
/* everything is fine */
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 63aa31a..407fa61 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -388,7 +388,12 @@ int __devinit init_bttv_i2c(struct bttv *btv)
if (0 == btv->i2c_rc && i2c_scan)
do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client);
- /* Instantiate the IR receiver device, if present */
+ return btv->i2c_rc;
+}
+
+/* Instantiate the I2C IR receiver device, if present */
+void __devinit init_bttv_i2c_ir(struct bttv *btv)
+{
if (0 == btv->i2c_rc) {
struct i2c_board_info info;
/* The external IR receiver is at i2c address 0x34 (0x35 for
@@ -408,7 +413,6 @@ int __devinit init_bttv_i2c(struct bttv *btv)
strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
}
- return btv->i2c_rc;
}
int __devexit fini_bttv_i2c(struct bttv *btv)
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index a1d0e9c..6cccc2a 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -279,6 +279,7 @@ extern unsigned int bttv_debug;
extern unsigned int bttv_gpio;
extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
extern int init_bttv_i2c(struct bttv *btv);
+extern void init_bttv_i2c_ir(struct bttv *btv);
extern int fini_bttv_i2c(struct bttv *btv);
#define bttv_printk if (bttv_verbose) printk
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c
index 3ccc8af..2bf57a4 100644
--- a/drivers/media/video/cx23885/cx23888-ir.c
+++ b/drivers/media/video/cx23885/cx23888-ir.c
@@ -124,15 +124,12 @@ struct cx23888_ir_state {
atomic_t rxclk_divider;
atomic_t rx_invert;
- struct kfifo *rx_kfifo;
+ struct kfifo rx_kfifo;
spinlock_t rx_kfifo_lock;
struct v4l2_subdev_ir_parameters tx_params;
struct mutex tx_params_lock;
atomic_t txclk_divider;
-
- struct kfifo *tx_kfifo;
- spinlock_t tx_kfifo_lock;
};
static inline struct cx23888_ir_state *to_state(struct v4l2_subdev *sd)
@@ -522,6 +519,7 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
{
struct cx23888_ir_state *state = to_state(sd);
struct cx23885_dev *dev = state->dev;
+ unsigned long flags;
u32 cntrl = cx23888_ir_read4(dev, CX23888_IR_CNTRL_REG);
u32 irqen = cx23888_ir_read4(dev, CX23888_IR_IRQEN_REG);
@@ -594,8 +592,9 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
if (i == 0)
break;
j = i * sizeof(u32);
- k = kfifo_put(state->rx_kfifo,
- (unsigned char *) rx_data, j);
+ k = kfifo_in_locked(&state->rx_kfifo,
+ (unsigned char *) rx_data, j,
+ &state->rx_kfifo_lock);
if (k != j)
kror++; /* rx_kfifo over run */
}
@@ -631,8 +630,11 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl);
*handled = true;
}
- if (kfifo_len(state->rx_kfifo) >= CX23888_IR_RX_KFIFO_SIZE / 2)
+
+ spin_lock_irqsave(&state->rx_kfifo_lock, flags);
+ if (kfifo_len(&state->rx_kfifo) >= CX23888_IR_RX_KFIFO_SIZE / 2)
events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ;
+ spin_unlock_irqrestore(&state->rx_kfifo_lock, flags);
if (events)
v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
@@ -657,7 +659,7 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
return 0;
}
- n = kfifo_get(state->rx_kfifo, buf, n);
+ n = kfifo_out_locked(&state->rx_kfifo, buf, n, &state->rx_kfifo_lock);
n /= sizeof(u32);
*num = n * sizeof(u32);
@@ -785,7 +787,12 @@ static int cx23888_ir_rx_s_parameters(struct v4l2_subdev *sd,
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
if (p->enable) {
- kfifo_reset(state->rx_kfifo);
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->rx_kfifo_lock, flags);
+ kfifo_reset(&state->rx_kfifo);
+ /* reset tx_fifo too if there is one... */
+ spin_unlock_irqrestore(&state->rx_kfifo_lock, flags);
if (p->interrupt_enable)
irqenable_rx(dev, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE);
control_rx_enable(dev, p->enable);
@@ -892,7 +899,6 @@ static int cx23888_ir_tx_s_parameters(struct v4l2_subdev *sd,
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
if (p->enable) {
- kfifo_reset(state->tx_kfifo);
if (p->interrupt_enable)
irqenable_tx(dev, IRQEN_TSE);
control_tx_enable(dev, p->enable);
@@ -1168,18 +1174,8 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
return -ENOMEM;
spin_lock_init(&state->rx_kfifo_lock);
- state->rx_kfifo = kfifo_alloc(CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL,
- &state->rx_kfifo_lock);
- if (state->rx_kfifo == NULL)
- return -ENOMEM;
-
- spin_lock_init(&state->tx_kfifo_lock);
- state->tx_kfifo = kfifo_alloc(CX23888_IR_TX_KFIFO_SIZE, GFP_KERNEL,
- &state->tx_kfifo_lock);
- if (state->tx_kfifo == NULL) {
- kfifo_free(state->rx_kfifo);
+ if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
return -ENOMEM;
- }
state->dev = dev;
state->id = V4L2_IDENT_CX23888_IR;
@@ -1211,8 +1207,7 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
sizeof(struct v4l2_subdev_ir_parameters));
v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
} else {
- kfifo_free(state->rx_kfifo);
- kfifo_free(state->tx_kfifo);
+ kfifo_free(&state->rx_kfifo);
}
return ret;
}
@@ -1231,8 +1226,7 @@ int cx23888_ir_remove(struct cx23885_dev *dev)
state = to_state(sd);
v4l2_device_unregister_subdev(sd);
- kfifo_free(state->rx_kfifo);
- kfifo_free(state->tx_kfifo);
+ kfifo_free(&state->rx_kfifo);
kfree(state);
/* Nothing more to free() as state held the actual v4l2_subdev object */
return 0;
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index e930a67..bd6214d 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1815,6 +1815,8 @@ static int vidioc_qbuf(struct file *file, void *priv,
/* put the buffer in the 'queued' queue */
i = gspca_dev->fr_q;
gspca_dev->fr_queue[i] = index;
+ if (gspca_dev->fr_i == i)
+ gspca_dev->cur_frame = frame;
gspca_dev->fr_q = (i + 1) % gspca_dev->nframes;
PDEBUG(D_FRAM, "qbuf q:%d i:%d o:%d",
gspca_dev->fr_q,
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index aa2f3c7e..1b536f7d 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -48,6 +48,12 @@ static
DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xa 2528")
}
}, {
+ .ident = "Fujitsu-Siemens Amilo Xi 2428",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 2428")
+ }
+ }, {
.ident = "Fujitsu-Siemens Amilo Xi 2528",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 4dbb882..0a6b8f0 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -1533,7 +1533,7 @@ static void setexposure_96(struct gspca_dev *gspca_dev)
static void setsharpness_96(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
+ s8 val;
val = sd->sharpness;
if (val < 0) { /* auto */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 4cff803..0ca1c06 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2319,7 +2319,7 @@ static void do_autogain(struct gspca_dev *gspca_dev, u16 avg_lum)
}
}
if (avg_lum > MAX_AVG_LUM) {
- if (sd->gain - 1 >= 0) {
+ if (sd->gain >= 1) {
sd->gain--;
set_gain(gspca_dev);
}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
index 487d405..96c6192 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -228,6 +228,7 @@ static const struct stv_init stv_bridge_init[] = {
/* This reg is written twice. Some kind of reset? */
{NULL, 0x1620, 0x80},
{NULL, 0x1620, 0x00},
+ {NULL, 0x1443, 0x00},
{NULL, 0x1423, 0x04},
{x1500, 0x1500, ARRAY_SIZE(x1500)},
{x1536, 0x1536, ARRAY_SIZE(x1536)},
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 716df6b..306b7d7 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -709,7 +709,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
spca504B_PollingDataReady(gspca_dev);
/* Init the cam width/height with some values obtained at init? */
- reg_w_riv(gspca_dev, 0x31, 0, 0x04);
+ reg_w_riv(gspca_dev, 0x31, 0x0004, 0x00);
spca504B_WaitCmdStatus(gspca_dev);
spca504B_PollingDataReady(gspca_dev);
break;
@@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
default:
/* case BRIDGE_SPCA533: */
/* case BRIDGE_SPCA504B: */
- reg_w_riv(gspca_dev, 0, 0x00, 0x21ad); /* hue */
- reg_w_riv(gspca_dev, 0, 0x01, 0x21ac); /* sat/hue */
- reg_w_riv(gspca_dev, 0, 0x00, 0x21a3); /* gamma */
+ reg_w_riv(gspca_dev, 0, 0x21ad, 0x00); /* hue */
+ reg_w_riv(gspca_dev, 0, 0x21ac, 0x01); /* sat/hue */
+ reg_w_riv(gspca_dev, 0, 0x21a3, 0x00); /* gamma */
break;
case BRIDGE_SPCA536:
- reg_w_riv(gspca_dev, 0, 0x40, 0x20f5);
- reg_w_riv(gspca_dev, 0, 0x01, 0x20f4);
- reg_w_riv(gspca_dev, 0, 0x00, 0x2089);
+ reg_w_riv(gspca_dev, 0, 0x20f5, 0x40);
+ reg_w_riv(gspca_dev, 0, 0x20f4, 0x01);
+ reg_w_riv(gspca_dev, 0, 0x2089, 0x00);
break;
}
if (pollreg)
@@ -887,11 +887,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
switch (sd->bridge) {
case BRIDGE_SPCA504B:
reg_w_riv(gspca_dev, 0x1d, 0x00, 0);
- reg_w_riv(gspca_dev, 0, 0x01, 0x2306);
- reg_w_riv(gspca_dev, 0, 0x00, 0x0d04);
- reg_w_riv(gspca_dev, 0, 0x00, 0x2000);
- reg_w_riv(gspca_dev, 0, 0x13, 0x2301);
- reg_w_riv(gspca_dev, 0, 0x00, 0x2306);
+ reg_w_riv(gspca_dev, 0x00, 0x2306, 0x01);
+ reg_w_riv(gspca_dev, 0x00, 0x0d04, 0x00);
+ reg_w_riv(gspca_dev, 0x00, 0x2000, 0x00);
+ reg_w_riv(gspca_dev, 0x00, 0x2301, 0x13);
+ reg_w_riv(gspca_dev, 0x00, 0x2306, 0x00);
/* fall thru */
case BRIDGE_SPCA533:
spca504B_PollingDataReady(gspca_dev);
@@ -1000,7 +1000,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
spca504B_WaitCmdStatus(gspca_dev);
break;
default:
- reg_w_riv(gspca_dev, 0x31, 0, 0x04);
+ reg_w_riv(gspca_dev, 0x31, 0x0004, 0x00);
spca504B_WaitCmdStatus(gspca_dev);
spca504B_PollingDataReady(gspca_dev);
break;
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index c090efc..71921c8 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -3009,6 +3009,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
int l;
frame = gspca_get_i_frame(gspca_dev);
+ if (frame == NULL) {
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
l = frame->data_end - frame->data;
if (len > frame->v4l2_buf.length - l)
len = frame->v4l2_buf.length - l;
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 6ffa64c..b421858 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -800,8 +800,8 @@ again:
return IRQ_HANDLED;
if (meye.mchip_mode == MCHIP_HIC_MODE_CONT_OUT) {
- if (kfifo_get(meye.grabq, (unsigned char *)&reqnr,
- sizeof(int)) != sizeof(int)) {
+ if (kfifo_out_locked(&meye.grabq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.grabq_lock) != sizeof(int)) {
mchip_free_frame();
return IRQ_HANDLED;
}
@@ -811,7 +811,8 @@ again:
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
- kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int));
+ kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.doneq_lock);
wake_up_interruptible(&meye.proc_list);
} else {
int size;
@@ -820,8 +821,8 @@ again:
mchip_free_frame();
goto again;
}
- if (kfifo_get(meye.grabq, (unsigned char *)&reqnr,
- sizeof(int)) != sizeof(int)) {
+ if (kfifo_out_locked(&meye.grabq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.grabq_lock) != sizeof(int)) {
mchip_free_frame();
goto again;
}
@@ -831,7 +832,8 @@ again:
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
- kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int));
+ kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.doneq_lock);
wake_up_interruptible(&meye.proc_list);
}
mchip_free_frame();
@@ -859,8 +861,8 @@ static int meye_open(struct file *file)
for (i = 0; i < MEYE_MAX_BUFNBRS; i++)
meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
- kfifo_reset(meye.grabq);
- kfifo_reset(meye.doneq);
+ kfifo_reset(&meye.grabq);
+ kfifo_reset(&meye.doneq);
return 0;
}
@@ -933,7 +935,8 @@ static int meyeioc_qbuf_capt(int *nb)
mchip_cont_compression_start();
meye.grab_buffer[*nb].state = MEYE_BUF_USING;
- kfifo_put(meye.grabq, (unsigned char *)nb, sizeof(int));
+ kfifo_in_locked(&meye.grabq, (unsigned char *)nb, sizeof(int),
+ &meye.grabq_lock);
mutex_unlock(&meye.lock);
return 0;
@@ -965,7 +968,9 @@ static int meyeioc_sync(struct file *file, void *fh, int *i)
/* fall through */
case MEYE_BUF_DONE:
meye.grab_buffer[*i].state = MEYE_BUF_UNUSED;
- kfifo_get(meye.doneq, (unsigned char *)&unused, sizeof(int));
+ if (kfifo_out_locked(&meye.doneq, (unsigned char *)&unused,
+ sizeof(int), &meye.doneq_lock) != sizeof(int))
+ break;
}
*i = meye.grab_buffer[*i].size;
mutex_unlock(&meye.lock);
@@ -1452,7 +1457,8 @@ static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->flags |= V4L2_BUF_FLAG_QUEUED;
buf->flags &= ~V4L2_BUF_FLAG_DONE;
meye.grab_buffer[buf->index].state = MEYE_BUF_USING;
- kfifo_put(meye.grabq, (unsigned char *)&buf->index, sizeof(int));
+ kfifo_in_locked(&meye.grabq, (unsigned char *)&buf->index,
+ sizeof(int), &meye.grabq_lock);
mutex_unlock(&meye.lock);
return 0;
@@ -1467,19 +1473,19 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
mutex_lock(&meye.lock);
- if (kfifo_len(meye.doneq) == 0 && file->f_flags & O_NONBLOCK) {
+ if (kfifo_len(&meye.doneq) == 0 && file->f_flags & O_NONBLOCK) {
mutex_unlock(&meye.lock);
return -EAGAIN;
}
if (wait_event_interruptible(meye.proc_list,
- kfifo_len(meye.doneq) != 0) < 0) {
+ kfifo_len(&meye.doneq) != 0) < 0) {
mutex_unlock(&meye.lock);
return -EINTR;
}
- if (!kfifo_get(meye.doneq, (unsigned char *)&reqnr,
- sizeof(int))) {
+ if (!kfifo_out_locked(&meye.doneq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.doneq_lock)) {
mutex_unlock(&meye.lock);
return -EBUSY;
}
@@ -1529,8 +1535,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
mutex_lock(&meye.lock);
mchip_hic_stop();
- kfifo_reset(meye.grabq);
- kfifo_reset(meye.doneq);
+ kfifo_reset(&meye.grabq);
+ kfifo_reset(&meye.doneq);
for (i = 0; i < MEYE_MAX_BUFNBRS; i++)
meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
@@ -1572,7 +1578,7 @@ static unsigned int meye_poll(struct file *file, poll_table *wait)
mutex_lock(&meye.lock);
poll_wait(file, &meye.proc_list, wait);
- if (kfifo_len(meye.doneq))
+ if (kfifo_len(&meye.doneq))
res = POLLIN | POLLRDNORM;
mutex_unlock(&meye.lock);
return res;
@@ -1745,16 +1751,14 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
}
spin_lock_init(&meye.grabq_lock);
- meye.grabq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL,
- &meye.grabq_lock);
- if (IS_ERR(meye.grabq)) {
+ if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS,
+ GFP_KERNEL)) {
printk(KERN_ERR "meye: fifo allocation failed\n");
goto outkfifoalloc1;
}
spin_lock_init(&meye.doneq_lock);
- meye.doneq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL,
- &meye.doneq_lock);
- if (IS_ERR(meye.doneq)) {
+ if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS,
+ GFP_KERNEL)) {
printk(KERN_ERR "meye: fifo allocation failed\n");
goto outkfifoalloc2;
}
@@ -1868,9 +1872,9 @@ outregions:
outenabledev:
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0);
outsonypienable:
- kfifo_free(meye.doneq);
+ kfifo_free(&meye.doneq);
outkfifoalloc2:
- kfifo_free(meye.grabq);
+ kfifo_free(&meye.grabq);
outkfifoalloc1:
vfree(meye.grab_temp);
outvmalloc:
@@ -1901,8 +1905,8 @@ static void __devexit meye_remove(struct pci_dev *pcidev)
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0);
- kfifo_free(meye.doneq);
- kfifo_free(meye.grabq);
+ kfifo_free(&meye.doneq);
+ kfifo_free(&meye.grabq);
vfree(meye.grab_temp);
diff --git a/drivers/media/video/meye.h b/drivers/media/video/meye.h
index 5f70a10..1321ad5 100644
--- a/drivers/media/video/meye.h
+++ b/drivers/media/video/meye.h
@@ -303,9 +303,9 @@ struct meye {
struct meye_grab_buffer grab_buffer[MEYE_MAX_BUFNBRS];
int vma_use_count[MEYE_MAX_BUFNBRS]; /* mmap count */
struct mutex lock; /* mutex for open/mmap... */
- struct kfifo *grabq; /* queue for buffers to be grabbed */
+ struct kfifo grabq; /* queue for buffers to be grabbed */
spinlock_t grabq_lock; /* lock protecting the queue */
- struct kfifo *doneq; /* queue for grabbed buffers */
+ struct kfifo doneq; /* queue for grabbed buffers */
spinlock_t doneq_lock; /* lock protecting the queue */
wait_queue_head_t proc_list; /* wait queue */
struct video_device *video_dev; /* video device parameters */
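The meye and cx23888-ir changes above follow the same kfifo migration pattern: the struct kfifo is now embedded in the driver state, kfifo_alloc() takes it by reference and returns an error code, and the caller's spinlock is handed to the locked in/out helpers that replace kfifo_put()/kfifo_get(). A minimal sketch of that pattern, using hypothetical demo_* names rather than code from any driver in this series:

/* Sketch only: illustrates the in-place kfifo API assumed by these patches. */
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_state {
	struct kfifo fifo;	/* embedded, no longer a pointer */
	spinlock_t fifo_lock;	/* serializes IRQ vs. process context access */
};

static int demo_init(struct demo_state *st)
{
	spin_lock_init(&st->fifo_lock);
	/* kfifo_alloc() now takes the kfifo by reference, returns 0 or -ENOMEM */
	return kfifo_alloc(&st->fifo, 4096, GFP_KERNEL);
}

static void demo_produce(struct demo_state *st, const u8 *buf, unsigned int len)
{
	/* kfifo_in_locked() replaces the old kfifo_put() */
	kfifo_in_locked(&st->fifo, buf, len, &st->fifo_lock);
}

static unsigned int demo_consume(struct demo_state *st, u8 *buf, unsigned int len)
{
	/* kfifo_out_locked() replaces the old kfifo_get() */
	return kfifo_out_locked(&st->fifo, buf, len, &st->fifo_lock);
}

static void demo_exit(struct demo_state *st)
{
	kfifo_free(&st->fifo);
}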
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index fc4dd60..7438f8d 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -514,7 +514,7 @@ static int mt9t112_init_pll(const struct i2c_client *client)
/* poll to verify out of standby. Must Poll this bit */
for (i = 0; i < 100; i++) {
mt9t112_reg_read(data, client, 0x0018);
- if (0x4000 & data)
+ if (!(0x4000 & data))
break;
mdelay(10);
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 2ba14fb..c167cc3 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -718,7 +718,7 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || !irq) {
+ if (!res || (int)irq <= 0) {
err = -ENODEV;
goto exit;
}
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 50b415e..f7f7e04 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -753,7 +753,7 @@ int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
buf[0] = 0xff; /* fixed */
ret = send_control_msg(pdev,
- SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf));
+ SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
if (!mode && ret >= 0) {
if (value < 0)
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 7e42989..805226e 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -563,7 +563,7 @@ static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
struct v4l2_rect *rect = &a->c;
- unsigned int dummy, output_w, output_h,
+ unsigned int dummy = 0, output_w, output_h,
input_w = rect->width, input_h = rect->height;
int ret;
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 9f85e91..a7ad781 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -420,19 +420,6 @@ int saa7134_set_dmabits(struct saa7134_dev *dev)
ctrl |= SAA7134_MAIN_CTRL_TE5;
irq |= SAA7134_IRQ1_INTE_RA2_1 |
SAA7134_IRQ1_INTE_RA2_0;
-
- /* dma: setup channel 5 (= TS) */
-
- saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
- saa_writeb(SAA7134_TS_DMA1,
- ((dev->ts.nr_packets - 1) >> 8) & 0xff);
- /* TSNOPIT=0, TSCOLAP=0 */
- saa_writeb(SAA7134_TS_DMA2,
- (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
- saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
- saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
- SAA7134_RS_CONTROL_ME |
- (dev->ts.pt_ts.dma >> 12));
}
/* set task conditions + field handling */
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 7dfecfc..ee5bff0 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -93,9 +93,9 @@ static int ts_open(struct file *file)
dprintk("open dev=%s\n", video_device_node_name(vdev));
err = -EBUSY;
if (!mutex_trylock(&dev->empress_tsq.vb_lock))
- goto done;
+ return err;
if (atomic_read(&dev->empress_users))
- goto done_up;
+ goto done;
/* Unmute audio */
saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
@@ -105,10 +105,8 @@ static int ts_open(struct file *file)
file->private_data = dev;
err = 0;
-done_up:
- mutex_unlock(&dev->empress_tsq.vb_lock);
done:
- unlock_kernel();
+ mutex_unlock(&dev->empress_tsq.vb_lock);
return err;
}
diff --git a/drivers/media/video/saa7134/saa7134-ts.c b/drivers/media/video/saa7134/saa7134-ts.c
index 03488ba..b9817d7 100644
--- a/drivers/media/video/saa7134/saa7134-ts.c
+++ b/drivers/media/video/saa7134/saa7134-ts.c
@@ -250,6 +250,19 @@ int saa7134_ts_start(struct saa7134_dev *dev)
BUG_ON(dev->ts_started);
+ /* dma: setup channel 5 (= TS) */
+ saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
+ saa_writeb(SAA7134_TS_DMA1,
+ ((dev->ts.nr_packets - 1) >> 8) & 0xff);
+ /* TSNOPIT=0, TSCOLAP=0 */
+ saa_writeb(SAA7134_TS_DMA2,
+ (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
+ saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
+ saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
+ SAA7134_RS_CONTROL_ME |
+ (dev->ts.pt_ts.dma >> 12));
+
+ /* reset hardware TS buffers */
saa_writeb(SAA7134_TS_SERIAL1, 0x00);
saa_writeb(SAA7134_TS_SERIAL1, 0x03);
saa_writeb(SAA7134_TS_SERIAL1, 0x00);
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index d69363f..f09c714 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -1827,7 +1827,7 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || !irq) {
+ if (!res || (int)irq <= 0) {
dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
err = -ENODEV;
goto exit;
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 0469d7a..ec8ef8c 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1393,7 +1393,7 @@ uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
size = entity->processing.bControlSize;
for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
- if (!usb_match_id(dev->intf, &blacklist[i].id))
+ if (!usb_match_one_id(dev->intf, &blacklist[i].id))
continue;
if (blacklist[i].index >= 8 * size ||
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index f854698..ea11839 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -59,9 +59,9 @@
* returns immediately.
*
* When the buffer is full, the completion handler removes it from the irq
- * queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue.
+ * queue, marks it as done (UVC_BUF_STATE_DONE) and wakes its wait queue.
* At that point, any process waiting on the buffer will be woken up. If a
- * process tries to dequeue a buffer after it has been marked ready, the
+ * process tries to dequeue a buffer after it has been marked done, the
 * dequeuing will succeed immediately.
*
* 2. Buffers are queued, user is waiting on a buffer and the device gets
@@ -201,6 +201,7 @@ static void __uvc_query_buffer(struct uvc_buffer *buf,
break;
case UVC_BUF_STATE_QUEUED:
case UVC_BUF_STATE_ACTIVE:
+ case UVC_BUF_STATE_READY:
v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
break;
case UVC_BUF_STATE_IDLE:
@@ -295,13 +296,15 @@ static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
{
if (nonblocking) {
return (buf->state != UVC_BUF_STATE_QUEUED &&
- buf->state != UVC_BUF_STATE_ACTIVE)
+ buf->state != UVC_BUF_STATE_ACTIVE &&
+ buf->state != UVC_BUF_STATE_READY)
? 0 : -EAGAIN;
}
return wait_event_interruptible(buf->wait,
buf->state != UVC_BUF_STATE_QUEUED &&
- buf->state != UVC_BUF_STATE_ACTIVE);
+ buf->state != UVC_BUF_STATE_ACTIVE &&
+ buf->state != UVC_BUF_STATE_READY);
}
/*
@@ -348,6 +351,7 @@ int uvc_dequeue_buffer(struct uvc_video_queue *queue,
case UVC_BUF_STATE_IDLE:
case UVC_BUF_STATE_QUEUED:
case UVC_BUF_STATE_ACTIVE:
+ case UVC_BUF_STATE_READY:
default:
uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
"(driver bug?).\n", buf->state);
@@ -489,6 +493,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
spin_lock_irqsave(&queue->irqlock, flags);
list_del(&buf->queue);
+ buf->state = UVC_BUF_STATE_DONE;
if (!list_empty(&queue->irqqueue))
nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 9a98028..7dcf534 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -441,7 +441,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
if (fid != stream->last_fid && buf->buf.bytesused != 0) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete (FID bit "
"toggled).\n");
- buf->state = UVC_BUF_STATE_DONE;
+ buf->state = UVC_BUF_STATE_READY;
return -EAGAIN;
}
@@ -470,7 +470,7 @@ static void uvc_video_decode_data(struct uvc_streaming *stream,
/* Complete the current frame if the buffer size was exceeded. */
if (len > maxlen) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete (overflow).\n");
- buf->state = UVC_BUF_STATE_DONE;
+ buf->state = UVC_BUF_STATE_READY;
}
}
@@ -482,7 +482,7 @@ static void uvc_video_decode_end(struct uvc_streaming *stream,
uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n");
if (data[0] == len)
uvc_trace(UVC_TRACE_FRAME, "EOF in empty payload.\n");
- buf->state = UVC_BUF_STATE_DONE;
+ buf->state = UVC_BUF_STATE_READY;
if (stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID)
stream->last_fid ^= UVC_STREAM_FID;
}
@@ -568,8 +568,7 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
uvc_video_decode_end(stream, buf, mem,
urb->iso_frame_desc[i].actual_length);
- if (buf->state == UVC_BUF_STATE_DONE ||
- buf->state == UVC_BUF_STATE_ERROR)
+ if (buf->state == UVC_BUF_STATE_READY)
buf = uvc_queue_next_buffer(&stream->queue, buf);
}
}
@@ -627,8 +626,7 @@ static void uvc_video_decode_bulk(struct urb *urb, struct uvc_streaming *stream,
if (!stream->bulk.skip_payload && buf != NULL) {
uvc_video_decode_end(stream, buf, stream->bulk.header,
stream->bulk.payload_size);
- if (buf->state == UVC_BUF_STATE_DONE ||
- buf->state == UVC_BUF_STATE_ERROR)
+ if (buf->state == UVC_BUF_STATE_READY)
buf = uvc_queue_next_buffer(&stream->queue,
buf);
}
@@ -669,7 +667,7 @@ static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
stream->bulk.payload_size == stream->bulk.max_payload_size) {
if (buf->buf.bytesused == stream->queue.buf_used) {
stream->queue.buf_used = 0;
- buf->state = UVC_BUF_STATE_DONE;
+ buf->state = UVC_BUF_STATE_READY;
uvc_queue_next_buffer(&stream->queue, buf);
stream->last_fid ^= UVC_STREAM_FID;
}
@@ -924,10 +922,8 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
{
struct usb_interface *intf = stream->intf;
- struct usb_host_interface *alts;
- struct usb_host_endpoint *ep = NULL;
- int intfnum = stream->intfnum;
- unsigned int bandwidth, psize, i;
+ struct usb_host_endpoint *ep;
+ unsigned int i;
int ret;
stream->last_fid = -1;
@@ -936,6 +932,12 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
stream->bulk.payload_size = 0;
if (intf->num_altsetting > 1) {
+ struct usb_host_endpoint *best_ep = NULL;
+ unsigned int best_psize = 3 * 1024;
+ unsigned int bandwidth;
+ unsigned int uninitialized_var(altsetting);
+ int intfnum = stream->intfnum;
+
/* Isochronous endpoint, select the alternate setting. */
bandwidth = stream->ctrl.dwMaxPayloadTransferSize;
@@ -949,6 +951,9 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
}
for (i = 0; i < intf->num_altsetting; ++i) {
+ struct usb_host_interface *alts;
+ unsigned int psize;
+
alts = &intf->altsetting[i];
ep = uvc_find_endpoint(alts,
stream->header.bEndpointAddress);
@@ -958,21 +963,27 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
/* Check if the bandwidth is high enough. */
psize = le16_to_cpu(ep->desc.wMaxPacketSize);
psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
- if (psize >= bandwidth)
- break;
+ if (psize >= bandwidth && psize <= best_psize) {
+ altsetting = i;
+ best_psize = psize;
+ best_ep = ep;
+ }
}
- if (i >= intf->num_altsetting) {
+ if (best_ep == NULL) {
uvc_trace(UVC_TRACE_VIDEO, "No fast enough alt setting "
"for requested bandwidth.\n");
return -EIO;
}
- ret = usb_set_interface(stream->dev->udev, intfnum, i);
+ uvc_trace(UVC_TRACE_VIDEO, "Selecting alternate setting %u "
+ "(%u B/frame bandwidth).\n", altsetting, best_psize);
+
+ ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
if (ret < 0)
return ret;
- ret = uvc_init_video_isoc(stream, ep, gfp_flags);
+ ret = uvc_init_video_isoc(stream, best_ep, gfp_flags);
} else {
/* Bulk endpoint, proceed to URB initialization. */
ep = uvc_find_endpoint(&intf->altsetting[0],
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 7ec9a04..2337585 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -365,8 +365,9 @@ enum uvc_buffer_state {
UVC_BUF_STATE_IDLE = 0,
UVC_BUF_STATE_QUEUED = 1,
UVC_BUF_STATE_ACTIVE = 2,
- UVC_BUF_STATE_DONE = 3,
- UVC_BUF_STATE_ERROR = 4,
+ UVC_BUF_STATE_READY = 3,
+ UVC_BUF_STATE_DONE = 4,
+ UVC_BUF_STATE_ERROR = 5,
};
struct uvc_buffer {
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 610e914..44d2037 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1587,7 +1587,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
{
u8 __iomem *mem;
int ii;
- unsigned long mem_phys;
+ resource_size_t mem_phys;
unsigned long port;
u32 msize;
u32 psize;
@@ -1677,8 +1677,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
return -EINVAL;
}
ioc->memmap = mem;
- dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %lx\n",
- ioc->name, mem, mem_phys));
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
+ ioc->name, mem, (unsigned long long)mem_phys));
ioc->mem_phys = mem_phys;
ioc->chip = (SYSIF_REGS __iomem *)mem;
@@ -4330,6 +4330,8 @@ initChainBuffers(MPT_ADAPTER *ioc)
if (ioc->bus_type == SPI)
num_chain *= MPT_SCSI_CAN_QUEUE;
+ else if (ioc->bus_type == SAS)
+ num_chain *= MPT_SAS_CAN_QUEUE;
else
num_chain *= MPT_FC_CAN_QUEUE;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 5775275..81279b3 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
"Command not in the active list! (sc=%p)\n", ioc->name,
SCpnt));
- retval = 0;
+ retval = SUCCESS;
goto out;
}
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index efba702..3d5f40c 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -40,8 +40,7 @@
#define SG_TABLESIZE 30
-static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int,
- unsigned long);
+static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long);
static spinlock_t i2o_config_lock;
@@ -751,7 +750,7 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
lock_kernel();
switch (cmd) {
case I2OGETIOPS:
- ret = i2o_cfg_ioctl(NULL, file, cmd, arg);
+ ret = i2o_cfg_ioctl(file, cmd, arg);
break;
case I2OPASSTHRU32:
ret = i2o_cfg_passthru32(file, cmd, arg);
@@ -984,11 +983,11 @@ out:
/*
* IOCTL Handler
*/
-static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
- unsigned long arg)
+static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
int ret;
+ lock_kernel();
switch (cmd) {
case I2OGETIOPS:
ret = i2o_cfg_getiops(arg);
@@ -1044,7 +1043,7 @@ static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
osm_debug("unknown ioctl called!\n");
ret = -EINVAL;
}
-
+ unlock_kernel();
return ret;
}
@@ -1118,7 +1117,7 @@ static int cfg_release(struct inode *inode, struct file *file)
static const struct file_operations config_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .ioctl = i2o_cfg_ioctl,
+ .unlocked_ioctl = i2o_cfg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = i2o_cfg_compat_ioctl,
#endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index ca2f2c4..e09eb48 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -3,7 +3,7 @@
#
obj-$(CONFIG_MFD_SM501) += sm501.o
-obj-$(CONFIG_MFD_ASIC3) += asic3.o
+obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
@@ -11,9 +11,9 @@ obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
-obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o
-obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o
-obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o
+obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
+obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
+obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index e22128c..95c1e6b 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -80,6 +80,7 @@ struct asic3 {
u16 irq_bothedge[4];
struct gpio_chip gpio;
struct device *dev;
+ void __iomem *tmio_cnf;
struct asic3_clk clocks[ARRAY_SIZE(asic3_clk_init)];
};
@@ -685,8 +686,24 @@ static struct mfd_cell asic3_cell_ds1wm = {
.resources = ds1wm_resources,
};
+static void asic3_mmc_pwr(struct platform_device *pdev, int state)
+{
+ struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+ tmio_core_mmc_pwr(asic->tmio_cnf, 1 - asic->bus_shift, state);
+}
+
+static void asic3_mmc_clk_div(struct platform_device *pdev, int state)
+{
+ struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+ tmio_core_mmc_clk_div(asic->tmio_cnf, 1 - asic->bus_shift, state);
+}
+
static struct tmio_mmc_data asic3_mmc_data = {
- .hclk = 24576000,
+ .hclk = 24576000,
+ .set_pwr = asic3_mmc_pwr,
+ .set_clk_div = asic3_mmc_clk_div,
};
static struct resource asic3_mmc_resources[] = {
@@ -696,11 +713,6 @@ static struct resource asic3_mmc_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = ASIC3_SD_CONFIG_BASE,
- .end = ASIC3_SD_CONFIG_BASE + 0x1ff,
- .flags = IORESOURCE_MEM,
- },
- {
.start = 0,
.end = 0,
.flags = IORESOURCE_IRQ,
@@ -743,6 +755,10 @@ static int asic3_mmc_enable(struct platform_device *pdev)
asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
ASIC3_SDHWCTRL_SDPWR, 1);
+ /* ASIC3_SD_CTRL_BASE assumes 32-bit addressing, TMIO is 16-bit */
+ tmio_core_mmc_enable(asic->tmio_cnf, 1 - asic->bus_shift,
+ ASIC3_SD_CTRL_BASE >> 1);
+
return 0;
}
@@ -797,10 +813,15 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm);
/* MMC */
+ asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
+ mem_sdio->start, 0x400 >> asic->bus_shift);
+ if (!asic->tmio_cnf) {
+ ret = -ENOMEM;
+ dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n");
+ goto out;
+ }
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
- asic3_mmc_resources[1].start >>= asic->bus_shift;
- asic3_mmc_resources[1].end >>= asic->bus_shift;
asic3_cell_mmc.platform_data = &asic3_cell_mmc;
asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc);
@@ -820,7 +841,10 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
static void asic3_mfd_remove(struct platform_device *pdev)
{
+ struct asic3 *asic = platform_get_drvdata(pdev);
+
mfd_remove_devices(&pdev->dev);
+ iounmap(asic->tmio_cnf);
}
/* Core */
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index a1ade23..735c8a4 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -619,6 +619,8 @@ err_revision:
}
/* This should go away (END) */
+ mc13783_unlock(mc13783);
+
if (pdata->flags & MC13783_USE_ADC)
mc13783_add_subdevice(mc13783, "mc13783-adc");
@@ -641,8 +643,6 @@ err_revision:
if (pdata->flags & MC13783_USE_TOUCHSCREEN)
mc13783_add_subdevice(mc13783, "mc13783-ts");
- mc13783_unlock(mc13783);
-
return 0;
}
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 0a255c1..bcf4687 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -38,6 +38,19 @@ enum {
T7L66XB_CELL_MMC,
};
+static const struct resource t7l66xb_mmc_resources[] = {
+ {
+ .start = 0x800,
+ .end = 0x9ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_T7L66XB_MMC,
+ .end = IRQ_T7L66XB_MMC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
#define SCR_REVID 0x08 /* b Revision ID */
#define SCR_IMR 0x42 /* b Interrupt Mask */
#define SCR_DEV_CTL 0xe0 /* b Device control */
@@ -83,6 +96,9 @@ static int t7l66xb_mmc_enable(struct platform_device *mmc)
spin_unlock_irqrestore(&t7l66xb->lock, flags);
+ tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
+ t7l66xb_mmc_resources[0].start & 0xfffe);
+
return 0;
}
@@ -106,28 +122,28 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
return 0;
}
+static void t7l66xb_mmc_pwr(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state);
+}
+
+static void t7l66xb_mmc_clk_div(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state);
+}
+
/*--------------------------------------------------------------------------*/
static struct tmio_mmc_data t7166xb_mmc_data = {
.hclk = 24000000,
-};
-
-static const struct resource t7l66xb_mmc_resources[] = {
- {
- .start = 0x800,
- .end = 0x9ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x200,
- .end = 0x2ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_T7L66XB_MMC,
- .end = IRQ_T7L66XB_MMC,
- .flags = IORESOURCE_IRQ,
- },
+ .set_pwr = t7l66xb_mmc_pwr,
+ .set_clk_div = t7l66xb_mmc_clk_div,
};
static const struct resource t7l66xb_nand_resources[] = {
@@ -282,6 +298,9 @@ static int t7l66xb_resume(struct platform_device *dev)
if (pdata && pdata->resume)
pdata->resume(dev);
+ tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
+ t7l66xb_mmc_resources[0].start & 0xfffe);
+
return 0;
}
#else
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 3280ab33..5c7f043 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -22,28 +22,52 @@ enum {
TC6387XB_CELL_MMC,
};
+struct tc6387xb {
+ void __iomem *scr;
+ struct clk *clk32k;
+ struct resource rscr;
+};
+
+static struct resource tc6387xb_mmc_resources[] = {
+ {
+ .start = 0x800,
+ .end = 0x9ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/*--------------------------------------------------------------------------*/
+
#ifdef CONFIG_PM
static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
{
- struct clk *clk32k = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
if (pdata && pdata->suspend)
pdata->suspend(dev);
- clk_disable(clk32k);
+ clk_disable(tc6387xb->clk32k);
return 0;
}
static int tc6387xb_resume(struct platform_device *dev)
{
- struct clk *clk32k = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
- clk_enable(clk32k);
+ clk_enable(tc6387xb->clk32k);
if (pdata && pdata->resume)
pdata->resume(dev);
+ tmio_core_mmc_resume(tc6387xb->scr + 0x200, 0,
+ tc6387xb_mmc_resources[0].start & 0xfffe);
+
return 0;
}
#else
@@ -53,12 +77,32 @@ static int tc6387xb_resume(struct platform_device *dev)
/*--------------------------------------------------------------------------*/
+static void tc6387xb_mmc_pwr(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_pwr(tc6387xb->scr + 0x200, 0, state);
+}
+
+static void tc6387xb_mmc_clk_div(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_clk_div(tc6387xb->scr + 0x200, 0, state);
+}
+
+
static int tc6387xb_mmc_enable(struct platform_device *mmc)
{
struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct clk *clk32k = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
- clk_enable(clk32k);
+ clk_enable(tc6387xb->clk32k);
+
+ tmio_core_mmc_enable(tc6387xb->scr + 0x200, 0,
+ tc6387xb_mmc_resources[0].start & 0xfffe);
return 0;
}
@@ -66,36 +110,20 @@ static int tc6387xb_mmc_enable(struct platform_device *mmc)
static int tc6387xb_mmc_disable(struct platform_device *mmc)
{
struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct clk *clk32k = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
- clk_disable(clk32k);
+ clk_disable(tc6387xb->clk32k);
return 0;
}
-/*--------------------------------------------------------------------------*/
-
static struct tmio_mmc_data tc6387xb_mmc_data = {
.hclk = 24000000,
+ .set_pwr = tc6387xb_mmc_pwr,
+ .set_clk_div = tc6387xb_mmc_clk_div,
};
-static struct resource tc6387xb_mmc_resources[] = {
- {
- .start = 0x800,
- .end = 0x9ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x200,
- .end = 0x2ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IRQ,
- },
-};
+/*--------------------------------------------------------------------------*/
static struct mfd_cell tc6387xb_cells[] = {
[TC6387XB_CELL_MMC] = {
@@ -111,8 +139,9 @@ static struct mfd_cell tc6387xb_cells[] = {
static int tc6387xb_probe(struct platform_device *dev)
{
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
- struct resource *iomem;
+ struct resource *iomem, *rscr;
struct clk *clk32k;
+ struct tc6387xb *tc6387xb;
int irq, ret;
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -120,18 +149,40 @@ static int tc6387xb_probe(struct platform_device *dev)
return -EINVAL;
}
+ tc6387xb = kzalloc(sizeof *tc6387xb, GFP_KERNEL);
+ if (!tc6387xb)
+ return -ENOMEM;
+
ret = platform_get_irq(dev, 0);
if (ret >= 0)
irq = ret;
else
- goto err_resource;
+ goto err_no_irq;
clk32k = clk_get(&dev->dev, "CLK_CK32K");
if (IS_ERR(clk32k)) {
ret = PTR_ERR(clk32k);
+ goto err_no_clk;
+ }
+
+ rscr = &tc6387xb->rscr;
+ rscr->name = "tc6387xb-core";
+ rscr->start = iomem->start;
+ rscr->end = iomem->start + 0xff;
+ rscr->flags = IORESOURCE_MEM;
+
+ ret = request_resource(iomem, rscr);
+ if (ret)
goto err_resource;
+
+ tc6387xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
+ if (!tc6387xb->scr) {
+ ret = -ENOMEM;
+ goto err_ioremap;
}
- platform_set_drvdata(dev, clk32k);
+
+ tc6387xb->clk32k = clk32k;
+ platform_set_drvdata(dev, tc6387xb);
if (pdata && pdata->enable)
pdata->enable(dev);
@@ -149,8 +200,13 @@ static int tc6387xb_probe(struct platform_device *dev)
if (!ret)
return 0;
- clk_put(clk32k);
+err_ioremap:
+ release_resource(&tc6387xb->rscr);
err_resource:
+ clk_put(clk32k);
+err_no_clk:
+err_no_irq:
+ kfree(tc6387xb);
return ret;
}
@@ -195,3 +251,4 @@ MODULE_DESCRIPTION("Toshiba TC6387XB core driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton");
MODULE_ALIAS("platform:tc6387xb");
+
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 1429a73..4bc5a08 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -136,10 +136,6 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
return 0;
}
-static struct tmio_mmc_data tc6393xb_mmc_data = {
- .hclk = 24000000,
-};
-
static struct resource __devinitdata tc6393xb_nand_resources[] = {
{
.start = 0x1000,
@@ -165,11 +161,6 @@ static struct resource __devinitdata tc6393xb_mmc_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = 0x200,
- .end = 0x2ff,
- .flags = IORESOURCE_MEM,
- },
- {
.start = IRQ_TC6393_MMC,
.end = IRQ_TC6393_MMC,
.flags = IORESOURCE_IRQ,
@@ -346,6 +337,50 @@ int tc6393xb_lcd_mode(struct platform_device *fb,
}
EXPORT_SYMBOL(tc6393xb_lcd_mode);
+static int tc6393xb_mmc_enable(struct platform_device *mmc)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_enable(tc6393xb->scr + 0x200, 0,
+ tc6393xb_mmc_resources[0].start & 0xfffe);
+
+ return 0;
+}
+
+static int tc6393xb_mmc_resume(struct platform_device *mmc)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_resume(tc6393xb->scr + 0x200, 0,
+ tc6393xb_mmc_resources[0].start & 0xfffe);
+
+ return 0;
+}
+
+static void tc6393xb_mmc_pwr(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_pwr(tc6393xb->scr + 0x200, 0, state);
+}
+
+static void tc6393xb_mmc_clk_div(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_clk_div(tc6393xb->scr + 0x200, 0, state);
+}
+
+static struct tmio_mmc_data tc6393xb_mmc_data = {
+ .hclk = 24000000,
+ .set_pwr = tc6393xb_mmc_pwr,
+ .set_clk_div = tc6393xb_mmc_clk_div,
+};
+
static struct mfd_cell __devinitdata tc6393xb_cells[] = {
[TC6393XB_CELL_NAND] = {
.name = "tmio-nand",
@@ -355,6 +390,8 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
},
[TC6393XB_CELL_MMC] = {
.name = "tmio-mmc",
+ .enable = tc6393xb_mmc_enable,
+ .resume = tc6393xb_mmc_resume,
.driver_data = &tc6393xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
.resources = tc6393xb_mmc_resources,
@@ -836,3 +873,4 @@ MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
MODULE_ALIAS("platform:tc6393xb");
+
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
new file mode 100644
index 0000000..eddc19a
--- /dev/null
+++ b/drivers/mfd/tmio_core.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright(c) 2009 Ian Molton <spyro@f2s.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/tmio.h>
+
+int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
+{
+ /* Enable the MMC/SD Control registers */
+ sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
+ sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);
+
+ /* Disable SD power during suspend */
+ sd_config_write8(cnf, shift, CNF_PWR_CTL_3, 0x01);
+
+ /* The below is required but why? FIXME */
+ sd_config_write8(cnf, shift, CNF_STOP_CLK_CTL, 0x1f);
+
+ /* Power down SD bus */
+ sd_config_write8(cnf, shift, CNF_PWR_CTL_2, 0x00);
+
+ return 0;
+}
+EXPORT_SYMBOL(tmio_core_mmc_enable);
+
+int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base)
+{
+
+ /* Enable the MMC/SD Control registers */
+ sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
+ sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);
+
+ return 0;
+}
+EXPORT_SYMBOL(tmio_core_mmc_resume);
+
+void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state)
+{
+ sd_config_write8(cnf, shift, CNF_PWR_CTL_2, state ? 0x02 : 0x00);
+}
+EXPORT_SYMBOL(tmio_core_mmc_pwr);
+
+void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state)
+{
+ sd_config_write8(cnf, shift, CNF_SD_CLK_MODE, state ? 1 : 0);
+}
+EXPORT_SYMBOL(tmio_core_mmc_clk_div);
+
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 20d29ba..9df9a5a 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -568,12 +568,12 @@ static void twl4030_sih_do_edge(struct work_struct *work)
bytes[byte] &= ~(0x03 << off);
- spin_lock_irq(&d->lock);
+ raw_spin_lock_irq(&d->lock);
if (d->status & IRQ_TYPE_EDGE_RISING)
bytes[byte] |= BIT(off + 1);
if (d->status & IRQ_TYPE_EDGE_FALLING)
bytes[byte] |= BIT(off + 0);
- spin_unlock_irq(&d->lock);
+ raw_spin_unlock_irq(&d->lock);
edge_change &= ~BIT(i);
}
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 8485a70..9a970bd 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -134,8 +134,7 @@ static inline int is_reg_locked(struct wm8350 *wm8350, u8 reg)
wm8350->reg_cache[WM8350_SECURITY] == WM8350_UNLOCK_KEY)
return 0;
- if ((reg == WM8350_GPIO_CONFIGURATION_I_O) ||
- (reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
+ if ((reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
reg <= WM8350_GPIO_FUNCTION_SELECT_4) ||
(reg >= WM8350_BATTERY_CHARGER_CONTROL_1 &&
reg <= WM8350_BATTERY_CHARGER_CONTROL_3))
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index c8df547..9025f29 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -434,7 +434,7 @@ int wm8350_register_irq(struct wm8350 *wm8350, int irq,
irq_handler_t handler, unsigned long flags,
const char *name, void *data)
{
- if (irq < 0 || irq > WM8350_NUM_IRQ || !handler)
+ if (irq < 0 || irq >= WM8350_NUM_IRQ || !handler)
return -EINVAL;
if (wm8350->irq[irq].handler)
@@ -453,7 +453,7 @@ EXPORT_SYMBOL_GPL(wm8350_register_irq);
int wm8350_free_irq(struct wm8350 *wm8350, int irq)
{
- if (irq < 0 || irq > WM8350_NUM_IRQ)
+ if (irq < 0 || irq >= WM8350_NUM_IRQ)
return -EINVAL;
wm8350_mask_irq(wm8350, irq);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 59f4ba1..e3551d2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -203,6 +203,7 @@ config CS5535_MFGPT
config CS5535_MFGPT_DEFAULT_IRQ
int
+ depends on CS5535_MFGPT
default 7
help
MFGPTs on the CS5535 require an interrupt. The selected IRQ
@@ -248,19 +249,6 @@ config SGI_GRU_DEBUG
This option enables additional debugging code for the SGI GRU driver. If
you are unsure, say N.
-config DELL_LAPTOP
- tristate "Dell Laptop Extras (EXPERIMENTAL)"
- depends on X86
- depends on DCDBAS
- depends on EXPERIMENTAL
- depends on BACKLIGHT_CLASS_DEVICE
- depends on RFKILL
- depends on POWER_SUPPLY
- default n
- ---help---
- This driver adds support for rfkill and backlight control to Dell
- laptops.
-
config ISL29003
tristate "Intersil ISL29003 ambient light sensor"
depends on I2C && SYSFS
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index e9eae4a..1eac626 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
[ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
[ENCLOSURE_STATUS_UNKNOWN] = "unknown",
[ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
+ [ENCLOSURE_STATUS_MAX] = NULL,
};
static const char *const enclosure_type [] = {
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 85f0e8c..1f552c6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -85,7 +85,14 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_lock(&open_lock);
md->usage--;
if (md->usage == 0) {
+ int devmaj = MAJOR(disk_devt(md->disk));
int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
+
+ if (!devmaj)
+ devidx = md->disk->first_minor >> MMC_SHIFT;
+
+ blk_cleanup_queue(md->queue.queue);
+
__clear_bit(devidx, dev_use);
put_disk(md->disk);
@@ -613,6 +620,7 @@ static int mmc_blk_probe(struct mmc_card *card)
return 0;
out:
+ mmc_cleanup_queue(&md->queue);
mmc_blk_put(md);
return err;
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b9f1e84..e7f8027 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
}
mrq->cmd->arg = dev_addr;
+ if (!mmc_card_blockaddr(test->card))
+ mrq->cmd->arg <<= 9;
+
mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
if (blocks == 1)
@@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
}
for (i = 0;i < BUFFER_SIZE / 512;i++) {
- ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
+ ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
if (ret)
return ret;
}
@@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
memset(test->buffer, 0, 512);
for (i = 0;i < BUFFER_SIZE / 512;i++) {
- ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
+ ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
if (ret)
return ret;
}
@@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
for (i = 0;i < sectors;i++) {
ret = mmc_test_buffer_transfer(test,
test->buffer + i * 512,
- dev_addr + i * 512, 512, 0);
+ dev_addr + i, 512, 0);
if (ret)
return ret;
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 49e5823..c5a7a85 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -90,9 +90,10 @@ static void mmc_request(struct request_queue *q)
struct request *req;
if (!mq) {
- printk(KERN_ERR "MMC: killing requests for dead queue\n");
- while ((req = blk_fetch_request(q)) != NULL)
+ while ((req = blk_fetch_request(q)) != NULL) {
+ req->cmd_flags |= REQ_QUIET;
__blk_end_request_all(req, -EIO);
+ }
return;
}
@@ -223,17 +224,18 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
struct request_queue *q = mq->queue;
unsigned long flags;
- /* Mark that we should start throwing out stragglers */
- spin_lock_irqsave(q->queue_lock, flags);
- q->queuedata = NULL;
- spin_unlock_irqrestore(q->queue_lock, flags);
-
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
/* Then terminate our worker thread */
kthread_stop(mq->thread);
+ /* Empty the queue */
+ spin_lock_irqsave(q->queue_lock, flags);
+ q->queuedata = NULL;
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
if (mq->bounce_sg)
kfree(mq->bounce_sg);
mq->bounce_sg = NULL;
@@ -245,8 +247,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
kfree(mq->bounce_buf);
mq->bounce_buf = NULL;
- blk_cleanup_queue(mq->queue);
-
mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c111894..0eac6c8 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -207,7 +207,7 @@ static int mmc_read_ext_csd(struct mmc_card *card)
}
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
- if (card->ext_csd.rev > 3) {
+ if (card->ext_csd.rev > 5) {
printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
"version %d\n", mmc_hostname(card->host),
card->ext_csd.rev);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index cdb845b..06b6408 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -516,7 +516,8 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
* The number of functions on the card is encoded inside
* the ocr.
*/
- card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28;
+ funcs = (ocr & 0x70000000) >> 28;
+ card->sdio_funcs = 0;
/*
* If needed, disconnect card detection pull-up resistor.
@@ -528,7 +529,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
/*
* Initialize (but don't add) all present functions.
*/
- for (i = 0;i < funcs;i++) {
+ for (i = 0; i < funcs; i++, card->sdio_funcs++) {
err = sdio_init_func(host->card, i + 1);
if (err)
goto remove;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d37464e..9e060c8 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -248,12 +248,15 @@ int sdio_add_func(struct sdio_func *func)
/*
* Unregister a SDIO function with the driver model, and
* (eventually) free it.
+ * This function can be called through error paths where sdio_add_func() was
+ * never executed (because a failure occurred at an earlier point).
*/
void sdio_remove_func(struct sdio_func *func)
{
- if (sdio_func_present(func))
- device_del(&func->dev);
+ if (!sdio_func_present(func))
+ return;
+ device_del(&func->dev);
put_device(&func->dev);
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9d405b1..ce1d288 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -44,6 +44,19 @@ config MMC_SDHCI_IO_ACCESSORS
This is a silent Kconfig symbol that is selected by the drivers that
need to overwrite SDHCI IO memory accessors.
+config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ bool
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This option is selected by drivers running on big endian hosts
+ and performing I/O to a SDHCI controller through a bus that
+ implements a hardware byte swapper using a 32-bit datum.
+ This endian mapping mode is called "data invariance" and
+ has the effect of scrambling the addresses and formats of data
+ accessed in sizes other than the datum size.
+
+ This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
+
config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
depends on MMC_SDHCI && PCI
@@ -75,11 +88,29 @@ config MMC_RICOH_MMC
config MMC_SDHCI_OF
tristate "SDHCI support on OpenFirmware platforms"
depends on MMC_SDHCI && PPC_OF
- select MMC_SDHCI_IO_ACCESSORS
help
This selects the OF support for Secure Digital Host Controller
- Interfaces. So far, only the Freescale eSDHC controller is known
- to exist on OF platforms.
+ Interfaces.
+
+ If unsure, say N.
+
+config MMC_SDHCI_OF_ESDHC
+ bool "SDHCI OF support for the Freescale eSDHC controller"
+ depends on MMC_SDHCI_OF
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ help
+ This selects the Freescale eSDHC controller support.
+
+ If unsure, say N.
+
+config MMC_SDHCI_OF_HLWD
+ bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
+ depends on MMC_SDHCI_OF
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ found in the "Hollywood" chipset of the Nintendo Wii video game
+ console.
If unsure, say N.
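The "data invariance" behaviour described in the MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER help text amounts to the bus reversing the four bytes of every aligned 32-bit datum, so sub-word registers end up at different offsets within that datum. A small stand-alone illustration (hypothetical user-space code, not part of this series; it only prints the remapped offsets that the sdhci_be32bs_* accessors further down compensate for with reg ^ 0x2 and reg ^ 0x3):

/* Sketch only: shows where byte and half-word registers land once a
 * bus-level swapper reverses each aligned 32-bit datum. */
#include <stdio.h>

static unsigned int swapped_byte_offset(unsigned int reg) { return reg ^ 0x3; }
static unsigned int swapped_word_offset(unsigned int reg) { return reg ^ 0x2; }

int main(void)
{
	unsigned int reg;

	for (reg = 0; reg < 4; reg++)
		printf("8-bit register 0x%x -> CPU offset 0x%x\n",
		       reg, swapped_byte_offset(reg));
	for (reg = 0; reg < 4; reg += 2)
		printf("16-bit register 0x%x -> CPU offset 0x%x\n",
		       reg, swapped_word_offset(reg));
	return 0;
}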
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index ded4d8cd..3d253dd 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
-obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -37,6 +36,11 @@ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
+sdhci-of-y := sdhci-of-core.o
+sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
+sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
+
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
endif
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of-core.c
index 01ab916..55e3313 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -22,62 +22,37 @@
#include <linux/of_platform.h>
#include <linux/mmc/host.h>
#include <asm/machdep.h>
+#include "sdhci-of.h"
#include "sdhci.h"
-struct sdhci_of_data {
- unsigned int quirks;
- struct sdhci_ops ops;
-};
-
-struct sdhci_of_host {
- unsigned int clock;
- u16 xfer_mode_shadow;
-};
+#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
/*
- * Ops and quirks for the Freescale eSDHC controller.
+ * These accessors are designed for big endian hosts doing I/O to
+ * little endian controllers incorporating a 32-bit hardware byte swapper.
*/
-#define ESDHC_DMA_SYSCTL 0x40c
-#define ESDHC_DMA_SNOOP 0x00000040
-
-#define ESDHC_SYSTEM_CONTROL 0x2c
-#define ESDHC_CLOCK_MASK 0x0000fff0
-#define ESDHC_PREDIV_SHIFT 8
-#define ESDHC_DIVIDER_SHIFT 4
-#define ESDHC_CLOCK_PEREN 0x00000004
-#define ESDHC_CLOCK_HCKEN 0x00000002
-#define ESDHC_CLOCK_IPGEN 0x00000001
-
-#define ESDHC_HOST_CONTROL_RES 0x05
-
-static u32 esdhc_readl(struct sdhci_host *host, int reg)
+u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
{
return in_be32(host->ioaddr + reg);
}
-static u16 esdhc_readw(struct sdhci_host *host, int reg)
+u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
{
- u16 ret;
-
- if (unlikely(reg == SDHCI_HOST_VERSION))
- ret = in_be16(host->ioaddr + reg);
- else
- ret = in_be16(host->ioaddr + (reg ^ 0x2));
- return ret;
+ return in_be16(host->ioaddr + (reg ^ 0x2));
}
-static u8 esdhc_readb(struct sdhci_host *host, int reg)
+u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
{
return in_8(host->ioaddr + (reg ^ 0x3));
}
-static void esdhc_writel(struct sdhci_host *host, u32 val, int reg)
+void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg)
{
out_be32(host->ioaddr + reg, val);
}
-static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
+void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_of_host *of_host = sdhci_priv(host);
int base = reg & ~0x3;
@@ -92,106 +67,21 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
of_host->xfer_mode_shadow = val;
return;
case SDHCI_COMMAND:
- esdhc_writel(host, val << 16 | of_host->xfer_mode_shadow,
- SDHCI_TRANSFER_MODE);
+ sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow,
+ SDHCI_TRANSFER_MODE);
return;
- case SDHCI_BLOCK_SIZE:
- /*
- * Two last DMA bits are reserved, and first one is used for
- * non-standard blksz of 4096 bytes that we don't support
- * yet. So clear the DMA boundary bits.
- */
- val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
- /* fall through */
}
clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
}
-static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
+void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
{
int base = reg & ~0x3;
int shift = (reg & 0x3) * 8;
- /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
- if (reg == SDHCI_HOST_CONTROL)
- val &= ~ESDHC_HOST_CONTROL_RES;
-
clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
}
-
-static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
-{
- int pre_div = 2;
- int div = 1;
-
- clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
- ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
-
- if (clock == 0)
- goto out;
-
- while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
- pre_div *= 2;
-
- while (host->max_clk / pre_div / div > clock && div < 16)
- div++;
-
- dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / pre_div / div);
-
- pre_div >>= 1;
- div--;
-
- setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
- ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
- div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
- mdelay(100);
-out:
- host->clock = clock;
-}
-
-static int esdhc_enable_dma(struct sdhci_host *host)
-{
- setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
- return 0;
-}
-
-static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
-{
- struct sdhci_of_host *of_host = sdhci_priv(host);
-
- return of_host->clock;
-}
-
-static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
-{
- struct sdhci_of_host *of_host = sdhci_priv(host);
-
- return of_host->clock / 256 / 16;
-}
-
-static struct sdhci_of_data sdhci_esdhc = {
- .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
- SDHCI_QUIRK_BROKEN_CARD_DETECTION |
- SDHCI_QUIRK_NO_BUSY_IRQ |
- SDHCI_QUIRK_NONSTANDARD_CLOCK |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_PIO_NEEDS_DELAY |
- SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
- SDHCI_QUIRK_NO_CARD_NO_RESET,
- .ops = {
- .readl = esdhc_readl,
- .readw = esdhc_readw,
- .readb = esdhc_readb,
- .writel = esdhc_writel,
- .writew = esdhc_writew,
- .writeb = esdhc_writeb,
- .set_clock = esdhc_set_clock,
- .enable_dma = esdhc_enable_dma,
- .get_max_clock = esdhc_get_max_clock,
- .get_min_clock = esdhc_get_min_clock,
- },
-};
+#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
#ifdef CONFIG_PM
@@ -301,9 +191,14 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev)
}
static const struct of_device_id sdhci_of_match[] = {
+#ifdef CONFIG_MMC_SDHCI_OF_ESDHC
{ .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
{ .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
{ .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
+#endif
+#ifdef CONFIG_MMC_SDHCI_OF_HLWD
+ { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, },
+#endif
{ .compatible = "generic-sdhci", },
{},
};
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
new file mode 100644
index 0000000..d5b11a1
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -0,0 +1,143 @@
+/*
+ * Freescale eSDHC controller driver.
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include "sdhci-of.h"
+#include "sdhci.h"
+
+/*
+ * Ops and quirks for the Freescale eSDHC controller.
+ */
+
+#define ESDHC_DMA_SYSCTL 0x40c
+#define ESDHC_DMA_SNOOP 0x00000040
+
+#define ESDHC_SYSTEM_CONTROL 0x2c
+#define ESDHC_CLOCK_MASK 0x0000fff0
+#define ESDHC_PREDIV_SHIFT 8
+#define ESDHC_DIVIDER_SHIFT 4
+#define ESDHC_CLOCK_PEREN 0x00000004
+#define ESDHC_CLOCK_HCKEN 0x00000002
+#define ESDHC_CLOCK_IPGEN 0x00000001
+
+#define ESDHC_HOST_CONTROL_RES 0x05
+
+static u16 esdhc_readw(struct sdhci_host *host, int reg)
+{
+ u16 ret;
+
+ if (unlikely(reg == SDHCI_HOST_VERSION))
+ ret = in_be16(host->ioaddr + reg);
+ else
+ ret = sdhci_be32bs_readw(host, reg);
+ return ret;
+}
+
+static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ if (reg == SDHCI_BLOCK_SIZE) {
+ /*
+ * Two last DMA bits are reserved, and first one is used for
+ * non-standard blksz of 4096 bytes that we don't support
+ * yet. So clear the DMA boundary bits.
+ */
+ val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
+ }
+ sdhci_be32bs_writew(host, val, reg);
+}
+
+static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
+ if (reg == SDHCI_HOST_CONTROL)
+ val &= ~ESDHC_HOST_CONTROL_RES;
+ sdhci_be32bs_writeb(host, val, reg);
+}
+
+static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ int pre_div = 2;
+ int div = 1;
+
+ clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
+ ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
+
+ if (clock == 0)
+ goto out;
+
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host->max_clk / pre_div / div > clock && div < 16)
+ div++;
+
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
+
+ pre_div >>= 1;
+ div--;
+
+ setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
+ ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
+ div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
+ mdelay(100);
+out:
+ host->clock = clock;
+}
+
+static int esdhc_enable_dma(struct sdhci_host *host)
+{
+ setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
+ return 0;
+}
+
+static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_of_host *of_host = sdhci_priv(host);
+
+ return of_host->clock;
+}
+
+static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_of_host *of_host = sdhci_priv(host);
+
+ return of_host->clock / 256 / 16;
+}
+
+struct sdhci_of_data sdhci_esdhc = {
+ .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_NO_BUSY_IRQ |
+ SDHCI_QUIRK_NONSTANDARD_CLOCK |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_PIO_NEEDS_DELAY |
+ SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
+ SDHCI_QUIRK_NO_CARD_NO_RESET,
+ .ops = {
+ .readl = sdhci_be32bs_readl,
+ .readw = esdhc_readw,
+ .readb = sdhci_be32bs_readb,
+ .writel = sdhci_be32bs_writel,
+ .writew = esdhc_writew,
+ .writeb = esdhc_writeb,
+ .set_clock = esdhc_set_clock,
+ .enable_dma = esdhc_enable_dma,
+ .get_max_clock = esdhc_get_max_clock,
+ .get_min_clock = esdhc_get_min_clock,
+ },
+};
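
A worked example (illustration only, not part of the patch) of how the two loops in esdhc_set_clock() pick their dividers: the first loop finds the smallest power-of-two prescaler (2..256) for which max_clk / pre_div / 16 no longer exceeds the requested clock, and the second finds the smallest linear divisor (1..16) on top of that. The 133 MHz input clock below is invented for the example.

/* Mirrors the divider selection in esdhc_set_clock() above. */
static unsigned int esdhc_pick_dividers_sketch(unsigned int max_clk,
					       unsigned int clock,
					       int *pre_div, int *div)
{
	*pre_div = 2;
	*div = 1;

	while (max_clk / *pre_div / 16 > clock && *pre_div < 256)
		*pre_div *= 2;
	while (max_clk / *pre_div / *div > clock && *div < 16)
		(*div)++;

	return max_clk / *pre_div / *div;	/* SD clock actually produced */
}

/*
 * Example: max_clk = 133 MHz, clock = 400 kHz (card identification):
 *   prescaler loop stops at pre_div = 32  (133 MHz / 32 / 16 = 260 kHz <= 400 kHz)
 *   divisor loop stops at div = 11        (133 MHz / 32 / 11 = 378 kHz)
 * The register write then encodes pre_div / 2 and div - 1, matching the
 * pre_div >>= 1 and div-- just before the setbits32() call in the driver.
 */
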
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
new file mode 100644
index 0000000..35117f3
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -0,0 +1,65 @@
+/*
+ * drivers/mmc/host/sdhci-of-hlwd.c
+ *
+ * Nintendo Wii Secure Digital Host Controller Interface.
+ * Copyright (C) 2009 The GameCube Linux Team
+ * Copyright (C) 2009 Albert Herranz
+ *
+ * Based on sdhci-of-esdhc.c
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include "sdhci-of.h"
+#include "sdhci.h"
+
+/*
+ * Ops and quirks for the Nintendo Wii SDHCI controllers.
+ */
+
+/*
+ * We need a small delay after each write, or things go horribly wrong.
+ */
+#define SDHCI_HLWD_WRITE_DELAY 5 /* usecs */
+
+static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ sdhci_be32bs_writel(host, val, reg);
+ udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ sdhci_be32bs_writew(host, val, reg);
+ udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ sdhci_be32bs_writeb(host, val, reg);
+ udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+struct sdhci_of_data sdhci_hlwd = {
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE,
+ .ops = {
+ .readl = sdhci_be32bs_readl,
+ .readw = sdhci_be32bs_readw,
+ .readb = sdhci_be32bs_readb,
+ .writel = sdhci_hlwd_writel,
+ .writew = sdhci_hlwd_writew,
+ .writeb = sdhci_hlwd_writeb,
+ },
+};
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h
new file mode 100644
index 0000000..ad09ad9
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of.h
@@ -0,0 +1,42 @@
+/*
+ * OpenFirmware bindings for Secure Digital Host Controller Interface.
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __SDHCI_OF_H
+#define __SDHCI_OF_H
+
+#include <linux/types.h>
+#include "sdhci.h"
+
+struct sdhci_of_data {
+ unsigned int quirks;
+ struct sdhci_ops ops;
+};
+
+struct sdhci_of_host {
+ unsigned int clock;
+ u16 xfer_mode_shadow;
+};
+
+extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg);
+extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg);
+extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg);
+extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg);
+extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg);
+extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg);
+
+extern struct sdhci_of_data sdhci_esdhc;
+extern struct sdhci_of_data sdhci_hlwd;
+
+#endif /* __SDHCI_OF_H */
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index ce5f1d7..842f46f 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -8,6 +8,8 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#ifndef __SDHCI_H
+#define __SDHCI_H
#include <linux/scatterlist.h>
#include <linux/compiler.h>
@@ -408,3 +410,5 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
extern int sdhci_resume_host(struct sdhci_host *host);
#endif
+
+#endif /* __SDHCI_H */
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 7cccc85..e22c3fa 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -46,7 +46,9 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
clk |= 0x100;
}
- sd_config_write8(host, CNF_SD_CLK_MODE, clk >> 22);
+ if (host->set_clk_div)
+ host->set_clk_div(host->pdev, (clk>>22) & 1);
+
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
@@ -427,12 +429,13 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* Power sequence - OFF -> ON -> UP */
switch (ios->power_mode) {
case MMC_POWER_OFF: /* power down SD bus */
- sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 0);
tmio_mmc_clk_stop(host);
break;
case MMC_POWER_ON: /* power up SD bus */
-
- sd_config_write8(host, CNF_PWR_CTL_2, 0x02);
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 1);
break;
case MMC_POWER_UP: /* start bus clock */
tmio_mmc_clk_start(host);
@@ -485,21 +488,15 @@ static int tmio_mmc_resume(struct platform_device *dev)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct mmc_host *mmc = platform_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
int ret = 0;
/* Tell the MFD core we are ready to be enabled */
- if (cell->enable) {
- ret = cell->enable(dev);
+ if (cell->resume) {
+ ret = cell->resume(dev);
if (ret)
goto out;
}
- /* Enable the MMC/SD Control registers */
- sd_config_write16(host, CNF_CMD, SDCREN);
- sd_config_write32(host, CNF_CTL_BASE,
- (dev->resource[0].start >> host->bus_shift) & 0xfffe);
-
mmc_resume_host(mmc);
out:
@@ -514,17 +511,16 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct tmio_mmc_data *pdata;
- struct resource *res_ctl, *res_cnf;
+ struct resource *res_ctl;
struct tmio_mmc_host *host;
struct mmc_host *mmc;
int ret = -EINVAL;
- if (dev->num_resources != 3)
+ if (dev->num_resources != 2)
goto out;
res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
- res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1);
- if (!res_ctl || !res_cnf)
+ if (!res_ctl)
goto out;
pdata = cell->driver_data;
@@ -539,8 +535,12 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
host = mmc_priv(mmc);
host->mmc = mmc;
+ host->pdev = dev;
platform_set_drvdata(dev, mmc);
+ host->set_pwr = pdata->set_pwr;
+ host->set_clk_div = pdata->set_clk_div;
+
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res_ctl) >> 10;
@@ -548,10 +548,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (!host->ctl)
goto host_free;
- host->cnf = ioremap(res_cnf->start, resource_size(res_cnf));
- if (!host->cnf)
- goto unmap_ctl;
-
mmc->ops = &tmio_mmc_ops;
mmc->caps = MMC_CAP_4_BIT_DATA;
mmc->f_max = pdata->hclk;
@@ -562,23 +558,9 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (cell->enable) {
ret = cell->enable(dev);
if (ret)
- goto unmap_cnf;
+ goto unmap_ctl;
}
- /* Enable the MMC/SD Control registers */
- sd_config_write16(host, CNF_CMD, SDCREN);
- sd_config_write32(host, CNF_CTL_BASE,
- (dev->resource[0].start >> host->bus_shift) & 0xfffe);
-
- /* Disable SD power during suspend */
- sd_config_write8(host, CNF_PWR_CTL_3, 0x01);
-
- /* The below is required but why? FIXME */
- sd_config_write8(host, CNF_STOP_CLK_CTL, 0x1f);
-
- /* Power down SD bus*/
- sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
-
tmio_mmc_clk_stop(host);
reset(host);
@@ -586,14 +568,14 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (ret >= 0)
host->irq = ret;
else
- goto unmap_cnf;
+ goto unmap_ctl;
disable_mmc_irqs(host, TMIO_MASK_ALL);
ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
if (ret)
- goto unmap_cnf;
+ goto unmap_ctl;
mmc_add_host(mmc);
@@ -605,8 +587,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
return 0;
-unmap_cnf:
- iounmap(host->cnf);
unmap_ctl:
iounmap(host->ctl);
host_free:
@@ -626,7 +606,6 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
mmc_remove_host(mmc);
free_irq(host->irq, host);
iounmap(host->ctl);
- iounmap(host->cnf);
mmc_free_host(mmc);
}
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9fa9985..692dc23 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -11,26 +11,6 @@
#include <linux/highmem.h>
-#define CNF_CMD 0x04
-#define CNF_CTL_BASE 0x10
-#define CNF_INT_PIN 0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
@@ -110,7 +90,6 @@
struct tmio_mmc_host {
- void __iomem *cnf;
void __iomem *ctl;
unsigned long bus_shift;
struct mmc_command *cmd;
@@ -119,10 +98,16 @@ struct tmio_mmc_host {
struct mmc_host *mmc;
int irq;
+ /* Callbacks for clock / power control */
+ void (*set_pwr)(struct platform_device *host, int state);
+ void (*set_clk_div)(struct platform_device *host, int state);
+
/* pio related stuff */
struct scatterlist *sg_ptr;
unsigned int sg_len;
unsigned int sg_off;
+
+ struct platform_device *pdev;
};
#include <linux/io.h>
@@ -163,25 +148,6 @@ static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
-static inline void sd_config_write8(struct tmio_mmc_host *host, int addr,
- u8 val)
-{
- writeb(val, host->cnf + (addr << host->bus_shift));
-}
-
-static inline void sd_config_write16(struct tmio_mmc_host *host, int addr,
- u16 val)
-{
- writew(val, host->cnf + (addr << host->bus_shift));
-}
-
-static inline void sd_config_write32(struct tmio_mmc_host *host, int addr,
- u32 val)
-{
- writew(val, host->cnf + (addr << host->bus_shift));
- writew(val >> 16, host->cnf + ((addr + 2) << host->bus_shift));
-}
-
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
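
A hypothetical board-side sketch (not part of the patch) showing how a platform now supplies the hooks that replace the removed CNF register writes. The field names hclk, set_pwr and set_clk_div are taken from the probe code above; the board functions and the clock rate are invented for illustration.

#include <linux/platform_device.h>
#include <linux/mfd/tmio.h>

static void myboard_mmc_set_pwr(struct platform_device *pdev, int state)
{
	/* switch the board's SD power rail (regulator/GPIO) on or off */
}

static void myboard_mmc_set_clk_div(struct platform_device *pdev, int state)
{
	/* select the SD clock divider in the board's MFD/clock logic */
}

static struct tmio_mmc_data myboard_mmc_data = {
	.hclk		= 24000000,	/* example input clock */
	.set_pwr	= myboard_mmc_set_pwr,
	.set_clk_div	= myboard_mmc_set_clk_div,
};
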
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 4c364d4..2de0cc8 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -549,4 +549,21 @@ config MTD_VMU
To build this as a module select M here, the module will be called
vmu-flash.
+config MTD_PISMO
+ tristate "MTD discovery driver for PISMO modules"
+ depends on I2C
+ depends on ARCH_VERSATILE
+ help
+ This driver allows for discovery of PISMO modules - see
+ <http://www.pismoworld.org/>. These are small modules containing
+ up to five memory devices (eg, SRAM, flash, DOC) described by an
+ I2C EEPROM.
+
+ This driver does not create any MTD maps itself; instead it
+ creates MTD physmap and MTD SRAM platform devices. If you
+ enable this option, you should consider enabling MTD_PHYSMAP
+ and/or MTD_PLATRAM according to the devices on your module.
+
+ When built as a module, it will be called pismo.ko
+
endmenu
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
new file mode 100644
index 0000000..c48cad2
--- /dev/null
+++ b/drivers/mtd/maps/pismo.c
@@ -0,0 +1,320 @@
+/*
+ * PISMO memory driver - http://www.pismoworld.org/
+ *
+ * For ARM Realview and Versatile platforms
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/plat-ram.h>
+#include <linux/mtd/pismo.h>
+
+#define PISMO_NUM_CS 5
+
+struct pismo_cs_block {
+ u8 type;
+ u8 width;
+ __le16 access;
+ __le32 size;
+ u32 reserved[2];
+ char device[32];
+} __packed;
+
+struct pismo_eeprom {
+ struct pismo_cs_block cs[PISMO_NUM_CS];
+ char board[15];
+ u8 sum;
+} __packed;
+
+struct pismo_mem {
+ phys_addr_t base;
+ u32 size;
+ u16 access;
+ u8 width;
+ u8 type;
+};
+
+struct pismo_data {
+ struct i2c_client *client;
+ void (*vpp)(void *, int);
+ void *vpp_data;
+ struct platform_device *dev[PISMO_NUM_CS];
+};
+
+/* FIXME: set_vpp could do with a better calling convention */
+static struct pismo_data *vpp_pismo;
+static DEFINE_MUTEX(pismo_mutex);
+
+static int pismo_setvpp_probe_fix(struct pismo_data *pismo)
+{
+ mutex_lock(&pismo_mutex);
+ if (vpp_pismo) {
+ mutex_unlock(&pismo_mutex);
+ kfree(pismo);
+ return -EBUSY;
+ }
+ vpp_pismo = pismo;
+ mutex_unlock(&pismo_mutex);
+ return 0;
+}
+
+static void pismo_setvpp_remove_fix(struct pismo_data *pismo)
+{
+ mutex_lock(&pismo_mutex);
+ if (vpp_pismo == pismo)
+ vpp_pismo = NULL;
+ mutex_unlock(&pismo_mutex);
+}
+
+static void pismo_set_vpp(struct map_info *map, int on)
+{
+ struct pismo_data *pismo = vpp_pismo;
+
+ pismo->vpp(pismo->vpp_data, on);
+}
+/* end of hack */
+
+
+static unsigned int __devinit pismo_width_to_bytes(unsigned int width)
+{
+ width &= 15;
+ if (width > 2)
+ return 0;
+ return 1 << width;
+}
+
+static int __devinit pismo_eeprom_read(struct i2c_client *client, void *buf,
+ u8 addr, size_t size)
+{
+ int ret;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .len = sizeof(addr),
+ .buf = &addr,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = buf,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+
+ return ret == ARRAY_SIZE(msg) ? size : -EIO;
+}
+
+static int __devinit pismo_add_device(struct pismo_data *pismo, int i,
+ struct pismo_mem *region, const char *name, void *pdata, size_t psize)
+{
+ struct platform_device *dev;
+ struct resource res = { };
+	phys_addr_t base = region->base;
+ int ret;
+
+ if (base == ~0)
+ return -ENXIO;
+
+ res.start = base;
+ res.end = base + region->size - 1;
+ res.flags = IORESOURCE_MEM;
+
+ dev = platform_device_alloc(name, i);
+ if (!dev)
+ return -ENOMEM;
+ dev->dev.parent = &pismo->client->dev;
+
+ do {
+ ret = platform_device_add_resources(dev, &res, 1);
+ if (ret)
+ break;
+
+ ret = platform_device_add_data(dev, pdata, psize);
+ if (ret)
+ break;
+
+ ret = platform_device_add(dev);
+ if (ret)
+ break;
+
+ pismo->dev[i] = dev;
+ return 0;
+ } while (0);
+
+ platform_device_put(dev);
+ return ret;
+}
+
+static int __devinit pismo_add_nor(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
+{
+ struct physmap_flash_data data = {
+ .width = region->width,
+ };
+
+ if (pismo->vpp)
+ data.set_vpp = pismo_set_vpp;
+
+ return pismo_add_device(pismo, i, region, "physmap-flash",
+ &data, sizeof(data));
+}
+
+static int __devinit pismo_add_sram(struct pismo_data *pismo, int i,
+ struct pismo_mem *region)
+{
+ struct platdata_mtd_ram data = {
+ .bankwidth = region->width,
+ };
+
+ return pismo_add_device(pismo, i, region, "mtd-ram",
+ &data, sizeof(data));
+}
+
+static void __devinit pismo_add_one(struct pismo_data *pismo, int i,
+ const struct pismo_cs_block *cs, phys_addr_t base)
+{
+ struct device *dev = &pismo->client->dev;
+ struct pismo_mem region;
+
+ region.base = base;
+ region.type = cs->type;
+ region.width = pismo_width_to_bytes(cs->width);
+ region.access = le16_to_cpu(cs->access);
+ region.size = le32_to_cpu(cs->size);
+
+ if (region.width == 0) {
+ dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width);
+ return;
+ }
+
+ /*
+	 * FIXME: we may need to program the platform's memory controller
+	 * here, but at the moment we assume that it has already been set up
+	 * correctly.  The memory controller could also tell us the base address.
+ */
+
+ dev_info(dev, "cs%u: %.32s: type %02x access %u00ps size %uK\n",
+ i, cs->device, region.type, region.access, region.size / 1024);
+
+ switch (region.type) {
+ case 0:
+ break;
+ case 1:
+ /* static DOC */
+ break;
+ case 2:
+ /* static NOR */
+ pismo_add_nor(pismo, i, &region);
+ break;
+ case 3:
+ /* static RAM */
+ pismo_add_sram(pismo, i, &region);
+ break;
+ }
+}
+
+static int __devexit pismo_remove(struct i2c_client *client)
+{
+ struct pismo_data *pismo = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pismo->dev); i++)
+ platform_device_unregister(pismo->dev[i]);
+
+ /* FIXME: set_vpp needs saner arguments */
+ pismo_setvpp_remove_fix(pismo);
+
+ kfree(pismo);
+
+ return 0;
+}
+
+static int __devinit pismo_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct pismo_pdata *pdata = client->dev.platform_data;
+ struct pismo_eeprom eeprom;
+ struct pismo_data *pismo;
+ int ret, i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "functionality mismatch\n");
+ return -EIO;
+ }
+
+ pismo = kzalloc(sizeof(*pismo), GFP_KERNEL);
+ if (!pismo)
+ return -ENOMEM;
+
+ /* FIXME: set_vpp needs saner arguments */
+ ret = pismo_setvpp_probe_fix(pismo);
+ if (ret)
+ return ret;
+
+ pismo->client = client;
+ if (pdata) {
+ pismo->vpp = pdata->set_vpp;
+ pismo->vpp_data = pdata->vpp_data;
+ }
+ i2c_set_clientdata(client, pismo);
+
+ ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(&client->dev, "%.15s board found\n", eeprom.board);
+
+ for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++)
+ if (eeprom.cs[i].type != 0xff)
+ pismo_add_one(pismo, i, &eeprom.cs[i],
+ pdata->cs_addrs[i]);
+
+ return 0;
+}
+
+static const struct i2c_device_id pismo_id[] = {
+ { "pismo" },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, pismo_id);
+
+static struct i2c_driver pismo_driver = {
+ .driver = {
+ .name = "pismo",
+ .owner = THIS_MODULE,
+ },
+ .probe = pismo_probe,
+ .remove = __devexit_p(pismo_remove),
+ .id_table = pismo_id,
+};
+
+static int __init pismo_init(void)
+{
+ BUILD_BUG_ON(sizeof(struct pismo_cs_block) != 48);
+ BUILD_BUG_ON(sizeof(struct pismo_eeprom) != 256);
+
+ return i2c_add_driver(&pismo_driver);
+}
+module_init(pismo_init);
+
+static void __exit pismo_exit(void)
+{
+ i2c_del_driver(&pismo_driver);
+}
+module_exit(pismo_exit);
+
+MODULE_AUTHOR("Russell King <linux@arm.linux.org.uk>");
+MODULE_DESCRIPTION("PISMO memory driver");
+MODULE_LICENSE("GPL");
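
A hypothetical board-side sketch (not part of the patch) of how pismo_probe() gets its platform data: struct pismo_pdata comes from <linux/mtd/pismo.h>, and its set_vpp, vpp_data and cs_addrs fields are the ones dereferenced in the probe code above. The I2C bus, EEPROM address and chip-select base addresses below are invented for illustration.

#include <linux/i2c.h>
#include <linux/mtd/pismo.h>

static struct pismo_pdata myboard_pismo_pdata = {
	/* physical base of each of the five chip selects */
	.cs_addrs = { 0x40000000, 0x44000000, 0x48000000,
		      0x4c000000, 0x50000000 },
	/* .set_vpp / .vpp_data left NULL: no VPP control on this board */
};

static struct i2c_board_info myboard_pismo_eeprom __initdata = {
	I2C_BOARD_INFO("pismo", 0x50),
	.platform_data = &myboard_pismo_pdata,
};

/* Registered from board init, e.g.:
 *	i2c_register_board_info(0, &myboard_pismo_eeprom, 1);
 */
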
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 74fa075..b13f641 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -20,14 +20,23 @@
#include <asm/io.h>
#include <mach/hardware.h>
-#include <asm/cacheflush.h>
#include <asm/mach/flash.h>
+#define CACHELINESIZE 32
+
static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
ssize_t len)
{
- flush_ioremap_region(map->phys, map->cached, from, len);
+ unsigned long start = (unsigned long)map->cached + from;
+ unsigned long end = start + len;
+
+ start &= ~(CACHELINESIZE - 1);
+ while (start < end) {
+ /* invalidate D cache line */
+ asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
+ start += CACHELINESIZE;
+ }
}
struct pxa2xx_flash_info {
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index c89aaab..7a67218 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -289,14 +289,6 @@ config MTD_NAND_SHARPSL
tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
depends on ARCH_PXA
-config MTD_NAND_BASLER_EXCITE
- tristate "Support for NAND Flash on Basler eXcite"
- depends on BASLER_EXCITE
- help
- This enables the driver for the NAND flash device found on the
- Basler eXcite Smart Camera. If built as a module, the driver
- will be named excite_nandflash.
-
config MTD_NAND_CAFE
tristate "NAND support for OLPC CAFÉ chip"
depends on PCI
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d925796..f39d0b6 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
-obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c
deleted file mode 100644
index af6a6a5..0000000
--- a/drivers/mtd/nand/excite_nandflash.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
-* Copyright (C) 2005 - 2007 by Basler Vision Technologies AG
-* Author: Thomas Koeller <thomas.koeller.qbaslerweb.com>
-* Original code by Thies Moeller <thies.moeller@baslerweb.com>
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/rm9k-ocd.h>
-
-#include <excite_nandflash.h>
-
-#define EXCITE_NANDFLASH_VERSION "0.1"
-
-/* I/O register offsets */
-#define EXCITE_NANDFLASH_DATA_BYTE 0x00
-#define EXCITE_NANDFLASH_STATUS_BYTE 0x0c
-#define EXCITE_NANDFLASH_ADDR_BYTE 0x10
-#define EXCITE_NANDFLASH_CMD_BYTE 0x14
-
-/* prefix for debug output */
-static const char module_id[] = "excite_nandflash";
-
-/*
- * partition definition
- */
-static const struct mtd_partition partition_info[] = {
- {
- .name = "eXcite RootFS",
- .offset = 0,
- .size = MTDPART_SIZ_FULL
- }
-};
-
-static inline const struct resource *
-excite_nand_get_resource(struct platform_device *d, unsigned long flags,
- const char *basename)
-{
- char buf[80];
-
- if (snprintf(buf, sizeof buf, "%s_%u", basename, d->id) >= sizeof buf)
- return NULL;
- return platform_get_resource_byname(d, flags, buf);
-}
-
-static inline void __iomem *
-excite_nand_map_regs(struct platform_device *d, const char *basename)
-{
- void *result = NULL;
- const struct resource *const r =
- excite_nand_get_resource(d, IORESOURCE_MEM, basename);
-
- if (r)
- result = ioremap_nocache(r->start, r->end + 1 - r->start);
- return result;
-}
-
-/* controller and mtd information */
-struct excite_nand_drvdata {
- struct mtd_info board_mtd;
- struct nand_chip board_chip;
- void __iomem *regs;
- void __iomem *tgt;
-};
-
-/* Control function */
-static void excite_nand_control(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct excite_nand_drvdata * const d =
- container_of(mtd, struct excite_nand_drvdata, board_mtd);
-
- switch (ctrl) {
- case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
- d->tgt = d->regs + EXCITE_NANDFLASH_CMD_BYTE;
- break;
- case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
- d->tgt = d->regs + EXCITE_NANDFLASH_ADDR_BYTE;
- break;
- case NAND_CTRL_CHANGE | NAND_NCE:
- d->tgt = d->regs + EXCITE_NANDFLASH_DATA_BYTE;
- break;
- }
-
- if (cmd != NAND_CMD_NONE)
- __raw_writeb(cmd, d->tgt);
-}
-
-/* Return 0 if flash is busy, 1 if ready */
-static int excite_nand_devready(struct mtd_info *mtd)
-{
- struct excite_nand_drvdata * const drvdata =
- container_of(mtd, struct excite_nand_drvdata, board_mtd);
-
- return __raw_readb(drvdata->regs + EXCITE_NANDFLASH_STATUS_BYTE);
-}
-
-/*
- * Called by device layer to remove the driver.
- * The binding to the mtd and all allocated
- * resources are released.
- */
-static int __devexit excite_nand_remove(struct platform_device *dev)
-{
- struct excite_nand_drvdata * const this = platform_get_drvdata(dev);
-
- platform_set_drvdata(dev, NULL);
-
- if (unlikely(!this)) {
- printk(KERN_ERR "%s: called %s without private data!!",
- module_id, __func__);
- return -EINVAL;
- }
-
- /* first thing we need to do is release our mtd
- * then go through freeing the resource used
- */
- nand_release(&this->board_mtd);
-
- /* free the common resources */
- iounmap(this->regs);
- kfree(this);
-
- DEBUG(MTD_DEBUG_LEVEL1, "%s: removed\n", module_id);
- return 0;
-}
-
-/*
- * Called by device layer when it finds a device matching
- * one our driver can handle. This code checks to see if
- * it can allocate all necessary resources then calls the
- * nand layer to look for devices.
-*/
-static int __init excite_nand_probe(struct platform_device *pdev)
-{
- struct excite_nand_drvdata *drvdata; /* private driver data */
- struct nand_chip *board_chip; /* private flash chip data */
- struct mtd_info *board_mtd; /* mtd info for this board */
- int scan_res;
-
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
- if (unlikely(!drvdata)) {
- printk(KERN_ERR "%s: no memory for drvdata\n",
- module_id);
- return -ENOMEM;
- }
-
- /* bind private data into driver */
- platform_set_drvdata(pdev, drvdata);
-
- /* allocate and map the resource */
- drvdata->regs =
- excite_nand_map_regs(pdev, EXCITE_NANDFLASH_RESOURCE_REGS);
-
- if (unlikely(!drvdata->regs)) {
- printk(KERN_ERR "%s: cannot reserve register region\n",
- module_id);
- kfree(drvdata);
- return -ENXIO;
- }
-
- drvdata->tgt = drvdata->regs + EXCITE_NANDFLASH_DATA_BYTE;
-
- /* initialise our chip */
- board_chip = &drvdata->board_chip;
- board_chip->IO_ADDR_R = board_chip->IO_ADDR_W =
- drvdata->regs + EXCITE_NANDFLASH_DATA_BYTE;
- board_chip->cmd_ctrl = excite_nand_control;
- board_chip->dev_ready = excite_nand_devready;
- board_chip->chip_delay = 25;
- board_chip->ecc.mode = NAND_ECC_SOFT;
-
- /* link chip to mtd */
- board_mtd = &drvdata->board_mtd;
- board_mtd->priv = board_chip;
-
- DEBUG(MTD_DEBUG_LEVEL2, "%s: device scan\n", module_id);
- scan_res = nand_scan(&drvdata->board_mtd, 1);
-
- if (likely(!scan_res)) {
- DEBUG(MTD_DEBUG_LEVEL2, "%s: register partitions\n", module_id);
- add_mtd_partitions(&drvdata->board_mtd, partition_info,
- ARRAY_SIZE(partition_info));
- } else {
- iounmap(drvdata->regs);
- kfree(drvdata);
- printk(KERN_ERR "%s: device scan failed\n", module_id);
- return -EIO;
- }
- return 0;
-}
-
-static struct platform_driver excite_nand_driver = {
- .driver = {
- .name = "excite_nand",
- .owner = THIS_MODULE,
- },
- .probe = excite_nand_probe,
- .remove = __devexit_p(excite_nand_remove)
-};
-
-static int __init excite_nand_init(void)
-{
- pr_info("Basler eXcite nand flash driver Version "
- EXCITE_NANDFLASH_VERSION "\n");
- return platform_driver_register(&excite_nand_driver);
-}
-
-static void __exit excite_nand_exit(void)
-{
- platform_driver_unregister(&excite_nand_driver);
-}
-
-module_init(excite_nand_init);
-module_exit(excite_nand_exit);
-
-MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
-MODULE_DESCRIPTION("Basler eXcite NAND-Flash driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(EXCITE_NANDFLASH_VERSION)
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 79fc453..25c5dd0 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -147,6 +147,10 @@ static int scan_for_bad_eraseblocks(void)
}
memset(bbt, 0 , ebcnt);
+ /* NOR flash does not implement block_isbad */
+ if (mtd->block_isbad == NULL)
+ return 0;
+
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
@@ -184,7 +188,7 @@ static int __init mtd_readtest_init(void)
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
- pgcnt = mtd->erasesize / mtd->writesize;
+ pgcnt = mtd->erasesize / pgsize;
printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 141363a..7fbb51d 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -301,6 +301,10 @@ static int scan_for_bad_eraseblocks(void)
}
memset(bbt, 0 , ebcnt);
+ /* NOR flash does not implement block_isbad */
+ if (mtd->block_isbad == NULL)
+ goto out;
+
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
@@ -309,6 +313,7 @@ static int scan_for_bad_eraseblocks(void)
cond_resched();
}
printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
+out:
goodebcnt = ebcnt - bad;
return 0;
}
@@ -340,7 +345,7 @@ static int __init mtd_speedtest_init(void)
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
- pgcnt = mtd->erasesize / mtd->writesize;
+ pgcnt = mtd->erasesize / pgsize;
printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 6392047..a99d3cd 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -227,6 +227,10 @@ static int scan_for_bad_eraseblocks(void)
}
memset(bbt, 0 , ebcnt);
+ /* NOR flash does not implement block_isbad */
+ if (mtd->block_isbad == NULL)
+ return 0;
+
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
bbt[i] = is_block_bad(i) ? 1 : 0;
@@ -265,7 +269,7 @@ static int __init mtd_stresstest_init(void)
tmp = mtd->size;
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
- pgcnt = mtd->erasesize / mtd->writesize;
+ pgcnt = mtd->erasesize / pgsize;
printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index f237ddb..111ea41 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -853,7 +853,6 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- req.name[req.name_len] = '\0';
err = verify_mkvol_req(ubi, &req);
if (err)
break;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 277786e..1361574 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -291,8 +291,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
*/
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
- int error, ubi_num, vol_id;
- struct ubi_volume_desc *ret;
+ int error, ubi_num, vol_id, mod;
struct inode *inode;
struct path path;
@@ -306,16 +305,16 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
return ERR_PTR(error);
inode = path.dentry->d_inode;
+ mod = inode->i_mode;
ubi_num = ubi_major2num(imajor(inode));
vol_id = iminor(inode) - 1;
+ path_put(&path);
+ if (!S_ISCHR(mod))
+ return ERR_PTR(-EINVAL);
if (vol_id >= 0 && ubi_num >= 0)
- ret = ubi_open_volume(ubi_num, vol_id, mode);
- else
- ret = ERR_PTR(-ENODEV);
-
- path_put(&path);
- return ret;
+ return ubi_open_volume(ubi_num, vol_id, mode);
+ return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
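
A minimal in-kernel usage sketch (illustration only) of the reworked ubi_open_volume_path(), which now checks that the path really refers to a UBI character device and returns -EINVAL otherwise. The "/dev/ubi0_0" path and read length are examples, not taken from the patch.

#include <linux/err.h>
#include <linux/mtd/ubi.h>

static int example_read_leb0(char *buf, int len)
{
	struct ubi_volume_desc *desc;
	int err;

	desc = ubi_open_volume_path("/dev/ubi0_0", UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	err = ubi_leb_read(desc, 0, buf, 0, len, 0);	/* LEB 0, offset 0 */
	ubi_close_volume(desc);
	return err;
}
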
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index c1d7b88..425bf5a 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -155,6 +155,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
if (err)
return err;
vol->updating = 0;
+ return 0;
}
vol->upd_buf = vmalloc(ubi->leb_size);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 1afc61e..40044028 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -566,6 +566,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
vol->alignment = be32_to_cpu(vtbl[i].alignment);
vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
+ vol->upd_marker = vtbl[i].upd_marker;
vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
vol->name_len = be16_to_cpu(vtbl[i].name_len);
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index fbc2311..77cf090 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -56,6 +56,7 @@ static const char version[] =
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -734,8 +735,7 @@ static void init_82586_mem(struct net_device *dev)
memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
/* Fill in the station address. */
- memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr,
- sizeof(dev->dev_addr));
+ memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
/* The Tx-block list is written as needed. We just set up the values. */
lp->tx_cmd_link = IDLELOOP + 4;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a5be9ac..dd9a09c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1953,6 +1953,8 @@ config BCM63XX_ENET
source "drivers/net/fs_enet/Kconfig"
+source "drivers/net/octeon/Kconfig"
+
endif # NET_ETHERNET
#
@@ -2344,6 +2346,7 @@ config GELIC_NET
config GELIC_WIRELESS
bool "PS3 Wireless support"
+ depends on WLAN
depends on GELIC_NET
select WIRELESS_EXT
help
@@ -2356,6 +2359,7 @@ config GELIC_WIRELESS
config GELIC_WIRELESS_OLD_PSK_INTERFACE
bool "PS3 Wireless private PSK interface (OBSOLETE)"
depends on GELIC_WIRELESS
+ select WEXT_PRIV
help
This option retains the obsolete private interface to pass
the PSK from user space programs to the driver. The PSK
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 246323d..ad1346d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -285,3 +285,5 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_WIMAX) += wimax/
+
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index c37ee9e..39e1c0d 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -68,6 +68,7 @@ config W90P910_ETH
tristate "Nuvoton w90p910 Ethernet support"
depends on ARM && ARCH_W90X900
select PHYLIB
+ select MII
help
Say Y here if you want to use built-in Ethernet ports
on w90p910 processor.
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index c5721cb..cc9ed86 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -663,7 +663,7 @@ static int lance_open( struct net_device *dev )
while (--i > 0)
if (DREG & CSR0_IDON)
break;
- if (i < 0 || (DREG & CSR0_ERR)) {
+ if (i <= 0 || (DREG & CSR0_ERR)) {
DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
dev->name, i, DREG ));
DREG = CSR0_STOP;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index c0451d7..ec52529 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1959,12 +1959,15 @@ static int atl2_get_eeprom(struct net_device *netdev,
return -ENOMEM;
for (i = first_dword; i < last_dword; i++) {
- if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword])))
- return -EIO;
+ if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
+ ret_val = -EIO;
+ goto free;
+ }
}
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
eeprom->len);
+free:
kfree(eeprom_buff);
return ret_val;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 62d9c9c..1dd4403 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
size = (res->end - res->start) + 1;
ax->mem2 = request_mem_region(res->start, size, pdev->name);
- if (ax->mem == NULL) {
+ if (ax->mem2 == NULL) {
dev_err(&pdev->dev, "cannot reserve registers\n");
ret = -ENXIO;
goto exit_mem1;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 1f6c548..0bd47d3 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1245,9 +1245,15 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
-static int bcm_enet_get_stats_count(struct net_device *netdev)
+static int bcm_enet_get_sset_count(struct net_device *netdev,
+ int string_set)
{
- return BCM_ENET_STATS_LEN;
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCM_ENET_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
}
static void bcm_enet_get_strings(struct net_device *netdev,
@@ -1473,7 +1479,7 @@ static int bcm_enet_set_pauseparam(struct net_device *dev,
static struct ethtool_ops bcm_enet_ethtool_ops = {
.get_strings = bcm_enet_get_strings,
- .get_stats_count = bcm_enet_get_stats_count,
+ .get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
.get_settings = bcm_enet_get_settings,
.set_settings = bcm_enet_set_settings,
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 9e56014..5bc7459 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -275,8 +275,14 @@ struct be_adapter {
u32 tx_fc; /* Tx flow control */
int link_speed;
u8 port_type;
+ u8 transceiver;
+ u8 generation; /* BladeEngine ASIC generation */
};
+/* BladeEngine Generation numbers */
+#define BE_GEN2 2
+#define BE_GEN3 3
+
extern const struct ethtool_ops be_ethtool_ops;
#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1b68bd9..006cb2ef 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -286,7 +286,7 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
MCC_WRB_SGE_CNT_SHIFT;
wrb->payload_length = payload_len;
wrb->tag0 = opcode;
- be_dws_cpu_to_le(wrb, 20);
+ be_dws_cpu_to_le(wrb, 8);
}
/* Don't touch the hdr after it's prepared */
@@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+ req_hdr->version = 0;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
@@ -1479,6 +1480,41 @@ err:
return status;
}
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_lmode *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ sizeof(*req));
+
+ req->src_port = port_num;
+ req->dest_port = port_num;
+ req->loopback_type = loopback_type;
+ req->loopback_state = enable;
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
@@ -1501,6 +1537,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
+ req->hdr.timeout = 4;
req->pattern = cpu_to_le64(pattern);
req->src_port = cpu_to_le32(port_num);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 92b87ef..13b33c8 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -155,6 +155,7 @@ struct be_mcc_mailbox {
#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
struct be_cmd_req_hdr {
u8 opcode; /* dword 0 */
@@ -163,7 +164,8 @@ struct be_cmd_req_hdr {
u8 domain; /* dword 0 */
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
- u32 rsvd; /* dword 3 */
+ u8 version; /* dword 3 */
+ u8 rsvd[3]; /* dword 3 */
};
#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
@@ -821,6 +823,19 @@ struct be_cmd_resp_loopback_test {
u32 ticks_compl;
};
+struct be_cmd_req_set_lmode {
+ struct be_cmd_req_hdr hdr;
+ u8 src_port;
+ u8 dest_port;
+ u8 loopback_type;
+ u8 loopback_state;
+};
+
+struct be_cmd_resp_set_lmode {
+ struct be_cmd_resp_hdr resp_hdr;
+ u8 rsvd0[4];
+};
+
/********************** DDR DMA test *********************/
struct be_cmd_req_ddrdma_test {
struct be_cmd_req_hdr hdr;
@@ -912,3 +927,5 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 num_pkts, u64 pattern);
extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+ u8 loopback_type, u8 enable);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 298b92c..5d001c4 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -118,6 +118,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
#define BE_MAC_LOOPBACK 0x0
#define BE_PHY_LOOPBACK 0x1
#define BE_ONE_PORT_EXT_LOOPBACK 0x2
+#define BE_NO_LOOPBACK 0xff
static void
be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -339,28 +340,50 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
status = be_cmd_read_port_type(adapter, adapter->port_num,
&connector);
- switch (connector) {
- case 7:
- ecmd->port = PORT_FIBRE;
- break;
- default:
- ecmd->port = PORT_TP;
- break;
+ if (!status) {
+ switch (connector) {
+ case 7:
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ case 0:
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_EXTERNAL;
+ break;
+ default:
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+ break;
+ }
+ } else {
+ ecmd->port = PORT_AUI;
+ ecmd->transceiver = XCVR_INTERNAL;
}
/* Save for future use */
adapter->link_speed = ecmd->speed;
adapter->port_type = ecmd->port;
+ adapter->transceiver = ecmd->transceiver;
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
+ ecmd->transceiver = adapter->transceiver;
}
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
ecmd->phy_address = adapter->port_num;
- ecmd->transceiver = XCVR_INTERNAL;
+ switch (ecmd->port) {
+ case PORT_FIBRE:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ break;
+ case PORT_TP:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
+ break;
+ case PORT_AUI:
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
+ break;
+ }
return 0;
}
@@ -489,6 +512,19 @@ err:
return ret;
}
+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
+ u64 *status)
+{
+ be_cmd_set_loopback(adapter, adapter->port_num,
+ loopback_type, 1);
+ *status = be_cmd_loopback_test(adapter, adapter->port_num,
+ loopback_type, 1500,
+ 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->port_num,
+ BE_NO_LOOPBACK, 1);
+ return *status;
+}
+
static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
@@ -497,23 +533,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
- data[0] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_MAC_LOOPBACK, 1500,
- 2, 0xabc);
- if (data[0] != 0)
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
+ &data[0]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
-
- data[1] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_PHY_LOOPBACK, 1500,
- 2, 0xabc);
- if (data[1] != 0)
+ }
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
+ &data[1]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
-
- data[2] = be_cmd_loopback_test(adapter, adapter->port_num,
- BE_ONE_PORT_EXT_LOOPBACK,
- 1500, 2, 0xabc);
- if (data[2] != 0)
+ }
+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+ &data[2]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
+ }
data[3] = be_test_ddr_dma(adapter);
if (data[3] != 0)
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 3a1f790..626b76c 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -910,7 +910,7 @@ static inline struct page *be_alloc_pages(u32 size)
static void be_post_rx_frags(struct be_adapter *adapter)
{
struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
- struct be_rx_page_info *page_info = NULL;
+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct page *pagep = NULL;
struct be_eth_rx_d *rxd;
@@ -941,7 +941,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
rxd = queue_head_node(rxq);
rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
- queue_head_inc(rxq);
/* Any space left in the current big page for another frag? */
if ((page_offset + rx_frag_size + rx_frag_size) >
@@ -949,10 +948,13 @@ static void be_post_rx_frags(struct be_adapter *adapter)
pagep = NULL;
page_info->last_page_user = true;
}
+
+ prev_page_info = page_info;
+ queue_head_inc(rxq);
page_info = &page_info_tbl[rxq->head];
}
if (pagep)
- page_info->last_page_user = true;
+ prev_page_info->last_page_user = true;
if (posted) {
atomic_add(posted, &rxq->used);
@@ -1348,7 +1350,7 @@ static irqreturn_t be_intx(int irq, void *dev)
int isr;
isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
- be_pci_func(adapter) * CEV_ISR_SIZE);
+ (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
if (!isr)
return IRQ_NONE;
@@ -2049,6 +2051,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
static int be_map_pci_bars(struct be_adapter *adapter)
{
u8 __iomem *addr;
+ int pcicfg_reg;
addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
pci_resource_len(adapter->pdev, 2));
@@ -2062,8 +2065,13 @@ static int be_map_pci_bars(struct be_adapter *adapter)
goto pci_map_err;
adapter->db = addr;
- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
- pci_resource_len(adapter->pdev, 1));
+ if (adapter->generation == BE_GEN2)
+ pcicfg_reg = 1;
+ else
+ pcicfg_reg = 0;
+
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
+ pci_resource_len(adapter->pdev, pcicfg_reg));
if (addr == NULL)
goto pci_map_err;
adapter->pcicfg = addr;
@@ -2160,6 +2168,7 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
if (cmd->va == NULL)
return -1;
+ memset(cmd->va, 0, cmd->size);
return 0;
}
@@ -2238,6 +2247,20 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto rel_reg;
}
adapter = netdev_priv(netdev);
+
+ switch (pdev->device) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ adapter->generation = BE_GEN2;
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID2:
+ adapter->generation = BE_GEN3;
+ break;
+ default:
+ adapter->generation = 0;
+ }
+
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 8ffea39..0b23bc4 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
#include <asm/dma.h>
#include <linux/dma-mapping.h>
+#include <asm/dpmc.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/portmux.h>
@@ -386,8 +387,8 @@ static int mii_probe(struct net_device *dev)
u32 sclk, mdc_div;
/* Enable PHY output early */
- if (!(bfin_read_VR_CTL() & PHYCLKOE))
- bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+ if (!(bfin_read_VR_CTL() & CLKBUFOE))
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
sclk = get_sclk();
mdc_div = ((sclk / MDC_CLK) / 2) - 1;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4bfc808..65df1de 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -653,12 +653,20 @@ static void
bnx2_netif_stop(struct bnx2 *bp)
{
bnx2_cnic_stop(bp);
- bnx2_disable_int_sync(bp);
if (netif_running(bp->dev)) {
+ int i;
+
bnx2_napi_disable(bp);
netif_tx_disable(bp->dev);
- bp->dev->trans_start = jiffies; /* prevent tx timeout */
+ /* prevent tx timeout */
+ for (i = 0; i < bp->dev->num_tx_queues; i++) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(bp->dev, i);
+ txq->trans_start = jiffies;
+ }
}
+ bnx2_disable_int_sync(bp);
}
static void
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 77ba135..306c2b8 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -7593,6 +7593,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
bnx2x_set_iscsi_eth_mac_addr(bp, 1);
bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+ bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
+ CNIC_SB_ID(bp));
}
mutex_unlock(&bp->cnic_mutex);
#endif
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d69e683..822f586 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
@@ -352,7 +354,8 @@ static u16 __get_link_speed(struct port *port)
}
}
- pr_debug("Port %d Received link speed %d update from adapter\n", port->actor_port_number, speed);
+ pr_debug("Port %d Received link speed %d update from adapter\n",
+ port->actor_port_number, speed);
return speed;
}
@@ -378,12 +381,14 @@ static u8 __get_duplex(struct port *port)
switch (slave->duplex) {
case DUPLEX_FULL:
retval=0x1;
- pr_debug("Port %d Received status full duplex update from adapter\n", port->actor_port_number);
+ pr_debug("Port %d Received status full duplex update from adapter\n",
+ port->actor_port_number);
break;
case DUPLEX_HALF:
default:
retval=0x0;
- pr_debug("Port %d Received status NOT full duplex update from adapter\n", port->actor_port_number);
+ pr_debug("Port %d Received status NOT full duplex update from adapter\n",
+ port->actor_port_number);
break;
}
}
@@ -980,7 +985,9 @@ static void ad_mux_machine(struct port *port)
// check if the state machine was changed
if (port->sm_mux_state != last_state) {
- pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_mux_state);
+ pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
+ port->actor_port_number, last_state,
+ port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
__detach_bond_from_agg(port);
@@ -1079,7 +1086,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
// check if the State machine was changed or new lacpdu arrived
if ((port->sm_rx_state != last_state) || (lacpdu)) {
- pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_rx_state);
+ pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
+ port->actor_port_number, last_state,
+ port->sm_rx_state);
switch (port->sm_rx_state) {
case AD_RX_INITIALIZE:
if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) {
@@ -1126,9 +1135,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
// detect loopback situation
if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
// INFO_RECEIVED_LOOPBACK_FRAMES
- pr_err(DRV_NAME ": %s: An illegal loopback occurred on "
- "adapter (%s). Check the configuration to verify that all "
- "Adapters are connected to 802.3ad compliant switch ports\n",
+ pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
+ "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
port->slave->dev->master->name, port->slave->dev->name);
__release_rx_machine_lock(port);
return;
@@ -1166,7 +1174,8 @@ static void ad_tx_machine(struct port *port)
__update_lacpdu_from_port(port);
if (ad_lacpdu_send(port) >= 0) {
- pr_debug("Sent LACPDU on port %d\n", port->actor_port_number);
+ pr_debug("Sent LACPDU on port %d\n",
+ port->actor_port_number);
/* mark ntt as false, so it will not be sent again until
demanded */
@@ -1241,7 +1250,9 @@ static void ad_periodic_machine(struct port *port)
// check if the state machine was changed
if (port->sm_periodic_state != last_state) {
- pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_periodic_state);
+ pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
+ port->actor_port_number, last_state,
+ port->sm_periodic_state);
switch (port->sm_periodic_state) {
case AD_NO_PERIODIC:
port->sm_periodic_timer_counter = 0; // zero timer
@@ -1298,7 +1309,9 @@ static void ad_port_selection_logic(struct port *port)
port->next_port_in_aggregator=NULL;
port->actor_port_aggregator_identifier=0;
- pr_debug("Port %d left LAG %d\n", port->actor_port_number, temp_aggregator->aggregator_identifier);
+ pr_debug("Port %d left LAG %d\n",
+ port->actor_port_number,
+ temp_aggregator->aggregator_identifier);
// if the aggregator is empty, clear its parameters, and set it ready to be attached
if (!temp_aggregator->lag_ports) {
ad_clear_agg(temp_aggregator);
@@ -1307,9 +1320,7 @@ static void ad_port_selection_logic(struct port *port)
}
}
if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
- pr_warning(DRV_NAME ": %s: Warning: Port %d (on %s) "
- "was related to aggregator %d but was not "
- "on its port list\n",
+ pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
port->slave->dev->master->name,
port->actor_port_number,
port->slave->dev->name,
@@ -1343,7 +1354,9 @@ static void ad_port_selection_logic(struct port *port)
port->next_port_in_aggregator=aggregator->lag_ports;
port->aggregator->num_of_ports++;
aggregator->lag_ports=port;
- pr_debug("Port %d joined LAG %d(existing LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ pr_debug("Port %d joined LAG %d(existing LAG)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
// mark this port as selected
port->sm_vars |= AD_PORT_SELECTED;
@@ -1380,10 +1393,11 @@ static void ad_port_selection_logic(struct port *port)
// mark this port as selected
port->sm_vars |= AD_PORT_SELECTED;
- pr_debug("Port %d joined LAG %d(new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ pr_debug("Port %d joined LAG %d(new LAG)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
} else {
- pr_err(DRV_NAME ": %s: Port %d (on %s) did not find "
- "a suitable aggregator\n",
+ pr_err("%s: Port %d (on %s) did not find a suitable aggregator\n",
port->slave->dev->master->name,
port->actor_port_number, port->slave->dev->name);
}
@@ -1460,8 +1474,7 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
break;
default:
- pr_warning(DRV_NAME
- ": %s: Impossible agg select mode %d\n",
+ pr_warning("%s: Impossible agg select mode %d\n",
curr->slave->dev->master->name,
__get_agg_selection_mode(curr->lag_ports));
break;
@@ -1546,40 +1559,38 @@ static void ad_agg_selection_logic(struct aggregator *agg)
// if there is new best aggregator, activate it
if (best) {
pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
- best->aggregator_identifier, best->num_of_ports,
- best->actor_oper_aggregator_key,
- best->partner_oper_aggregator_key,
- best->is_individual, best->is_active);
+ best->aggregator_identifier, best->num_of_ports,
+ best->actor_oper_aggregator_key,
+ best->partner_oper_aggregator_key,
+ best->is_individual, best->is_active);
pr_debug("best ports %p slave %p %s\n",
- best->lag_ports, best->slave,
- best->slave ? best->slave->dev->name : "NULL");
+ best->lag_ports, best->slave,
+ best->slave ? best->slave->dev->name : "NULL");
for (agg = __get_first_agg(best->lag_ports); agg;
agg = __get_next_agg(agg)) {
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
- agg->aggregator_identifier, agg->num_of_ports,
- agg->actor_oper_aggregator_key,
- agg->partner_oper_aggregator_key,
- agg->is_individual, agg->is_active);
+ agg->aggregator_identifier, agg->num_of_ports,
+ agg->actor_oper_aggregator_key,
+ agg->partner_oper_aggregator_key,
+ agg->is_individual, agg->is_active);
}
// check if any partner replies
if (best->is_individual) {
- pr_warning(DRV_NAME ": %s: Warning: No 802.3ad"
- " response from the link partner for any"
- " adapters in the bond\n",
- best->slave->dev->master->name);
+ pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+ best->slave ? best->slave->dev->master->name : "NULL");
}
best->is_active = 1;
pr_debug("LAG %d chosen as the active LAG\n",
- best->aggregator_identifier);
+ best->aggregator_identifier);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
- best->aggregator_identifier, best->num_of_ports,
- best->actor_oper_aggregator_key,
- best->partner_oper_aggregator_key,
- best->is_individual, best->is_active);
+ best->aggregator_identifier, best->num_of_ports,
+ best->actor_oper_aggregator_key,
+ best->partner_oper_aggregator_key,
+ best->is_individual, best->is_active);
// disable the ports that were related to the former active_aggregator
if (active) {
@@ -1633,7 +1644,8 @@ static void ad_clear_agg(struct aggregator *aggregator)
aggregator->lag_ports = NULL;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
- pr_debug("LAG %d was cleared\n", aggregator->aggregator_identifier);
+ pr_debug("LAG %d was cleared\n",
+ aggregator->aggregator_identifier);
}
}
@@ -1728,7 +1740,9 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
static void ad_enable_collecting_distributing(struct port *port)
{
if (port->aggregator->is_active) {
- pr_debug("Enabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ pr_debug("Enabling port %d(LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
__enable_port(port);
}
}
@@ -1741,7 +1755,9 @@ static void ad_enable_collecting_distributing(struct port *port)
static void ad_disable_collecting_distributing(struct port *port)
{
if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
- pr_debug("Disabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ pr_debug("Disabling port %d(LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
__disable_port(port);
}
}
@@ -1779,7 +1795,8 @@ static void ad_marker_info_send(struct port *port)
// send the marker information
if (ad_marker_send(port, &marker) >= 0) {
- pr_debug("Sent Marker Information on port %d\n", port->actor_port_number);
+ pr_debug("Sent Marker Information on port %d\n",
+ port->actor_port_number);
}
}
#endif
@@ -1803,7 +1820,8 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
// send the marker response
if (ad_marker_send(port, &marker) >= 0) {
- pr_debug("Sent Marker Response on port %d\n", port->actor_port_number);
+ pr_debug("Sent Marker Response on port %d\n",
+ port->actor_port_number);
}
}
@@ -1889,8 +1907,7 @@ int bond_3ad_bind_slave(struct slave *slave)
struct aggregator *aggregator;
if (bond == NULL) {
- pr_err(DRV_NAME ": %s: The slave %s is not attached to "
- "its bond\n",
+ pr_err("%s: The slave %s is not attached to its bond\n",
slave->dev->master->name, slave->dev->name);
return -1;
}
@@ -1966,13 +1983,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
- pr_warning(DRV_NAME ": Warning: %s: Trying to "
- "unbind an uninitialized port on %s\n",
+ pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n",
slave->dev->master->name, slave->dev->name);
return;
}
- pr_debug("Unbinding Link Aggregation Group %d\n", aggregator->aggregator_identifier);
+ pr_debug("Unbinding Link Aggregation Group %d\n",
+ aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
@@ -1996,10 +2013,12 @@ void bond_3ad_unbind_slave(struct slave *slave)
// if new aggregator found, copy the aggregator's parameters
// and connect the related lag_ports to the new aggregator
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
- pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier);
+ pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
+ aggregator->aggregator_identifier,
+ new_aggregator->aggregator_identifier);
if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
- pr_info(DRV_NAME ": %s: Removing an active aggregator\n",
+ pr_info("%s: Removing an active aggregator\n",
aggregator->slave->dev->master->name);
// select new active aggregator
select_new_active_agg = 1;
@@ -2030,8 +2049,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
ad_agg_selection_logic(__get_first_agg(port));
}
} else {
- pr_warning(DRV_NAME ": %s: Warning: unbinding aggregator, "
- "and could not find a new aggregator for its ports\n",
+ pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
slave->dev->master->name);
}
} else { // in case that the only port related to this aggregator is the one we want to remove
@@ -2039,7 +2057,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
// clear the aggregator
ad_clear_agg(aggregator);
if (select_new_active_agg) {
- pr_info(DRV_NAME ": %s: Removing an active aggregator\n",
+ pr_info("%s: Removing an active aggregator\n",
slave->dev->master->name);
// select new active aggregator
ad_agg_selection_logic(__get_first_agg(port));
@@ -2066,7 +2084,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
// clear the aggregator
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
- pr_info(DRV_NAME ": %s: Removing an active aggregator\n",
+ pr_info("%s: Removing an active aggregator\n",
slave->dev->master->name);
// select new active aggregator
ad_agg_selection_logic(__get_first_agg(port));
@@ -2115,8 +2133,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
// select the active aggregator for the bond
if ((port = __get_first_port(bond))) {
if (!port->slave) {
- pr_warning(DRV_NAME ": %s: Warning: bond's first port is "
- "uninitialized\n", bond->dev->name);
+ pr_warning("%s: Warning: bond's first port is uninitialized\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2129,8 +2147,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
// for each port run the state machines
for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
if (!port->slave) {
- pr_warning(DRV_NAME ": %s: Warning: Found an uninitialized "
- "port\n", bond->dev->name);
+ pr_warning("%s: Warning: Found an uninitialized port\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2171,15 +2189,15 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warning(DRV_NAME ": %s: Warning: port of slave %s "
- "is uninitialized\n",
+ pr_warning("%s: Warning: port of slave %s is uninitialized\n",
slave->dev->name, slave->dev->master->name);
return;
}
switch (lacpdu->subtype) {
case AD_TYPE_LACPDU:
- pr_debug("Received LACPDU on port %d\n", port->actor_port_number);
+ pr_debug("Received LACPDU on port %d\n",
+ port->actor_port_number);
ad_rx_machine(lacpdu, port);
break;
@@ -2188,17 +2206,20 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
switch (((struct bond_marker *)lacpdu)->tlv_type) {
case AD_MARKER_INFORMATION_SUBTYPE:
- pr_debug("Received Marker Information on port %d\n", port->actor_port_number);
+ pr_debug("Received Marker Information on port %d\n",
+ port->actor_port_number);
ad_marker_info_received((struct bond_marker *)lacpdu, port);
break;
case AD_MARKER_RESPONSE_SUBTYPE:
- pr_debug("Received Marker Response on port %d\n", port->actor_port_number);
+ pr_debug("Received Marker Response on port %d\n",
+ port->actor_port_number);
ad_marker_response_received((struct bond_marker *)lacpdu, port);
break;
default:
- pr_debug("Received an unknown Marker subtype on slot %d\n", port->actor_port_number);
+ pr_debug("Received an unknown Marker subtype on slot %d\n",
+ port->actor_port_number);
}
}
}
@@ -2218,8 +2239,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
- pr_warning(DRV_NAME ": Warning: %s: speed "
- "changed for uninitialized port on %s\n",
+ pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
slave->dev->master->name, slave->dev->name);
return;
}
@@ -2246,8 +2266,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
- pr_warning(DRV_NAME ": %s: Warning: duplex changed "
- "for uninitialized port on %s\n",
+ pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
slave->dev->master->name, slave->dev->name);
return;
}
@@ -2275,8 +2294,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
// if slave is null, the whole port is not initialized
if (!port->slave) {
- pr_warning(DRV_NAME ": Warning: %s: link status changed for "
- "uninitialized port on %s\n",
+ pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
slave->dev->master->name, slave->dev->name);
return;
}
@@ -2381,8 +2399,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
}
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
- pr_debug(DRV_NAME ": %s: Error: "
- "bond_3ad_get_active_agg_info failed\n", dev->name);
+ pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
+ dev->name);
goto out;
}
@@ -2391,8 +2409,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
if (slaves_in_agg == 0) {
/*the aggregator is empty*/
- pr_debug(DRV_NAME ": %s: Error: active aggregator is empty\n",
- dev->name);
+ pr_debug("%s: Error: active aggregator is empty\n", dev->name);
goto out;
}
@@ -2410,8 +2427,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
}
if (slave_agg_no >= 0) {
- pr_err(DRV_NAME ": %s: Error: Couldn't find a slave to tx on "
- "for aggregator ID %d\n", dev->name, agg_id);
+ pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
+ dev->name, agg_id);
goto out;
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 00ab51e..40fdc41 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -201,8 +203,7 @@ static int tlb_initialize(struct bonding *bond)
new_hashtbl = kzalloc(size, GFP_KERNEL);
if (!new_hashtbl) {
- pr_err(DRV_NAME
- ": %s: Error: Failed to allocate TLB hash table\n",
+ pr_err("%s: Error: Failed to allocate TLB hash table\n",
bond->dev->name);
return -1;
}
@@ -514,8 +515,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
client_info->slave->dev->dev_addr,
client_info->mac_dst);
if (!skb) {
- pr_err(DRV_NAME
- ": %s: Error: failed to create an ARP packet\n",
+ pr_err("%s: Error: failed to create an ARP packet\n",
client_info->slave->dev->master->name);
continue;
}
@@ -525,8 +525,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
if (client_info->tag) {
skb = vlan_put_tag(skb, client_info->vlan_id);
if (!skb) {
- pr_err(DRV_NAME
- ": %s: Error: failed to insert VLAN tag\n",
+ pr_err("%s: Error: failed to insert VLAN tag\n",
client_info->slave->dev->master->name);
continue;
}
@@ -609,9 +608,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (!client_info->slave) {
- pr_err(DRV_NAME
- ": %s: Error: found a client with no channel in "
- "the client's hash table\n",
+ pr_err("%s: Error: found a client with no channel in the client's hash table\n",
bond->dev->name);
continue;
}
@@ -806,8 +803,7 @@ static int rlb_initialize(struct bonding *bond)
new_hashtbl = kmalloc(size, GFP_KERNEL);
if (!new_hashtbl) {
- pr_err(DRV_NAME
- ": %s: Error: Failed to allocate RLB hash table\n",
+ pr_err("%s: Error: Failed to allocate RLB hash table\n",
bond->dev->name);
return -1;
}
@@ -928,8 +924,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
skb = vlan_put_tag(skb, vlan->vlan_id);
if (!skb) {
- pr_err(DRV_NAME
- ": %s: Error: failed to insert VLAN tag\n",
+ pr_err("%s: Error: failed to insert VLAN tag\n",
bond->dev->name);
continue;
}
@@ -958,11 +953,8 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
memcpy(s_addr.sa_data, addr, dev->addr_len);
s_addr.sa_family = dev->type;
if (dev_set_mac_address(dev, &s_addr)) {
- pr_err(DRV_NAME
- ": %s: Error: dev_set_mac_address of dev %s failed! ALB "
- "mode requires that the base driver support setting "
- "the hw address also when the network device's "
- "interface is open\n",
+ pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
+ "ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
dev->master->name, dev->name);
return -EOPNOTSUPP;
}
@@ -1169,18 +1161,12 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
bond->alb_info.rlb_enabled);
- pr_warning(DRV_NAME
- ": %s: Warning: the hw address of slave %s is "
- "in use by the bond; giving it the hw address "
- "of %s\n",
+ pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
bond->dev->name, slave->dev->name,
free_mac_slave->dev->name);
} else if (has_bond_addr) {
- pr_err(DRV_NAME
- ": %s: Error: the hw address of slave %s is in use by the "
- "bond; couldn't find a slave with a free hw address to "
- "give it (this should not have happened)\n",
+ pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
bond->dev->name, slave->dev->name);
return -EFAULT;
}
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index b72e1dc..6dd64cf 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
@@ -74,20 +76,20 @@ static void bond_na_send(struct net_device *slave_dev,
addrconf_addr_solict_mult(daddr, &mcaddr);
pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
- slave_dev->name, &mcaddr, daddr);
+ slave_dev->name, &mcaddr, daddr);
skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
ND_OPT_TARGET_LL_ADDR);
if (!skb) {
- pr_err(DRV_NAME ": NA packet allocation failed\n");
+ pr_err("NA packet allocation failed\n");
return;
}
if (vlan_id) {
skb = vlan_put_tag(skb, vlan_id);
if (!skb) {
- pr_err(DRV_NAME ": failed to insert VLAN tag\n");
+ pr_err("failed to insert VLAN tag\n");
return;
}
}
@@ -109,8 +111,8 @@ void bond_send_unsolicited_na(struct bonding *bond)
struct inet6_dev *idev;
int is_router;
- pr_debug("bond_send_unsol_na: bond %s slave %s\n", bond->dev->name,
- slave ? slave->dev->name : "NULL");
+ pr_debug("%s: bond %s slave %s\n", bond->dev->name,
+ __func__, slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_unsol_na ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index af9b9c4..efa0e41 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -31,6 +31,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -260,7 +262,7 @@ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
struct vlan_entry *vlan;
pr_debug("bond: %s, vlan id %d\n",
- (bond ? bond->dev->name : "None"), vlan_id);
+ (bond ? bond->dev->name : "None"), vlan_id);
vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL);
if (!vlan)
@@ -303,8 +305,8 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
if (bond_is_lb(bond))
bond_alb_clear_vlan(bond, vlan_id);
- pr_debug("removed VLAN ID %d from bond %s\n", vlan_id,
- bond->dev->name);
+ pr_debug("removed VLAN ID %d from bond %s\n",
+ vlan_id, bond->dev->name);
kfree(vlan);
@@ -323,8 +325,8 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
}
}
- pr_debug("couldn't find VLAN ID %d in bond %s\n", vlan_id,
- bond->dev->name);
+ pr_debug("couldn't find VLAN ID %d in bond %s\n",
+ vlan_id, bond->dev->name);
out:
write_unlock_bh(&bond->lock);
@@ -348,7 +350,7 @@ static int bond_has_challenged_slaves(struct bonding *bond)
bond_for_each_slave(bond, slave, i) {
if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
pr_debug("found VLAN challenged slave - %s\n",
- slave->dev->name);
+ slave->dev->name);
return 1;
}
}
@@ -499,8 +501,7 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
res = bond_add_vlan(bond, vid);
if (res) {
- pr_err(DRV_NAME
- ": %s: Error: Failed to add vlan id %d\n",
+ pr_err("%s: Error: Failed to add vlan id %d\n",
bond_dev->name, vid);
}
}
@@ -534,8 +535,7 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
res = bond_del_vlan(bond, vid);
if (res) {
- pr_err(DRV_NAME
- ": %s: Error: Failed to remove vlan id %d\n",
+ pr_err("%s: Error: Failed to remove vlan id %d\n",
bond_dev->name, vid);
}
}
@@ -1053,8 +1053,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
rv = dev_set_mac_address(new_active->dev, &saddr);
if (rv) {
- pr_err(DRV_NAME
- ": %s: Error %d setting MAC of slave %s\n",
+ pr_err("%s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
goto out;
}
@@ -1067,16 +1066,14 @@ static void bond_do_fail_over_mac(struct bonding *bond,
rv = dev_set_mac_address(old_active->dev, &saddr);
if (rv)
- pr_err(DRV_NAME
- ": %s: Error %d setting MAC of slave %s\n",
+ pr_err("%s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
out:
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
break;
default:
- pr_err(DRV_NAME
- ": %s: bond_do_fail_over_mac impossible: bad policy %d\n",
+ pr_err("%s: bond_do_fail_over_mac impossible: bad policy %d\n",
bond->dev->name, bond->params.fail_over_mac);
break;
}
@@ -1178,11 +1175,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
if (new_active->link == BOND_LINK_BACK) {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info(DRV_NAME
- ": %s: making interface %s the new "
- "active one %d ms earlier.\n",
- bond->dev->name, new_active->dev->name,
- (bond->params.updelay - new_active->delay) * bond->params.miimon);
+ pr_info("%s: making interface %s the new active one %d ms earlier.\n",
+ bond->dev->name, new_active->dev->name,
+ (bond->params.updelay - new_active->delay) * bond->params.miimon);
}
new_active->delay = 0;
@@ -1195,10 +1190,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info(DRV_NAME
- ": %s: making interface %s the new "
- "active one.\n",
- bond->dev->name, new_active->dev->name);
+ pr_info("%s: making interface %s the new active one.\n",
+ bond->dev->name, new_active->dev->name);
}
}
}
@@ -1268,13 +1261,11 @@ void bond_select_active_slave(struct bonding *bond)
return;
if (netif_carrier_ok(bond->dev)) {
- pr_info(DRV_NAME
- ": %s: first active interface up!\n",
- bond->dev->name);
+ pr_info("%s: first active interface up!\n",
+ bond->dev->name);
} else {
- pr_info(DRV_NAME ": %s: "
- "now running without any active interface !\n",
- bond->dev->name);
+ pr_info("%s: now running without any active interface !\n",
+ bond->dev->name);
}
}
}
@@ -1423,16 +1414,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
- pr_warning(DRV_NAME
- ": %s: Warning: no link monitoring support for %s\n",
- bond_dev->name, slave_dev->name);
+ pr_warning("%s: Warning: no link monitoring support for %s\n",
+ bond_dev->name, slave_dev->name);
}
/* bond must be initialized by bond_open() before enslaving */
if (!(bond_dev->flags & IFF_UP)) {
- pr_warning(DRV_NAME
- " %s: master_dev is not up in bond_enslave\n",
- bond_dev->name);
+ pr_warning("%s: master_dev is not up in bond_enslave\n",
+ bond_dev->name);
}
/* already enslaved */
@@ -1446,19 +1435,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
if (!list_empty(&bond->vlan_list)) {
- pr_err(DRV_NAME
- ": %s: Error: cannot enslave VLAN "
- "challenged slave %s on VLAN enabled "
- "bond %s\n", bond_dev->name, slave_dev->name,
- bond_dev->name);
+ pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
+ bond_dev->name, slave_dev->name, bond_dev->name);
return -EPERM;
} else {
- pr_warning(DRV_NAME
- ": %s: Warning: enslaved VLAN challenged "
- "slave %s. Adding VLANs will be blocked as "
- "long as %s is part of bond %s\n",
- bond_dev->name, slave_dev->name, slave_dev->name,
- bond_dev->name);
+ pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
+ bond_dev->name, slave_dev->name,
+ slave_dev->name, bond_dev->name);
bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
}
} else {
@@ -1478,8 +1461,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* enslaving it; the old ifenslave will not.
*/
if ((slave_dev->flags & IFF_UP)) {
- pr_err(DRV_NAME ": %s is up. "
- "This may be due to an out of date ifenslave.\n",
+ pr_err("%s is up. This may be due to an out of date ifenslave.\n",
slave_dev->name);
res = -EPERM;
goto err_undo_flags;
@@ -1495,7 +1477,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond->slave_cnt == 0) {
if (bond_dev->type != slave_dev->type) {
pr_debug("%s: change device type from %d to %d\n",
- bond_dev->name, bond_dev->type, slave_dev->type);
+ bond_dev->name,
+ bond_dev->type, slave_dev->type);
netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
@@ -1507,28 +1490,21 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
}
} else if (bond_dev->type != slave_dev->type) {
- pr_err(DRV_NAME ": %s ether type (%d) is different "
- "from other slaves (%d), can not enslave it.\n",
- slave_dev->name,
- slave_dev->type, bond_dev->type);
- res = -EINVAL;
- goto err_undo_flags;
+ pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
+ slave_dev->name,
+ slave_dev->type, bond_dev->type);
+ res = -EINVAL;
+ goto err_undo_flags;
}
if (slave_ops->ndo_set_mac_address == NULL) {
if (bond->slave_cnt == 0) {
- pr_warning(DRV_NAME
- ": %s: Warning: The first slave device "
- "specified does not support setting the MAC "
- "address. Setting fail_over_mac to active.",
- bond_dev->name);
+ pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
+ bond_dev->name);
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
- pr_err(DRV_NAME
- ": %s: Error: The slave device specified "
- "does not support setting the MAC address, "
- "but fail_over_mac is not set to active.\n"
- , bond_dev->name);
+ pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
+ bond_dev->name);
res = -EOPNOTSUPP;
goto err_undo_flags;
}
@@ -1655,22 +1631,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
- pr_warning(DRV_NAME
- ": %s: Warning: MII and ETHTOOL support not "
- "available for interface %s, and "
- "arp_interval/arp_ip_target module parameters "
- "not specified, thus bonding will not detect "
- "link failures! see bonding.txt for details.\n",
+ pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
bond_dev->name, slave_dev->name);
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
- pr_warning(DRV_NAME
- ": %s: Warning: can't get link status from "
- "interface %s; the network driver associated "
- "with this interface does not support MII or "
- "ETHTOOL link status reporting, thus miimon "
- "has no effect on this interface.\n",
- bond_dev->name, slave_dev->name);
+ pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
+ bond_dev->name, slave_dev->name);
}
}
@@ -1678,34 +1644,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.miimon ||
(bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) {
if (bond->params.updelay) {
- pr_debug("Initial state of slave_dev is "
- "BOND_LINK_BACK\n");
+ pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n");
new_slave->link = BOND_LINK_BACK;
new_slave->delay = bond->params.updelay;
} else {
- pr_debug("Initial state of slave_dev is "
- "BOND_LINK_UP\n");
+ pr_debug("Initial state of slave_dev is BOND_LINK_UP\n");
new_slave->link = BOND_LINK_UP;
}
new_slave->jiffies = jiffies;
} else {
- pr_debug("Initial state of slave_dev is "
- "BOND_LINK_DOWN\n");
+ pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n");
new_slave->link = BOND_LINK_DOWN;
}
if (bond_update_speed_duplex(new_slave) &&
(new_slave->link != BOND_LINK_DOWN)) {
- pr_warning(DRV_NAME
- ": %s: Warning: failed to get speed and duplex from %s, "
- "assumed to be 100Mb/sec and Full.\n",
- bond_dev->name, new_slave->dev->name);
+ pr_warning("%s: Warning: failed to get speed and duplex from %s, assumed to be 100Mb/sec and Full.\n",
+ bond_dev->name, new_slave->dev->name);
if (bond->params.mode == BOND_MODE_8023AD) {
- pr_warning(DRV_NAME
- ": %s: Warning: Operation of 802.3ad mode requires ETHTOOL "
- "support in base driver for proper aggregator "
- "selection.\n", bond_dev->name);
+ pr_warning("%s: Warning: Operation of 802.3ad mode requires ETHTOOL support in base driver for proper aggregator selection.\n",
+ bond_dev->name);
}
}
@@ -1777,11 +1736,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (res)
goto err_close;
- pr_info(DRV_NAME
- ": %s: enslaving %s as a%s interface with a%s link.\n",
- bond_dev->name, slave_dev->name,
- new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
- new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
+ pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
+ bond_dev->name, slave_dev->name,
+ new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
+ new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
/* enslave is successful */
return 0;
@@ -1833,8 +1791,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
(slave_dev->master != bond_dev)) {
- pr_err(DRV_NAME
- ": %s: Error: cannot release %s.\n",
+ pr_err("%s: Error: cannot release %s.\n",
bond_dev->name, slave_dev->name);
return -EINVAL;
}
@@ -1844,9 +1801,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
- pr_info(DRV_NAME
- ": %s: %s not enslaved\n",
- bond_dev->name, slave_dev->name);
+ pr_info("%s: %s not enslaved\n",
+ bond_dev->name, slave_dev->name);
write_unlock_bh(&bond->lock);
return -EINVAL;
}
@@ -1854,14 +1810,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.fail_over_mac) {
if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond->slave_cnt > 1)
- pr_warning(DRV_NAME
- ": %s: Warning: the permanent HWaddr of %s - "
- "%pM - is still in use by %s. "
- "Set the HWaddr of %s to a different address "
- "to avoid conflicts.\n",
- bond_dev->name, slave_dev->name,
- slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
+ pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
+ bond_dev->name, slave_dev->name,
+ slave->perm_hwaddr,
+ bond_dev->name, slave_dev->name);
}
/* Inform AD package of unbinding of slave. */
@@ -1872,12 +1824,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
bond_3ad_unbind_slave(slave);
}
- pr_info(DRV_NAME
- ": %s: releasing %s interface %s\n",
- bond_dev->name,
- (slave->state == BOND_STATE_ACTIVE)
- ? "active" : "backup",
- slave_dev->name);
+ pr_info("%s: releasing %s interface %s\n",
+ bond_dev->name,
+ (slave->state == BOND_STATE_ACTIVE) ? "active" : "backup",
+ slave_dev->name);
oldcurrent = bond->curr_active_slave;
@@ -1934,21 +1884,15 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
if (list_empty(&bond->vlan_list)) {
bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
} else {
- pr_warning(DRV_NAME
- ": %s: Warning: clearing HW address of %s while it "
- "still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning(DRV_NAME
- ": %s: When re-adding slaves, make sure the bond's "
- "HW address matches its VLANs'.\n",
- bond_dev->name);
+ pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
+ bond_dev->name, bond_dev->name);
+ pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
+ bond_dev->name);
}
} else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
!bond_has_challenged_slaves(bond)) {
- pr_info(DRV_NAME
- ": %s: last VLAN challenged slave %s "
- "left bond %s. VLAN blocking is removed\n",
- bond_dev->name, slave_dev->name, bond_dev->name);
+ pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+ bond_dev->name, slave_dev->name, bond_dev->name);
bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
}
@@ -2011,8 +1955,8 @@ int bond_release_and_destroy(struct net_device *bond_dev,
ret = bond_release(bond_dev, slave_dev);
if ((ret == 0) && (bond->slave_cnt == 0)) {
- pr_info(DRV_NAME ": %s: destroying bond %s.\n",
- bond_dev->name, bond_dev->name);
+ pr_info("%s: destroying bond %s.\n",
+ bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
}
return ret;
@@ -2116,19 +2060,13 @@ static int bond_release_all(struct net_device *bond_dev)
if (list_empty(&bond->vlan_list))
bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
else {
- pr_warning(DRV_NAME
- ": %s: Warning: clearing HW address of %s while it "
- "still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning(DRV_NAME
- ": %s: When re-adding slaves, make sure the bond's "
- "HW address matches its VLANs'.\n",
- bond_dev->name);
+ pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
+ bond_dev->name, bond_dev->name);
+ pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
+ bond_dev->name);
}
- pr_info(DRV_NAME
- ": %s: released all slaves\n",
- bond_dev->name);
+ pr_info("%s: released all slaves\n", bond_dev->name);
out:
write_unlock_bh(&bond->lock);
@@ -2254,16 +2192,14 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->link = BOND_LINK_FAIL;
slave->delay = bond->params.downdelay;
if (slave->delay) {
- pr_info(DRV_NAME
- ": %s: link status down for %s"
- "interface %s, disabling it in %d ms.\n",
- bond->dev->name,
- (bond->params.mode ==
- BOND_MODE_ACTIVEBACKUP) ?
- ((slave->state == BOND_STATE_ACTIVE) ?
- "active " : "backup ") : "",
- slave->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
+ bond->dev->name,
+ (bond->params.mode ==
+ BOND_MODE_ACTIVEBACKUP) ?
+ ((slave->state == BOND_STATE_ACTIVE) ?
+ "active " : "backup ") : "",
+ slave->dev->name,
+ bond->params.downdelay * bond->params.miimon);
}
/*FALLTHRU*/
case BOND_LINK_FAIL:
@@ -2273,13 +2209,11 @@ static int bond_miimon_inspect(struct bonding *bond)
*/
slave->link = BOND_LINK_UP;
slave->jiffies = jiffies;
- pr_info(DRV_NAME
- ": %s: link status up again after %d "
- "ms for interface %s.\n",
- bond->dev->name,
- (bond->params.downdelay - slave->delay) *
- bond->params.miimon,
- slave->dev->name);
+ pr_info("%s: link status up again after %d ms for interface %s.\n",
+ bond->dev->name,
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon,
+ slave->dev->name);
continue;
}
@@ -2300,25 +2234,21 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = bond->params.updelay;
if (slave->delay) {
- pr_info(DRV_NAME
- ": %s: link status up for "
- "interface %s, enabling it in %d ms.\n",
- bond->dev->name, slave->dev->name,
- ignore_updelay ? 0 :
- bond->params.updelay *
- bond->params.miimon);
+ pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
+ bond->dev->name, slave->dev->name,
+ ignore_updelay ? 0 :
+ bond->params.updelay *
+ bond->params.miimon);
}
/*FALLTHRU*/
case BOND_LINK_BACK:
if (!link_state) {
slave->link = BOND_LINK_DOWN;
- pr_info(DRV_NAME
- ": %s: link status down again after %d "
- "ms for interface %s.\n",
- bond->dev->name,
- (bond->params.updelay - slave->delay) *
- bond->params.miimon,
- slave->dev->name);
+ pr_info("%s: link status down again after %d ms for interface %s.\n",
+ bond->dev->name,
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon,
+ slave->dev->name);
continue;
}
@@ -2366,10 +2296,8 @@ static void bond_miimon_commit(struct bonding *bond)
slave->state = BOND_STATE_BACKUP;
}
- pr_info(DRV_NAME
- ": %s: link status definitely "
- "up for interface %s.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: link status definitely up for interface %s.\n",
+ bond->dev->name, slave->dev->name);
/* notify ad that the link status has changed */
if (bond->params.mode == BOND_MODE_8023AD)
@@ -2395,10 +2323,8 @@ static void bond_miimon_commit(struct bonding *bond)
bond->params.mode == BOND_MODE_8023AD)
bond_set_slave_inactive_flags(slave);
- pr_info(DRV_NAME
- ": %s: link status definitely down for "
- "interface %s, disabling it\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: link status definitely down for interface %s, disabling it\n",
+ bond->dev->name, slave->dev->name);
if (bond->params.mode == BOND_MODE_8023AD)
bond_3ad_handle_link_change(slave,
@@ -2414,8 +2340,7 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
default:
- pr_err(DRV_NAME
- ": %s: invalid new link %d on slave %s\n",
+ pr_err("%s: invalid new link %d on slave %s\n",
bond->dev->name, slave->new_link,
slave->dev->name);
slave->new_link = BOND_LINK_NOCHANGE;
@@ -2534,19 +2459,19 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
struct sk_buff *skb;
pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op,
- slave_dev->name, dest_ip, src_ip, vlan_id);
+ slave_dev->name, dest_ip, src_ip, vlan_id);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
if (!skb) {
- pr_err(DRV_NAME ": ARP packet allocation failed\n");
+ pr_err("ARP packet allocation failed\n");
return;
}
if (vlan_id) {
skb = vlan_put_tag(skb, vlan_id);
if (!skb) {
- pr_err(DRV_NAME ": failed to insert VLAN tag\n");
+ pr_err("failed to insert VLAN tag\n");
return;
}
}
@@ -2586,9 +2511,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
if (rv) {
if (net_ratelimit()) {
- pr_warning(DRV_NAME
- ": %s: no route to arp_ip_target %pI4\n",
- bond->dev->name, &fl.fl4_dst);
+ pr_warning("%s: no route to arp_ip_target %pI4\n",
+ bond->dev->name, &fl.fl4_dst);
}
continue;
}
@@ -2623,10 +2547,9 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
}
if (net_ratelimit()) {
- pr_warning(DRV_NAME
- ": %s: no path to arp_ip_target %pI4 via rt.dev %s\n",
- bond->dev->name, &fl.fl4_dst,
- rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
+ pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
+ bond->dev->name, &fl.fl4_dst,
+ rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
}
ip_rt_put(rt);
}
@@ -2644,8 +2567,8 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
struct vlan_entry *vlan;
struct net_device *vlan_dev;
- pr_debug("bond_send_grat_arp: bond %s slave %s\n", bond->dev->name,
- slave ? slave->dev->name : "NULL");
+ pr_debug("bond_send_grat_arp: bond %s slave %s\n",
+ bond->dev->name, slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_grat_arp ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
@@ -2674,7 +2597,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n",
- &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip));
+ &sip, &tip, i, &targets[i],
+ bond_has_this_ip(bond, tip));
if (sip == targets[i]) {
if (bond_has_this_ip(bond, tip))
slave->last_arp_rx = jiffies;
@@ -2698,8 +2622,8 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
read_lock(&bond->lock);
pr_debug("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n",
- bond->dev->name, skb->dev ? skb->dev->name : "NULL",
- orig_dev ? orig_dev->name : "NULL");
+ bond->dev->name, skb->dev ? skb->dev->name : "NULL",
+ orig_dev ? orig_dev->name : "NULL");
slave = bond_get_slave_by_dev(bond, orig_dev);
if (!slave || !slave_do_arp_validate(bond, slave))
@@ -2724,9 +2648,9 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
memcpy(&tip, arp_ptr, 4);
pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
- bond->dev->name, slave->dev->name, slave->state,
- bond->params.arp_validate, slave_do_arp_validate(bond, slave),
- &sip, &tip);
+ bond->dev->name, slave->dev->name, slave->state,
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ &sip, &tip);
/*
* Backup slaves won't see the ARP reply, but do come through
@@ -2800,17 +2724,14 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* is closed.
*/
if (!oldcurrent) {
- pr_info(DRV_NAME
- ": %s: link status definitely "
- "up for interface %s, ",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: link status definitely up for interface %s, ",
+ bond->dev->name,
+ slave->dev->name);
do_failover = 1;
} else {
- pr_info(DRV_NAME
- ": %s: interface %s is now up\n",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: interface %s is now up\n",
+ bond->dev->name,
+ slave->dev->name);
}
}
} else {
@@ -2829,10 +2750,9 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- pr_info(DRV_NAME
- ": %s: interface %s is now down.\n",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: interface %s is now down.\n",
+ bond->dev->name,
+ slave->dev->name);
if (slave == oldcurrent)
do_failover = 1;
@@ -2965,9 +2885,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
slave->link = BOND_LINK_UP;
bond->current_arp_slave = NULL;
- pr_info(DRV_NAME
- ": %s: link status definitely "
- "up for interface %s.\n",
+ pr_info("%s: link status definitely up for interface %s.\n",
bond->dev->name, slave->dev->name);
if (!bond->curr_active_slave ||
@@ -2985,9 +2903,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
slave->link = BOND_LINK_DOWN;
bond_set_slave_inactive_flags(slave);
- pr_info(DRV_NAME
- ": %s: link status definitely down for "
- "interface %s, disabling it\n",
+ pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
if (slave == bond->curr_active_slave) {
@@ -2998,8 +2914,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
continue;
default:
- pr_err(DRV_NAME
- ": %s: impossible: new_link %d on slave %s\n",
+ pr_err("%s: impossible: new_link %d on slave %s\n",
bond->dev->name, slave->new_link,
slave->dev->name);
continue;
@@ -3028,9 +2943,9 @@ static void bond_ab_arp_probe(struct bonding *bond)
read_lock(&bond->curr_slave_lock);
if (bond->current_arp_slave && bond->curr_active_slave)
- pr_info(DRV_NAME "PROBE: c_arp %s && cas %s BAD\n",
- bond->current_arp_slave->dev->name,
- bond->curr_active_slave->dev->name);
+ pr_info("PROBE: c_arp %s && cas %s BAD\n",
+ bond->current_arp_slave->dev->name,
+ bond->curr_active_slave->dev->name);
if (bond->curr_active_slave) {
bond_arp_send_all(bond, bond->curr_active_slave);
@@ -3078,9 +2993,8 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(slave);
- pr_info(DRV_NAME
- ": %s: backup interface %s is now down.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: backup interface %s is now down.\n",
+ bond->dev->name, slave->dev->name);
}
}
}
@@ -3360,9 +3274,8 @@ static void bond_create_proc_entry(struct bonding *bond)
S_IRUGO, bn->proc_dir,
&bond_info_fops, bond);
if (bond->proc_entry == NULL)
- pr_warning(DRV_NAME
- ": Warning: Cannot create /proc/net/%s/%s\n",
- DRV_NAME, bond_dev->name);
+ pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
+ DRV_NAME, bond_dev->name);
else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
}
@@ -3388,9 +3301,8 @@ static void bond_create_proc_dir(struct bond_net *bn)
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
if (!bn->proc_dir)
- pr_warning(DRV_NAME
- ": Warning: cannot create /proc/net/%s\n",
- DRV_NAME);
+ pr_warning("Warning: cannot create /proc/net/%s\n",
+ DRV_NAME);
}
}
@@ -3539,8 +3451,8 @@ static int bond_netdev_event(struct notifier_block *this,
struct net_device *event_dev = (struct net_device *)ptr;
pr_debug("event_dev: %s, event: %lx\n",
- (event_dev ? event_dev->name : "None"),
- event);
+ event_dev ? event_dev->name : "None",
+ event);
if (!(event_dev->priv_flags & IFF_BONDING))
return NOTIFY_DONE;
@@ -3727,7 +3639,7 @@ static int bond_open(struct net_device *bond_dev)
*/
if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
/* something went wrong - fail the open operation */
- return -1;
+ return -ENOMEM;
}
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
@@ -3875,8 +3787,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
struct mii_ioctl_data *mii = NULL;
int res = 0;
- pr_debug("bond_ioctl: master=%s, cmd=%d\n",
- bond_dev->name, cmd);
+ pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd);
switch (cmd) {
case SIOCGMIIPHY:
@@ -3945,12 +3856,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
- pr_debug("slave_dev=%p: \n", slave_dev);
+ pr_debug("slave_dev=%p:\n", slave_dev);
if (!slave_dev)
res = -ENODEV;
else {
- pr_debug("slave_dev->name=%s: \n", slave_dev->name);
+ pr_debug("slave_dev->name=%s:\n", slave_dev->name);
switch (cmd) {
case BOND_ENSLAVE_OLD:
case SIOCBONDENSLAVE:
@@ -4059,7 +3970,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
int i;
pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
- (bond_dev ? bond_dev->name : "None"), new_mtu);
+ (bond_dev ? bond_dev->name : "None"), new_mtu);
/* Can't hold bond->lock with bh disabled here since
* some base drivers panic. On the other hand we can't
@@ -4077,8 +3988,10 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
*/
bond_for_each_slave(bond, slave, i) {
- pr_debug("s %p s->p %p c_m %p\n", slave,
- slave->prev, slave->dev->netdev_ops->ndo_change_mtu);
+ pr_debug("s %p s->p %p c_m %p\n",
+ slave,
+ slave->prev,
+ slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -4108,8 +4021,8 @@ unwind:
tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
if (tmp_res) {
- pr_debug("unwind err %d dev %s\n", tmp_res,
- slave->dev->name);
+ pr_debug("unwind err %d dev %s\n",
+ tmp_res, slave->dev->name);
}
}
@@ -4135,7 +4048,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
return bond_alb_set_mac_address(bond_dev, addr);
- pr_debug("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None"));
+ pr_debug("bond=%p, name=%s\n",
+ bond, bond_dev ? bond_dev->name : "None");
/*
* If fail_over_mac is set to active, do nothing and return
@@ -4200,8 +4114,8 @@ unwind:
tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
if (tmp_res) {
- pr_debug("unwind err %d dev %s\n", tmp_res,
- slave->dev->name);
+ pr_debug("unwind err %d dev %s\n",
+ tmp_res, slave->dev->name);
}
}
@@ -4357,9 +4271,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
if (tx_dev) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
- pr_err(DRV_NAME
- ": %s: Error: bond_xmit_broadcast(): "
- "skb_clone() failed\n",
+ pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
bond_dev->name);
continue;
}
@@ -4425,8 +4337,8 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return bond_alb_xmit(skb, dev);
default:
/* Should never happen, mode already checked */
- pr_err(DRV_NAME ": %s: Error: Unknown bonding mode %d\n",
- dev->name, bond->params.mode);
+ pr_err("%s: Error: Unknown bonding mode %d\n",
+ dev->name, bond->params.mode);
WARN_ON_ONCE(1);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -4462,10 +4374,8 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
break;
default:
/* Should never happen, mode already checked */
- pr_err(DRV_NAME
- ": %s: Error: Unknown bonding mode %d\n",
- bond_dev->name,
- mode);
+ pr_err("%s: Error: Unknown bonding mode %d\n",
+ bond_dev->name, mode);
break;
}
}
@@ -4650,8 +4560,7 @@ static int bond_check_params(struct bond_params *params)
if (mode) {
bond_mode = bond_parse_parm(mode, bond_mode_tbl);
if (bond_mode == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid bonding mode \"%s\"\n",
+ pr_err("Error: Invalid bonding mode \"%s\"\n",
mode == NULL ? "NULL" : mode);
return -EINVAL;
}
@@ -4660,16 +4569,13 @@ static int bond_check_params(struct bond_params *params)
if (xmit_hash_policy) {
if ((bond_mode != BOND_MODE_XOR) &&
(bond_mode != BOND_MODE_8023AD)) {
- pr_info(DRV_NAME
- ": xmit_hash_policy param is irrelevant in"
- " mode %s\n",
+ pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
xmit_hashtype = bond_parse_parm(xmit_hash_policy,
xmit_hashtype_tbl);
if (xmit_hashtype == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid xmit_hash_policy \"%s\"\n",
+ pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
xmit_hash_policy == NULL ? "NULL" :
xmit_hash_policy);
return -EINVAL;
@@ -4679,14 +4585,12 @@ static int bond_check_params(struct bond_params *params)
if (lacp_rate) {
if (bond_mode != BOND_MODE_8023AD) {
- pr_info(DRV_NAME
- ": lacp_rate param is irrelevant in mode %s\n",
- bond_mode_name(bond_mode));
+ pr_info("lacp_rate param is irrelevant in mode %s\n",
+ bond_mode_name(bond_mode));
} else {
lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl);
if (lacp_fast == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid lacp rate \"%s\"\n",
+ pr_err("Error: Invalid lacp rate \"%s\"\n",
lacp_rate == NULL ? "NULL" : lacp_rate);
return -EINVAL;
}
@@ -4696,82 +4600,64 @@ static int bond_check_params(struct bond_params *params)
if (ad_select) {
params->ad_select = bond_parse_parm(ad_select, ad_select_tbl);
if (params->ad_select == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid ad_select \"%s\"\n",
+ pr_err("Error: Invalid ad_select \"%s\"\n",
ad_select == NULL ? "NULL" : ad_select);
return -EINVAL;
}
if (bond_mode != BOND_MODE_8023AD) {
- pr_warning(DRV_NAME
- ": ad_select param only affects 802.3ad mode\n");
+ pr_warning("ad_select param only affects 802.3ad mode\n");
}
} else {
params->ad_select = BOND_AD_STABLE;
}
if (max_bonds < 0) {
- pr_warning(DRV_NAME
- ": Warning: max_bonds (%d) not in range %d-%d, so it "
- "was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
- max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+ pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
+ max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
max_bonds = BOND_DEFAULT_MAX_BONDS;
}
if (miimon < 0) {
- pr_warning(DRV_NAME
- ": Warning: miimon module parameter (%d), "
- "not in range 0-%d, so it was reset to %d\n",
- miimon, INT_MAX, BOND_LINK_MON_INTERV);
+ pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to %d\n",
+ miimon, INT_MAX, BOND_LINK_MON_INTERV);
miimon = BOND_LINK_MON_INTERV;
}
if (updelay < 0) {
- pr_warning(DRV_NAME
- ": Warning: updelay module parameter (%d), "
- "not in range 0-%d, so it was reset to 0\n",
- updelay, INT_MAX);
+ pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ updelay, INT_MAX);
updelay = 0;
}
if (downdelay < 0) {
- pr_warning(DRV_NAME
- ": Warning: downdelay module parameter (%d), "
- "not in range 0-%d, so it was reset to 0\n",
- downdelay, INT_MAX);
+ pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ downdelay, INT_MAX);
downdelay = 0;
}
if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warning(DRV_NAME
- ": Warning: use_carrier module parameter (%d), "
- "not of valid value (0/1), so it was set to 1\n",
- use_carrier);
+ pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
+ use_carrier);
use_carrier = 1;
}
if (num_grat_arp < 0 || num_grat_arp > 255) {
- pr_warning(DRV_NAME
- ": Warning: num_grat_arp (%d) not in range 0-255 so it "
- "was reset to 1 \n", num_grat_arp);
+ pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n",
+ num_grat_arp);
num_grat_arp = 1;
}
if (num_unsol_na < 0 || num_unsol_na > 255) {
- pr_warning(DRV_NAME
- ": Warning: num_unsol_na (%d) not in range 0-255 so it "
- "was reset to 1 \n", num_unsol_na);
+ pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n",
+ num_unsol_na);
num_unsol_na = 1;
}
/* reset values for 802.3ad */
if (bond_mode == BOND_MODE_8023AD) {
if (!miimon) {
- pr_warning(DRV_NAME
- ": Warning: miimon must be specified, "
- "otherwise bonding will not detect link "
- "failure, speed and duplex which are "
- "essential for 802.3ad operation\n");
+ pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
pr_warning("Forcing miimon to 100msec\n");
miimon = 100;
}
@@ -4781,24 +4667,15 @@ static int bond_check_params(struct bond_params *params)
if ((bond_mode == BOND_MODE_TLB) ||
(bond_mode == BOND_MODE_ALB)) {
if (!miimon) {
- pr_warning(DRV_NAME
- ": Warning: miimon must be specified, "
- "otherwise bonding will not detect link "
- "failure and link speed which are essential "
- "for TLB/ALB load balancing\n");
+ pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
pr_warning("Forcing miimon to 100msec\n");
miimon = 100;
}
}
if (bond_mode == BOND_MODE_ALB) {
- pr_notice(DRV_NAME
- ": In ALB mode you might experience client "
- "disconnections upon reconnection of a link if the "
- "bonding module updelay parameter (%d msec) is "
- "incompatible with the forwarding delay time of the "
- "switch\n",
- updelay);
+ pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
+ updelay);
}
if (!miimon) {
@@ -4806,49 +4683,37 @@ static int bond_check_params(struct bond_params *params)
/* just warn the user the up/down delay will have
* no effect since miimon is zero...
*/
- pr_warning(DRV_NAME
- ": Warning: miimon module parameter not set "
- "and updelay (%d) or downdelay (%d) module "
- "parameter is set; updelay and downdelay have "
- "no effect unless miimon is set\n",
- updelay, downdelay);
+ pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
+ updelay, downdelay);
}
} else {
/* don't allow arp monitoring */
if (arp_interval) {
- pr_warning(DRV_NAME
- ": Warning: miimon (%d) and arp_interval (%d) "
- "can't be used simultaneously, disabling ARP "
- "monitoring\n",
- miimon, arp_interval);
+ pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
+ miimon, arp_interval);
arp_interval = 0;
}
if ((updelay % miimon) != 0) {
- pr_warning(DRV_NAME
- ": Warning: updelay (%d) is not a multiple "
- "of miimon (%d), updelay rounded to %d ms\n",
- updelay, miimon, (updelay / miimon) * miimon);
+ pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ updelay, miimon,
+ (updelay / miimon) * miimon);
}
updelay /= miimon;
if ((downdelay % miimon) != 0) {
- pr_warning(DRV_NAME
- ": Warning: downdelay (%d) is not a multiple "
- "of miimon (%d), downdelay rounded to %d ms\n",
- downdelay, miimon,
- (downdelay / miimon) * miimon);
+ pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
+ downdelay, miimon,
+ (downdelay / miimon) * miimon);
}
downdelay /= miimon;
}
if (arp_interval < 0) {
- pr_warning(DRV_NAME
- ": Warning: arp_interval module parameter (%d) "
- ", not in range 0-%d, so it was reset to %d\n",
- arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
+ pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to %d\n",
+ arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
arp_interval = BOND_LINK_ARP_INTERV;
}
@@ -4858,10 +4723,8 @@ static int bond_check_params(struct bond_params *params)
/* not complete check, but should be good enough to
catch mistakes */
if (!isdigit(arp_ip_target[arp_ip_count][0])) {
- pr_warning(DRV_NAME
- ": Warning: bad arp_ip_target module parameter "
- "(%s), ARP monitoring will not be performed\n",
- arp_ip_target[arp_ip_count]);
+ pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
+ arp_ip_target[arp_ip_count]);
arp_interval = 0;
} else {
__be32 ip = in_aton(arp_ip_target[arp_ip_count]);
@@ -4871,31 +4734,25 @@ static int bond_check_params(struct bond_params *params)
if (arp_interval && !arp_ip_count) {
/* don't allow arping if no arp_ip_target given... */
- pr_warning(DRV_NAME
- ": Warning: arp_interval module parameter (%d) "
- "specified without providing an arp_ip_target "
- "parameter, arp_interval was reset to 0\n",
- arp_interval);
+ pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
+ arp_interval);
arp_interval = 0;
}
if (arp_validate) {
if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err(DRV_NAME
- ": arp_validate only supported in active-backup mode\n");
+ pr_err("arp_validate only supported in active-backup mode\n");
return -EINVAL;
}
if (!arp_interval) {
- pr_err(DRV_NAME
- ": arp_validate requires arp_interval\n");
+ pr_err("arp_validate requires arp_interval\n");
return -EINVAL;
}
arp_validate_value = bond_parse_parm(arp_validate,
arp_validate_tbl);
if (arp_validate_value == -1) {
- pr_err(DRV_NAME
- ": Error: invalid arp_validate \"%s\"\n",
+ pr_err("Error: invalid arp_validate \"%s\"\n",
arp_validate == NULL ? "NULL" : arp_validate);
return -EINVAL;
}
@@ -4903,17 +4760,14 @@ static int bond_check_params(struct bond_params *params)
arp_validate_value = 0;
if (miimon) {
- pr_info(DRV_NAME
- ": MII link monitoring set to %d ms\n",
- miimon);
+ pr_info("MII link monitoring set to %d ms\n", miimon);
} else if (arp_interval) {
int i;
- pr_info(DRV_NAME ": ARP monitoring set to %d ms,"
- " validate %s, with %d target(s):",
- arp_interval,
- arp_validate_tbl[arp_validate_value].modename,
- arp_ip_count);
+ pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
+ arp_interval,
+ arp_validate_tbl[arp_validate_value].modename,
+ arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
pr_info(" %s", arp_ip_target[i]);
@@ -4924,21 +4778,15 @@ static int bond_check_params(struct bond_params *params)
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- pr_warning(DRV_NAME
- ": Warning: either miimon or arp_interval and "
- "arp_ip_target module parameters must be specified, "
- "otherwise bonding will not detect link failures! see "
- "bonding.txt for details.\n");
+ pr_warning("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
}
if (primary && !USES_PRIMARY(bond_mode)) {
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
- pr_warning(DRV_NAME
- ": Warning: %s primary device specified but has no "
- "effect in %s mode\n",
- primary, bond_mode_name(bond_mode));
+ pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
+ primary, bond_mode_name(bond_mode));
primary = NULL;
}
@@ -4946,8 +4794,7 @@ static int bond_check_params(struct bond_params *params)
primary_reselect_value = bond_parse_parm(primary_reselect,
pri_reselect_tbl);
if (primary_reselect_value == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid primary_reselect \"%s\"\n",
+ pr_err("Error: Invalid primary_reselect \"%s\"\n",
primary_reselect ==
NULL ? "NULL" : primary_reselect);
return -EINVAL;
@@ -4960,16 +4807,13 @@ static int bond_check_params(struct bond_params *params)
fail_over_mac_value = bond_parse_parm(fail_over_mac,
fail_over_mac_tbl);
if (fail_over_mac_value == -1) {
- pr_err(DRV_NAME
- ": Error: invalid fail_over_mac \"%s\"\n",
+ pr_err("Error: invalid fail_over_mac \"%s\"\n",
arp_validate == NULL ? "NULL" : arp_validate);
return -EINVAL;
}
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
- pr_warning(DRV_NAME
- ": Warning: fail_over_mac only affects "
- "active-backup mode.\n");
+ pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
} else {
fail_over_mac_value = BOND_FOM_NONE;
}
@@ -5076,8 +4920,7 @@ int bond_create(struct net *net, const char *name)
bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
bond_setup);
if (!bond_dev) {
- pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n",
- name);
+ pr_err("%s: eek! can't alloc netdev!\n", name);
res = -ENOMEM;
goto out;
}
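
Editorial aside (not part of the patch): the bond_main.c hunks above drop the explicit DRV_NAME ": " prefix from pr_err()/pr_info()/pr_warning() calls because the driver now defines pr_fmt(), which makes the pr_* macros in <linux/printk.h> prepend the module name to every message automatically. Below is a minimal userspace sketch of that mechanism, under stated assumptions: the hard-coded "bonding: " string stands in for KBUILD_MODNAME and fprintf() stands in for printk(); it is an illustration, not kernel code.

    #include <stdio.h>

    /* Must be defined before the pr_* macros, mirroring the kernel convention */
    #define pr_fmt(fmt) "bonding: " fmt

    /* Simplified stand-in for the kernel's pr_err() */
    #define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
        /* Prints: "bonding: eth0: Error: Unknown bonding mode 7" */
        pr_err("%s: Error: Unknown bonding mode %d\n", "eth0", 7);
        return 0;
    }

Because the prefix is applied once in the macro, each call site can shrink to a single format string, which is what makes the consolidation above mostly mechanical.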
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 4e00b4f..5acd557 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -19,6 +19,9 @@
* file called LICENSE.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -109,11 +112,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
goto err_no_cmd;
if (command[0] == '+') {
- pr_info(DRV_NAME
- ": %s is being created...\n", ifname);
+ pr_info("%s is being created...\n", ifname);
rv = bond_create(net, ifname);
if (rv) {
- pr_info(DRV_NAME ": Bond creation failed.\n");
+ pr_info("Bond creation failed.\n");
res = rv;
}
} else if (command[0] == '-') {
@@ -122,12 +124,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
rtnl_lock();
bond_dev = bond_get_by_name(net, ifname);
if (bond_dev) {
- pr_info(DRV_NAME ": %s is being deleted...\n",
- ifname);
+ pr_info("%s is being deleted...\n", ifname);
unregister_netdevice(bond_dev);
} else {
- pr_err(DRV_NAME ": unable to delete non-existent %s\n",
- ifname);
+ pr_err("unable to delete non-existent %s\n", ifname);
res = -ENODEV;
}
rtnl_unlock();
@@ -140,8 +140,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
return res;
err_no_cmd:
- pr_err(DRV_NAME ": no command found in bonding_masters."
- " Use +ifname or -ifname.\n");
+ pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n");
return -EPERM;
}
@@ -225,8 +224,8 @@ static ssize_t bonding_store_slaves(struct device *d,
/* Quick sanity check -- is the bond interface up? */
if (!(bond->dev->flags & IFF_UP)) {
- pr_warning(DRV_NAME ": %s: doing slave updates when "
- "interface is down.\n", bond->dev->name);
+ pr_warning("%s: doing slave updates when interface is down.\n",
+ bond->dev->name);
}
/* Note: We can't hold bond->lock here, as bond_create grabs it. */
@@ -247,17 +246,14 @@ static ssize_t bonding_store_slaves(struct device *d,
dev = __dev_get_by_name(dev_net(bond->dev), ifname);
if (!dev) {
- pr_info(DRV_NAME
- ": %s: Interface %s does not exist!\n",
- bond->dev->name, ifname);
+ pr_info("%s: Interface %s does not exist!\n",
+ bond->dev->name, ifname);
ret = -ENODEV;
goto out;
}
if (dev->flags & IFF_UP) {
- pr_err(DRV_NAME
- ": %s: Error: Unable to enslave %s "
- "because it is already up.\n",
+ pr_err("%s: Error: Unable to enslave %s because it is already up.\n",
bond->dev->name, dev->name);
ret = -EPERM;
goto out;
@@ -266,8 +262,7 @@ static ssize_t bonding_store_slaves(struct device *d,
read_lock(&bond->lock);
bond_for_each_slave(bond, slave, i)
if (slave->dev == dev) {
- pr_err(DRV_NAME
- ": %s: Interface %s is already enslaved!\n",
+ pr_err("%s: Interface %s is already enslaved!\n",
bond->dev->name, ifname);
ret = -EPERM;
read_unlock(&bond->lock);
@@ -275,8 +270,7 @@ static ssize_t bonding_store_slaves(struct device *d,
}
read_unlock(&bond->lock);
- pr_info(DRV_NAME ": %s: Adding slave %s.\n",
- bond->dev->name, ifname);
+ pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname);
/* If this is the first slave, then we need to set
the master's hardware address to be the same as the
@@ -313,7 +307,7 @@ static ssize_t bonding_store_slaves(struct device *d,
break;
}
if (dev) {
- pr_info(DRV_NAME ": %s: Removing slave %s\n",
+ pr_info("%s: Removing slave %s\n",
bond->dev->name, dev->name);
res = bond_release(bond->dev, dev);
if (res) {
@@ -323,16 +317,16 @@ static ssize_t bonding_store_slaves(struct device *d,
/* set the slave MTU to the default */
dev_set_mtu(dev, original_mtu);
} else {
- pr_err(DRV_NAME ": unable to remove non-existent"
- " slave %s for bond %s.\n",
- ifname, bond->dev->name);
+ pr_err("unable to remove non-existent slave %s for bond %s.\n",
+ ifname, bond->dev->name);
ret = -ENODEV;
}
goto out;
}
err_no_cmd:
- pr_err(DRV_NAME ": no command found in slaves file for bond %s. Use +ifname or -ifname.\n", bond->dev->name);
+ pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+ bond->dev->name);
ret = -EPERM;
out:
@@ -365,18 +359,16 @@ static ssize_t bonding_store_mode(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- pr_err(DRV_NAME ": unable to update mode of %s"
- " because interface is up.\n", bond->dev->name);
+ pr_err("unable to update mode of %s because interface is up.\n",
+ bond->dev->name);
ret = -EPERM;
goto out;
}
new_value = bond_parse_parm(buf, bond_mode_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid mode value %.*s.\n",
- bond->dev->name,
- (int)strlen(buf) - 1, buf);
+ pr_err("%s: Ignoring invalid mode value %.*s.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
ret = -EINVAL;
goto out;
} else {
@@ -388,8 +380,8 @@ static ssize_t bonding_store_mode(struct device *d,
bond->params.mode = new_value;
bond_set_mode_ops(bond, bond->params.mode);
- pr_info(DRV_NAME ": %s: setting mode to %s (%d).\n",
- bond->dev->name, bond_mode_tbl[new_value].modename,
+ pr_info("%s: setting mode to %s (%d).\n",
+ bond->dev->name, bond_mode_tbl[new_value].modename,
new_value);
}
out:
@@ -421,8 +413,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- pr_err(DRV_NAME
- "%s: Interface is up. Unable to update xmit policy.\n",
+ pr_err("%s: Interface is up. Unable to update xmit policy.\n",
bond->dev->name);
ret = -EPERM;
goto out;
@@ -430,8 +421,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid xmit hash policy value %.*s.\n",
+ pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n",
bond->dev->name,
(int)strlen(buf) - 1, buf);
ret = -EINVAL;
@@ -439,7 +429,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
} else {
bond->params.xmit_policy = new_value;
bond_set_mode_ops(bond, bond->params.mode);
- pr_info(DRV_NAME ": %s: setting xmit hash policy to %s (%d).\n",
+ pr_info("%s: setting xmit hash policy to %s (%d).\n",
bond->dev->name,
xmit_hashtype_tbl[new_value].modename, new_value);
}
@@ -472,20 +462,18 @@ static ssize_t bonding_store_arp_validate(struct device *d,
new_value = bond_parse_parm(buf, arp_validate_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid arp_validate value %s\n",
+ pr_err("%s: Ignoring invalid arp_validate value %s\n",
bond->dev->name, buf);
return -EINVAL;
}
if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
- pr_err(DRV_NAME
- ": %s: arp_validate only supported in active-backup mode.\n",
+ pr_err("%s: arp_validate only supported in active-backup mode.\n",
bond->dev->name);
return -EINVAL;
}
- pr_info(DRV_NAME ": %s: setting arp_validate to %s (%d).\n",
- bond->dev->name, arp_validate_tbl[new_value].modename,
- new_value);
+ pr_info("%s: setting arp_validate to %s (%d).\n",
+ bond->dev->name, arp_validate_tbl[new_value].modename,
+ new_value);
if (!bond->params.arp_validate && new_value)
bond_register_arp(bond);
@@ -523,24 +511,22 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->slave_cnt != 0) {
- pr_err(DRV_NAME
- ": %s: Can't alter fail_over_mac with slaves in bond.\n",
+ pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
bond->dev->name);
return -EPERM;
}
new_value = bond_parse_parm(buf, fail_over_mac_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid fail_over_mac value %s.\n",
+ pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
bond->dev->name, buf);
return -EINVAL;
}
bond->params.fail_over_mac = new_value;
- pr_info(DRV_NAME ": %s: Setting fail_over_mac to %s (%d).\n",
- bond->dev->name, fail_over_mac_tbl[new_value].modename,
- new_value);
+ pr_info("%s: Setting fail_over_mac to %s (%d).\n",
+ bond->dev->name, fail_over_mac_tbl[new_value].modename,
+ new_value);
return count;
}
@@ -571,31 +557,26 @@ static ssize_t bonding_store_arp_interval(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no arp_interval value specified.\n",
+ pr_err("%s: no arp_interval value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
+ pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
bond->dev->name, new_value, INT_MAX);
ret = -EINVAL;
goto out;
}
- pr_info(DRV_NAME
- ": %s: Setting ARP monitoring interval to %d.\n",
- bond->dev->name, new_value);
+ pr_info("%s: Setting ARP monitoring interval to %d.\n",
+ bond->dev->name, new_value);
bond->params.arp_interval = new_value;
if (bond->params.arp_interval)
bond->dev->priv_flags |= IFF_MASTER_ARPMON;
if (bond->params.miimon) {
- pr_info(DRV_NAME
- ": %s: ARP monitoring cannot be used with MII monitoring. "
- "%s Disabling MII monitoring.\n",
- bond->dev->name, bond->dev->name);
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
if (delayed_work_pending(&bond->mii_work)) {
cancel_delayed_work(&bond->mii_work);
@@ -603,10 +584,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
}
}
if (!bond->params.arp_targets[0]) {
- pr_info(DRV_NAME
- ": %s: ARP monitoring has been set up, "
- "but no ARP targets have been specified.\n",
- bond->dev->name);
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
@@ -666,8 +645,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
/* look for adds */
if (buf[0] == '+') {
if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
- pr_err(DRV_NAME
- ": %s: invalid ARP target %pI4 specified for addition\n",
+ pr_err("%s: invalid ARP target %pI4 specified for addition\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
@@ -675,23 +653,20 @@ static ssize_t bonding_store_arp_targets(struct device *d,
/* look for an empty slot to put the target in, and check for dupes */
for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
if (targets[i] == newtarget) { /* duplicate */
- pr_err(DRV_NAME
- ": %s: ARP target %pI4 is already present\n",
+ pr_err("%s: ARP target %pI4 is already present\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
}
if (targets[i] == 0) {
- pr_info(DRV_NAME
- ": %s: adding ARP target %pI4.\n",
- bond->dev->name, &newtarget);
+ pr_info("%s: adding ARP target %pI4.\n",
+ bond->dev->name, &newtarget);
done = 1;
targets[i] = newtarget;
}
}
if (!done) {
- pr_err(DRV_NAME
- ": %s: ARP target table is full!\n",
+ pr_err("%s: ARP target table is full!\n",
bond->dev->name);
ret = -EINVAL;
goto out;
@@ -699,8 +674,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
} else if (buf[0] == '-') {
if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
- pr_err(DRV_NAME
- ": %s: invalid ARP target %pI4 specified for removal\n",
+ pr_err("%s: invalid ARP target %pI4 specified for removal\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
@@ -709,9 +683,8 @@ static ssize_t bonding_store_arp_targets(struct device *d,
for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
if (targets[i] == newtarget) {
int j;
- pr_info(DRV_NAME
- ": %s: removing ARP target %pI4.\n",
- bond->dev->name, &newtarget);
+ pr_info("%s: removing ARP target %pI4.\n",
+ bond->dev->name, &newtarget);
for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
targets[j] = targets[j+1];
@@ -720,16 +693,14 @@ static ssize_t bonding_store_arp_targets(struct device *d,
}
}
if (!done) {
- pr_info(DRV_NAME
- ": %s: unable to remove nonexistent ARP target %pI4.\n",
- bond->dev->name, &newtarget);
+ pr_info("%s: unable to remove nonexistent ARP target %pI4.\n",
+ bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
}
} else {
- pr_err(DRV_NAME ": no command found in arp_ip_targets file"
- " for bond %s. Use +<addr> or -<addr>.\n",
- bond->dev->name);
+ pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+ bond->dev->name);
ret = -EPERM;
goto out;
}
@@ -761,41 +732,34 @@ static ssize_t bonding_store_downdelay(struct device *d,
struct bonding *bond = to_bond(d);
if (!(bond->params.miimon)) {
- pr_err(DRV_NAME
- ": %s: Unable to set down delay as MII monitoring is disabled\n",
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
bond->dev->name);
ret = -EPERM;
goto out;
}
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no down delay value specified.\n",
- bond->dev->name);
+ pr_err("%s: no down delay value specified.\n", bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+ pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
} else {
if ((new_value % bond->params.miimon) != 0) {
- pr_warning(DRV_NAME
- ": %s: Warning: down delay (%d) is not a "
- "multiple of miimon (%d), delay rounded "
- "to %d ms\n",
+ pr_warning("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
bond->dev->name, new_value,
bond->params.miimon,
(new_value / bond->params.miimon) *
bond->params.miimon);
}
bond->params.downdelay = new_value / bond->params.miimon;
- pr_info(DRV_NAME ": %s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: Setting down delay to %d.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
}
@@ -823,41 +787,35 @@ static ssize_t bonding_store_updelay(struct device *d,
struct bonding *bond = to_bond(d);
if (!(bond->params.miimon)) {
- pr_err(DRV_NAME
- ": %s: Unable to set up delay as MII monitoring is disabled\n",
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
bond->dev->name);
ret = -EPERM;
goto out;
}
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no up delay value specified.\n",
+ pr_err("%s: no up delay value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+ pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
} else {
if ((new_value % bond->params.miimon) != 0) {
- pr_warning(DRV_NAME
- ": %s: Warning: up delay (%d) is not a "
- "multiple of miimon (%d), updelay rounded "
- "to %d ms\n",
+ pr_warning("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
bond->dev->name, new_value,
bond->params.miimon,
(new_value / bond->params.miimon) *
bond->params.miimon);
}
bond->params.updelay = new_value / bond->params.miimon;
- pr_info(DRV_NAME ": %s: Setting up delay to %d.\n",
- bond->dev->name, bond->params.updelay * bond->params.miimon);
-
+ pr_info("%s: Setting up delay to %d.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
}
out:
@@ -889,16 +847,14 @@ static ssize_t bonding_store_lacp(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- pr_err(DRV_NAME
- ": %s: Unable to update LACP rate because interface is up.\n",
+ pr_err("%s: Unable to update LACP rate because interface is up.\n",
bond->dev->name);
ret = -EPERM;
goto out;
}
if (bond->params.mode != BOND_MODE_8023AD) {
- pr_err(DRV_NAME
- ": %s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
+ pr_err("%s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
bond->dev->name);
ret = -EPERM;
goto out;
@@ -908,12 +864,11 @@ static ssize_t bonding_store_lacp(struct device *d,
if ((new_value == 1) || (new_value == 0)) {
bond->params.lacp_fast = new_value;
- pr_info(DRV_NAME ": %s: Setting LACP rate to %s (%d).\n",
+ pr_info("%s: Setting LACP rate to %s (%d).\n",
bond->dev->name, bond_lacp_tbl[new_value].modename,
new_value);
} else {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid LACP rate value %.*s.\n",
+ pr_err("%s: Ignoring invalid LACP rate value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
ret = -EINVAL;
}
@@ -943,9 +898,8 @@ static ssize_t bonding_store_ad_select(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- pr_err(DRV_NAME
- ": %s: Unable to update ad_select because interface "
- "is up.\n", bond->dev->name);
+ pr_err("%s: Unable to update ad_select because interface is up.\n",
+ bond->dev->name);
ret = -EPERM;
goto out;
}
@@ -954,13 +908,11 @@ static ssize_t bonding_store_ad_select(struct device *d,
if (new_value != -1) {
bond->params.ad_select = new_value;
- pr_info(DRV_NAME
- ": %s: Setting ad_select to %s (%d).\n",
- bond->dev->name, ad_select_tbl[new_value].modename,
- new_value);
+ pr_info("%s: Setting ad_select to %s (%d).\n",
+ bond->dev->name, ad_select_tbl[new_value].modename,
+ new_value);
} else {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid ad_select value %.*s.\n",
+ pr_err("%s: Ignoring invalid ad_select value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
ret = -EINVAL;
}
@@ -990,15 +942,13 @@ static ssize_t bonding_store_n_grat_arp(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no num_grat_arp value specified.\n",
+ pr_err("%s: no num_grat_arp value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0 || new_value > 255) {
- pr_err(DRV_NAME
- ": %s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
+ pr_err("%s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
bond->dev->name, new_value);
ret = -EINVAL;
goto out;
@@ -1031,16 +981,14 @@ static ssize_t bonding_store_n_unsol_na(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no num_unsol_na value specified.\n",
+ pr_err("%s: no num_unsol_na value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0 || new_value > 255) {
- pr_err(DRV_NAME
- ": %s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
+ pr_err("%s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
bond->dev->name, new_value);
ret = -EINVAL;
goto out;
@@ -1075,40 +1023,31 @@ static ssize_t bonding_store_miimon(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no miimon value specified.\n",
+ pr_err("%s: no miimon value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Invalid miimon value %d not in range %d-%d; rejected.\n",
+ pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
} else {
- pr_info(DRV_NAME
- ": %s: Setting MII monitoring interval to %d.\n",
- bond->dev->name, new_value);
+ pr_info("%s: Setting MII monitoring interval to %d.\n",
+ bond->dev->name, new_value);
bond->params.miimon = new_value;
if (bond->params.updelay)
- pr_info(DRV_NAME
- ": %s: Note: Updating updelay (to %d) "
- "since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
- pr_info(DRV_NAME
- ": %s: Note: Updating downdelay (to %d) "
- "since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
if (bond->params.arp_interval) {
- pr_info(DRV_NAME
- ": %s: MII monitoring cannot be used with "
- "ARP monitoring. Disabling ARP monitoring...\n",
- bond->dev->name);
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ bond->dev->name);
bond->params.arp_interval = 0;
bond->dev->priv_flags &= ~IFF_MASTER_ARPMON;
if (bond->params.arp_validate) {
@@ -1176,17 +1115,15 @@ static ssize_t bonding_store_primary(struct device *d,
write_lock_bh(&bond->curr_slave_lock);
if (!USES_PRIMARY(bond->params.mode)) {
- pr_info(DRV_NAME
- ": %s: Unable to set primary slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
+ pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
+ bond->dev->name, bond->dev->name, bond->params.mode);
} else {
bond_for_each_slave(bond, slave, i) {
if (strnicmp
(slave->dev->name, buf,
strlen(slave->dev->name)) == 0) {
- pr_info(DRV_NAME
- ": %s: Setting %s as primary slave.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: Setting %s as primary slave.\n",
+ bond->dev->name, slave->dev->name);
bond->primary_slave = slave;
strcpy(bond->params.primary, slave->dev->name);
bond_select_active_slave(bond);
@@ -1197,15 +1134,13 @@ static ssize_t bonding_store_primary(struct device *d,
/* if we got here, then we didn't match the name of any slave */
if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info(DRV_NAME
- ": %s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
bond->primary_slave = NULL;
bond_select_active_slave(bond);
} else {
- pr_info(DRV_NAME
- ": %s: Unable to set %.*s as primary slave as it is not a slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ pr_info("%s: Unable to set %.*s as primary slave as it is not a slave.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
}
}
out:
@@ -1244,8 +1179,7 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
new_value = bond_parse_parm(buf, pri_reselect_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid primary_reselect value %.*s.\n",
+ pr_err("%s: Ignoring invalid primary_reselect value %.*s.\n",
bond->dev->name,
(int) strlen(buf) - 1, buf);
ret = -EINVAL;
@@ -1253,7 +1187,7 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
}
bond->params.primary_reselect = new_value;
- pr_info(DRV_NAME ": %s: setting primary_reselect to %s (%d).\n",
+ pr_info("%s: setting primary_reselect to %s (%d).\n",
bond->dev->name, pri_reselect_tbl[new_value].modename,
new_value);
@@ -1291,20 +1225,18 @@ static ssize_t bonding_store_carrier(struct device *d,
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no use_carrier value specified.\n",
+ pr_err("%s: no use_carrier value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if ((new_value == 0) || (new_value == 1)) {
bond->params.use_carrier = new_value;
- pr_info(DRV_NAME ": %s: Setting use_carrier to %d.\n",
- bond->dev->name, new_value);
+ pr_info("%s: Setting use_carrier to %d.\n",
+ bond->dev->name, new_value);
} else {
- pr_info(DRV_NAME
- ": %s: Ignoring invalid use_carrier value %d.\n",
- bond->dev->name, new_value);
+ pr_info("%s: Ignoring invalid use_carrier value %d.\n",
+ bond->dev->name, new_value);
}
out:
return count;
@@ -1349,8 +1281,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
write_lock_bh(&bond->curr_slave_lock);
if (!USES_PRIMARY(bond->params.mode))
- pr_info(DRV_NAME ": %s: Unable to change active slave;"
- " %s is in mode %d\n",
+ pr_info("%s: Unable to change active slave; %s is in mode %d\n",
bond->dev->name, bond->dev->name, bond->params.mode);
else {
bond_for_each_slave(bond, slave, i) {
@@ -1361,9 +1292,9 @@ static ssize_t bonding_store_active_slave(struct device *d,
new_active = slave;
if (new_active == old_active) {
/* do nothing */
- pr_info(DRV_NAME
- ": %s: %s is already the current active slave.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: %s is already the current active slave.\n",
+ bond->dev->name,
+ slave->dev->name);
goto out;
}
else {
@@ -1371,16 +1302,15 @@ static ssize_t bonding_store_active_slave(struct device *d,
(old_active) &&
(new_active->link == BOND_LINK_UP) &&
IS_UP(new_active->dev)) {
- pr_info(DRV_NAME
- ": %s: Setting %s as active slave.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: Setting %s as active slave.\n",
+ bond->dev->name,
+ slave->dev->name);
bond_change_active_slave(bond, new_active);
}
else {
- pr_info(DRV_NAME
- ": %s: Could not set %s as active slave; "
- "either %s is down or the link is down.\n",
- bond->dev->name, slave->dev->name,
+ pr_info("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+ bond->dev->name,
+ slave->dev->name,
slave->dev->name);
}
goto out;
@@ -1391,14 +1321,12 @@ static ssize_t bonding_store_active_slave(struct device *d,
/* if we got here, then we didn't match the name of any slave */
if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info(DRV_NAME
- ": %s: Setting active slave to None.\n",
+ pr_info("%s: Setting active slave to None.\n",
bond->dev->name);
bond->primary_slave = NULL;
bond_select_active_slave(bond);
} else {
- pr_info(DRV_NAME ": %s: Unable to set %.*s"
- " as active slave as it is not a slave.\n",
+ pr_info("%s: Unable to set %.*s as active slave as it is not a slave.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
}
}
@@ -1600,8 +1528,7 @@ int bond_create_sysfs(void)
/* Is someone being kinky and naming a device bonding_master? */
if (__dev_get_by_name(&init_net,
class_attr_bonding_masters.attr.name))
- pr_err("network device named %s already "
- "exists in sysfs",
+ pr_err("network device named %s already exists in sysfs",
class_attr_bonding_masters.attr.name);
ret = 0;
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 8c485aa..05b7517 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -50,7 +50,7 @@ config CAN_TI_HECC
config CAN_MCP251X
tristate "Microchip MCP251x SPI CAN controllers"
- depends on CAN_DEV && SPI
+ depends on CAN_DEV && SPI && HAS_DMA
---help---
Driver for the Microchip MCP251x SPI CAN controllers.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index cbe3fce..166cc7e 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -474,7 +474,7 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
reg_msr = at91_read(priv, AT91_MSR(mb));
if (reg_msr & AT91_MSR_MRTR)
cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);
+ cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
@@ -1037,7 +1037,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || !irq) {
+ if (!res || irq <= 0) {
err = -ENODEV;
goto exit_put;
}
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index c7fc1de..0ec1524 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -392,7 +392,7 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
cf->can_id |= CAN_RTR_FLAG;
/* get data length code */
- cf->can_dlc = bfin_read16(&reg->chl[obj].dlc);
+ cf->can_dlc = get_can_dlc(bfin_read16(&reg->chl[obj].dlc) & 0xF);
/* get payload */
for (i = 0; i < 8; i += 2) {
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 78b1b69..1a72ca0 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -403,9 +403,8 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
for (i = 1; i < RXBDAT_OFF; i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
- len = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
- if (len > 8)
- len = 8;
+
+ len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
for (; i < (RXBDAT_OFF + len); i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
@@ -455,13 +454,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
}
/* Data length */
- frame->can_dlc = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
- if (frame->can_dlc > 8) {
- dev_warn(&spi->dev, "invalid frame recevied\n");
- priv->net->stats.rx_errors++;
- dev_kfree_skb(skb);
- return;
- }
+ frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
priv->net->stats.rx_packets++;
@@ -997,7 +990,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
goto error_tx_buf;
}
priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
- if (!priv->spi_tx_buf) {
+ if (!priv->spi_rx_buf) {
ret = -ENOMEM;
goto error_rx_buf;
}
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index bb06dfb..07346f88 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -297,7 +297,8 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
frame->can_id |= can_id >> 1;
if (can_id & 1)
frame->can_id |= CAN_RTR_FLAG;
- frame->can_dlc = in_8(&regs->rx.dlr) & 0xf;
+
+ frame->can_dlc = get_can_dlc(in_8(&regs->rx.dlr) & 0xf);
if (!(frame->can_id & CAN_RTR_FLAG)) {
void __iomem *data = &regs->rx.dsr1_0;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index b4ba88a..542a4f7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -293,15 +293,14 @@ static void sja1000_rx(struct net_device *dev)
uint8_t fi;
uint8_t dreg;
canid_t id;
- uint8_t dlc;
int i;
+ /* create zero'ed CAN frame buffer */
skb = alloc_can_skb(dev, &cf);
if (skb == NULL)
return;
fi = priv->read_reg(priv, REG_FI);
- dlc = fi & 0x0F;
if (fi & FI_FF) {
/* extended frame format (EFF) */
@@ -318,16 +317,15 @@ static void sja1000_rx(struct net_device *dev)
| (priv->read_reg(priv, REG_ID2) >> 5);
}
- if (fi & FI_RTR)
+ if (fi & FI_RTR) {
id |= CAN_RTR_FLAG;
+ } else {
+ cf->can_dlc = get_can_dlc(fi & 0x0F);
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = priv->read_reg(priv, dreg++);
+ }
cf->can_id = id;
- cf->can_dlc = dlc;
- for (i = 0; i < dlc; i++)
- cf->data[i] = priv->read_reg(priv, dreg++);
-
- while (i < 8)
- cf->data[i++] = 0;
/* release receive buffer */
priv->write_reg(priv, REG_CMR, CMD_RRB);
@@ -335,7 +333,7 @@ static void sja1000_rx(struct net_device *dev)
netif_rx(skb);
stats->rx_packets++;
- stats->rx_bytes += dlc;
+ stats->rx_bytes += cf->can_dlc;
}
static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 07e8016..5c993c2 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -552,7 +552,7 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
if (data & HECC_CANMCF_RTR)
cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = data & 0xF;
+ cf->can_dlc = get_can_dlc(data & 0xF);
data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
*(u32 *)(cf->data) = cpu_to_be32(data);
if (cf->can_dlc > 4) {
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 591eb0eb..efbb05c7 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -316,7 +316,7 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
return;
cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
- cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
+ cf->can_dlc = get_can_dlc(msg->msg.can_msg.length & 0xF);
if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME ||
msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
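
Editorial aside (not part of the patch): the CAN driver hunks above replace open-coded data-length checks with get_can_dlc(), which clamps the hardware-reported DLC to the CAN maximum of 8 bytes. The following standalone sketch reproduces that clamp; the real helper lives in <linux/can/dev.h> and is written with min_t(), so this is only an equivalent illustration.

    #include <stdio.h>

    typedef unsigned char u8;

    /* Equivalent of the kernel helper: never report more than 8 data bytes */
    static inline u8 get_can_dlc(const u8 dlc)
    {
        return dlc > 8 ? 8 : dlc;
    }

    int main(void)
    {
        /* A raw DLC field of 0xF from the controller is clamped to 8 */
        printf("%d %d\n", get_can_dlc(0x3), get_can_dlc(0xF)); /* prints "3 8" */
        return 0;
    }

Centralising the clamp is what lets the per-driver "if (len > 8) len = 8" and "invalid frame" branches above be deleted.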
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 6782223..8d0be26 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1163,7 +1163,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
priv->dev = dev;
priv->ring_size = 64;
priv->msg_enable = netif_msg_init(debug_level, 0xff);
- memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
+ memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index af93216..0e79cef 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1325,8 +1325,7 @@ net_open(struct net_device *dev)
write_irq(dev, lp->chip_type, dev->irq);
ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
if (ret) {
- if (net_debug)
- printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq);
+ printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq);
goto bad_out;
}
}
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd147..318a018 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct sge_fl *fl, int len, int complete)
{
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct port_info *pi = netdev_priv(qs->netdev);
struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
struct skb_frag_struct *rx_frag;
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
if (!nr_frags) {
offset = 2 + sizeof(struct cpl_rx_pkt);
- qs->lro_va = sd->pg_chunk.va + 2;
- }
- len -= offset;
+ cpl = qs->lro_va = sd->pg_chunk.va + 2;
- prefetch(qs->lro_va);
+ if ((pi->rx_offload & T3_RX_CSUM) &&
+ cpl->csum_valid && cpl->csum == htons(0xffff)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else
+ cpl = qs->lro_va;
+
+ len -= offset;
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
return;
skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- cpl = qs->lro_va;
if (unlikely(cpl->vlan_valid)) {
- struct net_device *dev = qs->netdev;
- struct port_info *pi = netdev_priv(dev);
struct vlan_group *grp = pi->vlan_grp;
if (likely(grp != NULL)) {
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 8edac89..33c4fe2 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2272,7 +2272,7 @@ static int emac_mii_reset(struct mii_bus *bus)
unsigned int clk_div;
int mdio_bus_freq = emac_bus_frequency;
- if (mdio_max_freq & mdio_bus_freq)
+ if (mdio_max_freq && mdio_bus_freq)
clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
else
clk_div = 0xFF;
@@ -2711,6 +2711,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
SET_ETHTOOL_OPS(ndev, &ethtool_ops);
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ clk_enable(emac_clk);
+
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
@@ -2720,7 +2722,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
goto netdev_reg_err;
}
- clk_enable(emac_clk);
/* MII/Phy intialisation, mdio bus registration */
emac_mii = mdiobus_alloc();
@@ -2760,6 +2761,7 @@ mdiobus_quit:
netdev_reg_err:
mdio_alloc_err:
+ clk_disable(emac_clk);
no_irq_res:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, res->end - res->start + 1);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 929701c..839fb2b 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1829,6 +1829,7 @@ static int e100_alloc_cbs(struct nic *nic)
&nic->cbs_dma_addr);
if (!nic->cbs)
return -ENOMEM;
+ memset(nic->cbs, 0, count * sizeof(struct cb));
for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
@@ -1837,7 +1838,6 @@ static int e100_alloc_cbs(struct nic *nic)
cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
cb->link = cpu_to_le32(nic->cbs_dma_addr +
((i+1) % count) * sizeof(struct cb));
- cb->skb = NULL;
}
nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2a567df..e8932db 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -326,6 +326,8 @@ struct e1000_adapter {
/* for ioport free */
int bars;
int need_ioport;
+
+ bool discarding;
};
enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7e855f9..7655436 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1698,18 +1698,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
- case E1000_RXBUFFER_256:
- rctl |= E1000_RCTL_SZ_256;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case E1000_RXBUFFER_512:
- rctl |= E1000_RCTL_SZ_512;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case E1000_RXBUFFER_1024:
- rctl |= E1000_RCTL_SZ_1024;
- rctl &= ~E1000_RCTL_BSEX;
- break;
case E1000_RXBUFFER_2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -2802,13 +2790,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i==0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
}
@@ -3176,13 +3164,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* however with the new *_jumbo_rx* routines, jumbo receives will use
* fragmented skbs */
- if (max_frame <= E1000_RXBUFFER_256)
- adapter->rx_buffer_len = E1000_RXBUFFER_256;
- else if (max_frame <= E1000_RXBUFFER_512)
- adapter->rx_buffer_len = E1000_RXBUFFER_512;
- else if (max_frame <= E1000_RXBUFFER_1024)
- adapter->rx_buffer_len = E1000_RXBUFFER_1024;
- else if (max_frame <= E1000_RXBUFFER_2048)
+ if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
@@ -3850,13 +3832,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
- * packet, also make sure the frame isn't just CRC only */
- if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+ * packet, if thats the case we need to toss it. In fact, we
+ * to toss every packet with the EOP bit clear and the next
+ * frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment
+ */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+ adapter->discarding = true;
+
+ if (adapter->discarding) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
/* recycle */
buffer_info->skb = skb;
+ if (status & E1000_RXD_STAT_EOP)
+ adapter->discarding = false;
goto next_desc;
}
@@ -4015,11 +4006,21 @@ check_page:
}
}
- if (!buffer_info->dma)
+ if (!buffer_info->dma) {
buffer_info->dma = pci_map_page(pdev,
buffer_info->page, 0,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ put_page(buffer_info->page);
+ dev_kfree_skb(skb);
+ buffer_info->page = NULL;
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->alloc_rx_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
+ }
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4110,6 +4111,13 @@ map_skb:
skb->data,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->alloc_rx_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
/*
* XXX if it was allocated cleanly it will never map to a
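
Editorial aside (not part of the patch): the e1000 hunks above add pci_dma_mapping_error() checks so that a failed pci_map_page()/pci_map_single() is unwound (buffer freed, alloc_rx_buff_failed counted, refill loop stopped) instead of handing the hardware a bogus address. The sketch below only shows that map-check-unwind shape in userspace; demo_map() and demo_mapping_error() are made-up stand-ins for the PCI DMA API, which takes a struct pci_dev and cannot be exercised outside the kernel.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t;

    /* Stand-in for pci_map_single(); returns 0 to model a mapping failure */
    static dma_addr_t demo_map(void *buf)
    {
        return buf ? (dma_addr_t)(uintptr_t)buf : 0;
    }

    /* Stand-in for pci_dma_mapping_error() */
    static int demo_mapping_error(dma_addr_t dma)
    {
        return dma == 0;
    }

    int main(void)
    {
        char rx_buf[256];
        dma_addr_t dma = demo_map(rx_buf);

        if (demo_mapping_error(dma)) {
            /* Mirror the patch: drop the buffer and defer the refill */
            fprintf(stderr, "rx buffer mapping failed, deferring refill\n");
            return 1;
        }
        printf("mapped at %#llx\n", (unsigned long long)dma);
        return 0;
    }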
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index c1a42cf..02d67d0 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -237,6 +237,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* check for link */
switch (hw->phy.media_type) {
@@ -1290,7 +1292,6 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
{
u32 ctrl;
- u32 led_ctrl;
s32 ret_val;
ctrl = er32(CTRL);
@@ -1305,11 +1306,6 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
break;
case e1000_phy_igp_2:
ret_val = e1000e_copper_link_setup_igp(hw);
- /* Setup activity LED */
- led_ctrl = er32(LEDCTL);
- led_ctrl &= IGP_ACTIVITY_LED_MASK;
- led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- ew32(LEDCTL, led_ctrl);
break;
default:
return -E1000_ERR_PHY;
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cebbd90..d236efa 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -421,6 +421,7 @@ struct e1000_info {
/* CRC Stripping defines */
#define FLAG2_CRC_STRIPPING (1 << 0)
#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
+#define FLAG2_IS_DISCARDING (1 << 2)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -582,7 +583,6 @@ extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
u16 data);
-extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 3028f23..e2aa3b7 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -224,6 +224,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
/* check for link */
switch (hw->phy.media_type) {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 2784cf4..eccf29b 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -818,6 +818,7 @@ struct e1000_mac_info {
u8 forced_speed_duplex;
+ bool adaptive_ifs;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9b09246..8b6ecd1 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -138,6 +138,10 @@
#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -219,6 +223,7 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -270,7 +275,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->id = e1000_phy_unknown;
- e1000e_get_phy_id(hw);
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+ /*
+ * In case the PHY needs to be in mdio slow mode (eg. 82577),
+ * set slow mode and try to get the PHY id again.
+ */
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ }
phy->type = e1000e_get_phy_type_from_id(phy->id);
switch (phy->type) {
@@ -292,6 +311,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
break;
}
+out:
return ret_val;
}
@@ -454,6 +474,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->rar_entry_count--;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = true;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* LED operations */
switch (mac->type) {
@@ -1074,16 +1096,44 @@ out:
/**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= HV_KMRN_MDIO_SLOW;
+
+ ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+ return ret_val;
+}
+
+/**
* e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
* done after every PHY reset.
**/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ u16 phy_data;
if (hw->mac.type != e1000_pchlan)
return ret_val;
+ /* Set MDIO slow mode before any other MDIO access */
+ if (hw->phy.type == e1000_phy_82577) {
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
if (((hw->phy.type == e1000_phy_82577) &&
((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
@@ -1116,16 +1166,32 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
hw->phy.addr = 1;
ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+ hw->phy.ops.release(hw);
if (ret_val)
goto out;
- hw->phy.ops.release(hw);
/*
* Configure the K1 Si workaround during phy reset assuming there is
* link so that it disables K1 if link is in 1Gbps.
*/
ret_val = e1000_k1_gig_workaround_hv(hw, true);
+ if (ret_val)
+ goto out;
+ /* Workaround for link disconnects on a busy hub in half duplex */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.read_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ &phy_data);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(BM_PORT_CTRL_PAGE, 17),
+ phy_data & 0x00FF);
+release:
+ hw->phy.ops.release(hw);
out:
return ret_val;
}
@@ -1182,6 +1248,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
/* Allow time for h/w to get to a quiescent state after reset */
mdelay(10);
+ /* Perform any necessary post-reset workarounds */
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
@@ -2482,6 +2549,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
+ /* Perform any necessary post-reset workarounds */
+ if (hw->mac.type == e1000_pchlan)
+ ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+
if (ctrl & E1000_CTRL_PHY_RST)
ret_val = hw->phy.ops.get_cfg_done(hw);
@@ -2526,9 +2597,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
kab |= E1000_KABGTXD_BGSQLBIAS;
ew32(KABGTXD, kab);
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
out:
return ret_val;
}
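
Illustrative note (not part of the patch): the ich8lan.c hunks replace the old reversible slow/fast MDIO toggle with a one-way read-modify-write of the KMRN mode control register. Below is a rough, self-contained user-space sketch of that pattern; phy_read()/phy_write() and the fake register value are stand-ins for e1e_rphy()/e1e_wphy() and real PHY state, only HV_KMRN_MDIO_SLOW is taken from the hunk above.

#include <stdint.h>
#include <stdio.h>

#define HV_KMRN_MDIO_SLOW 0x0400	/* slow-MDIO bit from the hunk above */

static uint16_t fake_reg = 0x2180;	/* stand-in for the PHY register contents */

/* stub accessors; the driver uses e1e_rphy()/e1e_wphy() here */
static int phy_read(uint16_t *val)  { *val = fake_reg; return 0; }
static int phy_write(uint16_t val)  { fake_reg = val;  return 0; }

static int set_mdio_slow_mode(void)
{
	uint16_t data;
	int ret = phy_read(&data);

	if (ret)
		return ret;

	data |= HV_KMRN_MDIO_SLOW;	/* set the slow-mode bit, preserve the rest */
	return phy_write(data);
}

int main(void)
{
	set_mdio_slow_mode();
	printf("reg now 0x%04x\n", fake_reg);	/* 0x2580 with the values above */
	return 0;
}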
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a86c175..2fa9b36a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -125,6 +125,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
+ u8 mac_addr[ETH_ALEN] = {0};
/* Setup the receive address */
e_dbg("Programming MAC Address into RAR[0]\n");
@@ -133,12 +134,8 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
/* Zero out the other (rar_entry_count - 1) receive addresses */
e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
- for (i = 1; i < rar_count; i++) {
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
- e1e_flush();
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
- e1e_flush();
- }
+ for (i = 1; i < rar_count; i++)
+ e1000e_rar_set(hw, mac_addr, i);
}
/**
@@ -164,10 +161,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
- rar_high |= E1000_RAH_AV;
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+ /*
+ * Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ ew32(RAL(index), rar_low);
+ e1e_flush();
+ ew32(RAH(index), rar_high);
+ e1e_flush();
}
/**
@@ -1609,6 +1615,11 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
mac->current_ifs_val = 0;
mac->ifs_min_val = IFS_MIN;
mac->ifs_max_val = IFS_MAX;
@@ -1617,6 +1628,8 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
mac->in_ifs_mode = false;
ew32(AIT, 0);
+out:
+ return;
}
/**
@@ -1630,6 +1643,11 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
if (mac->tx_packet_delta > MIN_NUM_XMITS) {
mac->in_ifs_mode = true;
@@ -1650,6 +1668,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
ew32(AIT, 0);
}
}
+out:
+ return;
}
/**
@@ -2287,10 +2307,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
s32 ret_val, hdr_csum, csum;
u8 i, len;
+ hw->mac.tx_pkt_filtering = true;
+
/* No manageability, no filtering */
if (!e1000e_check_mng_mode(hw)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
/*
@@ -2298,9 +2320,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
- if (ret_val != 0) {
+ if (ret_val) {
hw->mac.tx_pkt_filtering = false;
- return ret_val;
+ goto out;
}
/* Read in the header. Length and offset are in dwords. */
@@ -2319,17 +2341,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
*/
if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
hw->mac.tx_pkt_filtering = true;
- return 1;
+ goto out;
}
/* Cookie area is valid, make the final check for filtering. */
if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
- hw->mac.tx_pkt_filtering = true;
- return 1;
+out:
+ return hw->mac.tx_pkt_filtering;
}
/**
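
Illustrative note (not part of the patch): e1000e_enable_tx_pkt_filtering() returns bool, yet the old code returned raw s32 error codes from some branches. The rework records the decision in hw->mac.tx_pkt_filtering up front and routes every exit through a single out: label that returns that flag. A tiny sketch of the single-exit shape follows; the three check functions are hypothetical stand-ins for the manageability checks in the driver.

#include <stdbool.h>
#include <stdio.h>

static bool mng_mode_enabled(void)   { return true;  }
static int  enable_host_if(void)     { return 0;     }	/* 0 = success */
static bool cookie_parsing_set(void) { return false; }

static bool enable_tx_pkt_filtering(bool *filtering)
{
	*filtering = true;		/* default: filter */

	if (!mng_mode_enabled()) {	/* no manageability, no filtering */
		*filtering = false;
		goto out;
	}
	if (enable_host_if()) {		/* host interface unusable: disable */
		*filtering = false;
		goto out;
	}
	if (!cookie_parsing_set())	/* cookie checked out, but parsing is off */
		*filtering = false;
out:
	return *filtering;		/* every path reports the same flag */
}

int main(void)
{
	bool f;

	printf("tx packet filtering: %s\n",
	       enable_tx_pkt_filtering(&f) ? "on" : "off");
	return 0;
}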
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 762b697..57f149b 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
- /* !EOP means multiple descriptors were used to store a single
- * packet, also make sure the frame isn't just CRC only */
- if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+ /*
+ * !EOP means multiple descriptors were used to store a single
+ * packet, if that's the case we need to toss it. In fact, we
+ * need to toss every packet with the EOP bit clear and the
+ * next frame that _does_ have the EOP bit set, as it is by
+ * definition only a frame fragment
+ */
+ if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
/* All receives must fit into a single buffer */
e_dbg("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
+ if (status & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
PCI_DMA_FROMDEVICE);
buffer_info->dma = 0;
- if (!(staterr & E1000_RXD_STAT_EOP)) {
+ /* see !EOP comment in other rx routine */
+ if (!(staterr & E1000_RXD_STAT_EOP))
+ adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+ if (adapter->flags2 & FLAG2_IS_DISCARDING) {
e_dbg("Packet Split buffers didn't pick up the full "
"packet\n");
dev_kfree_skb_irq(skb);
+ if (staterr & E1000_RXD_STAT_EOP)
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
goto next_desc;
}
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ adapter->flags2 &= ~FLAG2_IS_DISCARDING;
writel(0, adapter->hw.hw_addr + rx_ring->head);
writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -2333,18 +2350,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl &= ~E1000_RCTL_SZ_4096;
rctl |= E1000_RCTL_BSEX;
switch (adapter->rx_buffer_len) {
- case 256:
- rctl |= E1000_RCTL_SZ_256;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case 512:
- rctl |= E1000_RCTL_SZ_512;
- rctl &= ~E1000_RCTL_BSEX;
- break;
- case 1024:
- rctl |= E1000_RCTL_SZ_1024;
- rctl &= ~E1000_RCTL_BSEX;
- break;
case 2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -3315,24 +3320,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
- adapter->stats.scc += phy_data;
+ if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+ adapter->stats.scc += phy_data;
e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
- adapter->stats.ecol += phy_data;
+ if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+ adapter->stats.ecol += phy_data;
e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
- adapter->stats.mcc += phy_data;
+ if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+ adapter->stats.mcc += phy_data;
e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
- adapter->stats.latecol += phy_data;
+ if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+ adapter->stats.latecol += phy_data;
e1e_rphy(hw, HV_DC_UPPER, &phy_data);
- e1e_rphy(hw, HV_DC_LOWER, &phy_data);
- adapter->stats.dc += phy_data;
+ if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+ adapter->stats.dc += phy_data;
} else {
adapter->stats.scc += er32(SCC);
adapter->stats.ecol += er32(ECOL);
@@ -3360,8 +3365,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
- e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
- hw->mac.collision_delta = phy_data;
+ if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+ hw->mac.collision_delta = phy_data;
} else {
hw->mac.collision_delta = er32(COLC);
}
@@ -3372,8 +3377,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
- e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
- adapter->stats.tncrs += phy_data;
+ if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+ adapter->stats.tncrs += phy_data;
} else {
if ((hw->mac.type != e1000_82574) &&
(hw->mac.type != e1000_82583))
@@ -3781,7 +3786,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
0, IPPROTO_TCP, 0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -3962,13 +3967,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i==0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
e1000_put_txbuf(adapter, buffer_info);;
}
@@ -4317,13 +4322,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* fragmented skbs
*/
- if (max_frame <= 256)
- adapter->rx_buffer_len = 256;
- else if (max_frame <= 512)
- adapter->rx_buffer_len = 512;
- else if (max_frame <= 1024)
- adapter->rx_buffer_len = 1024;
- else if (max_frame <= 2048)
+ if (max_frame <= 2048)
adapter->rx_buffer_len = 2048;
else
adapter->rx_buffer_len = 4096;
@@ -4674,6 +4673,7 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
e1000e_disable_l1aspm(pdev);
err = pci_enable_device_mem(pdev);
@@ -4825,6 +4825,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
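
Illustrative note (not part of the patch): the receive-path hunks above introduce a sticky "discarding" state. Once a descriptor without EOP is seen, every buffer is dropped until the descriptor that finally carries EOP, so an oversized or fragmented frame never reaches the stack half-assembled. A stand-alone sketch of that state machine follows; the descriptor array and bit value are made up for the example, only the EOP idea mirrors E1000_RXD_STAT_EOP and FLAG2_IS_DISCARDING.

#include <stdbool.h>
#include <stdio.h>

#define STAT_EOP 0x01	/* end-of-packet bit, in the spirit of E1000_RXD_STAT_EOP */

struct desc { unsigned int status; int id; };

int main(void)
{
	/* one whole frame (EOP set), then a frame split across three buffers */
	struct desc ring[] = {
		{ STAT_EOP, 0 }, { 0, 1 }, { 0, 2 }, { STAT_EOP, 3 }, { STAT_EOP, 4 },
	};
	bool discarding = false;

	for (unsigned int i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
		if (!(ring[i].status & STAT_EOP))
			discarding = true;	/* frame spans multiple buffers */

		if (discarding) {
			printf("desc %d: dropped\n", ring[i].id);
			if (ring[i].status & STAT_EOP)
				discarding = false;	/* last piece seen, resync */
			continue;
		}
		printf("desc %d: delivered\n", ring[i].id);
	}
	return 0;
}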
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 55a2c0a..7f3ceb9 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -152,32 +152,9 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
goto out;
- /*
- * If the PHY ID is still unknown, we may have an 82577
- * without link. We will try again after setting Slow MDIC
- * mode. No harm in trying again in this case since the PHY
- * ID is unknown at this point anyway.
- */
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- goto out;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
- phy->ops.release(hw);
-
retry_count++;
}
out:
- /* Revert to MDIO fast mode, if applicable */
- if (retry_count) {
- ret_val = phy->ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
- phy->ops.release(hw);
- }
-
return ret_val;
}
@@ -2791,38 +2768,6 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
}
/**
- * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
- * @hw: pointer to the HW structure
- * @slow: true for slow mode, false for normal mode
- *
- * Assumes semaphore already acquired.
- **/
-s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
-{
- s32 ret_val = 0;
- u16 data = 0;
-
- /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
- hw->phy.addr = 1;
- ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
- if (ret_val)
- goto out;
-
- ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
- (0x2180 | (slow << 10)));
- if (ret_val)
- goto out;
-
- /* dummy read when reverting to fast mode - throw away result */
- if (!slow)
- ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
-out:
- return ret_val;
-}
-
-/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -2839,7 +2784,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2847,16 +2791,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -2893,10 +2827,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);
@@ -2948,7 +2878,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
- bool in_slow_mode = false;
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2956,16 +2885,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
return ret_val;
}
- /* Workaround failure in MDIO access while cable is disconnected */
- if ((hw->phy.type == e1000_phy_82577) &&
- !(er32(STATUS) & E1000_STATUS_LU)) {
- ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
- if (ret_val)
- goto out;
-
- in_slow_mode = true;
- }
-
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -3019,10 +2938,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
data);
out:
- /* Revert to MDIO fast mode, if applicable */
- if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
- ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
if (!locked)
hw->phy.ops.release(hw);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 25fabb3..d5160ed 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -46,6 +46,11 @@
#include "gianfar.h"
#include "fsl_pq_mdio.h"
+struct fsl_pq_mdio_priv {
+ void __iomem *map;
+ struct fsl_pq_mdio __iomem *regs;
+};
+
/*
* Write value to the PHY at mii_id at register regnum,
* on the bus attached to the local interface, which may be different from the
@@ -105,7 +110,9 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
{
- return (void __iomem __force *)bus->priv;
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+
+ return priv->regs;
}
/*
@@ -266,6 +273,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
{
struct device_node *np = ofdev->node;
struct device_node *tbi;
+ struct fsl_pq_mdio_priv *priv;
struct fsl_pq_mdio __iomem *regs = NULL;
void __iomem *map;
u32 __iomem *tbipa;
@@ -274,14 +282,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
u64 addr = 0, size = 0;
int err = 0;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
new_bus = mdiobus_alloc();
if (NULL == new_bus)
- return -ENOMEM;
+ goto err_free_priv;
new_bus->name = "Freescale PowerQUICC MII Bus",
new_bus->read = &fsl_pq_mdio_read,
new_bus->write = &fsl_pq_mdio_write,
new_bus->reset = &fsl_pq_mdio_reset,
+ new_bus->priv = priv;
fsl_pq_mdio_bus_name(new_bus->id, np);
/* Set the PHY base address */
@@ -291,6 +304,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
err = -ENOMEM;
goto err_free_bus;
}
+ priv->map = map;
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
@@ -298,8 +312,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
of_device_is_compatible(np, "ucc_geth_phy"))
map -= offsetof(struct fsl_pq_mdio, miimcfg);
regs = map;
-
- new_bus->priv = (void __force *)regs;
+ priv->regs = regs;
new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
@@ -392,10 +405,11 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
err_free_irqs:
kfree(new_bus->irq);
err_unmap_regs:
- iounmap(regs);
+ iounmap(priv->map);
err_free_bus:
kfree(new_bus);
-
+err_free_priv:
+ kfree(priv);
return err;
}
@@ -404,14 +418,16 @@ static int fsl_pq_mdio_remove(struct of_device *ofdev)
{
struct device *device = &ofdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
+ struct fsl_pq_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
dev_set_drvdata(device, NULL);
- iounmap(fsl_pq_mdio_get_regs(bus));
+ iounmap(priv->map);
bus->priv = NULL;
mdiobus_free(bus);
+ kfree(priv);
return 0;
}
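
Illustrative note (not part of the patch): fsl_pq_mdio now keeps the original ioremap cookie (priv->map) separate from the possibly offset register pointer (priv->regs), so teardown always unmaps the address that was actually mapped, and the probe error path gains one more unwind label. A toy sketch of that goto-unwind shape follows; grab()/drop()/register_bus() are stand-ins for kzalloc()/mdiobus_alloc()/ioremap()/mdiobus_register(), with the registration stub forced to fail so the unwind runs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *grab(const char *what) { printf("alloc %s\n", what); return malloc(1); }
static void drop(const char *what, void *p) { printf("free  %s\n", what); free(p); }
static int register_bus(void) { return -EIO; }	/* force the error path */

static int probe(void)
{
	void *priv, *bus, *map;
	int err;

	priv = grab("priv");
	if (!priv)
		return -ENOMEM;

	bus = grab("bus");
	if (!bus) {
		err = -ENOMEM;
		goto err_free_priv;
	}

	map = grab("register window");	/* the ioremap cookie: keep it untouched */
	if (!map) {
		err = -ENOMEM;
		goto err_free_bus;
	}

	err = register_bus();
	if (err)
		goto err_unmap;
	return 0;

err_unmap:			/* unwind strictly in reverse order of setup */
	drop("register window", map);
err_free_bus:
	drop("bus", bus);
err_free_priv:
	drop("priv", priv);
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}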
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6850dc0..8bd3c9f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -357,8 +356,11 @@ static void gfar_init_mac(struct net_device *ndev)
/* Configure the coalescing support */
gfar_configure_coalescing(priv, 0xFF, 0xFF);
- if (priv->rx_filer_enable)
+ if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
+ /* Program the RIR0 reg with the required distribution */
+ gfar_write(&regs->rir0, DEFAULT_RIR0);
+ }
if (priv->rx_csum_enable)
rctrl |= RCTRL_CHECKSUMMING;
@@ -414,6 +416,36 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
+static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct netdev_queue *txq;
+ unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
+ unsigned long tx_packets = 0, tx_bytes = 0;
+ int i = 0;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_packets += priv->rx_queue[i]->stats.rx_packets;
+ rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+ }
+
+ dev->stats.rx_packets = rx_packets;
+ dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_dropped = rx_dropped;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ txq = netdev_get_tx_queue(dev, i);
+ tx_bytes += txq->tx_bytes;
+ tx_packets += txq->tx_packets;
+ }
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+
+ return &dev->stats;
+}
+
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
@@ -422,7 +454,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_multicast_list = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
- .ndo_select_queue = gfar_select_queue,
+ .ndo_get_stats = gfar_get_stats,
.ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -472,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
return priv->vlgrp || priv->rx_csum_enable;
}
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- return skb_get_queue_mapping(skb);
-}
static void free_tx_pointers(struct gfar_private *priv)
{
int i = 0;
@@ -1022,6 +1050,9 @@ static int gfar_probe(struct of_device *ofdev,
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
+ /* enable filer if using multiple RX queues*/
+ if(priv->num_rx_queues > 1)
+ priv->rx_filer_enable = 1;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -1937,7 +1968,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Update transmit stats */
- dev->stats.tx_bytes += skb->len;
+ txq->tx_bytes += skb->len;
+ txq->tx_packets ++;
txbdp = txbdp_start = tx_queue->cur_tx;
@@ -2295,8 +2327,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
tx_queue->skb_dirtytx = skb_dirtytx;
tx_queue->dirty_tx = bdp;
- dev->stats.tx_packets += howmany;
-
return howmany;
}
@@ -2434,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
fcb = (struct rxfcb *)skb->data;
/* Remove the FCB from the skb */
- skb_set_queue_mapping(skb, fcb->rq);
/* Remove the padded bytes, if there are any */
- if (amount_pull)
+ if (amount_pull) {
+ skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
+ }
if (priv->rx_csum_enable)
gfar_rx_checksum(skb, fcb);
@@ -2510,22 +2541,22 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
}
} else {
/* Increment the number of packets */
- dev->stats.rx_packets++;
+ rx_queue->stats.rx_packets++;
howmany++;
if (likely(skb)) {
pkt_len = bdp->length - ETH_FCS_LEN;
/* Remove the FCS from the packet length */
skb_put(skb, pkt_len);
- dev->stats.rx_bytes += pkt_len;
-
+ rx_queue->stats.rx_bytes += pkt_len;
+ skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull);
} else {
if (netif_msg_rx_err(priv))
printk(KERN_WARNING
"%s: Missing skb!\n", dev->name);
- dev->stats.rx_dropped++;
+ rx_queue->stats.rx_dropped++;
priv->extra_stats.rx_skbmissing++;
}
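
Illustrative note (not part of the patch): gianfar moves RX packet/byte/drop counters into per-queue structures and sums them on demand in the new gfar_get_stats(); TX totals likewise come from the per-queue netdev_queue counters. A small self-contained sketch of that aggregation follows; the queue count, field names and sample numbers are illustrative only.

#include <stdio.h>

#define NUM_RX_QUEUES 2

struct rx_q_stats { unsigned long rx_packets, rx_bytes, rx_dropped; };
struct dev_stats   { unsigned long rx_packets, rx_bytes, rx_dropped; };

int main(void)
{
	struct rx_q_stats q[NUM_RX_QUEUES] = {
		{ .rx_packets = 10, .rx_bytes = 1500, .rx_dropped = 1 },
		{ .rx_packets =  4, .rx_bytes =  600, .rx_dropped = 0 },
	};
	struct dev_stats dev = { 0 };

	/* sum the per-queue counters each time device stats are requested */
	for (int i = 0; i < NUM_RX_QUEUES; i++) {
		dev.rx_packets += q[i].rx_packets;
		dev.rx_bytes   += q[i].rx_bytes;
		dev.rx_dropped += q[i].rx_dropped;
	}

	printf("rx: %lu packets, %lu bytes, %lu dropped\n",
	       dev.rx_packets, dev.rx_bytes, dev.rx_dropped);
	return 0;
}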
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index cbb4510..3d72dc4 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -333,7 +333,7 @@ extern const char gfar_driver_version[];
#define IMASK_BSY 0x20000000
#define IMASK_EBERR 0x10000000
#define IMASK_MSRO 0x04000000
-#define IMASK_GRSC 0x02000000
+#define IMASK_GTSC 0x02000000
#define IMASK_BABT 0x01000000
#define IMASK_TXC 0x00800000
#define IMASK_TXEEN 0x00400000
@@ -344,7 +344,7 @@ extern const char gfar_driver_version[];
#define IMASK_XFUN 0x00010000
#define IMASK_RXB0 0x00008000
#define IMASK_MAG 0x00000800
-#define IMASK_GTSC 0x00000100
+#define IMASK_GRSC 0x00000100
#define IMASK_RXFEN0 0x00000080
#define IMASK_FIR 0x00000008
#define IMASK_FIQ 0x00000004
@@ -401,6 +401,10 @@ extern const char gfar_driver_version[];
#define FPR_FILER_MASK 0xFFFFFFFF
#define MAX_FILER_IDX 0xFF
+/* This default RIR value directly corresponds
+ * to the 3-bit hash value generated */
+#define DEFAULT_RIR0 0x05397700
+
/* RQFCR register bits */
#define RQFCR_GPI 0x80000000
#define RQFCR_HASHTBL_Q 0x00000000
@@ -936,6 +940,15 @@ struct gfar_priv_tx_q {
unsigned short txtime;
};
+/*
+ * Per RX queue stats
+ */
+struct rx_q_stats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long rx_dropped;
+};
+
/**
* struct gfar_priv_rx_q - per rx queue structure
* @rxlock: per queue rx spin lock
@@ -958,6 +971,7 @@ struct gfar_priv_rx_q {
struct rxbd8 *cur_rx;
struct net_device *dev;
struct gfar_priv_grp *grp;
+ struct rx_q_stats stats;
u16 skb_currx;
u16 qindex;
unsigned int rx_ring_size;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ae5f11c..bdadf3e 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -248,6 +248,7 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned char *ptr;
struct bpqdev *bpq;
+ struct net_device *orig_dev;
int size;
/*
@@ -282,8 +283,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
bpq = netdev_priv(dev);
+ orig_dev = dev;
if ((dev = bpq_get_ether_dev(dev)) == NULL) {
- dev->stats.tx_dropped++;
+ orig_dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 090a6d3..052c740 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -87,6 +87,7 @@ History:
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
@@ -988,7 +989,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
/* copy out MAC address */
- for (z = 0; z < sizeof(dev->dev_addr); z++)
+ for (z = 0; z < ETH_ALEN; z++)
dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
/* print config */
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e8e9e91..c505b50d 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1096,9 +1096,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
- reg |= E1000_PCS_LCTL_FSD | /* Force Speed */
- E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
- E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 5c9d73e..3670a66 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -457,15 +457,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
- if (ret_val)
- goto out;
-
- /* Set number of link attempts before downshift */
- ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
- if (ret_val)
- goto out;
- phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
- ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
out:
return ret_val;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ac9d527..f771a6c 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1795,7 +1795,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
* so exclude FUNC_1 ports from having WoL enabled */
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+ if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
!adapter->eeprom_wol) {
wol->supported = 0;
break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 78963a0..c881347 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+ if (!adapter->msix_entries && msix_vector == 0)
+ msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
int err = 0;
if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
} else {
- switch (hw->mac.type) {
- case e1000_82575:
- wr32(E1000_MSIXBM(0),
- (E1000_EICR_RX_QUEUE0 |
- E1000_EICR_TX_QUEUE0 |
- E1000_EIMS_OTHER));
- break;
- case e1000_82580:
- case e1000_82576:
- wr32(E1000_IVAR0, E1000_IVAR_VALID);
- break;
- default:
- break;
- }
+ igb_assign_vector(adapter->q_vector[0], 0);
}
if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
}
if (adapter->msix_entries)
igb_configure_msix(adapter);
+ else
+ igb_assign_vector(adapter->q_vector[0], 0);
/* Clear any pending interrupts. */
rd32(E1000_ICR);
@@ -1306,13 +1296,8 @@ void igb_reset(struct igb_adapter *adapter)
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
- if (mac->type < e1000_82576) {
- fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
- fc->low_water = fc->high_water - 8;
- } else {
- fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
- fc->low_water = fc->high_water - 16;
- }
+ fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
fc->current_mode = fc->requested_mode;
@@ -3427,7 +3412,7 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -3589,6 +3574,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
+ count++;
i++;
if (i == tx_ring->count)
i = 0;
@@ -3610,7 +3596,6 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
if (pci_dma_mapping_error(pdev, buffer_info->dma))
goto dma_error;
- count++;
}
tx_ring->buffer_info[i].skb = skb;
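
Illustrative note (not part of the patch): igb_reset() above drops the per-MAC split and uses 16-byte flow-control watermark granularity everywhere, with the high watermark capped at min(90% of the packet buffer, packet buffer minus two max frames). A quick arithmetic sketch of that computation follows; the packet-buffer size and frame size used here are made-up example values.

#include <stdio.h>

int main(void)
{
	unsigned int pba_kb   = 34;		/* packet buffer in KB, illustrative */
	unsigned int max_frame = 1522;		/* bytes, illustrative */
	unsigned int pba = pba_kb << 10;	/* bytes */

	unsigned int a   = pba * 9 / 10;	/* 90% of the packet buffer */
	unsigned int b   = pba - 2 * max_frame;	/* leave room for two full frames */
	unsigned int hwm = a < b ? a : b;

	unsigned int high = hwm & 0xFFF0;	/* 16-byte granularity */
	unsigned int low  = high - 16;

	printf("high water: %u bytes, low water: %u bytes\n", high, low);
	return 0;
}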
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index e9dd95f..2aa71a7 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1963,7 +1963,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
@@ -2117,6 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
+ buffer_info->mapped_as_page = false;
buffer_info->dma = pci_map_single(pdev, skb->data, len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma))
@@ -2126,6 +2127,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
+ count++;
i++;
if (i == tx_ring->count)
i = 0;
@@ -2146,7 +2148,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, buffer_info->dma))
goto dma_error;
- count++;
}
tx_ring->buffer_info[i].skb = skb;
@@ -2163,14 +2164,14 @@ dma_error:
buffer_info->length = 0;
buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
- count--;
+ if (count)
+ count--;
/* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
+ while (count--) {
+ if (i==0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
igbvf_put_txbuf(adapter, buffer_info);
}
@@ -2763,7 +2764,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
- "PF still in reset state, assigning new address\n");
+ "PF still in reset state, assigning new address."
+ " Is the PF interface up?\n");
random_ether_addr(hw->mac.addr);
} else {
err = hw->mac.ops.read_mac_addr(hw);
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index bcd0f01..593d1a4 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1363,13 +1363,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- count--;
-
- while (count >= 0) {
+ if (count)
count--;
- i--;
- if (i < 0)
+
+ while (count--) {
+ if (i==0)
i += tx_ring->count;
+ i--;
buffer_info = &tx_ring->buffer_info[i];
ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
}
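
Illustrative note (not part of the patch): the dma_error hunks in e1000e, igbvf, ixgb and ixgbe all rewrite the same unmap loop. The count is decremented only when non-zero (dropping the entry that just failed), and the ring index wraps before the decrement, so the loop releases exactly the buffers that were mapped and never walks below index 0. A stand-alone sketch of the corrected walk follows; the ring size, failure index and mapped count are made up.

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	unsigned int i = 1;	/* ring index of the entry that failed to map */
	unsigned int count = 5;	/* entries counted so far, including the failed one */

	if (count)		/* discard the failed entry; also guards count == 0 */
		count--;

	/* walk backwards over the successfully mapped entries: 0, 7, 6, 5 */
	while (count--) {
		if (i == 0)
			i += RING_SIZE;	/* wrap before decrementing */
		i--;
		printf("unmap ring[%u]\n", i);
	}
	return 0;
}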
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 21b41f4..bfef0eb 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 8da8eb5..303e7bd 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index e2d5343..35a06b4758 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
+ u32 link_speed = 0;
+ bool link_up;
#ifdef CONFIG_DCB
if (hw->fc.requested_mode == ixgbe_fc_pfc)
goto out;
#endif /* CONFIG_DCB */
+ /*
+ * On 82598 having Rx FC on causes resets while doing 1G
+ * so if it's on turn it off once we know link_speed. For
+ * more details see 82598 Specification update.
+ */
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ hw->fc.requested_mode = ixgbe_fc_tx_pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ hw->fc.requested_mode = ixgbe_fc_none;
+ break;
+ default:
+ /* no change */
+ break;
+ }
+ }
+
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
if (ret_val)
@@ -510,6 +532,40 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_validate_link_ready - Function looks for phy link
+ * @hw: pointer to hardware structure
+ *
+ * Function indicates success when phy link is available. If phy is not ready
+ * within 5 seconds of MAC indicating link, the function returns error.
+ **/
+static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+ u32 timeout;
+ u16 an_reg;
+
+ if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+ return 0;
+
+ for (timeout = 0;
+ timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+ hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);
+
+ if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
+ (an_reg & MDIO_STAT1_LSTATUS))
+ break;
+
+ msleep(100);
+ }
+
+ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ hw_dbg(hw, "Link was indicated but link is down\n");
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_check_mac_link_82598 - Get link/speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -589,6 +645,10 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
+ if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
+ (ixgbe_validate_link_ready(hw) != 0))
+ *link_up = false;
+
/* if link is down, zero out the current_mode */
if (*link_up == false) {
hw->fc.current_mode = ixgbe_fc_none;
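
Illustrative note (not part of the patch): ixgbe_validate_link_ready() polls the PHY's autoneg/link status every 100 ms and gives up after IXGBE_VALIDATE_LINK_READY_TIMEOUT (50) attempts, i.e. roughly 5 seconds, before treating the MAC's link indication as stale. A minimal sketch of that bounded polling loop follows; the bit values and the stub read are invented for the example, only the loop bound matches the define in ixgbe_type.h.

#include <stdio.h>

#define POLL_LIMIT   50		/* as IXGBE_VALIDATE_LINK_READY_TIMEOUT */
#define AN_COMPLETE  0x20	/* illustrative bits, not the real MDIO fields */
#define LINK_STATUS  0x04

/* stub: pretend the PHY reports link after a few polls */
static unsigned int read_status(unsigned int attempt)
{
	return attempt >= 3 ? (AN_COMPLETE | LINK_STATUS) : 0;
}

int main(void)
{
	unsigned int timeout, status;

	for (timeout = 0; timeout < POLL_LIMIT; timeout++) {
		status = read_status(timeout);
		if ((status & AN_COMPLETE) && (status & LINK_STATUS))
			break;
		/* the driver sleeps here: msleep(100) */
	}

	if (timeout == POLL_LIMIT)
		printf("link was indicated but PHY never came ready\n");
	else
		printf("PHY ready after %u polls\n", timeout);
	return 0;
}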
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 5383405..b49bd6b 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 688b8ca..21f158f 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 27f3214..dfff0ff 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index a156228..9aea4f0 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 64a9fa1..5caafd4 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index f302638..f0e9279 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index ebbe53c..cc728fa 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index ec8a252..4f7a26a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 9e5e282..0f3f791 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 3c7a79a..dd4883f 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -223,7 +223,7 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
- adapter->dcb_set_bitmap |= BIT_PG_RX;
+ adapter->dcb_set_bitmap |= BIT_PG_TX;
adapter->dcb_set_bitmap |= BIT_RESETLINK;
}
}
@@ -341,6 +341,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (!adapter->dcb_set_bitmap)
return DCB_NO_HW_CHG;
+ ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ adapter->ring_feature[RING_F_DCB].indices);
+
+ if (ret)
+ return DCB_NO_HW_CHG;
+
/*
* Only take down the adapter if the configuration change
* requires a reset.
@@ -359,14 +365,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
- ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- adapter->ring_feature[RING_F_DCB].indices);
- if (ret) {
- if (adapter->dcb_set_bitmap & BIT_RESETLINK)
- clear_bit(__IXGBE_RESETTING, &adapter->state);
- return DCB_NO_HW_CHG;
- }
-
if (adapter->dcb_cfg.pfc_mode_enable) {
if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
(adapter->hw.fc.current_mode != ixgbe_fc_pfc))
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 0bd49d3..d77961f 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index da32a10..e9a20c8 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index de8ff53..abf4b2b 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 35ea8c9..951b73c 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -52,7 +52,7 @@ static const char ixgbe_driver_string[] =
#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
+static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -262,10 +262,12 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
int reg_idx = tx_ring->reg_idx;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
tc = reg_idx >> 2;
txoff = IXGBE_TFCS_TXOFF0;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
tc = 0;
txoff = IXGBE_TFCS_TXOFF;
if (dcb_i == 8) {
@@ -284,6 +286,9 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
tc += (reg_idx - 96) >> 4;
}
}
+ break;
+ default:
+ tc = 0;
}
txoff <<= tc;
}
@@ -4373,6 +4378,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ /*
+ * pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
@@ -4511,6 +4521,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
+ u64 non_eop_descs = 0, restart_queue = 0;
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
u64 rsc_count = 0;
@@ -4528,10 +4539,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->restart_queue += adapter->tx_ring[i].restart_queue;
+ restart_queue += adapter->tx_ring[i].restart_queue;
+ adapter->restart_queue = restart_queue;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->non_eop_descs += adapter->tx_ring[i].non_eop_descs;
+ non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+ adapter->non_eop_descs = non_eop_descs;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
@@ -4915,7 +4928,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
iph->daddr, 0,
IPPROTO_TCP,
0);
- } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -5003,7 +5016,18 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
IXGBE_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
+ __be16 protocol;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+ const struct vlan_ethhdr *vhdr =
+ (const struct vlan_ethhdr *)skb->data;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ } else {
+ protocol = skb->protocol;
+ }
+
+ switch (protocol) {
case cpu_to_be16(ETH_P_IP):
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -5143,19 +5167,19 @@ dma_error:
tx_buffer_info->dma = 0;
tx_buffer_info->time_stamp = 0;
tx_buffer_info->next_to_watch = 0;
- count--;
+ if (count)
+ count--;
/* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
+ while (count--) {
+ if (i==0)
i += tx_ring->count;
+ i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
}
- return count;
+ return 0;
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5305,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ while (unlikely(txq >= dev->real_num_tx_queues))
+ txq -= dev->real_num_tx_queues;
return txq;
+ }
#ifdef IXGBE_FCOE
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
@@ -5552,6 +5579,10 @@ static void ixgbe_netpoll(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -5732,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
+ /* Make it possible the adapter to be woken up via WOL */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
/*
* If there is a fan on this device and it has failed log the
* failure.
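
Illustrative note (not part of the patch): the checksum-offload hunk in ixgbe_main.c stops switching on skb->protocol directly; for 802.1Q frames it reads the protocol encapsulated behind the VLAN tag (vhdr->h_vlan_encapsulated_proto), so IPv4/IPv6 offload still applies to tagged traffic. A rough user-space sketch of the same lookup on a raw Ethernet header follows; the hand-built frame is illustrative, the ethertype constants and 802.1Q layout are the standard ones.

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100
#define ETH_P_IP    0x0800

/* read a 16-bit big-endian field from a frame buffer */
static uint16_t be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
	/* dst MAC (6) + src MAC (6) + 802.1Q tag (4) + inner ethertype */
	uint8_t frame[18] = { 0 };

	frame[12] = 0x81; frame[13] = 0x00;	/* outer type: 802.1Q */
	frame[16] = 0x08; frame[17] = 0x00;	/* encapsulated type: IPv4 */

	uint16_t proto = be16(&frame[12]);
	if (proto == ETH_P_8021Q)
		proto = be16(&frame[16]);	/* look behind the VLAN tag */

	printf("protocol to offload for: 0x%04x%s\n",
	       proto, proto == ETH_P_IP ? " (IPv4)" : "");
	return 0;
}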
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 9ecad17..1c1efd3 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9b700f5..9cf5f3b 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index f3e8d52..9eafddf 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -841,6 +841,8 @@
#define IXGBE_MPVC 0x04318
#define IXGBE_SGMIIC 0x04314
+#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
+
/* Omer CORECTL */
#define IXGBE_CORECTL 0x014F00
/* BARCTRL */
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index c146304..c0ceebc 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -854,8 +854,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
static irqreturn_t ks_irq(int irq, void *pw)
{
- struct ks_net *ks = pw;
- struct net_device *netdev = ks->netdev;
+ struct net_device *netdev = pw;
+ struct ks_net *ks = netdev_priv(netdev);
u16 status;
/*this should be the first in IRQ handler */
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 336e7c7..a8522bd 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -134,7 +134,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
struct sk_buff *skb;
int i;
- lp->rx_skb = kzalloc(sizeof(struct sk_buff)*RX_BD_NUM, GFP_KERNEL);
+ lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual addres and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 3c16602..04f42ae 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -90,6 +90,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[ 9] = "Q_Key violation counter",
[10] = "VMM",
[12] = "DPDP",
+ [15] = "Big LSO headers",
[16] = "MW support",
[17] = "APM support",
[18] = "Atomic ops support",
@@ -235,7 +236,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
dev_cap->max_mpts = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
- dev_cap->reserved_eqs = 1 << (field & 0xf);
+ dev_cap->reserved_eqs = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
dev_cap->max_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 291a505..3cf56d9 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1174,7 +1174,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_port:
- for (port = 1; port <= dev->caps.num_ports; port++)
+ for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
mlx4_cleanup_mcg_table(dev);
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index f36ae69..015fbe78 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -53,7 +53,7 @@ static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
if (out_param > 2) {
mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
- return EINVAL;
+ return -EINVAL;
}
*type = out_param;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 1405a17..af67af5 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -656,6 +656,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
struct sk_buff *skb;
int rx;
struct rx_desc *rx_desc;
+ int size;
skb = __skb_dequeue(&mp->rx_recycle);
if (skb == NULL)
@@ -678,10 +679,11 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
rx_desc = rxq->rx_desc_area + rx;
+ size = skb->end - skb->data;
rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
- skb->data, mp->skb_size,
+ skb->data, size,
DMA_FROM_DEVICE);
- rx_desc->buf_size = mp->skb_size;
+ rx_desc->buf_size = size;
rxq->rx_skb[rx] = skb;
wmb();
rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 76cd1f3..9bc5bd1d 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 65
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.65"
+#define _NETXEN_NIC_LINUX_SUBVERSION 72
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.72"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index ddd704a..542f408 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -66,7 +66,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test)
-#define NETXEN_NIC_REGS_COUNT 42
+#define NETXEN_NIC_REGS_COUNT 30
#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
#define NETXEN_MAX_EEPROM_LEN 1024
@@ -312,150 +312,91 @@ static int netxen_nic_get_regs_len(struct net_device *dev)
return NETXEN_NIC_REGS_LEN;
}
-struct netxen_niu_regs {
- __u32 reg[NETXEN_NIC_REGS_COUNT];
-};
-
-static struct netxen_niu_regs niu_registers[] = {
- {
- /* GB Mode */
- {
- NETXEN_NIU_GB_SERDES_RESET,
- NETXEN_NIU_GB0_MII_MODE,
- NETXEN_NIU_GB1_MII_MODE,
- NETXEN_NIU_GB2_MII_MODE,
- NETXEN_NIU_GB3_MII_MODE,
- NETXEN_NIU_GB0_GMII_MODE,
- NETXEN_NIU_GB1_GMII_MODE,
- NETXEN_NIU_GB2_GMII_MODE,
- NETXEN_NIU_GB3_GMII_MODE,
- NETXEN_NIU_REMOTE_LOOPBACK,
- NETXEN_NIU_GB0_HALF_DUPLEX,
- NETXEN_NIU_GB1_HALF_DUPLEX,
- NETXEN_NIU_RESET_SYS_FIFOS,
- NETXEN_NIU_GB_CRC_DROP,
- NETXEN_NIU_GB_DROP_WRONGADDR,
- NETXEN_NIU_TEST_MUX_CTL,
-
- NETXEN_NIU_GB_MAC_CONFIG_0(0),
- NETXEN_NIU_GB_MAC_CONFIG_1(0),
- NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0),
- NETXEN_NIU_GB_MAX_FRAME_SIZE(0),
- NETXEN_NIU_GB_TEST_REG(0),
- NETXEN_NIU_GB_MII_MGMT_CONFIG(0),
- NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
- NETXEN_NIU_GB_MII_MGMT_ADDR(0),
- NETXEN_NIU_GB_MII_MGMT_CTRL(0),
- NETXEN_NIU_GB_MII_MGMT_STATUS(0),
- NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
- NETXEN_NIU_GB_INTERFACE_CTRL(0),
- NETXEN_NIU_GB_INTERFACE_STATUS(0),
- NETXEN_NIU_GB_STATION_ADDR_0(0),
- NETXEN_NIU_GB_STATION_ADDR_1(0),
- -1,
- }
- },
- {
- /* XG Mode */
- {
- NETXEN_NIU_XG_SINGLE_TERM,
- NETXEN_NIU_XG_DRIVE_HI,
- NETXEN_NIU_XG_DRIVE_LO,
- NETXEN_NIU_XG_DTX,
- NETXEN_NIU_XG_DEQ,
- NETXEN_NIU_XG_WORD_ALIGN,
- NETXEN_NIU_XG_RESET,
- NETXEN_NIU_XG_POWER_DOWN,
- NETXEN_NIU_XG_RESET_PLL,
- NETXEN_NIU_XG_SERDES_LOOPBACK,
- NETXEN_NIU_XG_DO_BYTE_ALIGN,
- NETXEN_NIU_XG_TX_ENABLE,
- NETXEN_NIU_XG_RX_ENABLE,
- NETXEN_NIU_XG_STATUS,
- NETXEN_NIU_XG_PAUSE_THRESHOLD,
- NETXEN_NIU_XGE_CONFIG_0,
- NETXEN_NIU_XGE_CONFIG_1,
- NETXEN_NIU_XGE_IPG,
- NETXEN_NIU_XGE_STATION_ADDR_0_HI,
- NETXEN_NIU_XGE_STATION_ADDR_0_1,
- NETXEN_NIU_XGE_STATION_ADDR_1_LO,
- NETXEN_NIU_XGE_STATUS,
- NETXEN_NIU_XGE_MAX_FRAME_SIZE,
- NETXEN_NIU_XGE_PAUSE_FRAME_VALUE,
- NETXEN_NIU_XGE_TX_BYTE_CNT,
- NETXEN_NIU_XGE_TX_FRAME_CNT,
- NETXEN_NIU_XGE_RX_BYTE_CNT,
- NETXEN_NIU_XGE_RX_FRAME_CNT,
- NETXEN_NIU_XGE_AGGR_ERROR_CNT,
- NETXEN_NIU_XGE_MULTICAST_FRAME_CNT,
- NETXEN_NIU_XGE_UNICAST_FRAME_CNT,
- NETXEN_NIU_XGE_CRC_ERROR_CNT,
- NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
- NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
- NETXEN_NIU_XGE_LOCAL_ERROR_CNT,
- NETXEN_NIU_XGE_REMOTE_ERROR_CNT,
- NETXEN_NIU_XGE_CONTROL_CHAR_CNT,
- NETXEN_NIU_XGE_PAUSE_FRAME_CNT,
- -1,
- }
- }
-};
-
static void
netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- __u32 mode, *regs_buff = p;
- int i, window;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct nx_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+ int port = adapter->physical_port;
memset(p, 0, NETXEN_NIC_REGS_LEN);
+
regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
(adapter->pdev)->device;
- /* which mode */
- regs_buff[0] = NXRD32(adapter, NETXEN_NIU_MODE);
- mode = regs_buff[0];
-
- /* Common registers to all the modes */
- regs_buff[2] = NXRD32(adapter, NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER);
- /* GB/XGB Mode */
- mode = (mode / 2) - 1;
- window = 0;
- if (mode <= 1) {
- for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) {
- /* GB: port specific registers */
- if (mode == 0 && i >= 19)
- window = adapter->physical_port *
- NETXEN_NIC_PORT_WINDOW;
-
- regs_buff[i] = NXRD32(adapter,
- niu_registers[mode].reg[i - 3] + window);
- }
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+ regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2);
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c);
+ i += 2;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3);
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+
+ } else {
+ i++;
+
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0+(0x10000*port));
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1+(0x10000*port));
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE);
+ regs_buff[i++] = NXRDIO(adapter,
+ adapter->tx_ring->crb_cmd_consumer);
+ }
+
+ regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = NXRDIO(adapter,
+ sds_ring->crb_sts_consumer);
}
}
static u32 netxen_nic_test_link(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- __u32 status;
- int val;
+ u32 val, port;
- /* read which mode */
- if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if (adapter->phy_read &&
- adapter->phy_read(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- &status) != 0)
- return -EIO;
- else {
- val = netxen_get_phy_link(status);
- return !val;
- }
- } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ port = adapter->physical_port;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+ } else {
val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> port*8) & 0xff;
return (val == XG_LINK_UP) ? 0 : 1;
}
- return -EIO;
}
static int
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 2e364fe..85e28e6 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -345,8 +345,7 @@ netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
void
netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
{
- int val;
- val = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+ NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
@@ -691,6 +690,9 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
struct list_head *head;
nx_mac_list_t *cur;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
list_splice_tail_init(&adapter->mac_list, &del_list);
nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02f8d4b..64cff68 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -184,6 +184,8 @@ skip_rds:
tx_ring = adapter->tx_ring;
vfree(tx_ring->cmd_buf_arr);
+ kfree(tx_ring);
+ adapter->tx_ring = NULL;
}
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
@@ -782,7 +784,7 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
return 1;
- old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
for (i = 0; i < 10; i++) {
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index f499684..24279e6 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -57,7 +57,9 @@ static int use_msi = 1;
static int use_msi_x = 1;
-static unsigned long auto_fw_reset = AUTO_FW_RESET_ENABLED;
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
static int __devinit netxen_nic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
@@ -338,7 +340,7 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
if (!(first_boot & 0x4)) {
first_boot |= 0x4;
NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
- first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ NXRD32(adapter, NETXEN_PCIE_REG(0x4));
}
/* This is the first boot after power up */
@@ -1896,12 +1898,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
linkup = (val == XG_LINK_UP_P3);
} else {
val = NXRD32(adapter, CRB_XG_STATE);
- if (adapter->ahw.port_type == NETXEN_NIC_GBE)
- linkup = (val >> port) & 1;
- else {
- val = (val >> port*8) & 0xff;
- linkup = (val == XG_LINK_UP);
- }
+ val = (val >> port*8) & 0xff;
+ linkup = (val == XG_LINK_UP);
}
netxen_advert_link_change(adapter, linkup);
@@ -1943,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
netif_wake_queue(adapter->netdev);
clear_bit(__NX_RESETTING, &adapter->state);
-
+ return;
} else {
clear_bit(__NX_RESETTING, &adapter->state);
if (!netxen_nic_reset_context(adapter)) {
@@ -2242,7 +2240,9 @@ netxen_detach_work(struct work_struct *work)
netxen_nic_down(adapter, netdev);
+ rtnl_lock();
netxen_nic_detach(adapter);
+ rtnl_unlock();
status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
@@ -2534,42 +2534,6 @@ static struct bin_attribute bin_attr_mem = {
.write = netxen_sysfs_write_mem,
};
-#ifdef CONFIG_MODULES
-static ssize_t
-netxen_store_auto_fw_reset(struct module_attribute *mattr,
- struct module *mod, const char *buf, size_t count)
-
-{
- unsigned long new;
-
- if (strict_strtoul(buf, 16, &new))
- return -EINVAL;
-
- if ((new == AUTO_FW_RESET_ENABLED) || (new == AUTO_FW_RESET_DISABLED)) {
- auto_fw_reset = new;
- return count;
- }
-
- return -EINVAL;
-}
-
-static ssize_t
-netxen_show_auto_fw_reset(struct module_attribute *mattr,
- struct module *mod, char *buf)
-
-{
- if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
- return sprintf(buf, "enabled\n");
- else
- return sprintf(buf, "disabled\n");
-}
-
-static struct module_attribute mod_attr_fw_reset = {
- .attr = {.name = "auto_fw_reset", .mode = (S_IRUGO | S_IWUSR)},
- .show = netxen_show_auto_fw_reset,
- .store = netxen_store_auto_fw_reset,
-};
-#endif
static void
netxen_create_sysfs_entries(struct netxen_adapter *adapter)
@@ -2775,23 +2739,12 @@ static struct pci_driver netxen_driver = {
static int __init netxen_init_module(void)
{
-#ifdef CONFIG_MODULES
- struct module *mod = THIS_MODULE;
-#endif
-
printk(KERN_INFO "%s\n", netxen_nic_driver_string);
#ifdef CONFIG_INET
register_netdevice_notifier(&netxen_netdev_cb);
register_inetaddr_notifier(&netxen_inetaddr_cb);
#endif
-
-#ifdef CONFIG_MODULES
- if (sysfs_create_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr))
- printk(KERN_ERR "%s: Failed to create auto_fw_reset "
- "sysfs entry.", netxen_nic_driver_name);
-#endif
-
return pci_register_driver(&netxen_driver);
}
@@ -2799,12 +2752,6 @@ module_init(netxen_init_module);
static void __exit netxen_exit_module(void)
{
-#ifdef CONFIG_MODULES
- struct module *mod = THIS_MODULE;
-
- sysfs_remove_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr);
-#endif
-
pci_unregister_driver(&netxen_driver);
#ifdef CONFIG_INET
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 8ce58c4..2aed2b3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -2844,7 +2844,7 @@ static int tcam_wait_bit(struct niu *np, u64 bit)
break;
udelay(1);
}
- if (limit < 0)
+ if (limit <= 0)
return -ENODEV;
return 0;
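
The niu fix above tightens the timeout test on a countdown poll loop: when the counter is pre-decremented in the loop condition it bottoms out at 0, never below, so a "< 0" check can never fire and the timeout was silently ignored. A minimal sketch of the pattern, assuming a hypothetical ready() predicate and a while (--limit > 0) countdown:

	int limit = 1000;

	while (--limit > 0) {
		if (ready(dev))
			break;
		udelay(1);
	}
	/* the loop exits with limit == 0 on timeout, so test <= 0 */
	if (limit <= 0)
		return -ENODEV;
	return 0;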
diff --git a/drivers/net/octeon/Kconfig b/drivers/net/octeon/Kconfig
new file mode 100644
index 0000000..1e56bbf
--- /dev/null
+++ b/drivers/net/octeon/Kconfig
@@ -0,0 +1,10 @@
+config OCTEON_MGMT_ETHERNET
+ tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
+ depends on CPU_CAVIUM_OCTEON
+ select PHYLIB
+ select MDIO_OCTEON
+ default y
+ help
+ This option enables the ethernet driver for the management
+ port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
+ CN54XX, CN52XX, and CN6XXX chips.
diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile
new file mode 100644
index 0000000..906edeca
--- /dev/null
+++ b/drivers/net/octeon/Makefile
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
new file mode 100644
index 0000000..050538b
--- /dev/null
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -0,0 +1,1176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Cavium Networks
+ */
+
+#include <linux/capability.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <linux/spinlock.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-mixx-defs.h>
+#include <asm/octeon/cvmx-agl-defs.h>
+
+#define DRV_NAME "octeon_mgmt"
+#define DRV_VERSION "2.0"
+#define DRV_DESCRIPTION \
+ "Cavium Networks Octeon MII (management) port Network Driver"
+
+#define OCTEON_MGMT_NAPI_WEIGHT 16
+
+/*
+ * Ring sizes that are powers of two allow for more efficient modulo
+ * operations.
+ */
+#define OCTEON_MGMT_RX_RING_SIZE 512
+#define OCTEON_MGMT_TX_RING_SIZE 128
+
+/* Allow 8 bytes for vlan and FCS. */
+#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+union mgmt_port_ring_entry {
+ u64 d64;
+ struct {
+ u64 reserved_62_63:2;
+ /* Length of the buffer/packet in bytes */
+ u64 len:14;
+ /* For TX, signals that the packet should be timestamped */
+ u64 tstamp:1;
+ /* The RX error code */
+ u64 code:7;
+#define RING_ENTRY_CODE_DONE 0xf
+#define RING_ENTRY_CODE_MORE 0x10
+ /* Physical address of the buffer */
+ u64 addr:40;
+ } s;
+};
+
+struct octeon_mgmt {
+ struct net_device *netdev;
+ int port;
+ int irq;
+ u64 *tx_ring;
+ dma_addr_t tx_ring_handle;
+ unsigned int tx_next;
+ unsigned int tx_next_clean;
+ unsigned int tx_current_fill;
+ /* The tx_list lock also protects the ring related variables */
+ struct sk_buff_head tx_list;
+
+ /* RX variables only touched in napi_poll. No locking necessary. */
+ u64 *rx_ring;
+ dma_addr_t rx_ring_handle;
+ unsigned int rx_next;
+ unsigned int rx_next_fill;
+ unsigned int rx_current_fill;
+ struct sk_buff_head rx_list;
+
+ spinlock_t lock;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ struct device *dev;
+ struct napi_struct napi;
+ struct tasklet_struct tx_clean_tasklet;
+ struct phy_device *phydev;
+};
+
+static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
+{
+ int port = p->port;
+ union cvmx_mixx_intena mix_intena;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+ mix_intena.s.ithena = enable ? 1 : 0;
+ cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
+{
+ int port = p->port;
+ union cvmx_mixx_intena mix_intena;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+ mix_intena.s.othena = enable ? 1 : 0;
+ cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_rx_irq(p, 1);
+}
+
+static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_rx_irq(p, 0);
+}
+
+static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_tx_irq(p, 1);
+}
+
+static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_tx_irq(p, 0);
+}
+
+static unsigned int ring_max_fill(unsigned int ring_size)
+{
+ return ring_size - 8;
+}
+
+static unsigned int ring_size_to_bytes(unsigned int ring_size)
+{
+ return ring_size * sizeof(union mgmt_port_ring_entry);
+}
+
+static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+
+ while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
+ unsigned int size;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+
+ /* CN56XX pass 1 needs 8 bytes of padding. */
+ size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
+
+ skb = netdev_alloc_skb(netdev, size);
+ if (!skb)
+ break;
+ skb_reserve(skb, NET_IP_ALIGN);
+ __skb_queue_tail(&p->rx_list, skb);
+
+ re.d64 = 0;
+ re.s.len = size;
+ re.s.addr = dma_map_single(p->dev, skb->data,
+ size,
+ DMA_FROM_DEVICE);
+
+ /* Put it in the ring. */
+ p->rx_ring[p->rx_next_fill] = re.d64;
+ dma_sync_single_for_device(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ p->rx_next_fill =
+ (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
+ p->rx_current_fill++;
+ /* Ring the bell. */
+ cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
+ }
+}
+
+static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
+{
+ int port = p->port;
+ union cvmx_mixx_orcnt mix_orcnt;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+ int cleaned = 0;
+ unsigned long flags;
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+ while (mix_orcnt.s.orcnt) {
+ dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ re.d64 = p->tx_ring[p->tx_next_clean];
+ p->tx_next_clean =
+ (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
+ skb = __skb_dequeue(&p->tx_list);
+
+ mix_orcnt.u64 = 0;
+ mix_orcnt.s.orcnt = 1;
+
+ /* Acknowledge to hardware that we have the buffer. */
+ cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
+ p->tx_current_fill--;
+
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+ dma_unmap_single(p->dev, re.s.addr, re.s.len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ cleaned++;
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+ }
+
+ if (cleaned && netif_queue_stopped(p->netdev))
+ netif_wake_queue(p->netdev);
+}
+
+static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
+{
+ struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
+ octeon_mgmt_clean_tx_buffers(p);
+ octeon_mgmt_enable_tx_irq(p);
+}
+
+static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ unsigned long flags;
+ u64 drop, bad;
+
+ /* These reads also clear the count registers. */
+ drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
+ bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));
+
+ if (drop || bad) {
+ /* Do an atomic update. */
+ spin_lock_irqsave(&p->lock, flags);
+ netdev->stats.rx_errors += bad;
+ netdev->stats.rx_dropped += drop;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ unsigned long flags;
+
+ union cvmx_agl_gmx_txx_stat0 s0;
+ union cvmx_agl_gmx_txx_stat1 s1;
+
+ /* These reads also clear the count registers. */
+ s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
+ s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));
+
+ if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
+ /* Do an atomic update. */
+ spin_lock_irqsave(&p->lock, flags);
+ netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
+ netdev->stats.collisions += s1.s.scol + s1.s.mcol;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+/*
+ * Dequeue a receive skb and its corresponding ring entry. The ring
+ * entry is returned, *pskb is updated to point to the skb.
+ */
+static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
+ struct sk_buff **pskb)
+{
+ union mgmt_port_ring_entry re;
+
+ dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ re.d64 = p->rx_ring[p->rx_next];
+ p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
+ p->rx_current_fill--;
+ *pskb = __skb_dequeue(&p->rx_list);
+
+ dma_unmap_single(p->dev, re.s.addr,
+ ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
+ DMA_FROM_DEVICE);
+
+ return re.d64;
+}
+
+
+static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
+{
+ int port = p->port;
+ struct net_device *netdev = p->netdev;
+ union cvmx_mixx_ircnt mix_ircnt;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+ struct sk_buff *skb2;
+ struct sk_buff *skb_new;
+ union mgmt_port_ring_entry re2;
+ int rc = 1;
+
+
+ re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
+ if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
+ /* A good packet, send it up. */
+ skb_put(skb, re.s.len);
+good:
+ skb->protocol = eth_type_trans(skb, netdev);
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+ netdev->last_rx = jiffies;
+ netif_receive_skb(skb);
+ rc = 0;
+ } else if (re.s.code == RING_ENTRY_CODE_MORE) {
+ /*
+ * Packet split across skbs. This can happen if we
+ * increase the MTU. Buffers that are already in the
+ * rx ring can then end up being too small. As the rx
+ * ring is refilled, buffers sized for the new MTU
+ * will be used and we should go back to the normal
+ * non-split case.
+ */
+ skb_put(skb, re.s.len);
+ do {
+ re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+ if (re2.s.code != RING_ENTRY_CODE_MORE
+ && re2.s.code != RING_ENTRY_CODE_DONE)
+ goto split_error;
+ skb_put(skb2, re2.s.len);
+ skb_new = skb_copy_expand(skb, 0, skb2->len,
+ GFP_ATOMIC);
+ if (!skb_new)
+ goto split_error;
+ if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
+ skb2->len))
+ goto split_error;
+ skb_put(skb_new, skb2->len);
+ dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb2);
+ skb = skb_new;
+ } while (re2.s.code == RING_ENTRY_CODE_MORE);
+ goto good;
+ } else {
+ /* Some other error, discard it. */
+ dev_kfree_skb_any(skb);
+ /*
+ * Error statistics are accumulated in
+ * octeon_mgmt_update_rx_stats.
+ */
+ }
+ goto done;
+split_error:
+ /* Discard the whole mess. */
+ dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb2);
+ while (re2.s.code == RING_ENTRY_CODE_MORE) {
+ re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+ dev_kfree_skb_any(skb2);
+ }
+ netdev->stats.rx_errors++;
+
+done:
+ /* Tell the hardware we processed a packet. */
+ mix_ircnt.u64 = 0;
+ mix_ircnt.s.ircnt = 1;
+ cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
+ return rc;
+
+}
+
+static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
+{
+ int port = p->port;
+ unsigned int work_done = 0;
+ union cvmx_mixx_ircnt mix_ircnt;
+ int rc;
+
+
+ mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+ while (work_done < budget && mix_ircnt.s.ircnt) {
+
+ rc = octeon_mgmt_receive_one(p);
+ if (!rc)
+ work_done++;
+
+ /* Check for more packets. */
+ mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+ }
+
+ octeon_mgmt_rx_fill_ring(p->netdev);
+
+ return work_done;
+}
+
+static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
+ struct net_device *netdev = p->netdev;
+ unsigned int work_done = 0;
+
+ work_done = octeon_mgmt_receive_packets(p, budget);
+
+ if (work_done < budget) {
+ /* We stopped because no more packets were available. */
+ napi_complete(napi);
+ octeon_mgmt_enable_rx_irq(p);
+ }
+ octeon_mgmt_update_rx_stats(netdev);
+
+ return work_done;
+}
+
+/* Reset the hardware to clean state. */
+static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
+{
+ union cvmx_mixx_ctl mix_ctl;
+ union cvmx_mixx_bist mix_bist;
+ union cvmx_agl_gmx_bist agl_gmx_bist;
+
+ mix_ctl.u64 = 0;
+ cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
+ do {
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+ } while (mix_ctl.s.busy);
+ mix_ctl.s.reset = 1;
+ cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
+ cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+ cvmx_wait(64);
+
+ mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
+ if (mix_bist.u64)
+ dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
+ (unsigned long long)mix_bist.u64);
+
+ agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
+ if (agl_gmx_bist.u64)
+ dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
+ (unsigned long long)agl_gmx_bist.u64);
+}
+
+struct octeon_mgmt_cam_state {
+ u64 cam[6];
+ u64 cam_mask;
+ int cam_index;
+};
+
+static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
+ unsigned char *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
+ cs->cam_mask |= (1ULL << cs->cam_index);
+ cs->cam_index++;
+}
+
+static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ int i;
+ union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
+ union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
+ unsigned long flags;
+ unsigned int prev_packet_enable;
+ unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
+ unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
+ struct octeon_mgmt_cam_state cam_state;
+ struct dev_addr_list *list;
+ struct list_head *pos;
+ int available_cam_entries;
+
+ memset(&cam_state, 0, sizeof(cam_state));
+
+ if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
+ cam_mode = 0;
+ available_cam_entries = 8;
+ } else {
+ /*
+ * One CAM entry for the primary address, leaves seven
+ * for the secondary addresses.
+ */
+ available_cam_entries = 7 - netdev->dev_addrs.count;
+ }
+
+ if (netdev->flags & IFF_MULTICAST) {
+ if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
+ || netdev->mc_count > available_cam_entries)
+ multicast_mode = 2; /* 2 - Accept all multicast. */
+ else
+ multicast_mode = 0; /* 0 - Use CAM. */
+ }
+
+ if (cam_mode == 1) {
+ /* Add primary address. */
+ octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
+ list_for_each(pos, &netdev->dev_addrs.list) {
+ struct netdev_hw_addr *hw_addr;
+ hw_addr = list_entry(pos, struct netdev_hw_addr, list);
+ octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
+ list = list->next;
+ }
+ }
+ if (multicast_mode == 0) {
+ i = netdev->mc_count;
+ list = netdev->mc_list;
+ while (i--) {
+ octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
+ list = list->next;
+ }
+ }
+
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ /* Disable packet I/O. */
+ agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prev_packet_enable = agl_gmx_prtx.s.en;
+ agl_gmx_prtx.s.en = 0;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+
+ adr_ctl.u64 = 0;
+ adr_ctl.s.cam_mode = cam_mode;
+ adr_ctl.s.mcst = multicast_mode;
+ adr_ctl.s.bcst = 1; /* Allow broadcast */
+
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);
+
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);
+
+ /* Restore packet I/O. */
+ agl_gmx_prtx.s.en = prev_packet_enable;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+
+ octeon_mgmt_set_rx_filtering(netdev);
+
+ return 0;
+}
+
+static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
+
+ /*
+ * Limit the MTU to make sure the ethernet packets are between
+ * 64 bytes and 16383 bytes.
+ */
+ if (size_without_fcs < 64 || size_without_fcs > 16383) {
+ dev_warn(p->dev, "MTU must be between %d and %d.\n",
+ 64 - OCTEON_MGMT_RX_HEADROOM,
+ 16383 - OCTEON_MGMT_RX_HEADROOM);
+ return -EINVAL;
+ }
+
+ netdev->mtu = new_mtu;
+
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
+ (size_without_fcs + 7) & 0xfff8);
+
+ return 0;
+}
+
+static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_mixx_isr mixx_isr;
+
+ mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
+
+ /* Clear any pending interrupts */
+ cvmx_write_csr(CVMX_MIXX_ISR(port),
+ cvmx_read_csr(CVMX_MIXX_ISR(port)));
+ cvmx_read_csr(CVMX_MIXX_ISR(port));
+
+ if (mixx_isr.s.irthresh) {
+ octeon_mgmt_disable_rx_irq(p);
+ napi_schedule(&p->napi);
+ }
+ if (mixx_isr.s.orthresh) {
+ octeon_mgmt_disable_tx_irq(p);
+ tasklet_schedule(&p->tx_clean_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int octeon_mgmt_ioctl(struct net_device *netdev,
+ struct ifreq *rq, int cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!p->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
+}
+
+static void octeon_mgmt_adjust_link(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+ unsigned long flags;
+ int link_changed = 0;
+
+ spin_lock_irqsave(&p->lock, flags);
+ if (p->phydev->link) {
+ if (!p->last_link)
+ link_changed = 1;
+ if (p->last_duplex != p->phydev->duplex) {
+ p->last_duplex = p->phydev->duplex;
+ prtx_cfg.u64 =
+ cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.s.duplex = p->phydev->duplex;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
+ prtx_cfg.u64);
+ }
+ } else {
+ if (p->last_link)
+ link_changed = -1;
+ }
+ p->last_link = p->phydev->link;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (link_changed != 0) {
+ if (link_changed > 0) {
+ netif_carrier_on(netdev);
+ pr_info("%s: Link is up - %d/%s\n", netdev->name,
+ p->phydev->speed,
+ DUPLEX_FULL == p->phydev->duplex ?
+ "Full" : "Half");
+ } else {
+ netif_carrier_off(netdev);
+ pr_info("%s: Link is down\n", netdev->name);
+ }
+ }
+}
+
+static int octeon_mgmt_init_phy(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ char phy_id[20];
+
+ if (octeon_is_simulation()) {
+ /* No PHYs in the simulator. */
+ netif_carrier_on(netdev);
+ return 0;
+ }
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);
+
+ p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(p->phydev)) {
+ p->phydev = NULL;
+ return -1;
+ }
+
+ phy_start_aneg(p->phydev);
+
+ return 0;
+}
+
+static int octeon_mgmt_open(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_mixx_ctl mix_ctl;
+ union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
+ union cvmx_mixx_oring1 oring1;
+ union cvmx_mixx_iring1 iring1;
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+ union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ union cvmx_mixx_irhwm mix_irhwm;
+ union cvmx_mixx_orhwm mix_orhwm;
+ union cvmx_mixx_intena mix_intena;
+ struct sockaddr sa;
+
+ /* Allocate ring buffers. */
+ p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ GFP_KERNEL);
+ if (!p->tx_ring)
+ return -ENOMEM;
+ p->tx_ring_handle =
+ dma_map_single(p->dev, p->tx_ring,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ p->tx_next = 0;
+ p->tx_next_clean = 0;
+ p->tx_current_fill = 0;
+
+
+ p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ GFP_KERNEL);
+ if (!p->rx_ring)
+ goto err_nomem;
+ p->rx_ring_handle =
+ dma_map_single(p->dev, p->rx_ring,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ p->rx_next = 0;
+ p->rx_next_fill = 0;
+ p->rx_current_fill = 0;
+
+ octeon_mgmt_reset_hw(p);
+
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+
+ /* Bring it out of reset if needed. */
+ if (mix_ctl.s.reset) {
+ mix_ctl.s.reset = 0;
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+ do {
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+ } while (mix_ctl.s.reset);
+ }
+
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+
+ oring1.u64 = 0;
+ oring1.s.obase = p->tx_ring_handle >> 3;
+ oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
+ cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
+
+ iring1.u64 = 0;
+ iring1.s.ibase = p->rx_ring_handle >> 3;
+ iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
+ cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
+
+ /* Disable packet I/O. */
+ prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+
+ memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
+ octeon_mgmt_set_mac_address(netdev, &sa);
+
+ octeon_mgmt_change_mtu(netdev, netdev->mtu);
+
+ /*
+ * Enable the port HW. Packets are not allowed until
+ * cvmx_mgmt_port_enable() is called.
+ */
+ mix_ctl.u64 = 0;
+ mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
+ mix_ctl.s.en = 1; /* Enable the port */
+ mix_ctl.s.nbtarb = 0; /* Arbitration mode */
+ /* MII CB-request FIFO programmable high watermark */
+ mix_ctl.s.mrq_hwm = 1;
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ /*
+ * Force compensation values, as they are not
+ * determined properly by HW
+ */
+ union cvmx_agl_gmx_drv_ctl drv_ctl;
+
+ drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+ if (port) {
+ drv_ctl.s.byp_en1 = 1;
+ drv_ctl.s.nctl1 = 6;
+ drv_ctl.s.pctl1 = 6;
+ } else {
+ drv_ctl.s.byp_en = 1;
+ drv_ctl.s.nctl = 6;
+ drv_ctl.s.pctl = 6;
+ }
+ cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+ }
+
+ octeon_mgmt_rx_fill_ring(netdev);
+
+ /* Clear statistics. */
+ /* Clear on read. */
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);
+
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);
+
+ /* Clear any pending interrupts */
+ cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));
+
+ if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
+ netdev)) {
+ dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
+ goto err_noirq;
+ }
+
+ /* Interrupt every single RX packet */
+ mix_irhwm.u64 = 0;
+ mix_irhwm.s.irhwm = 0;
+ cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
+
+ /* Interrupt when we have 5 or more packets to clean. */
+ mix_orhwm.u64 = 0;
+ mix_orhwm.s.orhwm = 5;
+ cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
+
+ /* Enable receive and transmit interrupts */
+ mix_intena.u64 = 0;
+ mix_intena.s.ithena = 1;
+ mix_intena.s.othena = 1;
+ cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+
+
+ /* Enable packet I/O. */
+
+ rxx_frm_ctl.u64 = 0;
+ rxx_frm_ctl.s.pre_align = 1;
+ /*
+ * When set, disables the length check for non-min sized pkts
+ * with padding in the client data.
+ */
+ rxx_frm_ctl.s.pad_len = 1;
+ /* When set, disables the length check for VLAN pkts */
+ rxx_frm_ctl.s.vlan_len = 1;
+ /* When set, PREAMBLE checking is less strict */
+ rxx_frm_ctl.s.pre_free = 1;
+ /* Control Pause Frames can match station SMAC */
+ rxx_frm_ctl.s.ctl_smac = 0;
+ /* Control Pause Frames can match globally assign Multicast address */
+ rxx_frm_ctl.s.ctl_mcst = 1;
+ /* Forward pause information to TX block */
+ rxx_frm_ctl.s.ctl_bck = 1;
+ /* Drop Control Pause Frames */
+ rxx_frm_ctl.s.ctl_drp = 1;
+ /* Strip off the preamble */
+ rxx_frm_ctl.s.pre_strp = 1;
+ /*
+ * This port is configured to send PREAMBLE+SFD to begin every
+ * frame. GMX checks that the PREAMBLE is sent correctly.
+ */
+ rxx_frm_ctl.s.pre_chk = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
+
+ /* Enable the AGL block */
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+
+ /* Configure the port duplex and enables */
+ prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.s.tx_en = 1;
+ prtx_cfg.s.rx_en = 1;
+ prtx_cfg.s.en = 1;
+ p->last_duplex = 1;
+ prtx_cfg.s.duplex = p->last_duplex;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+
+ p->last_link = 0;
+ netif_carrier_off(netdev);
+
+ if (octeon_mgmt_init_phy(netdev)) {
+ dev_err(p->dev, "Cannot initialize PHY.\n");
+ goto err_noirq;
+ }
+
+ netif_wake_queue(netdev);
+ napi_enable(&p->napi);
+
+ return 0;
+err_noirq:
+ octeon_mgmt_reset_hw(p);
+ dma_unmap_single(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->rx_ring);
+err_nomem:
+ dma_unmap_single(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->tx_ring);
+ return -ENOMEM;
+}
+
+static int octeon_mgmt_stop(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ napi_disable(&p->napi);
+ netif_stop_queue(netdev);
+
+ if (p->phydev)
+ phy_disconnect(p->phydev);
+
+ netif_carrier_off(netdev);
+
+ octeon_mgmt_reset_hw(p);
+
+
+ free_irq(p->irq, netdev);
+
+ /* dma_unmap is a nop on Octeon, so just free everything. */
+ skb_queue_purge(&p->tx_list);
+ skb_queue_purge(&p->rx_list);
+
+ dma_unmap_single(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->rx_ring);
+
+ dma_unmap_single(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->tx_ring);
+
+
+ return 0;
+}
+
+static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union mgmt_port_ring_entry re;
+ unsigned long flags;
+
+ re.d64 = 0;
+ re.s.len = skb->len;
+ re.s.addr = dma_map_single(p->dev, skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ if (unlikely(p->tx_current_fill >=
+ ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+ dma_unmap_single(p->dev, re.s.addr, re.s.len,
+ DMA_TO_DEVICE);
+
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ __skb_queue_tail(&p->tx_list, skb);
+
+ /* Put it in the ring. */
+ p->tx_ring[p->tx_next] = re.d64;
+ p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
+ p->tx_current_fill++;
+
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+ dma_sync_single_for_device(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ /* Ring the bell. */
+ cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
+
+ netdev->trans_start = jiffies;
+ octeon_mgmt_clean_tx_buffers(p);
+ octeon_mgmt_update_tx_stats(netdev);
+ return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void octeon_mgmt_poll_controller(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ octeon_mgmt_receive_packets(p, 16);
+ octeon_mgmt_update_rx_stats(netdev);
+ return;
+}
+#endif
+
+static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strncpy(info->version, DRV_VERSION, sizeof(info->version));
+ strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ info->n_stats = 0;
+ info->testinfo_len = 0;
+ info->regdump_len = 0;
+ info->eedump_len = 0;
+}
+
+static int octeon_mgmt_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ if (p->phydev)
+ return phy_ethtool_gset(p->phydev, cmd);
+
+ return -EINVAL;
+}
+
+static int octeon_mgmt_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (p->phydev)
+ return phy_ethtool_sset(p->phydev, cmd);
+
+ return -EINVAL;
+}
+
+static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
+ .get_drvinfo = octeon_mgmt_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_settings = octeon_mgmt_get_settings,
+ .set_settings = octeon_mgmt_set_settings
+};
+
+static const struct net_device_ops octeon_mgmt_ops = {
+ .ndo_open = octeon_mgmt_open,
+ .ndo_stop = octeon_mgmt_stop,
+ .ndo_start_xmit = octeon_mgmt_xmit,
+ .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
+ .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
+ .ndo_set_mac_address = octeon_mgmt_set_mac_address,
+ .ndo_do_ioctl = octeon_mgmt_ioctl,
+ .ndo_change_mtu = octeon_mgmt_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = octeon_mgmt_poll_controller,
+#endif
+};
+
+static int __init octeon_mgmt_probe(struct platform_device *pdev)
+{
+ struct resource *res_irq;
+ struct net_device *netdev;
+ struct octeon_mgmt *p;
+ int i;
+
+ netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
+ if (netdev == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, netdev);
+ p = netdev_priv(netdev);
+ netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
+
+ p->netdev = netdev;
+ p->dev = &pdev->dev;
+
+ p->port = pdev->id;
+ snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ goto err;
+
+ p->irq = res_irq->start;
+ spin_lock_init(&p->lock);
+
+ skb_queue_head_init(&p->tx_list);
+ skb_queue_head_init(&p->rx_list);
+ tasklet_init(&p->tx_clean_tasklet,
+ octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
+
+ netdev->netdev_ops = &octeon_mgmt_ops;
+ netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
+
+
+ /* The mgmt ports get the first N MACs. */
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
+ netdev->dev_addr[5] += p->port;
+
+ if (p->port >= octeon_bootinfo->mac_addr_count)
+ dev_err(&pdev->dev,
+ "Error %s: Using MAC outside of the assigned range: "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
+ netdev->dev_addr[0], netdev->dev_addr[1],
+ netdev->dev_addr[2], netdev->dev_addr[3],
+ netdev->dev_addr[4], netdev->dev_addr[5]);
+
+ if (register_netdev(netdev))
+ goto err;
+
+ dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
+ return 0;
+err:
+ free_netdev(netdev);
+ return -ENOENT;
+}
+
+static int __exit octeon_mgmt_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ return 0;
+}
+
+static struct platform_driver octeon_mgmt_driver = {
+ .driver = {
+ .name = "octeon_mgmt",
+ .owner = THIS_MODULE,
+ },
+ .probe = octeon_mgmt_probe,
+ .remove = __exit_p(octeon_mgmt_remove),
+};
+
+extern void octeon_mdiobus_force_mod_depencency(void);
+
+static int __init octeon_mgmt_mod_init(void)
+{
+ /* Force our mdiobus driver module to be loaded first. */
+ octeon_mdiobus_force_mod_depencency();
+ return platform_driver_register(&octeon_mgmt_driver);
+}
+
+static void __exit octeon_mgmt_mod_exit(void)
+{
+ platform_driver_unregister(&octeon_mgmt_driver);
+}
+
+module_init(octeon_mgmt_mod_init);
+module_exit(octeon_mgmt_mod_exit);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
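
The new octeon_mgmt driver advances its ring indexes with a plain "% RING_SIZE" and, as the comment near the top of the file notes, relies on the sizes (512 and 128) being powers of two so the modulo costs no more than a mask. A small stand-alone C illustration of the equivalence, with a hypothetical ring size:

	#include <assert.h>

	#define RING_SIZE 512u			/* must be a power of two */

	static unsigned int ring_next(unsigned int idx)
	{
		/* for power-of-two sizes the two forms are identical;
		 * compilers emit the AND either way */
		assert(((idx + 1) % RING_SIZE) == ((idx + 1) & (RING_SIZE - 1)));
		return (idx + 1) & (RING_SIZE - 1);
	}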
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 17a2722..98938ea 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -912,7 +912,11 @@ static void media_check(unsigned long arg)
if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
if (!lp->fast_poll)
printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+
+ local_irq_save(flags);
el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
lp->fast_poll = HZ;
}
if (lp->fast_poll) {
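
Both PCMCIA media_check() fixes in this series (here and in 3c589_cs below) bracket the direct call into the card's interrupt handler with local_irq_save()/local_irq_restore(): media_check() runs from a timer with interrupts enabled, and invoking the handler there could otherwise race with, or be re-entered by, the real IRQ. A minimal sketch of the pattern, with a hypothetical handler name:

	static void media_check(unsigned long arg)	/* timer callback */
	{
		struct net_device *dev = (struct net_device *)arg;
		unsigned long flags;

		/* poll the card by calling its handler directly, but with
		 * local interrupts off so the real IRQ cannot re-enter it
		 * on this CPU */
		local_irq_save(flags);
		foo_interrupt(dev->irq, dev);
		local_irq_restore(flags);
	}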
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 6f8d7e2..322e11d 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -711,7 +711,11 @@ static void media_check(unsigned long arg)
(inb(ioaddr + EL3_TIMER) == 0xff)) {
if (!lp->fast_poll)
printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name);
+
+ local_irq_save(flags);
el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
lp->fast_poll = HZ;
}
if (lp->fast_poll) {
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 813aca3..7b17404 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -717,6 +717,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
PCMCIA_DEVICE_NULL,
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 8a5ae3b..12e3233 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1402,7 +1402,6 @@ static void BuildLAF(int *ladrf, int *adr)
for (i = 0; i < 8; i++)
printk(KERN_CONT " %02X", ladrf[i]);
printk(KERN_CONT "\n");
- }
#endif
} /* BuildLAF */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 92ed3fb..776cad2 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1741,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
- PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
@@ -1754,7 +1754,7 @@ MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
MODULE_FIRMWARE("cis/PCMLM28.cis");
MODULE_FIRMWARE("cis/DP83903.cis");
MODULE_FIRMWARE("cis/LA-PCM.cis");
-MODULE_FIRMWARE("PE520.cis");
+MODULE_FIRMWARE("cis/PE520.cis");
MODULE_FIRMWARE("cis/NE2K.cis");
MODULE_FIRMWARE("cis/PE-200.cis");
MODULE_FIRMWARE("cis/tamarack.cis");
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index dcc67a3..e154677 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -45,6 +45,7 @@ static const char *const version =
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
@@ -1765,7 +1766,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
if (!is_valid_ether_addr(dev->perm_addr))
- memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+ memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d5d8e1c..fc5938ba 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -115,4 +115,15 @@ config MDIO_GPIO
To compile this driver as a module, choose M here: the module
will be called mdio-gpio.
+config MDIO_OCTEON
+ tristate "Support for MDIO buses on Octeon SOCs"
+ depends on CPU_CAVIUM_OCTEON
+ default y
+ help
+
+ This module provides a driver for the Octeon MDIO busses.
+ It is required by the Octeon Ethernet device drivers.
+
+ If in doubt, say Y.
+
endif # PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index edfaac4..1342585 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
+obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f63c96a..33c4b12 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -326,12 +326,13 @@ error:
static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
{
- u32 val, orig;
+ u32 orig;
+ int val;
bool clk125en = true;
/* Abort if we are using an untested phy. */
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 ||
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 ||
+ if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
return;
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
new file mode 100644
index 0000000..61a4461
--- /dev/null
+++ b/drivers/net/phy/mdio-octeon.c
@@ -0,0 +1,180 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Cavium Networks
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-smix-defs.h>
+
+#define DRV_VERSION "1.0"
+#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+
+struct octeon_mdiobus {
+ struct mii_bus *mii_bus;
+ int unit;
+ int phy_irq[PHY_MAX_ADDR];
+};
+
+static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct octeon_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_rd_dat smi_rd;
+ int timeout = 1000;
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
+
+ do {
+ /*
+ * Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ cvmx_wait(1000);
+ smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(p->unit));
+ } while (smi_rd.s.pending && --timeout);
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ return -EIO;
+}
+
+static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
+ int regnum, u16 val)
+{
+ struct octeon_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(p->unit), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
+
+ do {
+ /*
+ * Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ cvmx_wait(1000);
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(p->unit));
+ } while (smi_wr.s.pending && --timeout);
+
+ if (timeout <= 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int __init octeon_mdiobus_probe(struct platform_device *pdev)
+{
+ struct octeon_mdiobus *bus;
+ int i;
+ int err = -ENOENT;
+
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ /* The platform_device id is our unit number. */
+ bus->unit = pdev->id;
+
+ bus->mii_bus = mdiobus_alloc();
+
+ if (!bus->mii_bus)
+ goto err;
+
+ /*
+ * Standard Octeon evaluation boards don't support phy
+ * interrupts, so we need to poll.
+ */
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bus->phy_irq[i] = PHY_POLL;
+
+ bus->mii_bus->priv = bus;
+ bus->mii_bus->irq = bus->phy_irq;
+ bus->mii_bus->name = "mdio-octeon";
+ snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit);
+ bus->mii_bus->parent = &pdev->dev;
+
+ bus->mii_bus->read = octeon_mdiobus_read;
+ bus->mii_bus->write = octeon_mdiobus_write;
+
+ dev_set_drvdata(&pdev->dev, bus);
+
+ err = mdiobus_register(bus->mii_bus);
+ if (err)
+ goto err_register;
+
+ dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
+
+ return 0;
+err_register:
+ mdiobus_free(bus->mii_bus);
+
+err:
+ devm_kfree(&pdev->dev, bus);
+ return err;
+}
+
+static int __exit octeon_mdiobus_remove(struct platform_device *pdev)
+{
+ struct octeon_mdiobus *bus;
+
+ bus = dev_get_drvdata(&pdev->dev);
+
+ mdiobus_unregister(bus->mii_bus);
+ mdiobus_free(bus->mii_bus);
+ return 0;
+}
+
+static struct platform_driver octeon_mdiobus_driver = {
+ .driver = {
+ .name = "mdio-octeon",
+ .owner = THIS_MODULE,
+ },
+ .probe = octeon_mdiobus_probe,
+ .remove = __exit_p(octeon_mdiobus_remove),
+};
+
+void octeon_mdiobus_force_mod_depencency(void)
+{
+ /* Let ethernet drivers force us to be loaded. */
+}
+EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);
+
+static int __init octeon_mdiobus_mod_init(void)
+{
+ return platform_driver_register(&octeon_mdiobus_driver);
+}
+
+static void __exit octeon_mdiobus_mod_exit(void)
+{
+ platform_driver_unregister(&octeon_mdiobus_driver);
+}
+
+module_init(octeon_mdiobus_mod_init);
+module_exit(octeon_mdiobus_mod_exit);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index bd4e8d7..e17b702 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -264,6 +264,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
(phydev->phy_id & phydrv->phy_id_mask));
}
+#ifdef CONFIG_PM
+
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->dev.driver;
@@ -295,34 +297,88 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
return true;
}
-/* Suspend and resume. Copied from platform_suspend and
- * platform_resume
- */
-static int mdio_bus_suspend(struct device * dev, pm_message_t state)
+static int mdio_bus_suspend(struct device *dev)
{
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
+ /*
+ * We must stop the state machine manually, otherwise it stops out of
+ * control, possibly with the phydev->lock held. Upon resume, netdev
+ * may call phy routines that try to grab the same lock, and that may
+ * lead to a deadlock.
+ */
+ if (phydev->attached_dev)
+ phy_stop_machine(phydev);
+
if (!mdio_bus_phy_may_suspend(phydev))
return 0;
+
return phydrv->suspend(phydev);
}
-static int mdio_bus_resume(struct device * dev)
+static int mdio_bus_resume(struct device *dev)
{
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
+ int ret;
if (!mdio_bus_phy_may_suspend(phydev))
+ goto no_resume;
+
+ ret = phydrv->resume(phydev);
+ if (ret < 0)
+ return ret;
+
+no_resume:
+ if (phydev->attached_dev)
+ phy_start_machine(phydev, NULL);
+
+ return 0;
+}
+
+static int mdio_bus_restore(struct device *dev)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+ struct net_device *netdev = phydev->attached_dev;
+ int ret;
+
+ if (!netdev)
return 0;
- return phydrv->resume(phydev);
+
+ ret = phy_init_hw(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* The PHY needs to renegotiate. */
+ phydev->link = 0;
+ phydev->state = PHY_UP;
+
+ phy_start_machine(phydev, NULL);
+
+ return 0;
}
+static struct dev_pm_ops mdio_bus_pm_ops = {
+ .suspend = mdio_bus_suspend,
+ .resume = mdio_bus_resume,
+ .freeze = mdio_bus_suspend,
+ .thaw = mdio_bus_resume,
+ .restore = mdio_bus_restore,
+};
+
+#define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops)
+
+#else
+
+#define MDIO_BUS_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.match = mdio_bus_match,
- .suspend = mdio_bus_suspend,
- .resume = mdio_bus_resume,
+ .pm = MDIO_BUS_PM_OPS,
};
EXPORT_SYMBOL(mdio_bus_type);
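
The mdio_bus.c hunk above replaces the legacy bus_type suspend/resume methods with a dev_pm_ops table so hibernation's freeze/thaw/restore phases can be handled separately from ordinary suspend/resume (restore must reprogram the PHY because its register state is lost across hibernation). The following is a minimal sketch of that pattern for a hypothetical "example" bus, assuming a kernel of roughly this vintage (2.6.33); it is illustrative only and not part of the patch.

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM

static int example_bus_suspend(struct device *dev)
{
	/* Quiesce the device; used for both system suspend and freeze. */
	return 0;
}

static int example_bus_resume(struct device *dev)
{
	/* Undo suspend; used for both system resume and thaw. */
	return 0;
}

static int example_bus_restore(struct device *dev)
{
	/* After hibernation the hardware has lost state, so reprogram it
	 * (the mdio_bus version calls phy_init_hw() and restarts the PHY
	 * state machine for exactly this reason). */
	return 0;
}

static struct dev_pm_ops example_bus_pm_ops = {
	.suspend = example_bus_suspend,
	.resume  = example_bus_resume,
	.freeze  = example_bus_suspend,
	.thaw    = example_bus_resume,
	.restore = example_bus_restore,
};

#define EXAMPLE_BUS_PM_OPS (&example_bus_pm_ops)

#else

#define EXAMPLE_BUS_PM_OPS NULL

#endif /* CONFIG_PM */

struct bus_type example_bus_type = {
	.name = "example_bus",
	.pm   = EXAMPLE_BUS_PM_OPS,
};
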
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b0e9f9c..0295097 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -410,7 +410,6 @@ EXPORT_SYMBOL(phy_start_aneg);
static void phy_change(struct work_struct *work);
-static void phy_state_machine(struct work_struct *work);
/**
* phy_start_machine - start PHY state machine tracking
@@ -430,7 +429,6 @@ void phy_start_machine(struct phy_device *phydev,
{
phydev->adjust_state = handler;
- INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
schedule_delayed_work(&phydev->state_queue, HZ);
}
@@ -761,7 +759,7 @@ EXPORT_SYMBOL(phy_start);
* phy_state_machine - Handle the state machine
* @work: work_struct that describes the work to be done
*/
-static void phy_state_machine(struct work_struct *work)
+void phy_state_machine(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b10fedd..adbc0fd 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -177,6 +177,7 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
dev->state = PHY_DOWN;
mutex_init(&dev->lock);
+ INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
return dev;
}
@@ -378,6 +379,20 @@ void phy_disconnect(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_disconnect);
+int phy_init_hw(struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->drv || !phydev->drv->config_init)
+ return 0;
+
+ ret = phy_scan_fixups(phydev);
+ if (ret < 0)
+ return ret;
+
+ return phydev->drv->config_init(phydev);
+}
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -425,21 +440,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
- if (phydev->drv->config_init) {
- int err;
-
- err = phy_scan_fixups(phydev);
-
- if (err < 0)
- return err;
-
- err = phydev->drv->config_init(phydev);
-
- if (err < 0)
- return err;
- }
-
- return 0;
+ return phy_init_hw(phydev);
}
EXPORT_SYMBOL(phy_attach_direct);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 707b391..894a7c8 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -4119,7 +4119,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
err = pcie_set_readrq(pdev, 4096);
if (err) {
dev_err(&pdev->dev, "Set readrq failed.\n");
- goto err_out;
+ goto err_out1;
}
err = pci_request_regions(pdev, DRV_NAME);
@@ -4140,7 +4140,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration.\n");
- goto err_out;
+ goto err_out2;
}
/* Set PCIe reset type for EEH to fundamental. */
@@ -4152,7 +4152,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
err = -ENOMEM;
- goto err_out;
+ goto err_out2;
}
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
@@ -4162,14 +4162,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
err = -ENOMEM;
- goto err_out;
+ goto err_out2;
}
err = ql_get_board_info(qdev);
if (err) {
dev_err(&pdev->dev, "Register access failed.\n");
err = -EIO;
- goto err_out;
+ goto err_out2;
}
qdev->msg_enable = netif_msg_init(debug, default_msg);
spin_lock_init(&qdev->hw_lock);
@@ -4179,7 +4179,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
err = qdev->nic_ops->get_flash(qdev);
if (err) {
dev_err(&pdev->dev, "Invalid FLASH.\n");
- goto err_out;
+ goto err_out2;
}
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
@@ -4212,8 +4212,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
DRV_NAME, DRV_VERSION);
}
return 0;
-err_out:
+err_out2:
ql_release_all(pdev);
+err_out1:
pci_disable_device(pdev);
return err;
}
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 20a7174..1c25709 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1293,7 +1293,7 @@ static void rr_dump(struct net_device *dev)
printk("Error code 0x%x\n", readl(&regs->Fail1));
- index = (((readl(&regs->EvtPrd) >> 8) & 0xff ) - 1) % EVT_RING_ENTRIES;
+ index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
cons = rrpriv->dirty_tx;
printk("TX ring index %i, TX consumer %i\n",
index, cons);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index cc42186..3c4836d 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3421,7 +3421,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
break;
}
} else {
- if (!(val64 & busy_bit)) {
+ if (val64 & busy_bit) {
ret = SUCCESS;
break;
}
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index f983e3b..46997e1 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -741,14 +741,14 @@ static int efx_probe_port(struct efx_nic *efx)
EFX_LOG(efx, "create port\n");
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
/* Connect up MAC/PHY operations table */
rc = efx->type->probe_port(efx);
if (rc)
goto err;
- if (phy_flash_cfg)
- efx->phy_mode = PHY_MODE_SPECIAL;
-
/* Sanity check MAC address */
if (is_valid_ether_addr(efx->mac_address)) {
memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
fail2:
efx_fini_struct(efx);
fail1:
+ WARN_ON(rc > 0);
EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
return rc;
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 17afcd2..9d009c4 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -925,6 +925,7 @@ static int falcon_probe_port(struct efx_nic *efx)
static void falcon_remove_port(struct efx_nic *efx)
{
+ efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index bf0b96a..5712fdd 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -29,6 +29,15 @@
#define FALCON_BOARD_SFN4111T 0x51
#define FALCON_BOARD_SFN4112F 0x52
+/* Board temperature is about 15°C above ambient when air flow is
+ * limited. */
+#define FALCON_BOARD_TEMP_BIAS 15
+
+/* SFC4000 datasheet says: 'The maximum permitted junction temperature
+ * is 125°C; the thermal design of the environment for the SFC4000
+ * should aim to keep this well below 100°C.' */
+#define FALCON_JUNC_TEMP_MAX 90
+
/*****************************************************************************
* Support for LM87 sensor chip used on several boards
*/
@@ -548,16 +557,16 @@ fail_hwmon:
static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfe4002_lm87_regs[] = {
- LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
- LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
- LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
- LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
- LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
- LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
- LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
- LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
- LM87_TEMP_INT_LIMITS(10, 60), /* board */
- LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
@@ -619,14 +628,14 @@ static int sfe4002_init(struct efx_nic *efx)
static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfn4112f_lm87_regs[] = {
- LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
- LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
- LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
- LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
- LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
- LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
- LM87_TEMP_INT_LIMITS(10, 60), /* board */
- LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 3da933f..8ccab2c 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -111,16 +111,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
}
-/* Get status of XAUI link */
-static bool falcon_xaui_link_ok(struct efx_nic *efx)
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
efx_oword_t reg;
bool align_done, link_ok = false;
int sync_status;
- if (LOOPBACK_INTERNAL(efx))
- return true;
-
/* Read link status */
efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
@@ -135,14 +131,24 @@ static bool falcon_xaui_link_ok(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
- /* If the link is up, then check the phy side of the xaui link */
- if (efx->link_state.up && link_ok)
- if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
- link_ok = efx_mdio_phyxgxs_lane_sync(efx);
-
return link_ok;
}
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
unsigned int max_frame_len;
@@ -245,9 +251,9 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
{
- bool mac_up = falcon_xaui_link_ok(efx);
+ bool mac_up = falcon_xmac_link_ok(efx);
if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
efx_phy_mode_disabled(efx->phy_mode))
@@ -261,7 +267,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
falcon_reset_xaui(efx);
udelay(200);
- mac_up = falcon_xaui_link_ok(efx);
+ mac_up = falcon_xmac_link_ok(efx);
--tries;
}
@@ -272,7 +278,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
static bool falcon_xmac_check_fault(struct efx_nic *efx)
{
- return !falcon_check_xaui_link_up(efx, 5);
+ return !falcon_xmac_link_ok_retry(efx, 5);
}
static int falcon_reconfigure_xmac(struct efx_nic *efx)
@@ -284,7 +290,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
falcon_reconfigure_mac_wrapper(efx);
- efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5);
+ efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
falcon_mask_status_intr(efx, true);
return 0;
@@ -357,7 +363,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
return;
falcon_mask_status_intr(efx, false);
- efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1);
+ efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
falcon_mask_status_intr(efx, true);
}
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 683353b..f66b3da 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -127,7 +127,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
efx_dword_t reg;
/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
- rc = efx_mcdi_poll_reboot(efx);
+ rc = -efx_mcdi_poll_reboot(efx);
if (rc)
goto out;
@@ -142,8 +142,9 @@ static int efx_mcdi_poll(struct efx_nic *efx)
if (spins != 0) {
--spins;
udelay(1);
- } else
- schedule();
+ } else {
+ schedule_timeout_uninterruptible(1);
+ }
time = get_seconds();
@@ -803,7 +804,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
+ u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
size_t outlen;
int rc;
@@ -827,7 +828,7 @@ fail:
int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer, size_t length)
{
- u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
+ u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
@@ -837,7 +838,8 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
NULL, 0, NULL);
if (rc)
goto fail;
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index de91672..10ce98f 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -111,6 +111,7 @@ extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
loff_t offset, const u8 *buffer,
size_t length);
+#define EFX_MCDI_NVRAM_LEN_MAX 128
extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 2a85360..73e71f4 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1090,8 +1090,10 @@
#define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
#define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
#define MC_CMD_MAC_RX_MATCH_FAULT 59
+#define MC_CMD_GMAC_DMABUF_START 64
+#define MC_CMD_GMAC_DMABUF_END 95
/* Insert new members here. */
-#define MC_CMD_MAC_GENERATION_END 60
+#define MC_CMD_MAC_GENERATION_END 96
#define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
/* MC_CMD_MAC_STATS:
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e1bcc5..eb694af 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -304,31 +304,47 @@ static u32 mcdi_to_ethtool_media(u32 media)
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg;
+ struct efx_mcdi_phy_cfg *phy_data;
+ u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ u32 caps;
int rc;
- /* TODO: Move phy_data initialisation to
- * phy_op->probe/remove, rather than init/fini */
- phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL);
- if (phy_cfg == NULL) {
- rc = -ENOMEM;
- goto fail_alloc;
- }
- rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
+ /* Initialise and populate phy_data */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (phy_data == NULL)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, phy_data);
if (rc != 0)
goto fail;
- efx->phy_type = phy_cfg->type;
+ /* Read initial link advertisement */
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ goto fail;
+
+ /* Fill out nic state */
+ efx->phy_data = phy_data;
+ efx->phy_type = phy_data->type;
- efx->mdio_bus = phy_cfg->channel;
- efx->mdio.prtad = phy_cfg->port;
- efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+ efx->mdio_bus = phy_data->channel;
+ efx->mdio.prtad = phy_data->port;
+ efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
efx->mdio.mode_support = 0;
- if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+ if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+ if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+ if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->link_advertising =
+ mcdi_to_ethtool_cap(phy_data->media, caps);
+ else
+ phy_data->forced_cap = caps;
+
/* Assert that we can map efx -> mcdi loopback modes */
BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
@@ -365,46 +381,6 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
* but by convention we don't */
efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
- kfree(phy_cfg);
-
- return 0;
-
-fail:
- kfree(phy_cfg);
-fail_alloc:
- return rc;
-}
-
-static int efx_mcdi_phy_init(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_cfg *phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
- u32 caps;
- int rc;
-
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (phy_data == NULL)
- return -ENOMEM;
-
- rc = efx_mcdi_get_phy_cfg(efx, phy_data);
- if (rc != 0)
- goto fail;
-
- efx->phy_data = phy_data;
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- goto fail;
-
- caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
- if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->link_advertising =
- mcdi_to_ethtool_cap(phy_data->media, caps);
- else
- phy_data->forced_cap = caps;
-
return 0;
fail:
@@ -504,7 +480,7 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
-static void efx_mcdi_phy_fini(struct efx_nic *efx)
+static void efx_mcdi_phy_remove(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -586,10 +562,11 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
- .init = efx_mcdi_phy_init,
+ .init = efx_port_dummy_op_int,
.reconfigure = efx_mcdi_phy_reconfigure,
.poll = efx_mcdi_phy_poll,
- .fini = efx_mcdi_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = efx_mcdi_phy_remove,
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
.run_tests = NULL,
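
The mcdi_phy.c change above completes the TODO that the patch removes: PHY data allocation and one-time discovery move from .init/.fini into .probe/.remove, so the allocation's lifetime matches the port's rather than each up/down cycle. A schematic sketch of that ownership split is shown below; all names are hypothetical and the block is not part of the patch.

#include <linux/slab.h>

struct example_phy_data {
	int mode;		/* whatever per-PHY state the driver keeps */
};

struct example_port {
	struct example_phy_data *phy_data;	/* owned from probe to remove */
};

static int example_phy_probe(struct example_port *port)
{
	/* One-time allocation and discovery; runs once per port. */
	port->phy_data = kzalloc(sizeof(*port->phy_data), GFP_KERNEL);
	if (!port->phy_data)
		return -ENOMEM;
	return 0;
}

static int example_phy_init(struct example_port *port)
{
	/* Hardware-only setup; may run many times (ifup, resets). */
	return 0;
}

static void example_phy_fini(struct example_port *port)
{
	/* Hardware-only teardown; must not free probe-time state. */
}

static void example_phy_remove(struct example_port *port)
{
	kfree(port->phy_data);
	port->phy_data = NULL;
}
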
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 3a46452..407bbad 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -23,7 +23,6 @@
#include "mcdi_pcol.h"
#define EFX_SPI_VERIFY_BUF_LEN 16
-#define EFX_MCDI_CHUNK_LEN 128
struct efx_mtd_partition {
struct mtd_info mtd;
@@ -428,7 +427,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
int rc = 0;
while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
@@ -491,7 +490,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
}
while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
buffer, chunk);
if (rc)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 34c381f..d5aab5b 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -524,6 +524,7 @@ struct efx_phy_operations {
int (*probe) (struct efx_nic *efx);
int (*init) (struct efx_nic *efx);
void (*fini) (struct efx_nic *efx);
+ void (*remove) (struct efx_nic *efx);
int (*reconfigure) (struct efx_nic *efx);
bool (*poll) (struct efx_nic *efx);
void (*get_settings) (struct efx_nic *efx,
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index a577be2..db44224 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1576,6 +1576,8 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
/* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 3800fc7..67eec7a 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -33,6 +33,9 @@
#define PCS_FW_HEARTBEAT_REG 0xd7ee
#define PCS_FW_HEARTB_LBN 0
#define PCS_FW_HEARTB_WIDTH 8
+#define PCS_FW_PRODUCT_CODE_1 0xd7f0
+#define PCS_FW_VERSION_1 0xd7f3
+#define PCS_FW_BUILD_1 0xd7f6
#define PCS_UC8051_STATUS_REG 0xd7fd
#define PCS_UC_STATUS_LBN 0
#define PCS_UC_STATUS_WIDTH 8
@@ -52,14 +55,24 @@ void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
struct qt202x_phy_data {
enum efx_phy_mode phy_mode;
+ bool bug17190_in_bad_state;
+ unsigned long bug17190_timer;
+ u32 firmware_ver;
};
#define QT2022C2_MAX_RESET_TIME 500
#define QT2022C2_RESET_WAIT 10
-static int qt2025c_wait_reset(struct efx_nic *efx)
+#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
+#define QT2025C_HEARTB_WAIT 100
+#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
+#define QT2025C_FWSTART_WAIT 100
+
+#define BUG17190_INTERVAL (2 * HZ)
+
+static int qt2025c_wait_heartbeat(struct efx_nic *efx)
{
- unsigned long timeout = jiffies + 10 * HZ;
+ unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
int reg, old_counter = 0;
/* Wait for firmware heartbeat to start */
@@ -74,11 +87,25 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
old_counter = counter;
else if (counter != old_counter)
break;
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ /* Some cables have EEPROMs that conflict with the
+ * PHY's on-board EEPROM so it cannot load firmware */
+ EFX_ERR(efx, "If an SFP+ direct attach cable is"
+ " connected, please check that it complies"
+ " with the SFP+ specification\n");
return -ETIMEDOUT;
- msleep(10);
+ }
+ msleep(QT2025C_HEARTB_WAIT);
}
+ return 0;
+}
+
+static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
+{
+ unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
+ int reg;
+
/* Wait for firmware status to look good */
for (;;) {
reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
@@ -90,7 +117,178 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
+ msleep(QT2025C_FWSTART_WAIT);
+ }
+
+ return 0;
+}
+
+static void qt2025c_restart_firmware(struct efx_nic *efx)
+{
+ /* Restart microcontroller execution of firmware from RAM */
+ efx_mdio_write(efx, 3, 0xe854, 0x00c0);
+ efx_mdio_write(efx, 3, 0xe854, 0x0040);
+ msleep(50);
+}
+
+static int qt2025c_wait_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = qt2025c_wait_fw_status_good(efx);
+ if (rc == -ETIMEDOUT) {
+ /* Bug 17689: occasionally heartbeat starts but firmware status
+ * code never progresses beyond 0x00. Try again, once, after
+ * restarting execution of the firmware image. */
+ EFX_LOG(efx, "bashing QT2025C microcontroller\n");
+ qt2025c_restart_firmware(efx);
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+ rc = qt2025c_wait_fw_status_good(efx);
+ }
+
+ return rc;
+}
+
+static void qt2025c_firmware_id(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ u8 firmware_id[9];
+ size_t i;
+
+ for (i = 0; i < sizeof(firmware_id); i++)
+ firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
+ PCS_FW_PRODUCT_CODE_1 + i);
+ EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+ (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+ firmware_id[3] >> 4, firmware_id[3] & 0xf,
+ firmware_id[4], firmware_id[5],
+ firmware_id[6], firmware_id[7], firmware_id[8]);
+ phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
+ ((firmware_id[3] & 0x0f) << 16) |
+ (firmware_id[4] << 8) | firmware_id[5];
+}
+
+static void qt2025c_bug17190_workaround(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+
+ /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
+ * layers up, but PCS down (no block_lock). If we notice this state
+ * persisting for a couple of seconds, we switch PMA/PMD loopback
+ * briefly on and then off again, which is normally sufficient to
+ * recover it.
+ */
+ if (efx->link_state.up ||
+ !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
+ phy_data->bug17190_in_bad_state = false;
+ return;
+ }
+
+ if (!phy_data->bug17190_in_bad_state) {
+ phy_data->bug17190_in_bad_state = true;
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ return;
+ }
+
+ if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
+ EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, false);
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ }
+}
+
+static int qt2025c_select_phy_mode(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ struct falcon_board *board = falcon_board(efx);
+ int reg, rc, i;
+ uint16_t phy_op_mode;
+
+ /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
+ * Self-Configure mode. Don't attempt any switching if we encounter
+ * older firmware. */
+ if (phy_data->firmware_ver < 0x02000100)
+ return 0;
+
+ /* In general we will get optimal behaviour in "SFP+ Self-Configure"
+ * mode; however, that powers down most of the PHY when no module is
+ * present, so we must use a different mode (any fixed mode will do)
+ * to be sure that loopbacks will work. */
+ phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
+
+ /* Only change mode if really necessary */
+ reg = efx_mdio_read(efx, 1, 0xc319);
+ if ((reg & 0x0038) == phy_op_mode)
+ return 0;
+ EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
+
+ /* This sequence replicates the register writes configured in the boot
+ * EEPROM (including the differences between board revisions), except
+ * that the operating mode is changed, and the PHY is prevented from
+ * unnecessarily reloading the main firmware image again. */
+ efx_mdio_write(efx, 1, 0xc300, 0x0000);
+ /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
+ * STOPs onto the firmware/module I2C bus to reset it, varies across
+ * board revisions, as the bus is connected to different GPIO/LED
+ * outputs on the PHY.) */
+ if (board->major == 0 && board->minor < 2) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4488);
+ efx_mdio_write(efx, 1, 0xc303, 0x4480);
+ efx_mdio_write(efx, 1, 0xc303, 0x4490);
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ }
+ } else {
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x0900);
+ efx_mdio_write(efx, 1, 0xd008, 0x0005);
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ efx_mdio_write(efx, 1, 0xc302, 0x0004);
+ efx_mdio_write(efx, 1, 0xc316, 0x0013);
+ efx_mdio_write(efx, 1, 0xc318, 0x0054);
+ efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
+ efx_mdio_write(efx, 1, 0xc31a, 0x0098);
+ efx_mdio_write(efx, 3, 0x0026, 0x0e00);
+ efx_mdio_write(efx, 3, 0x0027, 0x0013);
+ efx_mdio_write(efx, 3, 0x0028, 0xa528);
+ efx_mdio_write(efx, 1, 0xd006, 0x000a);
+ efx_mdio_write(efx, 1, 0xd007, 0x0009);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ /* This additional write is not present in the boot EEPROM. It
+ * prevents the PHY's internal boot ROM doing another pointless (and
+ * slow) reload of the firmware image (the microcontroller's code
+ * memory is not affected by the microcontroller reset). */
+ efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+ efx_mdio_write(efx, 1, 0xc300, 0x0002);
+ msleep(20);
+
+ /* Restart microcontroller execution of firmware from RAM */
+ qt2025c_restart_firmware(efx);
+
+ /* Wait for the microcontroller to be ready again */
+ rc = qt2025c_wait_reset(efx);
+ if (rc < 0) {
+ EFX_ERR(efx, "PHY microcontroller reset during mode switch "
+ "timed out\n");
+ return rc;
}
return 0;
@@ -120,15 +318,9 @@ static int qt202x_reset_phy(struct efx_nic *efx)
/* Wait 250ms for the PHY to complete bootup */
msleep(250);
- /* Check that all the MMDs we expect are present and responding. We
- * expect faults on some if the link is down, but not on the PHY XS */
- rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
- if (rc < 0)
- goto fail;
-
falcon_board(efx)->type->init_phy(efx);
- return rc;
+ return 0;
fail:
EFX_ERR(efx, "PHY reset timed out\n");
@@ -137,6 +329,16 @@ static int qt202x_reset_phy(struct efx_nic *efx)
static int qt202x_phy_probe(struct efx_nic *efx)
{
+ struct qt202x_phy_data *phy_data;
+
+ phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+ phy_data->bug17190_in_bad_state = false;
+ phy_data->bug17190_timer = 0;
+
efx->mdio.mmds = QT202X_REQUIRED_DEVS;
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
@@ -145,7 +347,6 @@ static int qt202x_phy_probe(struct efx_nic *efx)
static int qt202x_phy_init(struct efx_nic *efx)
{
- struct qt202x_phy_data *phy_data;
u32 devid;
int rc;
@@ -155,17 +356,14 @@ static int qt202x_phy_init(struct efx_nic *efx)
return rc;
}
- phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
- if (!phy_data)
- return -ENOMEM;
- efx->phy_data = phy_data;
-
devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
efx_mdio_id_rev(devid));
- phy_data->phy_mode = efx->phy_mode;
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_firmware_id(efx);
+
return 0;
}
@@ -183,6 +381,9 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
efx->link_state.fd = true;
efx->link_state.fc = efx->wanted_fc;
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_bug17190_workaround(efx);
+
return efx->link_state.up != was_up;
}
@@ -191,6 +392,10 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
struct qt202x_phy_data *phy_data = efx->phy_data;
if (efx->phy_type == PHY_TYPE_QT2025C) {
+ int rc = qt2025c_select_phy_mode(efx);
+ if (rc)
+ return rc;
+
/* There are several different register bits which can
* disable TX (and save power) on direct-attach cables
* or optical transceivers, varying somewhat between
@@ -224,7 +429,7 @@ static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecm
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
-static void qt202x_phy_fini(struct efx_nic *efx)
+static void qt202x_phy_remove(struct efx_nic *efx)
{
/* Free the context block */
kfree(efx->phy_data);
@@ -236,7 +441,8 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
.poll = qt202x_phy_poll,
- .fini = qt202x_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
};
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 14949bb..250c882 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -47,7 +47,7 @@ static const unsigned char payload_source[ETH_ALEN] = {
0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};
-static const char *payload_msg =
+static const char payload_msg[] =
"Hello world! This is an Efx loopback test in progress!";
/**
@@ -79,10 +79,14 @@ struct efx_loopback_state {
static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
- int devad = __ffs(efx->mdio.mmds);
+ int devad;
u16 physid1, physid2;
- if (efx->phy_type == PHY_TYPE_NONE)
+ if (efx->mdio.mode_support & MDIO_SUPPORTS_C45)
+ devad = __ffs(efx->mdio.mmds);
+ else if (efx->mdio.mode_support & MDIO_SUPPORTS_C22)
+ devad = MDIO_DEVAD_NONE;
+ else
return 0;
mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index de07a4f..f8c6771 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -133,6 +133,7 @@ static int siena_probe_port(struct efx_nic *efx)
void siena_remove_port(struct efx_nic *efx)
{
+ efx->phy_op->remove(efx);
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index ca11572..3009c29 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -202,10 +202,14 @@ static ssize_t set_phy_short_reach(struct device *dev,
int rc;
rtnl_lock();
- efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
- MDIO_PMA_10GBT_TXPWR_SHORT,
- count != 0 && *buf != '0');
- rc = efx_reconfigure_port(efx);
+ if (efx->state != STATE_RUNNING) {
+ rc = -EBUSY;
+ } else {
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
+ MDIO_PMA_10GBT_TXPWR_SHORT,
+ count != 0 && *buf != '0');
+ rc = efx_reconfigure_port(efx);
+ }
rtnl_unlock();
return rc < 0 ? rc : (ssize_t)count;
@@ -298,36 +302,62 @@ static int tenxpress_init(struct efx_nic *efx)
return 0;
}
-static int sfx7101_phy_probe(struct efx_nic *efx)
+static int tenxpress_phy_probe(struct efx_nic *efx)
{
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
- return 0;
-}
+ struct tenxpress_phy_data *phy_data;
+ int rc;
+
+ /* Allocate phy private storage */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+
+ /* Create any special files */
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ rc = device_create_file(&efx->pci_dev->dev,
+ &dev_attr_phy_short_reach);
+ if (rc)
+ goto fail;
+ }
+
+ if (efx->phy_type == PHY_TYPE_SFX7101) {
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45;
+
+ efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full);
+ } else {
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ efx->loopback_modes = (SFT9001_LOOPBACKS |
+ FALCON_XMAC_LOOPBACKS |
+ FALCON_GMAC_LOOPBACKS);
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full);
+ }
-static int sft9001_phy_probe(struct efx_nic *efx)
-{
- efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
- FALCON_GMAC_LOOPBACKS);
return 0;
+
+fail:
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+ return rc;
}
static int tenxpress_phy_init(struct efx_nic *efx)
{
- struct tenxpress_phy_data *phy_data;
- int rc = 0;
+ int rc;
falcon_board(efx)->type->init_phy(efx);
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (!phy_data)
- return -ENOMEM;
- efx->phy_data = phy_data;
- phy_data->phy_mode = efx->phy_mode;
-
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
if (efx->phy_type == PHY_TYPE_SFT9001A) {
int reg;
@@ -341,44 +371,27 @@ static int tenxpress_phy_init(struct efx_nic *efx)
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
- goto fail;
+ return rc;
rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
if (rc < 0)
- goto fail;
+ return rc;
}
rc = tenxpress_init(efx);
if (rc < 0)
- goto fail;
+ return rc;
- /* Initialise advertising flags */
- efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full);
- if (efx->phy_type != PHY_TYPE_SFX7101)
- efx->link_advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full);
+ /* Reinitialise flow control settings */
efx_link_set_wanted_fc(efx, efx->wanted_fc);
efx_mdio_an_reconfigure(efx);
- if (efx->phy_type == PHY_TYPE_SFT9001B) {
- rc = device_create_file(&efx->pci_dev->dev,
- &dev_attr_phy_short_reach);
- if (rc)
- goto fail;
- }
-
schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
/* Let XGXS and SerDes out of reset */
falcon_reset_xaui(efx);
return 0;
-
- fail:
- kfree(efx->phy_data);
- efx->phy_data = NULL;
- return rc;
}
/* Perform a "special software reset" on the PHY. The caller is
@@ -589,25 +602,26 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
-static void tenxpress_phy_fini(struct efx_nic *efx)
+static void sfx7101_phy_fini(struct efx_nic *efx)
{
int reg;
+ /* Power down the LNPGA */
+ reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+
+ /* Waiting here ensures that the board fini, which can turn
+ * off the power to the PHY, won't get run until the LNPGA
+ * powerdown has been given long enough to complete. */
+ schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
+}
+
+static void tenxpress_phy_remove(struct efx_nic *efx)
+{
if (efx->phy_type == PHY_TYPE_SFT9001B)
device_remove_file(&efx->pci_dev->dev,
&dev_attr_phy_short_reach);
- if (efx->phy_type == PHY_TYPE_SFX7101) {
- /* Power down the LNPGA */
- reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
- efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
-
- /* Waiting here ensures that the board fini, which can turn
- * off the power to the PHY, won't get run until the LNPGA
- * powerdown has been given long enough to complete. */
- schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
- }
-
kfree(efx->phy_data);
efx->phy_data = NULL;
}
@@ -819,11 +833,12 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
}
struct efx_phy_operations falcon_sfx7101_phy_ops = {
- .probe = sfx7101_phy_probe,
+ .probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
- .fini = tenxpress_phy_fini,
+ .fini = sfx7101_phy_fini,
+ .remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
@@ -832,11 +847,12 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
};
struct efx_phy_operations falcon_sft9001_phy_ops = {
- .probe = sft9001_phy_probe,
+ .probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
.poll = tenxpress_phy_poll,
- .fini = tenxpress_phy_fini,
+ .fini = efx_port_dummy_op_void,
+ .remove = tenxpress_phy_remove,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sft9001_set_npage_adv,
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index e669f94..a8b70ef 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -821,8 +821,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
EFX_TXQ_MASK];
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->skb);
- buffer->len = 0;
- buffer->continuation = true;
if (buffer->unmap_len) {
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
@@ -836,6 +834,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
PCI_DMA_TODEVICE);
buffer->unmap_len = 0;
}
+ buffer->len = 0;
+ buffer->continuation = true;
}
}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index c88bc10..7402b85 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -84,6 +84,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.mpr = 1,
.tpauser = 1,
.hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
@@ -108,7 +110,7 @@ static void sh_eth_reset(struct net_device *ndev)
mdelay(1);
cnt--;
}
- if (cnt < 0)
+ if (cnt == 0)
printk(KERN_ERR "Device reset fail\n");
/* Table Init */
@@ -175,7 +177,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.tpauser = 1,
.bculr = 1,
.hw_swap = 1,
- .rpadir = 1,
.no_trimd = 1,
.no_ade = 1,
};
@@ -501,6 +502,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
*/
mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
(((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
+ if (mdp->cd->rpadir)
+ mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
@@ -715,6 +718,8 @@ static int sh_eth_rx(struct net_device *ndev)
pkt_len + 2);
skb = mdp->rx_skbuff[entry];
mdp->rx_skbuff[entry] = NULL;
+ if (mdp->cd->rpadir)
+ skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
netif_rx(skb);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 89a05d6..67249c3 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
{
struct sky2_tx_le *le = sky2->tx_le + *slot;
- struct tx_ring_info *re = sky2->tx_ring + *slot;
*slot = RING_NEXT(*slot, sky2->tx_ring_size);
- re->flags = 0;
- re->skb = NULL;
le->ctrl = 0;
return le;
}
@@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
return count;
}
-static void sky2_tx_unmap(struct pci_dev *pdev,
- const struct tx_ring_info *re)
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
pci_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
+ re->flags = 0;
}
/*
@@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ re->skb = NULL;
dev_kfree_skb_any(skb);
sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
@@ -1848,7 +1846,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+ /* Wake the queue unless the device is detached, e.g. when called from sky2_down() */
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
netif_wake_queue(dev);
}
@@ -3240,6 +3239,27 @@ static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
}
+static void sky2_hw_set_wol(struct sky2_hw *hw)
+{
+ int wol = 0;
+ int i;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (sky2->wol)
+ wol = 1;
+ }
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+ hw->chip_id == CHIP_ID_YUKON_EX ||
+ hw->chip_id == CHIP_ID_YUKON_FE_P)
+ sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
+
+ device_set_wakeup_enable(&hw->pdev->dev, wol);
+}
+
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
const struct sky2_port *sky2 = netdev_priv(dev);
@@ -3259,13 +3279,7 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
sky2->wol = wol->wolopts;
- if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_write32(hw, B0_CTST, sky2->wol
- ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
-
- device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
+ sky2_hw_set_wol(hw);
if (!netif_running(dev))
sky2_wol_init(sky2);
@@ -4530,7 +4544,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
"Optima", /* 0xbc */
};
- if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT)
+ if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OPT)
strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
else
snprintf(buf, sz, "(chip %#x)", chipid);
@@ -4697,6 +4711,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
+ pdev->d3_delay = 150;
return 0;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 95db60a..f952113 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev)
if (retval) {
printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
FIRMWARE_RX);
- return retval;
+ goto out_init;
}
if (fw_rx->size % 4) {
printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
@@ -1108,6 +1108,9 @@ out_tx:
release_firmware(fw_tx);
out_rx:
release_firmware(fw_rx);
+out_init:
+ if (retval)
+ netdev_close(dev);
return retval;
}
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d..d71c197 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
/* Transmit complete. */
lp->lstats.tx_ints++;
tc35815_txdone(dev);
- netif_wake_queue(dev);
if (ret < 0)
ret = 0;
}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 3a74d21..7f82b02 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2009 Broadcom Corporation.
+ * Copyright (C) 2005-2010 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.105"
-#define DRV_MODULE_RELDATE "December 2, 2009"
+#define DRV_MODULE_VERSION "3.106"
+#define DRV_MODULE_RELDATE "January 12, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -1037,7 +1037,11 @@ static void tg3_mdio_start(struct tg3 *tp)
else
tp->phy_addr = 1;
- is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+ else
+ is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
+ TG3_CPMU_PHY_STRAP_IS_SERDES;
if (is_serdes)
tp->phy_addr += 7;
} else
@@ -4693,8 +4697,9 @@ next_pkt:
(*post_ptr)++;
if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
- u32 idx = *post_ptr % TG3_RX_RING_SIZE;
- tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
+ tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ tpr->rx_std_prod_idx);
work_mask &= ~RXD_OPAQUE_RING_STD;
rx_std_posted = 0;
}
@@ -7742,7 +7747,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
((u64) tpr->rx_std_mapping >> 32));
tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
((u64) tpr->rx_std_mapping & 0xffffffff));
- if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_BUFFER_DESC);
@@ -12122,7 +12127,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->phy_id = eeprom_phy_id;
if (eeprom_phy_serdes) {
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+ if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
else
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
@@ -13384,6 +13390,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (err)
return err;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
+ return -ENOTSUPP;
+
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
val &= GRC_MODE_HOST_STACKUP;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index cd30889..8a16791 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,6 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2007-2010 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -1054,6 +1055,8 @@
#define CPMU_MUTEX_REQ_DRIVER 0x00001000
#define TG3_CPMU_MUTEX_GNT 0x00003660
#define CPMU_MUTEX_GNT_DRIVER 0x00001000
+#define TG3_CPMU_PHY_STRAP 0x00003664
+#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
/* 0x3664 --> 0x3800 unused */
/* Mbuf cluster free registers */
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 1cc8cf4..516713f 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -101,6 +101,10 @@ config TULIP_NAPI_HW_MITIGATION
If in doubt, say Y.
+config TULIP_DM910X
+ def_bool y
+ depends on TULIP && SPARC
+
config DE4X5
tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
depends on PCI || EISA
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index ad63621..6f44ebf 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -92,6 +92,10 @@
#include <asm/uaccess.h>
#include <asm/irq.h>
+#ifdef CONFIG_TULIP_DM910X
+#include <linux/of.h>
+#endif
+
/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
@@ -377,6 +381,23 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
if (!printed_version++)
printk(version);
+ /*
+ * SPARC on-board DM910x chips should be handled by the main
+ * tulip driver, except for early DM9100s.
+ */
+#ifdef CONFIG_TULIP_DM910X
+ if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
+ ent->driver_data == PCI_DM9102_ID) {
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+
+ if (dp && of_get_property(dp, "local-mac-address", NULL)) {
+ printk(KERN_INFO DRV_NAME
+ ": skipping on-board DM910x (use tulip)\n");
+ return -ENODEV;
+ }
+ }
+#endif
+
/* Init network device */
dev = alloc_etherdev(sizeof(*db));
if (dev == NULL)
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0fa3140..20696b5 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -196,9 +196,13 @@ struct tulip_chip_table tulip_tbl[] = {
| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
/* DM910X */
+#ifdef CONFIG_TULIP_DM910X
{ "Davicom DM9102/DM9102A", 128, 0x0001ebef,
HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
tulip_timer, tulip_media_task },
+#else
+ { NULL },
+#endif
/* RS7112 */
{ "Conexant LANfinity", 256, 0x0001ebef,
@@ -228,8 +232,10 @@ static struct pci_device_id tulip_pci_tbl[] = {
{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+#ifdef CONFIG_TULIP_DM910X
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+#endif
{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
@@ -243,6 +249,7 @@ static struct pci_device_id tulip_pci_tbl[] = {
{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
+ { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ } /* terminate list */
};
@@ -1299,18 +1306,30 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
}
/*
- * Early DM9100's need software CRC and the DMFE driver
+ * DM910x chips should be handled by the dmfe driver, except
+ * on-board chips on SPARC systems. Also, early DM9100s need
+ * software CRC which only the dmfe driver supports.
*/
- if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
- {
- /* Read Chip revision */
- if (pdev->revision < 0x30)
- {
- printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+#ifdef CONFIG_TULIP_DM910X
+ if (chip_idx == DM910X) {
+ struct device_node *dp;
+
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
+ pdev->revision < 0x30) {
+ printk(KERN_INFO PFX
+ "skipping early DM9100 with Crc bug (use dmfe)\n");
+ return -ENODEV;
+ }
+
+ dp = pci_device_to_OF_node(pdev);
+ if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
+ printk(KERN_INFO PFX
+ "skipping DM910x expansion card (use dmfe)\n");
return -ENODEV;
}
}
+#endif
/*
* Looks for early PCI chipsets where people report hangs
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 01e99f2..2834a01 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -849,13 +849,13 @@ static void tun_sock_write_space(struct sock *sk)
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
wake_up_interruptible_sync(sk->sk_sleep);
- tun = container_of(sk, struct tun_sock, sk)->tun;
+ tun = tun_sk(sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
}
static void tun_sock_destruct(struct sock *sk)
{
- free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
+ free_netdev(tun_sk(sk)->tun->dev);
}
static struct proto tun_proto = {
@@ -990,7 +990,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
sk->sk_write_space = tun_sock_write_space;
sk->sk_sndbuf = INT_MAX;
- container_of(sk, struct tun_sock, sk)->tun = tun;
+ tun_sk(sk)->tun = tun;
security_tun_dev_post_create(sk);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index afaf088..eb8fe7e 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1563,7 +1563,10 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
static void ugeth_quiesce(struct ucc_geth_private *ugeth)
{
- /* Wait for and prevent any further xmits. */
+ /* Prevent any further xmits, plus detach the device. */
+ netif_device_detach(ugeth->ndev);
+
+ /* Wait for any current xmits to finish. */
netif_tx_disable(ugeth->ndev);
/* Disable the interrupt to avoid NAPI rescheduling. */
@@ -1577,7 +1580,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
{
napi_enable(&ugeth->napi);
enable_irq(ugeth->ug_info->uf_info.irq);
- netif_tx_wake_all_queues(ugeth->ndev);
+ netif_device_attach(ugeth->ndev);
}
/* Called every time the controller might need to be made
@@ -1648,25 +1651,28 @@ static void adjust_link(struct net_device *dev)
ugeth->oldspeed = phydev->speed;
}
- /*
- * To change the MAC configuration we need to disable the
- * controller. To do so, we have to either grab ugeth->lock,
- * which is a bad idea since 'graceful stop' commands might
- * take quite a while, or we can quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
-
if (!ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 1;
}
+
+ if (new_state) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, tempval);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
} else if (ugeth->oldlink) {
new_state = 1;
ugeth->oldlink = 0;
@@ -3273,13 +3279,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
- if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+ skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+ if (!skb)
break;
dev->stats.tx_packets++;
- skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
-
if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
skb_recycle_check(skb,
ugeth->ug_info->uf_info.max_rx_buf_length +
@@ -3601,6 +3606,7 @@ static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
if (!netif_running(ndev))
return 0;
+ netif_device_detach(ndev);
napi_disable(&ugeth->napi);
/*
@@ -3659,7 +3665,7 @@ static int ucc_geth_resume(struct of_device *ofdev)
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
- netif_start_queue(ndev);
+ netif_device_attach(ndev);
return 0;
}
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index a007e2a..ef1fbeb 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -838,13 +838,13 @@ struct ucc_geth_hardware_statistics {
using the maximum is
easier */
#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
-#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */
#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64
#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
-#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This
+#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This
is a
guess
*/
@@ -899,16 +899,17 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
*/
#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT 128
+#define UCC_GETH_UTFTT_INIT 512
/* Gigabit Ethernet (1000 Mbps) */
#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
FIFO size */
#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
-#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
+#define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual
+ FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx virtual
FIFO size */
-#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
-#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
#define UCC_GETH_REMODER_INIT 0 /* bits that must be
set */
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 21e183a..5f3b9ea 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -419,7 +419,7 @@ static int cdc_manage_power(struct usbnet *dev, int on)
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
- .flags = FLAG_ETHER | FLAG_LINK_INTR,
+ .flags = FLAG_ETHER,
// .check_connect = cdc_check_connect,
.bind = cdc_bind,
.unbind = usbnet_cdc_unbind,
@@ -584,6 +584,11 @@ static const struct usb_device_id products [] = {
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &mbm_info,
}, {
+ /* Ericsson C3607w ver 2 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &mbm_info,
+}, {
/* Toshiba F3507g */
USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f78f090..6895f15 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -286,6 +286,7 @@ struct hso_device {
u8 usb_gone;
struct work_struct async_get_intf;
struct work_struct async_put_intf;
+ struct work_struct reset_device;
struct usb_device *usb;
struct usb_interface *interface;
@@ -332,7 +333,8 @@ static void hso_kick_transmit(struct hso_serial *serial);
/* Helper functions */
static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
struct usb_device *usb, gfp_t gfp);
-static void log_usb_status(int status, const char *function);
+static void handle_usb_error(int status, const char *function,
+ struct hso_device *hso_dev);
static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
int type, int dir);
static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports);
@@ -350,6 +352,7 @@ static void async_put_intf(struct work_struct *data);
static int hso_put_activity(struct hso_device *hso_dev);
static int hso_get_activity(struct hso_device *hso_dev);
static void tiocmget_intr_callback(struct urb *urb);
+static void reset_device(struct work_struct *data);
/*****************************************************************************/
/* Helping functions */
/*****************************************************************************/
@@ -461,10 +464,17 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */
{USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */
{USB_DEVICE(0x0af0, 0x7701)},
+ {USB_DEVICE(0x0af0, 0x7706)},
{USB_DEVICE(0x0af0, 0x7801)},
{USB_DEVICE(0x0af0, 0x7901)},
+ {USB_DEVICE(0x0af0, 0x7A01)},
+ {USB_DEVICE(0x0af0, 0x7A05)},
{USB_DEVICE(0x0af0, 0x8200)},
{USB_DEVICE(0x0af0, 0x8201)},
+ {USB_DEVICE(0x0af0, 0x8300)},
+ {USB_DEVICE(0x0af0, 0x8302)},
+ {USB_DEVICE(0x0af0, 0x8304)},
+ {USB_DEVICE(0x0af0, 0x8400)},
{USB_DEVICE(0x0af0, 0xd035)},
{USB_DEVICE(0x0af0, 0xd055)},
{USB_DEVICE(0x0af0, 0xd155)},
@@ -473,6 +483,8 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0xd157)},
{USB_DEVICE(0x0af0, 0xd257)},
{USB_DEVICE(0x0af0, 0xd357)},
+ {USB_DEVICE(0x0af0, 0xd058)},
+ {USB_DEVICE(0x0af0, 0xc100)},
{}
};
MODULE_DEVICE_TABLE(usb, hso_ids);
@@ -655,8 +667,8 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial)
spin_unlock_irqrestore(&serial_table_lock, flags);
}
-/* log a meaningful explanation of an USB status */
-static void log_usb_status(int status, const char *function)
+static void handle_usb_error(int status, const char *function,
+ struct hso_device *hso_dev)
{
char *explanation;
@@ -685,10 +697,20 @@ static void log_usb_status(int status, const char *function)
case -EMSGSIZE:
explanation = "internal error";
break;
+ case -EILSEQ:
+ case -EPROTO:
+ case -ETIME:
+ case -ETIMEDOUT:
+ explanation = "protocol error";
+ if (hso_dev)
+ schedule_work(&hso_dev->reset_device);
+ break;
default:
explanation = "unknown status";
break;
}
+
+ /* log a meaningful explanation of an USB status */
D1("%s: received USB status - %s (%d)", function, explanation, status);
}
@@ -762,7 +784,7 @@ static void write_bulk_callback(struct urb *urb)
/* log status, but don't act on it, we don't need to resubmit anything
* anyhow */
if (status)
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, odev->parent);
hso_put_activity(odev->parent);
@@ -806,7 +828,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC);
if (result) {
dev_warn(&odev->parent->interface->dev,
- "failed mux_bulk_tx_urb %d", result);
+ "failed mux_bulk_tx_urb %d\n", result);
net->stats.tx_errors++;
netif_start_queue(net);
} else {
@@ -998,7 +1020,7 @@ static void read_bulk_callback(struct urb *urb)
/* is al ok? (Filip: Who's Al ?) */
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, odev->parent);
return;
}
@@ -1019,7 +1041,8 @@ static void read_bulk_callback(struct urb *urb)
if (odev->parent->port_spec & HSO_INFO_CRC_BUG) {
u32 rest;
u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
- rest = urb->actual_length % odev->in_endp->wMaxPacketSize;
+ rest = urb->actual_length %
+ le16_to_cpu(odev->in_endp->wMaxPacketSize);
if (((rest == 5) || (rest == 6)) &&
!memcmp(((u8 *) urb->transfer_buffer) +
urb->actual_length - 4, crc_check, 4)) {
@@ -1053,7 +1076,7 @@ static void read_bulk_callback(struct urb *urb)
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_warn(&odev->parent->interface->dev,
- "%s failed submit mux_bulk_rx_urb %d", __func__,
+ "%s failed submit mux_bulk_rx_urb %d\n", __func__,
result);
}
@@ -1207,7 +1230,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
D1("serial == NULL");
return;
} else if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
return;
}
@@ -1225,7 +1248,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
rest =
urb->actual_length %
- serial->in_endp->wMaxPacketSize;
+ le16_to_cpu(serial->in_endp->wMaxPacketSize);
if (((rest == 5) || (rest == 6)) &&
!memcmp(((u8 *) urb->transfer_buffer) +
urb->actual_length - 4, crc_check, 4)) {
@@ -1513,7 +1536,7 @@ static void tiocmget_intr_callback(struct urb *urb)
if (!serial)
return;
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
return;
}
tiocmget = serial->tiocmget;
@@ -1700,6 +1723,10 @@ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
D1("no tty structures");
return -EINVAL;
}
+
+ if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM)
+ return -EINVAL;
+
if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1838,7 +1865,7 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,
result = usb_submit_urb(ctrl_urb, GFP_ATOMIC);
if (result) {
dev_err(&ctrl_urb->dev->dev,
- "%s failed submit ctrl_urb %d type %d", __func__,
+ "%s failed submit ctrl_urb %d type %d\n", __func__,
result, type);
return result;
}
@@ -1888,7 +1915,7 @@ static void intr_callback(struct urb *urb)
/* status check */
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, NULL);
return;
}
D4("\n--- Got intr callback 0x%02X ---", status);
@@ -1905,18 +1932,18 @@ static void intr_callback(struct urb *urb)
if (serial != NULL) {
D1("Pending read interrupt on port %d\n", i);
spin_lock(&serial->serial_lock);
- if (serial->rx_state == RX_IDLE) {
+ if (serial->rx_state == RX_IDLE &&
+ serial->open_count > 0) {
/* Setup and send a ctrl req read on
* port i */
- if (!serial->rx_urb_filled[0]) {
+ if (!serial->rx_urb_filled[0]) {
serial->rx_state = RX_SENT;
hso_mux_serial_read(serial);
} else
serial->rx_state = RX_PENDING;
-
} else {
- D1("Already pending a read on "
- "port %d\n", i);
+ D1("Already a read pending on "
+ "port %d or port not open\n", i);
}
spin_unlock(&serial->serial_lock);
}
@@ -1958,7 +1985,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
tty = tty_kref_get(serial->tty);
spin_unlock(&serial->serial_lock);
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
tty_kref_put(tty);
return;
}
@@ -2014,7 +2041,7 @@ static void ctrl_callback(struct urb *urb)
tty = tty_kref_get(serial->tty);
spin_unlock(&serial->serial_lock);
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
tty_kref_put(tty);
return;
}
@@ -2358,12 +2385,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->tx_data_length = tx_size;
serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL);
if (!serial->tx_data) {
- dev_err(dev, "%s - Out of memory", __func__);
+ dev_err(dev, "%s - Out of memory\n", __func__);
goto exit;
}
serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL);
if (!serial->tx_buffer) {
- dev_err(dev, "%s - Out of memory", __func__);
+ dev_err(dev, "%s - Out of memory\n", __func__);
goto exit;
}
@@ -2391,6 +2418,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
INIT_WORK(&hso_dev->async_get_intf, async_get_intf);
INIT_WORK(&hso_dev->async_put_intf, async_put_intf);
+ INIT_WORK(&hso_dev->reset_device, reset_device);
return hso_dev;
}
@@ -2831,13 +2859,14 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface)
mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!mux->shared_intr_urb) {
- dev_err(&interface->dev, "Could not allocate intr urb?");
+ dev_err(&interface->dev, "Could not allocate intr urb?\n");
goto exit;
}
- mux->shared_intr_buf = kzalloc(mux->intr_endp->wMaxPacketSize,
- GFP_KERNEL);
+ mux->shared_intr_buf =
+ kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize),
+ GFP_KERNEL);
if (!mux->shared_intr_buf) {
- dev_err(&interface->dev, "Could not allocate intr buf?");
+ dev_err(&interface->dev, "Could not allocate intr buf?\n");
goto exit;
}
@@ -3132,6 +3161,26 @@ out:
return result;
}
+static void reset_device(struct work_struct *data)
+{
+ struct hso_device *hso_dev =
+ container_of(data, struct hso_device, reset_device);
+ struct usb_device *usb = hso_dev->usb;
+ int result;
+
+ if (hso_dev->usb_gone) {
+ D1("No reset during disconnect\n");
+ } else {
+ result = usb_lock_device_for_reset(usb, hso_dev->interface);
+ if (result < 0)
+ D1("unable to lock device for reset: %d\n", result);
+ else {
+ usb_reset_device(usb);
+ usb_unlock_device(usb);
+ }
+ }
+}
+
static void hso_serial_ref_free(struct kref *ref)
{
struct hso_device *hso_dev = container_of(ref, struct hso_device, ref);
@@ -3232,13 +3281,13 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int,
usb_rcvintpipe(usb,
shared_int->intr_endp->bEndpointAddress & 0x7F),
shared_int->shared_intr_buf,
- shared_int->intr_endp->wMaxPacketSize,
+ 1,
intr_callback, shared_int,
shared_int->intr_endp->bInterval);
result = usb_submit_urb(shared_int->shared_intr_urb, gfp);
if (result)
- dev_warn(&usb->dev, "%s failed mux_intr_urb %d", __func__,
+ dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__,
result);
return result;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index b091e20..fd19db0 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -270,7 +270,7 @@ static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
get_registers(dev, PHYCNT, 1, data);
} while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
- if (i < MII_TIMEOUT) {
+ if (i <= MII_TIMEOUT) {
get_registers(dev, PHYDAT, 2, data);
*reg = data[0] | (data[1] << 8);
return 0;
@@ -295,7 +295,7 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg)
get_registers(dev, PHYCNT, 1, data);
} while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
- if (i < MII_TIMEOUT)
+ if (i <= MII_TIMEOUT)
return 0;
else
return 1;
@@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
dbg("%02X:", netdev->dev_addr[i]);
dbg("%02X\n", netdev->dev_addr[i]);
/* Set the IDR registers. */
- set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr);
+ set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
#ifdef EEPROM_WRITE
{
u8 cr;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 593e01f..611b804 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -102,6 +102,7 @@ static const int multicast_filter_limit = 32;
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
+#include <linux/workqueue.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
@@ -389,6 +390,7 @@ struct rhine_private {
struct net_device *dev;
struct napi_struct napi;
spinlock_t lock;
+ struct work_struct reset_task;
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
@@ -407,6 +409,7 @@ struct rhine_private {
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
+static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
@@ -775,6 +778,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
spin_lock_init(&rp->lock);
+ INIT_WORK(&rp->reset_task, rhine_reset_task);
+
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
rp->mii_if.mdio_write = mdio_write;
@@ -1179,22 +1184,18 @@ static int rhine_open(struct net_device *dev)
return 0;
}
-static void rhine_tx_timeout(struct net_device *dev)
+static void rhine_reset_task(struct work_struct *work)
{
- struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
-
- printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
- "%4.4x, resetting...\n",
- dev->name, ioread16(ioaddr + IntrStatus),
- mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+ struct rhine_private *rp = container_of(work, struct rhine_private,
+ reset_task);
+ struct net_device *dev = rp->dev;
/* protect against concurrent rx interrupts */
disable_irq(rp->pdev->irq);
napi_disable(&rp->napi);
- spin_lock(&rp->lock);
+ spin_lock_bh(&rp->lock);
/* clear all descriptors */
free_tbufs(dev);
@@ -1206,7 +1207,7 @@ static void rhine_tx_timeout(struct net_device *dev)
rhine_chip_reset(dev);
init_registers(dev);
- spin_unlock(&rp->lock);
+ spin_unlock_bh(&rp->lock);
enable_irq(rp->pdev->irq);
dev->trans_start = jiffies;
@@ -1214,6 +1215,19 @@ static void rhine_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void rhine_tx_timeout(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+ "%4.4x, resetting...\n",
+ dev->name, ioread16(ioaddr + IntrStatus),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ schedule_work(&rp->reset_task);
+}
+
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1830,10 +1844,11 @@ static int rhine_close(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- spin_lock_irq(&rp->lock);
-
- netif_stop_queue(dev);
napi_disable(&rp->napi);
+ cancel_work_sync(&rp->reset_task);
+ netif_stop_queue(dev);
+
+ spin_lock_irq(&rp->lock);
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 4ceb441..317aa34 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
/**
* tx_srv - transmit interrupt service
* @vptr; Velocity
- * @status:
*
* Scan the queues looking for transmitted packets that
* we can complete and clean up. Update any statistics as
* necessary/
*/
-static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+static int velocity_tx_srv(struct velocity_info *vptr)
{
struct tx_desc *td;
int qnum;
@@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
/**
* velocity_rx_srv - service RX interrupt
* @vptr: velocity
- * @status: adapter status (unused)
*
* Walk the receive ring of the velocity adapter and remove
* any received packets from the receive queue. Hand the ring
* slots back to the adapter for reuse.
*/
-static int velocity_rx_srv(struct velocity_info *vptr, int status,
- int budget_left)
+static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
{
struct net_device_stats *stats = &vptr->dev->stats;
int rd_curr = vptr->rx.curr;
@@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget)
struct velocity_info *vptr = container_of(napi,
struct velocity_info, napi);
unsigned int rx_done;
- u32 isr_status;
-
- spin_lock(&vptr->lock);
- isr_status = mac_read_isr(vptr->mac_regs);
-
- /* Ack the interrupt */
- mac_write_isr(vptr->mac_regs, isr_status);
- if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
- velocity_error(vptr, isr_status);
+ unsigned long flags;
+ spin_lock_irqsave(&vptr->lock, flags);
/*
* Do rx and tx twice for performance (taken from the VIA
* out-of-tree driver).
*/
- rx_done = velocity_rx_srv(vptr, isr_status, budget / 2);
- velocity_tx_srv(vptr, isr_status);
- rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done);
- velocity_tx_srv(vptr, isr_status);
-
- spin_unlock(&vptr->lock);
+ rx_done = velocity_rx_srv(vptr, budget / 2);
+ velocity_tx_srv(vptr);
+ rx_done += velocity_rx_srv(vptr, budget - rx_done);
+ velocity_tx_srv(vptr);
/* If budget not fully consumed, exit the polling mode */
if (rx_done < budget) {
napi_complete(napi);
mac_enable_int(vptr->mac_regs);
}
+ spin_unlock_irqrestore(&vptr->lock, flags);
return rx_done;
}
@@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
return IRQ_NONE;
}
+ /* Ack the interrupt */
+ mac_write_isr(vptr->mac_regs, isr_status);
+
if (likely(napi_schedule_prep(&vptr->napi))) {
mac_disable_int(vptr->mac_regs);
__napi_schedule(&vptr->napi);
}
+
+ if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+ velocity_error(vptr, isr_status);
+
spin_unlock(&vptr->lock);
return IRQ_HANDLED;
@@ -2237,8 +2233,6 @@ static int velocity_open(struct net_device *dev)
/* Ensure chip is running */
pci_set_power_state(vptr->pdev, PCI_D0);
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
@@ -2250,6 +2244,8 @@ static int velocity_open(struct net_device *dev)
goto out;
}
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
napi_enable(&vptr->napi);
@@ -2339,10 +2335,10 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
velocity_init_registers(vptr, VELOCITY_INIT_WOL);
mac_disable_int(vptr->mac_regs);
- velocity_tx_srv(vptr, 0);
+ velocity_tx_srv(vptr);
for (i = 0; i < vptr->tx.numq; i++) {
if (vptr->tx.used[i])
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev,
{
struct velocity_info *vptr = netdev_priv(dev);
int max_us = 0x3f * 64;
+ unsigned long flags;
/* 6 bits of */
if (ecmd->tx_coalesce_usecs > max_us)
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev,
ecmd->tx_coalesce_usecs);
/* Setup the interrupt suppression and queue timers */
+ spin_lock_irqsave(&vptr->lock, flags);
mac_disable_int(vptr->mac_regs);
setup_adaptive_interrupts(vptr);
setup_queue_timers(vptr);
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev,
mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
mac_clear_isr(vptr->mac_regs);
mac_enable_int(vptr->mac_regs);
+ spin_unlock_irqrestore(&vptr->lock, flags);
return 0;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c708ecc..9ead30b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -395,8 +395,7 @@ static void refill_work(struct work_struct *work)
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
- try_fill_recv(vi, GFP_KERNEL);
- still_empty = (vi->num == 0);
+ still_empty = !try_fill_recv(vi, GFP_KERNEL);
napi_enable(&vi->napi);
/* In theory, this can happen: if we don't get any buffers in
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index f1c4b2a..b9685e8 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -310,7 +310,7 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
rx_priv->data_size, PCI_DMA_FROMDEVICE);
- if (dma_addr == 0) {
+ if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
ring->stats.pci_map_fail++;
return -EIO;
}
@@ -4087,21 +4087,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
- if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 64bit DMA", __func__);
high_dma = 1;
if (pci_set_consistent_dma_mask(pdev,
- 0xffffffffffffffffULL)) {
+ DMA_BIT_MASK(64))) {
vxge_debug_init(VXGE_ERR,
"%s : unable to obtain 64bit DMA for "
"consistent allocations", __func__);
ret = -ENOMEM;
goto _exit1;
}
- } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 32bit DMA", __func__);
} else {
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 5cc0f27..2d7c96d 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -151,6 +151,7 @@ enum {
/* Device IDs */
USB_DEVICE_ID_I6050 = 0x0186,
+ USB_DEVICE_ID_I6050_2 = 0x0188,
};
@@ -234,6 +235,7 @@ struct i2400mu {
u8 rx_size_auto_shrink;
struct dentry *debugfs_dentry;
+ unsigned i6050:1; /* 1 if this is a 6050 based SKU */
};
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 3b48681..98f4f8c5 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -478,7 +478,16 @@ int i2400mu_probe(struct usb_interface *iface,
i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
i2400m->bus_bm_mac_addr_impaired = 0;
- if (id->idProduct == USB_DEVICE_ID_I6050) {
+ switch (id->idProduct) {
+ case USB_DEVICE_ID_I6050:
+ case USB_DEVICE_ID_I6050_2:
+ i2400mu->i6050 = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (i2400mu->i6050) {
i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
i2400mu->endpoint_cfg.bulk_out = 0;
i2400mu->endpoint_cfg.notification = 3;
@@ -719,6 +728,7 @@ int i2400mu_post_reset(struct usb_interface *iface)
static
struct usb_device_id i2400mu_id_table[] = {
{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
{ USB_DEVICE(0x8086, 0x0181) },
{ USB_DEVICE(0x8086, 0x1403) },
{ USB_DEVICE(0x8086, 0x1405) },
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a4c086f..e63b7c4 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1903,17 +1903,6 @@ accept:
rxs->noise = sc->ah->ah_noise_floor;
rxs->signal = rxs->noise + rs.rs_rssi;
- /* An rssi of 35 indicates you should be able use
- * 54 Mbps reliably. A more elaborate scheme can be used
- * here but it requires a map of SNR/throughput for each
- * possible mode used */
- rxs->qual = rs.rs_rssi * 100 / 35;
-
- /* rssi can be more than 35 though, anything above that
- * should be considered at 100% */
- if (rxs->qual > 100)
- rxs->qual = 100;
-
rxs->antenna = rs.rs_antenna;
rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -2381,6 +2370,9 @@ ath5k_init(struct ath5k_softc *sc)
*/
ath5k_stop_locked(sc);
+ /* Set PHY calibration interval */
+ ah->ah_cal_intval = ath5k_calinterval;
+
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
@@ -2408,10 +2400,6 @@ ath5k_init(struct ath5k_softc *sc)
/* Set ack to be sent at low bit-rates */
ath5k_hw_set_ack_bitrate_high(ah, false);
-
- /* Set PHY calibration inteval */
- ah->ah_cal_intval = ath5k_calinterval;
-
ret = 0;
done:
mmiowb();
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 5d1c867..6a3f4da 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -97,7 +97,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
int ret;
u16 val;
- u32 cksum, offset;
+ u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
/*
* Read values from EEPROM and store them in the capability structure
@@ -116,12 +116,38 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
* Validate the checksum of the EEPROM date. There are some
* devices with invalid EEPROMs.
*/
- for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
+ if (val) {
+ eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
+ AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
+ eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
+
+ /*
+ * Fail safe check to prevent stupid loops due
+ * to busted EEPROMs. XXX: This value is likely too
+ * big still, waiting on a better value.
+ */
+ if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
+ ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
+ "%d (0x%04x) max expected: %d (0x%04x)\n",
+ eep_max, eep_max,
+ 3 * AR5K_EEPROM_INFO_MAX,
+ 3 * AR5K_EEPROM_INFO_MAX);
+ return -EIO;
+ }
+ }
+
+ for (cksum = 0, offset = 0; offset < eep_max; offset++) {
AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
cksum ^= val;
}
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
- ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
+ ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
+ "checksum: 0x%04x eep_max: 0x%04x (%s)\n",
+ cksum, eep_max,
+ eep_max == AR5K_EEPROM_INFO_MAX ?
+ "default size" : "custom size");
return -EIO;
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 0123f35..473a483 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -37,6 +37,14 @@
#define AR5K_EEPROM_RFKILL_POLARITY_S 1
#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
+
+/* FLASH(EEPROM) Defines for AR531X chips */
+#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
+#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
+#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
+#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
+#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
+
#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 03a1106..5774cea 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -25,7 +25,7 @@ config ATH9K
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K
+ depends on ATH9K && DEBUG_FS
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index e2cef2f..1597a42 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -33,11 +33,11 @@ struct ath_node;
/* Macro to expand scalars to 64-bit objects */
-#define ito64(x) (sizeof(x) == 8) ? \
+#define ito64(x) (sizeof(x) == 1) ? \
(((unsigned long long int)(x)) & (0xff)) : \
- (sizeof(x) == 16) ? \
+ (sizeof(x) == 2) ? \
(((unsigned long long int)(x)) & 0xffff) : \
- ((sizeof(x) == 32) ? \
+ ((sizeof(x) == 4) ? \
(((unsigned long long int)(x)) & 0xffffffff) : \
(unsigned long long int)(x))
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2ec61f0..ae37144 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -855,12 +855,11 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
}
}
-static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah)
+static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
{
u32 i, j;
- if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
- test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) {
+ if (ah->hw_version.devid == AR9280_DEVID_PCI) {
/* EEPROM Fixup */
for (i = 0; i < ah->iniModes.ia_rows; i++) {
@@ -980,7 +979,7 @@ int ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- ath9k_hw_init_11a_eeprom_fix(ah);
+ ath9k_hw_init_eeprom_fix(ah);
r = ath9k_hw_init_macaddr(ah);
if (r) {
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 71b84d9..efc420c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -186,7 +186,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
wait = wait_time;
while (ath9k_hw_numtxpending(ah, q)) {
if ((--wait) == 0) {
- ath_print(common, ATH_DBG_QUEUE,
+ ath_print(common, ATH_DBG_FATAL,
"Failed to stop TX DMA in 100 "
"msec after killing last frame\n");
break;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 0c87771..e185479 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -77,6 +77,9 @@
#define ATH9K_TXERR_XTXOP 0x08
#define ATH9K_TXERR_TIMER_EXPIRED 0x10
#define ATH9K_TX_ACKED 0x20
+#define ATH9K_TXERR_MASK \
+ (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \
+ ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
#define ATH9K_TX_BA 0x01
#define ATH9K_TX_PWRMGMT 0x02
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c487434..643bea3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1973,6 +1973,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
struct ieee80211_hw *hw = sc->hw;
int r;
+ /* Stop ANI */
+ del_timer_sync(&common->ani.timer);
+
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
ath_stoprecv(sc);
@@ -2014,6 +2017,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
}
+ /* Start ANI */
+ ath_start_ani(common);
+
return r;
}
@@ -2508,6 +2514,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
return; /* another wiphy still in use */
}
+ /* Ensure HW is awake when we try to shut it down. */
+ ath9k_ps_wakeup(sc);
+
if (ah->btcoex_hw.enabled) {
ath9k_hw_btcoex_disable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -2528,6 +2537,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
ath9k_hw_configpcipowersave(ah, 1, 1);
+ ath9k_ps_restore(sc);
+
+ /* Finally, put the chip in FULL SLEEP mode */
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
sc->sc_flags |= SC_OP_INVALID;
@@ -2641,10 +2653,12 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
(sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
+ ath9k_ps_wakeup(sc);
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- ath_beacon_return(sc, avp);
+ ath9k_ps_restore(sc);
}
+ ath_beacon_return(sc, avp);
sc->sc_flags &= ~SC_OP_BEACONS;
for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
@@ -3091,15 +3105,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
+ ath9k_ps_wakeup(sc);
ath_tx_aggr_start(sc, sta, tid, ssn);
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_STOP:
+ ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ ath9k_ps_restore(sc);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ath9k_ps_wakeup(sc);
ath_tx_aggr_resume(sc, sta, tid);
+ ath9k_ps_restore(sc);
break;
default:
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 5321f73..f7af5ea 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -96,7 +96,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
}
-const static struct ath_bus_ops ath_pci_bus_ops = {
+static const struct ath_bus_ops ath_pci_bus_ops = {
.read_cachesize = ath_pci_read_cachesize,
.cleanup = ath_pci_cleanup,
.eeprom_read = ath_pci_eeprom_read,
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 2a11cc5..29bf336 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1108,11 +1108,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
if (npend) {
int r;
- ath_print(common, ATH_DBG_XMIT,
+ ath_print(common, ATH_DBG_FATAL,
"Unable to stop TxDMA. Reset HAL!\n");
spin_lock_bh(&sc->sc_resetlock);
- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
+ r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
if (r)
ath_print(common, ATH_DBG_FATAL,
"Unable to reset hardware; reset status %d\n",
@@ -1414,17 +1414,9 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
* For HT capable stations, we save tidno for later use.
* We also override seqno set by upper layer with the one
* in tx aggregation state.
- *
- * If fragmentation is on, the sequence number is
- * not overridden, since it has been
- * incremented by the fragmentation routine.
- *
- * FIXME: check if the fragmentation threshold exceeds
- * IEEE80211 max.
*/
tid = ATH_AN_2_TID(an, bf->bf_tidno);
- hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
- IEEE80211_SEQ_SEQ_SHIFT);
+ hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
bf->bf_seqno = tid->seq_next;
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
@@ -1623,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf) && !is_pae(skb))
+ if (conf_is_ht(&hw->conf))
bf->bf_state.bf_type |= BUF_HT;
bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1636,7 +1628,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
}
- if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
+ if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
+ (sc->sc_flags & SC_OP_TXAGGR))
assign_aggr_tid_seqno(skb, bf);
bf->bf_mpdu = skb;
@@ -1708,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
goto tx_done;
}
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
/*
* Try aggregation if it's a unicast data frame
* and the destination is HT capable.
@@ -1780,7 +1773,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int hdrlen, padsize;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int padpos, padsize;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath_tx_control txctl;
@@ -1792,7 +1786,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
* BSSes.
*/
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
sc->tx.seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
@@ -1800,9 +1793,9 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* Add the padding after the header if this is not already done */
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- if (hdrlen & 3) {
- padsize = hdrlen % 4;
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len>padpos) {
if (skb_headroom(skb) < padsize) {
ath_print(common, ATH_DBG_XMIT,
"TX CABQ padding failed\n");
@@ -1810,7 +1803,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
skb_push(skb, padsize);
- memmove(skb->data, skb->data + padsize, hdrlen);
+ memmove(skb->data, skb->data + padsize, padpos);
}
txctl.txq = sc->beacon.cabq;
@@ -1838,7 +1831,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- int hdrlen, padsize;
+ struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
+ int padpos, padsize;
ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
@@ -1853,14 +1847,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- padsize = hdrlen & 3;
- if (padsize && hdrlen >= 24) {
+ padpos = ath9k_cmn_padpos(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len>padpos+padsize) {
/*
* Remove MAC header padding before giving the frame back to
* mac80211.
*/
- memmove(skb->data + padsize, skb->data, hdrlen);
+ memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
@@ -2078,7 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
- txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
+ txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf94..c484cc2 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -115,6 +115,7 @@
#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
#define B43_MMIO_RNG 0x65A
+#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
#define B43_MMIO_POWERUP_DELAY 0x6A8
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 027be27..88d1fd0 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,160 +383,44 @@ static inline
}
}
-/* Check if a DMA region fits the device constraints.
- * Returns true, if the region is OK for usage with this device. */
-static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
- dma_addr_t addr, size_t size)
-{
- switch (ring->type) {
- case B43_DMA_30BIT:
- if ((u64)addr + size > (1ULL << 30))
- return 0;
- break;
- case B43_DMA_32BIT:
- if ((u64)addr + size > (1ULL << 32))
- return 0;
- break;
- case B43_DMA_64BIT:
- /* Currently we can't have addresses beyond
- * 64bit in the kernel. */
- break;
- }
- return 1;
-}
-
-#define is_4k_aligned(addr) (((u64)(addr) & 0x0FFFull) == 0)
-#define is_8k_aligned(addr) (((u64)(addr) & 0x1FFFull) == 0)
-
-static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
- dma_addr_t dmaaddr, size_t size)
-{
- ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
- free_pages((unsigned long)base, get_order(size));
-}
-
-static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
- dma_addr_t *dmaaddr, size_t size,
- gfp_t gfp_flags)
-{
- void *base;
-
- base = (void *)__get_free_pages(gfp_flags, get_order(size));
- if (!base)
- return NULL;
- memset(base, 0, size);
- *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
- DMA_TO_DEVICE);
- if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
- free_pages((unsigned long)base, get_order(size));
- return NULL;
- }
-
- return base;
-}
-
-static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
- dma_addr_t *dmaaddr, size_t size)
-{
- void *base;
-
- base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
- GFP_KERNEL);
- if (!base) {
- b43err(ring->dev->wl, "Failed to allocate or map pages "
- "for DMA ringmemory\n");
- return NULL;
- }
- if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
- /* The memory does not fit our device constraints.
- * Retry with GFP_DMA set to get lower memory. */
- b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
- base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
- GFP_KERNEL | GFP_DMA);
- if (!base) {
- b43err(ring->dev->wl, "Failed to allocate or map pages "
- "in the GFP_DMA region for DMA ringmemory\n");
- return NULL;
- }
- if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
- b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
- b43err(ring->dev->wl, "Failed to allocate DMA "
- "ringmemory that fits device constraints\n");
- return NULL;
- }
- }
- /* We expect the memory to be 4k aligned, at least. */
- if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
- b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
- return NULL;
- }
-
- return base;
-}
-
static int alloc_ringmemory(struct b43_dmaring *ring)
{
- unsigned int required;
- void *base;
- dma_addr_t dmaaddr;
-
- /* There are several requirements to the descriptor ring memory:
- * - The memory region needs to fit the address constraints for the
- * device (same as for frame buffers).
- * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
- * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
+ gfp_t flags = GFP_KERNEL;
+
+ /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
+ * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
+ * has shown that 4K is sufficient for the latter as long as the buffer
+ * does not cross an 8K boundary.
+ *
+ * For unknown reasons - possibly a hardware error - the BCM4311 rev
+ * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
+ * which accounts for the GFP_DMA flag below.
+ *
+ * The flags here must match the flags in free_ringmemory below!
*/
-
if (ring->type == B43_DMA_64BIT)
- required = ring->nr_slots * sizeof(struct b43_dmadesc64);
- else
- required = ring->nr_slots * sizeof(struct b43_dmadesc32);
- if (B43_WARN_ON(required > 0x1000))
+ flags |= GFP_DMA;
+ ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+ B43_DMA_RINGMEMSIZE,
+ &(ring->dmabase), flags);
+ if (!ring->descbase) {
+ b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
return -ENOMEM;
-
- ring->alloc_descsize = 0x1000;
- base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
- if (!base)
- return -ENOMEM;
- ring->alloc_descbase = base;
- ring->alloc_dmabase = dmaaddr;
-
- if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
- /* We're on <=32bit DMA, or we already got 8k aligned memory.
- * That's all we need, so we're fine. */
- ring->descbase = base;
- ring->dmabase = dmaaddr;
- return 0;
- }
- b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
-
- /* Ok, we failed at the 8k alignment requirement.
- * Try to force-align the memory region now. */
- ring->alloc_descsize = 0x2000;
- base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
- if (!base)
- return -ENOMEM;
- ring->alloc_descbase = base;
- ring->alloc_dmabase = dmaaddr;
-
- if (is_8k_aligned(dmaaddr)) {
- /* We're already 8k aligned. That Ok, too. */
- ring->descbase = base;
- ring->dmabase = dmaaddr;
- return 0;
}
- /* Force-align it to 8k */
- ring->descbase = (void *)((u8 *)base + 0x1000);
- ring->dmabase = dmaaddr + 0x1000;
- B43_WARN_ON(!is_8k_aligned(ring->dmabase));
+ memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
return 0;
}
static void free_ringmemory(struct b43_dmaring *ring)
{
- b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
- ring->alloc_dmabase, ring->alloc_descsize);
+ gfp_t flags = GFP_KERNEL;
+
+ if (ring->type == B43_DMA_64BIT)
+ flags |= GFP_DMA;
+
+ ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
+ ring->descbase, ring->dmabase, flags);
}
/* Reset the RX DMA channel */
@@ -646,14 +530,29 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
return 1;
- if (!b43_dma_address_ok(ring, addr, buffersize)) {
- /* We can't support this address. Unmap it again. */
- unmap_descbuffer(ring, addr, buffersize, dma_to_device);
- return 1;
+ switch (ring->type) {
+ case B43_DMA_30BIT:
+ if ((u64)addr + buffersize > (1ULL << 30))
+ goto address_error;
+ break;
+ case B43_DMA_32BIT:
+ if ((u64)addr + buffersize > (1ULL << 32))
+ goto address_error;
+ break;
+ case B43_DMA_64BIT:
+ /* Currently we can't have addresses beyond
+ * 64bit in the kernel. */
+ break;
}
/* The address is OK. */
return 0;
+
+address_error:
+ /* We can't support this address. Unmap it again. */
+ unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+ return 1;
}
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -715,9 +614,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
meta->dmaaddr = dmaaddr;
ring->ops->fill_descriptor(ring, desc, dmaaddr,
ring->rx_buffersize, 0, 0, 0);
- ssb_dma_sync_single_for_device(ring->dev->dev,
- ring->alloc_dmabase,
- ring->alloc_descsize, DMA_TO_DEVICE);
return 0;
}
@@ -1354,9 +1250,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
}
/* Now transfer the whole frame. */
wmb();
- ssb_dma_sync_single_for_device(ring->dev->dev,
- ring->alloc_dmabase,
- ring->alloc_descsize, DMA_TO_DEVICE);
ops->poke_tx(ring, next_slot(ring, slot));
return 0;
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index e607b39..f7ab37c 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -157,6 +157,7 @@ struct b43_dmadesc_generic {
} __attribute__ ((__packed__));
/* Misc DMA constants */
+#define B43_DMA_RINGMEMSIZE PAGE_SIZE
#define B43_DMA0_RX_FRAMEOFFSET 30
/* DMA engine tuning knobs */
@@ -246,12 +247,6 @@ struct b43_dmaring {
/* The QOS priority assigned to this ring. Only used for TX rings.
* This is the mac80211 "queue" value. */
u8 queue_prio;
- /* Pointers and size of the originally allocated and mapped memory
- * region for the descriptor ring. */
- void *alloc_descbase;
- dma_addr_t alloc_dmabase;
- unsigned int alloc_descsize;
- /* Pointer to our wireless device. */
struct b43_wldev *dev;
#ifdef CONFIG_B43_DEBUG
/* Maximum number of used slots. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe..490fb45d 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
{
/* slot_time is in usec. */
- if (dev->phy.type != B43_PHYTYPE_G)
+ /* This test used to exit for all but a G PHY. */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
return;
- b43_write16(dev, 0x684, 510 + slot_time);
- b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+ b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
+ /* Shared memory location 0x0010 is the slot time and should be
+ * set to slot_time; however, this register is initially 0 and changing
+ * the value adversely affects the transmit rate for BCM4311
+ * devices. Until this behavior is understood, delete this step
+ *
+ * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+ */
}
static void b43_short_slot_timing_enable(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 7da1dab..234891d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -681,19 +681,13 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
snr = rx_stats_sig_avg / rx_stats_noise_diff;
rx_status.noise = rx_status.signal -
iwl3945_calc_db_from_ratio(snr);
- rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
- rx_status.noise);
-
- /* If noise info not available, calculate signal quality indicator (%)
- * using just the dBm signal level. */
} else {
rx_status.noise = priv->last_rx_noise;
- rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
}
- IWL_DEBUG_STATS(priv, "Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_status.noise, rx_status.qual,
+ IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
+ rx_status.signal, rx_status.noise,
rx_stats_sig_avg, rx_stats_noise_diff);
header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@@ -1835,8 +1829,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
rc = -EIO;
}
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return rc;
}
@@ -2836,6 +2829,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.use_isr_legacy = true,
.ht_greenfield_support = false,
.led_compensation = 64,
+ .broken_powersave = true,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2852,6 +2846,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.use_isr_legacy = true,
.ht_greenfield_support = false,
.led_compensation = 64,
+ .broken_powersave = true,
};
struct pci_device_id iwl3945_hw_card_ids[] = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index ecc23ec..531fa12 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -222,7 +222,6 @@ struct iwl3945_ibss_seq {
*
*****************************************************************************/
extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 386513b..3146281 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1204,7 +1204,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
/* calculate tx gain adjustment based on power supply voltage */
- voltage = priv->calib_info->voltage;
+ voltage = le16_to_cpu(priv->calib_info->voltage);
init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
voltage_compensation =
iwl4965_get_voltage_compensation(voltage, init_voltage);
@@ -1961,7 +1961,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
struct ieee80211_tx_info *info;
struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le32_to_cpu(tx_resp->u.status);
- int tid = MAX_TID_COUNT;
+ int uninitialized_var(tid);
int sta_id;
int freed;
u8 *qc = NULL;
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 4ef6804..bc056e9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -92,11 +92,15 @@
static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
- u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
- EEPROM_5000_TEMPERATURE);
- /* offset = temperature - voltage / coef */
- s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
- return offset;
+ u16 temperature, voltage;
+ __le16 *temp_calib =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
+
+ temperature = le16_to_cpu(temp_calib[0]);
+ voltage = le16_to_cpu(temp_calib[1]);
+
+ /* offset = temp - volt / coeff */
+ return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
}
/* Fixed (non-configurable) rx data from phy */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e2f8615..cffaae7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -333,14 +333,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
- u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
+ __le16 *xtal_calib =
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
cmd.hdr.first_group = 0;
cmd.hdr.groups_num = 1;
cmd.hdr.data_valid = 1;
- cmd.cap_pin1 = (u8)xtal_calib[0];
- cmd.cap_pin2 = (u8)xtal_calib[1];
+ cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+ cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
(u8 *)&cmd, sizeof(cmd));
}
@@ -1124,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
scd_ssn , index, txq_id, txq->swq_id);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -1152,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
tx_resp->failure_frame);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
- if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+ iwl_txq_check_empty(priv, sta_id, tid, txq_id);
if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
@@ -1597,6 +1596,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
@@ -1621,6 +1621,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
};
@@ -1666,6 +1667,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
@@ -1690,6 +1692,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
@@ -1714,6 +1717,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
.use_bsm = false,
.ht_greenfield_support = true,
.led_compensation = 51,
+ .use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index fe511cb..b93e491 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -150,7 +150,7 @@ static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
};
/* mbps, mcs */
-const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
{ "2", "QPSK DSSS"},
{"5.5", "BPSK CCK"},
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b8377ef..1c9866d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1842,7 +1842,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+ if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -3173,7 +3173,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->ibss_beacon = NULL;
- spin_lock_init(&priv->lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
@@ -3361,10 +3360,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(unsigned long long) pci_resource_len(pdev, 0));
IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
- /* this spin lock will be used in apm_ops.init and EEPROM access
+ /* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
iwl_hw_detect(priv);
IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
priv->cfg->name, priv->hw_rev);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 574d366..f36f804 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2344,6 +2344,21 @@ static void iwl_ht_conf(struct iwl_priv *priv,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static inline void iwl_set_no_assoc(struct iwl_priv *priv)
+{
+ priv->assoc_id = 0;
+ iwl_led_disassociate(priv);
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ priv->staging_rxon.filter_flags &=
+ ~RXON_FILTER_ASSOC_MSK;
+ priv->staging_rxon.assoc_id = 0;
+ iwlcore_commit_rxon(priv);
+}
+
#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2475,20 +2490,8 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
if (!iwl_is_rfkill(priv))
priv->cfg->ops->lib->post_associate(priv);
- } else {
- priv->assoc_id = 0;
- iwl_led_disassociate(priv);
-
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * send
- */
- priv->staging_rxon.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- priv->staging_rxon.assoc_id = 0;
- iwlcore_commit_rxon(priv);
- }
+ } else
+ iwl_set_no_assoc(priv);
}
if (changes && iwl_is_associated(priv) && priv->assoc_id) {
@@ -2503,12 +2506,14 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if ((changes & BSS_CHANGED_BEACON_ENABLED) &&
- vif->bss_conf.enable_beacon) {
- memcpy(priv->staging_rxon.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwlcore_config_ap(priv);
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(priv->staging_rxon.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ iwlcore_config_ap(priv);
+ } else
+ iwl_set_no_assoc(priv);
}
mutex_unlock(&priv->mutex);
@@ -2740,6 +2745,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
priv->staging_rxon.flags = 0;
iwl_set_rxon_channel(priv, conf->channel);
+ iwl_set_rxon_ht(priv, ht_conf);
iwl_set_flags_for_band(priv, conf->channel->band);
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 675b7df..b69e972 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,7 +63,7 @@
#ifndef __iwl_core_h__
#define __iwl_core_h__
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
/************************
* forward declarations *
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed);
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index a7bfae0..1ec8cb4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -77,8 +77,7 @@
* The MAC (uCode processor, etc.) does not need to be powered up for accessing
* the CSR registers.
*
- * NOTE: Newer devices using one-time-programmable (OTP) memory
- * require device to be awake in order to read this memory
+ * NOTE: Device does need to be awake in order to read this memory
* via CSR_EEPROM and CSR_OTP registers
*/
#define CSR_BASE (0x000)
@@ -111,9 +110,8 @@
/*
* EEPROM and OTP (one-time-programmable) memory reads
*
- * NOTE: For (newer) devices using OTP, device must be awake, initialized via
- * apm_ops.init() in order to read. Older devices (3945/4965/5000)
- * use EEPROM and do not require this.
+ * NOTE: Device must be awake, initialized via apm_ops.init(),
+ * in order to read.
*/
#define CSR_EEPROM_REG (CSR_BASE+0x02c)
#define CSR_EEPROM_GP (CSR_BASE+0x030)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 2673e9a..3822cf5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -711,7 +711,7 @@ extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
extern int iwl_queue_space(const struct iwl_queue *q);
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
- return q->write_ptr > q->read_ptr ?
+ return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
@@ -1168,7 +1168,7 @@ struct iwl_priv {
u32 last_beacon_time;
u64 last_tsf;
- /* eeprom */
+ /* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
struct iwl_eeprom_calib_info *calib_info;
@@ -1353,4 +1353,15 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
}
+static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
+{
+ __free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
+
+static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
+{
+ free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
#endif /* __iwl_dev_h__ */
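
Two small things change in iwl-dev.h above: iwl_queue_used() now uses '>=' so an empty queue (write_ptr == read_ptr) no longer falls into the wrapped-queue branch and reports every index as in use, and the new page-freeing helpers pair the free with the alloc_rxb_page accounting. A standalone sketch of the corrected occupancy test, with hypothetical pointer values:

#include <stdio.h>

struct queue { int read_ptr, write_ptr; };

/* Same test as the fixed iwl_queue_used(): the first branch handles the
 * normal (and empty) case, the second the wrapped case. */
static int queue_used(const struct queue *q, int i)
{
        return q->write_ptr >= q->read_ptr ?
                (i >= q->read_ptr && i < q->write_ptr) :
                !(i < q->read_ptr && i >= q->write_ptr);
}

int main(void)
{
        struct queue empty   = { .read_ptr = 5, .write_ptr = 5 };
        struct queue wrapped = { .read_ptr = 5, .write_ptr = 2 };

        printf("empty, idx 3:   %d\n", queue_used(&empty, 3));   /* 0 */
        printf("wrapped, idx 6: %d\n", queue_used(&wrapped, 6)); /* 1 */
        printf("wrapped, idx 3: %d\n", queue_used(&wrapped, 3)); /* 0 */
        return 0;
}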
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index e7d88d1..83cc4e5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -1,3 +1,29 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
#include <linux/module.h>
/* sparse doesn't like tracepoint macros */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 2136196..d9c7363 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -1,3 +1,29 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
#define __IWLWIFI_DEVICE_TRACE
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 3946e5c..4a30969 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -370,7 +370,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
return ret;
}
-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
{
int ret = 0;
u32 r;
@@ -404,7 +404,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
}
- *eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
+ *eeprom_data = cpu_to_le16(r >> 16);
return 0;
}
@@ -413,7 +413,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
*/
static bool iwl_is_otp_empty(struct iwl_priv *priv)
{
- u16 next_link_addr = 0, link_value;
+ u16 next_link_addr = 0;
+ __le16 link_value;
bool is_empty = false;
/* locate the beginning of OTP link list */
@@ -443,7 +444,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
static int iwl_find_otp_image(struct iwl_priv *priv,
u16 *validblockaddr)
{
- u16 next_link_addr = 0, link_value = 0, valid_addr;
+ u16 next_link_addr = 0, valid_addr;
+ __le16 link_value = 0;
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
@@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
* check for more block on the link list
*/
valid_addr = next_link_addr;
- next_link_addr = link_value * sizeof(u16);
+ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
if (iwl_read_otp_word(priv, next_link_addr, &link_value))
@@ -497,7 +499,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
*/
int iwl_eeprom_init(struct iwl_priv *priv)
{
- u16 *e;
+ __le16 *e;
u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
int sz;
int ret;
@@ -516,12 +518,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
ret = -ENOMEM;
goto alloc_err;
}
- e = (u16 *)priv->eeprom;
+ e = (__le16 *)priv->eeprom;
- if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
- /* OTP reads require powered-up chip */
- priv->cfg->ops->lib->apm_ops.init(priv);
- }
+ priv->cfg->ops->lib->apm_ops.init(priv);
ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
if (ret < 0) {
@@ -562,7 +561,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
}
for (addr = validblockaddr; addr < validblockaddr + sz;
addr += sizeof(u16)) {
- u16 eeprom_data;
+ __le16 eeprom_data;
ret = iwl_read_otp_word(priv, addr, &eeprom_data);
if (ret)
@@ -570,13 +569,6 @@ int iwl_eeprom_init(struct iwl_priv *priv)
e[cache_addr / 2] = eeprom_data;
cache_addr += sizeof(u16);
}
-
- /*
- * Now that OTP reads are complete, reset chip to save
- * power until we load uCode during "up".
- */
- priv->cfg->ops->lib->apm_ops.stop(priv);
-
} else {
/* eeprom is an array of 16bit values */
for (addr = 0; addr < sz; addr += sizeof(u16)) {
@@ -594,7 +586,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
goto done;
}
r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
- e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
+ e[addr / 2] = cpu_to_le16(r >> 16);
}
}
ret = 0;
@@ -603,6 +595,8 @@ done:
err:
if (ret)
iwl_eeprom_free(priv);
+ /* Reset chip to save power until we load uCode during "up". */
+ priv->cfg->ops->lib->apm_ops.stop(priv);
alloc_err:
return ret;
}
@@ -755,7 +749,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
ch_info->ht40_eeprom = *eeprom_ch;
ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
ch_info->ht40_flags = eeprom_ch->flags;
- ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 5cd2b66..0cd9c02 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -137,7 +137,7 @@ struct iwl_eeprom_channel {
*
*/
struct iwl_eeprom_enhanced_txpwr {
- u16 common;
+ __le16 common;
s8 chain_a_max;
s8 chain_b_max;
s8 chain_c_max;
@@ -360,7 +360,7 @@ struct iwl_eeprom_calib_subband_info {
struct iwl_eeprom_calib_info {
u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
u8 saturation_power52; /* half-dBm */
- s16 voltage; /* signed */
+ __le16 voltage; /* signed */
struct iwl_eeprom_calib_subband_info
band_info[EEPROM_TX_POWER_BANDS];
} __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index a231659..30e9ea6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -234,7 +234,7 @@ cancel:
}
fail:
if (cmd->reply_page) {
- free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd->reply_page);
cmd->reply_page = 0;
}
out:
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6090bc1..2dbce85 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -345,10 +345,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
- priv->alloc_rxb_page--;
}
}
@@ -416,9 +414,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- priv->alloc_rxb_page--;
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -654,47 +650,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_reply_statistics);
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95) /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- * about formulas used below. */
-static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
-{
- int sig_qual;
- int degradation = PERFECT_RSSI - rssi_dbm;
-
- /* If we get a noise measurement, use signal-to-noise ratio (SNR)
- * as indicator; formula is (signal dbm - noise dbm).
- * SNR at or above 40 is a great signal (100%).
- * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
- * Weakest usable signal is usually 10 - 15 dB SNR. */
- if (noise_dbm) {
- if (rssi_dbm - noise_dbm >= 40)
- return 100;
- else if (rssi_dbm < noise_dbm)
- return 0;
- sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
- /* Else use just the signal level.
- * This formula is a least squares fit of data points collected and
- * compared with a reference system that had a percentage (%) display
- * for signal quality. */
- } else
- sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
- (15 * RSSI_RANGE + 62 * degradation)) /
- (RSSI_RANGE * RSSI_RANGE);
-
- if (sig_qual > 100)
- sig_qual = 100;
- else if (sig_qual < 1)
- sig_qual = 0;
-
- return sig_qual;
-}
-
/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwl_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
@@ -973,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
if (ieee80211_is_mgmt(fc) ||
ieee80211_has_protected(fc) ||
ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+ le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
+ (ieee80211_is_data_qos(fc) &&
+ *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
ret = skb_linearize(skb);
else
ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
@@ -1105,11 +1063,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
if (iwl_is_associated(priv) &&
!test_bit(STATUS_SCANNING, &priv->status)) {
rx_status.noise = priv->last_rx_noise;
- rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
- rx_status.noise);
} else {
rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
- rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
}
/* Reset beacon noise level if not associated. */
@@ -1122,8 +1077,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
iwl_dbg_report_frame(priv, phy_res, len, header, 1);
#endif
iwl_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n",
- rx_status.signal, rx_status.noise, rx_status.qual,
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
+ rx_status.signal, rx_status.noise,
(unsigned long long)rx_status.mactime);
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a2b2b83..fa1c89b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -144,8 +144,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
clear_bit(STATUS_SCAN_HW, &priv->status);
}
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index cd6a690..90fbdb2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -164,9 +164,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
break;
}
}
-
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return ret;
}
@@ -299,7 +297,7 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
}
EXPORT_SYMBOL(iwl_add_station);
-static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr)
{
unsigned long flags;
u8 sta_id = iwl_find_station(priv, addr);
@@ -326,7 +324,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv,
{
struct iwl_rem_sta_cmd *rm_sta =
(struct iwl_rem_sta_cmd *)cmd->cmd.payload;
- const char *addr = rm_sta->addr;
+ const u8 *addr = rm_sta->addr;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
@@ -391,9 +389,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
break;
}
}
-
- priv->alloc_rxb_page--;
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 00da5e1..8f40715 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed)
+{
+ if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
+ priv->stations[sta_id].tid[tid].tfds_in_queue,
+ freed);
+ priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
+
/**
* iwl_tx_queue_free - Deallocate DMA queue.
* @txq: Transmit queue to deallocate.
@@ -407,13 +421,14 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
int txq_id;
/* Tx queues */
- if (priv->txq)
+ if (priv->txq) {
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
txq_id++)
if (txq_id == IWL_CMD_QUEUE_NUM)
iwl_cmd_queue_free(priv);
else
iwl_tx_queue_free(priv, txq_id);
+ }
iwl_free_dma_ptr(priv, &priv->kw);
iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
@@ -1130,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
struct iwl_queue *q = &txq->q;
struct iwl_tx_info *tx_info;
int nfreed = 0;
+ struct ieee80211_hdr *hdr;
if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1144,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
tx_info = &txq->txb[txq->q.read_ptr];
iwl_tx_status(priv, tx_info->skb[0]);
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+ if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
tx_info->skb[0] = NULL;
if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- nfreed++;
}
return nfreed;
}
@@ -1558,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
/* calculate mac80211 ampdu sw queue to wake */
int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
priv->mac80211_registered &&
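
The new iwl_free_tfds_in_queue() above centralizes the per-TID frame accounting that several reclaim paths used to open-code, and guards against decrementing below zero when more frames are reclaimed than were counted. A standalone sketch of just the clamping logic, with made-up values:

#include <stdio.h>

static unsigned int tfds_in_queue = 3;

static void free_tfds_in_queue(int freed)
{
        if (tfds_in_queue >= (unsigned int)freed) {
                tfds_in_queue -= freed;
        } else {
                /* Warn and clamp to 0 instead of wrapping the unsigned
                 * counter around to a huge value. */
                fprintf(stderr, "free more than tfds_in_queue (%u:%d)\n",
                        tfds_in_queue, freed);
                tfds_in_queue = 0;
        }
}

int main(void)
{
        free_tfds_in_queue(2);   /* 3 -> 1 */
        free_tfds_in_queue(5);   /* would underflow; clamped to 0 */
        printf("tfds_in_queue = %u\n", tfds_in_queue);
        return 0;
}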
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 2a28a1f..f8e4e4b 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -548,6 +548,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq = &priv->txq[txq_id];
q = &txq->q;
+ if ((iwl_queue_space(q) < q->high_mark))
+ goto drop;
+
spin_lock_irqsave(&priv->lock, flags);
idx = get_cmd_index(q, q->write_ptr, 0);
@@ -812,7 +815,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
break;
}
- free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+ iwl_free_pages(priv, cmd.reply_page);
return rc;
}
@@ -1198,9 +1201,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- priv->alloc_rxb_page--;
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1247,10 +1248,8 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __free_pages(rxq->pool[i].page,
- priv->hw_params.rx_page_order);
+ __iwl_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
- priv->alloc_rxb_page--;
}
}
@@ -1300,47 +1299,6 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
return (int)ratio2dB[sig_ratio];
}
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95) /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- * about formulas used below. */
-int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
-{
- int sig_qual;
- int degradation = PERFECT_RSSI - rssi_dbm;
-
- /* If we get a noise measurement, use signal-to-noise ratio (SNR)
- * as indicator; formula is (signal dbm - noise dbm).
- * SNR at or above 40 is a great signal (100%).
- * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
- * Weakest usable signal is usually 10 - 15 dB SNR. */
- if (noise_dbm) {
- if (rssi_dbm - noise_dbm >= 40)
- return 100;
- else if (rssi_dbm < noise_dbm)
- return 0;
- sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
- /* Else use just the signal level.
- * This formula is a least squares fit of data points collected and
- * compared with a reference system that had a percentage (%) display
- * for signal quality. */
- } else
- sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
- (15 * RSSI_RANGE + 62 * degradation)) /
- (RSSI_RANGE * RSSI_RANGE);
-
- if (sig_qual > 100)
- sig_qual = 100;
- else if (sig_qual < 1)
- sig_qual = 0;
-
- return sig_qual;
-}
-
/**
* iwl3945_rx_handle - Main entry function for receiving responses from uCode
*
@@ -1688,7 +1646,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
}
#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+ if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -3867,7 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->retry_rate = 1;
priv->ibss_beacon = NULL;
- spin_lock_init(&priv->lock);
spin_lock_init(&priv->sta_lock);
spin_lock_init(&priv->hcmd_lock);
@@ -3936,9 +3893,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ IEEE80211_HW_SPECTRUM_MGMT;
+
+ if (!priv->cfg->broken_powersave)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
@@ -4057,10 +4016,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, 0x41, 0x00);
- /* this spin lock will be used in apm_ops.init and EEPROM access
+ /* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
/***********************
* 4. Read EEPROM
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 777584d..1e41ad0 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -973,6 +973,10 @@ int iwm_send_pmkid_update(struct iwm_priv *iwm,
memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
+ update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
+ update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
+ sizeof(struct iwm_umac_wifi_if));
+
update.command = cpu_to_le32(command);
if (pmksa->bssid)
memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 06af055..3dfd9f0 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -463,6 +463,7 @@ struct iwm_umac_cmd_stop_resume_tx {
#define IWM_CMD_PMKID_FLUSH 3
struct iwm_umac_pmkid_update {
+ struct iwm_umac_wifi_if hdr;
__le32 command;
u8 bssid[ETH_ALEN];
__le16 reserved;
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 5a26bb0..8428111 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
struct sk_buff_head rx_list;
struct list_head rx_tickets;
- struct list_head rx_packets[IWM_RX_ID_HASH];
+ struct list_head rx_packets[IWM_RX_ID_HASH + 1];
struct workqueue_struct *rx_wq;
struct work_struct rx_worker;
@@ -349,7 +349,7 @@ int iwm_up(struct iwm_priv *iwm);
int iwm_down(struct iwm_priv *iwm);
/* TX API */
-u16 iwm_tid_to_queue(u16 tid);
+int iwm_tid_to_queue(u16 tid);
void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
void iwm_tx_worker(struct work_struct *work);
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index e4f0f87..c4c0d23 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -76,7 +76,7 @@ static int iwm_stop(struct net_device *ndev)
*/
static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
-u16 iwm_tid_to_queue(u16 tid)
+int iwm_tid_to_queue(u16 tid)
{
if (tid > IWM_UMAC_TID_NR - 2)
return -EINVAL;
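
The u16 -> int change for iwm_tid_to_queue() matters because the function can return -EINVAL: squeezed into an unsigned 16-bit type the error becomes a large positive number and the caller's "if (queue < 0)" test can never fire. A standalone sketch of the difference, with a hypothetical TID limit and a made-up tid-to-queue mapping:

#include <stdint.h>
#include <stdio.h>

#define TID_NR 8            /* stand-in for IWM_UMAC_TID_NR */
#define EINVAL_NEG (-22)    /* -EINVAL */

static uint16_t tid_to_queue_u16(uint16_t tid)     /* old return type */
{
        if (tid > TID_NR - 2)
                return EINVAL_NEG;   /* truncates to 65514 */
        return tid / 2;
}

static int tid_to_queue_int(uint16_t tid)          /* fixed return type */
{
        if (tid > TID_NR - 2)
                return EINVAL_NEG;   /* stays negative, "< 0" works */
        return tid / 2;
}

int main(void)
{
        printf("u16 return: %u\n", tid_to_queue_u16(7));
        printf("int return: %d\n", tid_to_queue_int(7));
        return 0;
}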
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 1c57c1f..f727b4a 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
}
bss->bss = kzalloc(bss_len, GFP_KERNEL);
- if (!bss) {
+ if (!bss->bss) {
kfree(bss);
IWM_ERR(iwm, "Couldn't allocate bss\n");
return -ENOMEM;
@@ -1126,7 +1126,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
if (!stop) {
struct iwm_tx_queue *txq;
- u16 queue = iwm_tid_to_queue(bit);
+ int queue = iwm_tid_to_queue(bit);
if (queue < 0)
continue;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index b9b371b..42611be 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1365,7 +1365,7 @@ static void lbs_send_confirmsleep(struct lbs_private *priv)
priv->dnld_sent = DNLD_RES_RECEIVED;
/* If nothing to do, go back to sleep (?) */
- if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx])
+ if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx])
priv->psstate = PS_STATE_SLEEP;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -1439,7 +1439,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv)
}
/* Pending events or command responses? */
- if (__kfifo_len(priv->event_fifo) || priv->resp_len[priv->resp_idx]) {
+ if (kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) {
allowed = 0;
lbs_deb_host("pending events or command responses\n");
}
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 6a8d2b2..05bb298 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -10,7 +10,7 @@
#include "scan.h"
#include "assoc.h"
-
+#include <linux/kfifo.h>
/** sleep_params */
struct sleep_params {
@@ -120,7 +120,7 @@ struct lbs_private {
u32 resp_len[2];
/* Events sent from hardware to driver */
- struct kfifo *event_fifo;
+ struct kfifo event_fifo;
/** thread to service interrupts */
struct task_struct *main_thread;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index db38a5a..c2975c8 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -459,7 +459,7 @@ static int lbs_thread(void *data)
else if (!list_empty(&priv->cmdpendingq) &&
!(priv->wakeup_dev_required))
shouldsleep = 0; /* We have a command to send */
- else if (__kfifo_len(priv->event_fifo))
+ else if (kfifo_len(&priv->event_fifo))
shouldsleep = 0; /* We have an event to process */
else
shouldsleep = 1; /* No command */
@@ -511,10 +511,13 @@ static int lbs_thread(void *data)
/* Process hardware events, e.g. card removed, link lost */
spin_lock_irq(&priv->driver_lock);
- while (__kfifo_len(priv->event_fifo)) {
+ while (kfifo_len(&priv->event_fifo)) {
u32 event;
- __kfifo_get(priv->event_fifo, (unsigned char *) &event,
- sizeof(event));
+
+ if (kfifo_out(&priv->event_fifo,
+ (unsigned char *) &event, sizeof(event)) !=
+ sizeof(event))
+ break;
spin_unlock_irq(&priv->driver_lock);
lbs_process_event(priv, event);
spin_lock_irq(&priv->driver_lock);
@@ -883,10 +886,9 @@ static int lbs_init_adapter(struct lbs_private *priv)
priv->resp_len[0] = priv->resp_len[1] = 0;
/* Create the event FIFO */
- priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL);
- if (IS_ERR(priv->event_fifo)) {
+ ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
+ if (ret) {
lbs_pr_err("Out of memory allocating event FIFO buffer\n");
- ret = -ENOMEM;
goto out;
}
@@ -901,8 +903,7 @@ static void lbs_free_adapter(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_MAIN);
lbs_free_cmd_buffer(priv);
- if (priv->event_fifo)
- kfifo_free(priv->event_fifo);
+ kfifo_free(&priv->event_fifo);
del_timer(&priv->command_timer);
del_timer(&priv->auto_deepsleep_timer);
kfree(priv->networks);
@@ -1177,7 +1178,7 @@ void lbs_queue_event(struct lbs_private *priv, u32 event)
if (priv->psstate == PS_STATE_SLEEP)
priv->psstate = PS_STATE_AWAKE;
- __kfifo_put(priv->event_fifo, (unsigned char *) &event, sizeof(u32));
+ kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32));
wake_up_interruptible(&priv->waitq);
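
The libertas changes above migrate to the 2.6.33 kfifo API (the same new kfifo bits the merge message mentions): the fifo is now embedded in the private struct and passed by address, kfifo_alloc() returns an error code rather than a pointer, and __kfifo_get/__kfifo_put become kfifo_out()/kfifo_in(). A minimal kernel-module sketch of that calling convention; the module name and event values are made up:

#include <linux/module.h>
#include <linux/kfifo.h>

static struct kfifo event_fifo;   /* embedded, not a pointer */

static int __init kfifo_sketch_init(void)
{
        u32 event = 42, out;
        int ret;

        /* 2.6.33-era API: returns 0 on success, no ERR_PTR dance. */
        ret = kfifo_alloc(&event_fifo, sizeof(u32) * 16, GFP_KERNEL);
        if (ret)
                return ret;

        kfifo_in(&event_fifo, (unsigned char *)&event, sizeof(event));

        while (kfifo_len(&event_fifo)) {
                if (kfifo_out(&event_fifo, (unsigned char *)&out,
                              sizeof(out)) != sizeof(out))
                        break;
                pr_info("dequeued event %u\n", out);
        }
        return 0;
}

static void __exit kfifo_sketch_exit(void)
{
        kfifo_free(&event_fifo);
}

module_init(kfifo_sketch_init);
module_exit(kfifo_sketch_exit);
MODULE_LICENSE("GPL");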
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 2f91c9b..92b7a35 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -2,6 +2,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/kthread.h>
#include <linux/kfifo.h>
@@ -351,8 +352,7 @@ int lbs_add_mesh(struct lbs_private *priv)
mesh_dev->netdev_ops = &mesh_netdev_ops;
mesh_dev->ethtool_ops = &lbs_ethtool_ops;
- memcpy(mesh_dev->dev_addr, priv->dev->dev_addr,
- sizeof(priv->dev->dev_addr));
+ memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
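
The ETH_ALEN fix above matters because net_device::dev_addr is a pointer here, so sizeof(priv->dev->dev_addr) is the pointer size rather than the six bytes of a MAC address. A standalone sketch of the pitfall:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
        unsigned char hwaddr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char *dev_addr = hwaddr;     /* like net_device::dev_addr */
        unsigned char copy[ETH_ALEN] = { 0 };

        /* sizeof(dev_addr) is 4 or 8 (pointer size), not 6. */
        printf("sizeof(dev_addr) = %zu, ETH_ALEN = %d\n",
               sizeof(dev_addr), ETH_ALEN);

        memcpy(copy, dev_addr, ETH_ALEN);     /* always exactly 6 bytes */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               copy[0], copy[1], copy[2], copy[3], copy[4], copy[5]);
        return 0;
}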
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index c6a6c04..b0b1c78 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -567,11 +567,8 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
chan_count = lbs_scan_create_channel_list(priv, chan_list);
netif_stop_queue(priv->dev);
- netif_carrier_off(priv->dev);
- if (priv->mesh_dev) {
+ if (priv->mesh_dev)
netif_stop_queue(priv->mesh_dev);
- netif_carrier_off(priv->mesh_dev);
- }
/* Prepare to continue an interrupted scan */
lbs_deb_scan("chan_count %d, scan_channel %d\n",
@@ -635,16 +632,13 @@ out2:
priv->scan_channel = 0;
out:
- if (priv->connect_status == LBS_CONNECTED) {
- netif_carrier_on(priv->dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->dev);
- }
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) {
- netif_carrier_on(priv->mesh_dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->mesh_dev);
- }
+ if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
+ netif_wake_queue(priv->dev);
+
+ if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
+ !priv->tx_pending_len)
+ netif_wake_queue(priv->mesh_dev);
+
kfree(chan_list);
lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index a8eb9e1..4b1aab5 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -2025,10 +2025,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
if (priv->connect_status == LBS_CONNECTED) {
memcpy(extra, priv->curbssparams.ssid,
priv->curbssparams.ssid_len);
- extra[priv->curbssparams.ssid_len] = '\0';
} else {
memset(extra, 0, 32);
- extra[priv->curbssparams.ssid_len] = '\0';
}
/*
* If none, we may want to get the one that was set
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 019431d..26a1abd 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -495,7 +495,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = prxpd->snr;
stats.noise = prxpd->nf;
- stats.qual = prxpd->snr - prxpd->nf;
/* Marvell rate index has a hole at value 4 */
if (prxpd->rx_rate > 4)
--prxpd->rx_rate;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 59d4915..59f9210 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -3157,8 +3157,10 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
/* Clear unsupported feature flags */
*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
- if (mwl8k_fw_lock(hw))
+ if (mwl8k_fw_lock(hw)) {
+ kfree(cmd);
return;
+ }
if (priv->sniffer_enabled) {
mwl8k_enable_sniffer(hw, 0);
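
The mwl8k change above is a plain error-path leak fix: once the command buffer has been allocated, every early return has to free it. A generic standalone sketch of the pattern, with made-up names:

#include <stdlib.h>

struct cmd { int dummy; };

static int fw_lock(void) { return -1; }   /* pretend the lock times out */

static void configure_filter(void)
{
        struct cmd *cmd = malloc(sizeof(*cmd));

        if (!cmd)
                return;

        if (fw_lock()) {
                free(cmd);   /* without this, the early return leaks cmd */
                return;
        }

        /* ... issue the command ... */
        free(cmd);
}

int main(void)
{
        configure_filter();
        return 0;
}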
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 7698fdd..31ca241 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -23,7 +23,7 @@
#define MAX_RID_LEN 1024
/* Helper routine to record keys
- * Do not call from interrupt context */
+ * It is called under orinoco_lock so it may not sleep */
static int orinoco_set_key(struct orinoco_private *priv, int index,
enum orinoco_alg alg, const u8 *key, int key_len,
const u8 *seq, int seq_len)
@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
kzfree(priv->keys[index].seq);
if (key_len) {
- priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
+ priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
if (!priv->keys[index].key)
goto nomem;
} else
priv->keys[index].key = NULL;
if (seq_len) {
- priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
+ priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
if (!priv->keys[index].seq)
goto free_key;
} else
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a15962a..a72f7c2 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -197,6 +197,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
i %= ring_limit;
continue;
}
+
+ if (unlikely(len > priv->common.rx_mtu)) {
+ if (net_ratelimit())
+ dev_err(&priv->pdev->dev, "rx'd frame size "
+ "exceeds length threshold.\n");
+
+ len = priv->common.rx_mtu;
+ }
skb_put(skb, len);
if (p54_rx(dev, skb)) {
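
The p54pci hunk above stops trusting the frame length reported in the rx ring: it is clamped to the receive MTU before being used to size the skb. A standalone sketch of the clamp, with a stand-in MTU value:

#include <stdio.h>

#define RX_MTU 2400U   /* stand-in for priv->common.rx_mtu */

static unsigned int clamp_rx_len(unsigned int hw_len)
{
        if (hw_len > RX_MTU) {
                fprintf(stderr, "rx'd frame size exceeds length threshold\n");
                return RX_MTU;
        }
        return hw_len;
}

int main(void)
{
        printf("%u\n", clamp_rx_len(1500));    /* passes through */
        printf("%u\n", clamp_rx_len(70000));   /* clamped to 2400 */
        return 0;
}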
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index c5fe867..1a7eae3 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1323,7 +1323,7 @@
#define PAIRWISE_KEY_ENTRY(__idx) \
( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
#define MAC_IVEIV_ENTRY(__idx) \
- ( MAC_IVEIV_TABLE_BASE + ((__idx) & sizeof(struct mac_iveiv_entry)) )
+ ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
#define MAC_WCID_ATTR_ENTRY(__idx) \
( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
#define SHARED_KEY_ENTRY(__idx) \
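
The one-character MAC_IVEIV_ENTRY fix above replaces a bitwise AND with the intended multiplication; with '&' the macro produced a near-constant offset instead of indexing into the table. A standalone sketch with stand-in base address and entry size:

#include <stdio.h>

struct mac_iveiv_entry { unsigned char iv[8]; };

#define TABLE_BASE 0x6000UL   /* stand-in base address */

#define ENTRY_BUGGY(i) (TABLE_BASE + ((i) & sizeof(struct mac_iveiv_entry)))
#define ENTRY_FIXED(i) (TABLE_BASE + ((i) * sizeof(struct mac_iveiv_entry)))

int main(void)
{
        /* (3 & 8) == 0, so the buggy form lands on the table base. */
        printf("buggy idx 3: 0x%lx\n", ENTRY_BUGGY(3));
        printf("fixed idx 3: 0x%lx\n", ENTRY_FIXED(3));
        return 0;
}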
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index eb1e1d0..9deae41 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,7 +37,7 @@
#include <linux/module.h>
#include "rt2x00.h"
-#ifdef CONFIG_RT2800USB
+#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
#include "rt2x00usb.h"
#endif
#include "rt2800lib.h"
@@ -340,7 +340,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
+ rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
@@ -1121,7 +1121,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
if (rt2x00_intf_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-#ifdef CONFIG_RT2800USB
+#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_RESET, REGISTER_TIMEOUT);
#endif
@@ -2022,6 +2022,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
u16 eeprom;
/*
+ * Disable powersaving as default on PCI devices.
+ */
+ if (rt2x00_intf_is_pci(rt2x00dev))
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
@@ -2074,8 +2080,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_TX_STBC |
- IEEE80211_HT_CAP_RX_STBC |
- IEEE80211_HT_CAP_PSMP_SUPPORT;
+ IEEE80211_HT_CAP_RX_STBC;
spec->ht.ampdu_factor = 3;
spec->ht.ampdu_density = 4;
spec->ht.mcs.tx_params =
@@ -2140,8 +2145,8 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
rt2800_register_multiread(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
- memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
- memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
+ memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
+ memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
}
static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
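
The preprocessor changes in rt2800lib.c above broaden "#ifdef CONFIG_RT2800USB" to also cover CONFIG_RT2800USB_MODULE: a tristate option built as =m only defines the _MODULE symbol, so the old test silently dropped the shared USB code for modular builds. A tiny standalone sketch of the pattern (the macro is defined by hand here; in the kernel it comes from Kconfig, and later kernels spell this IS_ENABLED()):

#include <stdio.h>

#define CONFIG_RT2800USB_MODULE 1   /* pretend the driver is built as =m */

int main(void)
{
#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
        puts("shared USB support compiled in (=y or =m)");
#else
        puts("shared USB support dropped");
#endif
        return 0;
}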
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index af85d18..ab95346 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 4d841c0..dcfc8c2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -113,6 +113,12 @@
( ((unsigned long)((__skb)->data + (__header))) & 3 )
/*
+ * Constants for extra TX headroom for alignment purposes.
+ */
+#define RT2X00_ALIGN_SIZE 4 /* Only whole frame needs alignment */
+#define RT2X00_L2PAD_SIZE 8 /* Both header & payload need alignment */
+
+/*
* Standard timing and size defines.
* These values should follow the ieee80211 specifications.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 06c43ca..265e66d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -686,7 +686,17 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Initialize extra TX headroom required.
*/
- rt2x00dev->hw->extra_tx_headroom = rt2x00dev->ops->extra_tx_headroom;
+ rt2x00dev->hw->extra_tx_headroom =
+ max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
+ rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Take TX headroom required for alignment into account.
+ */
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
+ else if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
/*
* Register HW.
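
The rt2x00dev.c hunk above computes the TX headroom advertised to mac80211 as the larger of the stack's status headroom and the driver's descriptor headroom, plus alignment padding depending on whether the driver needs L2 padding or DMA alignment. A standalone sketch of that arithmetic with stand-in constants:

#include <stdio.h>

#define STATUS_HEADROOM 13U   /* stand-in for IEEE80211_TX_STATUS_HEADROOM */
#define ALIGN_SIZE       4U   /* whole frame needs alignment */
#define L2PAD_SIZE       8U   /* header and payload need alignment */

static unsigned int extra_tx_headroom(unsigned int desc_headroom,
                                      int needs_l2pad, int needs_dma)
{
        unsigned int headroom = desc_headroom > STATUS_HEADROOM ?
                                desc_headroom : STATUS_HEADROOM;

        if (needs_l2pad)
                headroom += L2PAD_SIZE;
        else if (needs_dma)
                headroom += ALIGN_SIZE;
        return headroom;
}

int main(void)
{
        printf("%u\n", extra_tx_headroom(32, 1, 0));   /* 32 + 8 = 40 */
        printf("%u\n", extra_tx_headroom(4, 0, 1));    /* 13 + 4 = 17 */
        return 0;
}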
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 239afc7..9915a09 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -104,7 +104,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* is also mapped to the DMA so it can be used for transfering
* additional descriptor information to the hardware.
*/
- skb_push(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->skb_dma =
dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
@@ -112,7 +112,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
/*
* Restore data pointer to original location again.
*/
- skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
@@ -134,7 +134,7 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* by the driver, but it was actually mapped to DMA.
*/
dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
- skb->len + rt2x00dev->hw->extra_tx_headroom,
+ skb->len + rt2x00dev->ops->extra_tx_headroom,
DMA_TO_DEVICE);
skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
}
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 687e17d..0ca5893 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2539,6 +2539,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
unsigned int i;
/*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize all hw fields.
*/
rt2x00dev->hw->flags =
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index a1a3dd1..8a40a14 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -132,7 +132,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.antenna = (flags2 >> 15) & 1;
/* TODO: improve signal/rssi reporting */
- rx_status.qual = flags2 & 0xFF;
rx_status.signal = (flags2 >> 8) & 0x7F;
/* XXX: is this correct? */
rx_status.rate_idx = (flags >> 20) & 0xF;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726d..7ba3052 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
/* Sitecom */
{USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
{USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
+ {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
/* Sphairon Access Systems GmbH */
{USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
/* Dick Smith Electronics */
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 2e733e7..28a8086 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -256,7 +256,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
}
}
- if (loop >= INIT_LOOP) {
+ if (loop > INIT_LOOP) {
wl1251_error("timeout waiting for the hardware to "
"complete initialization");
return -EIO;
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 886a9bc..c3385b3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -777,7 +777,7 @@ out:
return ret;
}
-static int wl1271_build_basic_rates(char *rates, u8 band)
+static int wl1271_build_basic_rates(u8 *rates, u8 band)
{
u8 index = 0;
@@ -804,7 +804,7 @@ static int wl1271_build_basic_rates(char *rates, u8 band)
return index;
}
-static int wl1271_build_extended_rates(char *rates, u8 band)
+static int wl1271_build_extended_rates(u8 *rates, u8 band)
{
u8 index = 0;
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index dfa1b9bc..7ca95c4 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1325,151 +1325,11 @@ int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
return r;
}
-static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size)
-{
- static const u16 constants[] = {
- 715, 655, 585, 540, 470, 410, 360, 315,
- 270, 235, 205, 175, 150, 125, 105, 85,
- 65, 50, 40, 25, 15
- };
-
- int i;
- u32 x;
-
- /* It seems that their quality parameter is somehow per signal
- * and is now transferred per bit.
- */
- switch (zd_rate) {
- case ZD_OFDM_RATE_6M:
- case ZD_OFDM_RATE_12M:
- case ZD_OFDM_RATE_24M:
- size *= 2;
- break;
- case ZD_OFDM_RATE_9M:
- case ZD_OFDM_RATE_18M:
- case ZD_OFDM_RATE_36M:
- case ZD_OFDM_RATE_54M:
- size *= 4;
- size /= 3;
- break;
- case ZD_OFDM_RATE_48M:
- size *= 3;
- size /= 2;
- break;
- default:
- return -EINVAL;
- }
-
- x = (10000 * status_quality)/size;
- for (i = 0; i < ARRAY_SIZE(constants); i++) {
- if (x > constants[i])
- break;
- }
-
- switch (zd_rate) {
- case ZD_OFDM_RATE_6M:
- case ZD_OFDM_RATE_9M:
- i += 3;
- break;
- case ZD_OFDM_RATE_12M:
- case ZD_OFDM_RATE_18M:
- i += 5;
- break;
- case ZD_OFDM_RATE_24M:
- case ZD_OFDM_RATE_36M:
- i += 9;
- break;
- case ZD_OFDM_RATE_48M:
- case ZD_OFDM_RATE_54M:
- i += 15;
- break;
- default:
- return -EINVAL;
- }
-
- return i;
-}
-
-static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size)
-{
- int r;
-
- r = ofdm_qual_db(status_quality, zd_rate, size);
- ZD_ASSERT(r >= 0);
- if (r < 0)
- r = 0;
-
- r = (r * 100)/29;
- return r <= 100 ? r : 100;
-}
-
-static unsigned int log10times100(unsigned int x)
-{
- static const u8 log10[] = {
- 0,
- 0, 30, 47, 60, 69, 77, 84, 90, 95, 100,
- 104, 107, 111, 114, 117, 120, 123, 125, 127, 130,
- 132, 134, 136, 138, 139, 141, 143, 144, 146, 147,
- 149, 150, 151, 153, 154, 155, 156, 157, 159, 160,
- 161, 162, 163, 164, 165, 166, 167, 168, 169, 169,
- 170, 171, 172, 173, 174, 174, 175, 176, 177, 177,
- 178, 179, 179, 180, 181, 181, 182, 183, 183, 184,
- 185, 185, 186, 186, 187, 188, 188, 189, 189, 190,
- 190, 191, 191, 192, 192, 193, 193, 194, 194, 195,
- 195, 196, 196, 197, 197, 198, 198, 199, 199, 200,
- 200, 200, 201, 201, 202, 202, 202, 203, 203, 204,
- 204, 204, 205, 205, 206, 206, 206, 207, 207, 207,
- 208, 208, 208, 209, 209, 210, 210, 210, 211, 211,
- 211, 212, 212, 212, 213, 213, 213, 213, 214, 214,
- 214, 215, 215, 215, 216, 216, 216, 217, 217, 217,
- 217, 218, 218, 218, 219, 219, 219, 219, 220, 220,
- 220, 220, 221, 221, 221, 222, 222, 222, 222, 223,
- 223, 223, 223, 224, 224, 224, 224,
- };
-
- return x < ARRAY_SIZE(log10) ? log10[x] : 225;
-}
-
-enum {
- MAX_CCK_EVM_DB = 45,
-};
-
-static int cck_evm_db(u8 status_quality)
-{
- return (20 * log10times100(status_quality)) / 100;
-}
-
-static int cck_snr_db(u8 status_quality)
-{
- int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality);
- ZD_ASSERT(r >= 0);
- return r;
-}
-
-static int cck_qual_percent(u8 status_quality)
-{
- int r;
-
- r = cck_snr_db(status_quality);
- r = (100*r)/17;
- return r <= 100 ? r : 100;
-}
-
static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
{
return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame);
}
-u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
- const struct rx_status *status)
-{
- return (status->frame_status&ZD_RX_OFDM) ?
- ofdm_qual_percent(status->signal_quality_ofdm,
- zd_rate_from_ofdm_plcp_header(rx_frame),
- size) :
- cck_qual_percent(status->signal_quality_cck);
-}
-
/**
* zd_rx_rate - report zd-rate
* @rx_frame - received frame
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 9fd8f35..f8bbf7d 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -929,9 +929,6 @@ static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
struct rx_status;
-u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
- const struct rx_status *status);
-
u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
struct zd_mc_hash {
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index cf51e8f..f14deb0 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -828,9 +828,6 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
stats.band = IEEE80211_BAND_2GHZ;
stats.signal = status->signal_strength;
- stats.qual = zd_rx_qual_percent(buffer,
- length - sizeof(struct rx_status),
- status);
rate = zd_rx_rate(buffer, status);
@@ -990,12 +987,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
- /* changed_flags is always populated but this driver
- * doesn't support all FIF flags so its possible we don't
- * need to do anything */
- if (!changed_flags)
- return;
-
+ /*
+	 * If the multicast parameter (as returned by zd_op_prepare_multicast)
+	 * has changed, no bit in changed_flags is set. To handle this
+	 * situation, we do not return when changed_flags is 0. If we did,
+	 * IPv6, which uses multicast for link-layer address resolution,
+	 * would break.
+ */
if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
zd_mc_add_all(&hash);
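To make the reasoning in the new comment concrete: the multicast hash can change while every FIF_* flag stays the same, so bailing out on changed_flags == 0 would leave the old hash programmed. A self-contained sketch (plain C, with a stand-in flag value and a fake device register, not the driver's own API) of the corrected flow:

/* Illustration only: why an early return on changed_flags == 0 drops
 * multicast-list updates (e.g. IPv6 neighbour-discovery groups). */
#include <stdio.h>

#define FAKE_FIF_ALLMULTI 0x02		/* stand-in for the mac80211 flag */

static unsigned long long device_hash;	/* pretend multicast hash register */

static void configure_filter_sketch(unsigned int changed_flags,
				    unsigned int new_flags,
				    unsigned long long mc_hash)
{
	/*
	 * A bare "if (!changed_flags) return;" here would skip the
	 * write below whenever only the multicast list changed.
	 */
	device_hash = (new_flags & FAKE_FIF_ALLMULTI) ? ~0ULL : mc_hash;
}

int main(void)
{
	/* Flags unchanged, but a new multicast group was joined. */
	configure_filter_sketch(0, 0, 0x8000000000000001ULL);
	printf("hash now %#llx\n", device_hash);
	return 0;
}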
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ac19ecd..72d3e43 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -62,6 +62,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 8e952fd..cb2fd01 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func)
-ret_val);
goto acpiphp_bus_add_out;
}
- /*
- * try to start anyway. We could have failed to add
- * simply because this bus had previously been added
- * on another add. Don't bother with the return value
- * we just keep going.
- */
ret_val = acpi_bus_start(device);
acpiphp_bus_add_out:
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index bd588eb..8e210cd7 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -121,7 +121,7 @@ struct controller {
#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
-/* AMD PCIX bridge registers */
+/* AMD PCI-X bridge registers */
#define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C
#define PCIX_MISCII_OFFSET 0x48
#define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index e56f9be..4173125 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -305,7 +305,7 @@ struct device_domain_info {
int segment; /* PCI domain */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
- struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+ struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
};
@@ -1604,7 +1604,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
return ret;
parent = parent->bus->self;
}
- if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
+ if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->subordinate),
tmp->subordinate->number, 0,
@@ -3325,7 +3325,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
parent->devfn);
parent = parent->bus->self;
}
- if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
+ if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
iommu_detach_dev(iommu,
tmp->subordinate->number, 0);
else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 8b65a48..95b8491 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -528,7 +528,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev)
bridge = pci_find_upstream_pcie_bridge(dev);
if (bridge) {
- if (pci_is_pcie(bridge))/* this is a PCIE-to-PCI/PCIX bridge */
+ if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
(bridge->bus->number << 8) | dev->bus->number);
else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index cc617dd..7e28295 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -112,11 +112,7 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev)
static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
{
while (bus->parent) {
- struct pci_dev *bridge = bus->self;
- int ret;
-
- ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
- if (!ret || pci_is_pcie(bridge))
+ if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
return;
bus = bus->parent;
}
@@ -131,9 +127,7 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
if (acpi_pci_can_wakeup(dev))
return acpi_pm_device_sleep_wake(&dev->dev, enable);
- if (!pci_is_pcie(dev))
- acpi_pci_propagate_wakeup_enable(dev->bus, enable);
-
+ acpi_pci_propagate_wakeup_enable(dev->bus, enable);
return 0;
}
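The two hunks above change the propagation logic: instead of stopping at the first PCIe bridge, the walk now stops only once an ancestor bridge accepts the wakeup setting, and it is attempted for PCIe devices as well. A minimal sketch of the resulting walk, with an illustrative callback standing in for acpi_pm_device_sleep_wake() and illustrative types:

/* Sketch of the upstream walk after this change; types are illustrative. */
struct wake_bus {
	struct wake_bus *parent;
	/* returns 0 when this bridge accepts the wakeup setting */
	int (*accept_wake)(struct wake_bus *self, int enable);
};

static void propagate_wakeup_sketch(struct wake_bus *bus, int enable)
{
	while (bus->parent) {
		if (!bus->accept_wake(bus, enable))
			return;	/* first ancestor that handles it wins */
		bus = bus->parent;
	}
}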
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c5df94e..807224e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -75,7 +75,8 @@ static ssize_t local_cpus_show(struct device *dev,
int len;
#ifdef CONFIG_NUMA
- mask = cpumask_of_node(dev_to_node(dev));
+ mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
+ cpumask_of_node(dev_to_node(dev));
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
@@ -93,7 +94,8 @@ static ssize_t local_cpulist_show(struct device *dev,
int len;
#ifdef CONFIG_NUMA
- mask = cpumask_of_node(dev_to_node(dev));
+ mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
+ cpumask_of_node(dev_to_node(dev));
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0bc27e0..315fea4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -29,7 +29,17 @@ const char *pci_power_names[] = {
};
EXPORT_SYMBOL_GPL(pci_power_names);
-unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+unsigned int pci_pm_d3_delay;
+
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+ unsigned int delay = dev->d3_delay;
+
+ if (delay < pci_pm_d3_delay)
+ delay = pci_pm_d3_delay;
+
+ msleep(delay);
+}
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
@@ -522,7 +532,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(PCI_PM_D2_DELAY);
@@ -1153,11 +1163,11 @@ pci_disable_device(struct pci_dev *dev)
/**
* pcibios_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
- * Sets the PCI-E reset state for the device. This is the default
+ * Sets the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1168,7 +1178,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
/**
* pci_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
@@ -1409,6 +1419,7 @@ void pci_pm_init(struct pci_dev *dev)
}
dev->pm_cap = pm;
+ dev->d3_delay = PCI_PM_D3_WAIT;
dev->d1_support = false;
dev->d2_support = false;
@@ -2247,12 +2258,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D3hot;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D0;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
return 0;
}
@@ -2296,6 +2307,10 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
down(&dev->dev.sem);
}
+ rc = pci_dev_specific_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
rc = pcie_flr(dev, probe);
if (rc != -ENOTTY)
goto done;
@@ -2779,6 +2794,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
return 1;
}
+void __weak pci_fixup_cardbus(struct pci_bus *bus)
+{
+}
+EXPORT_SYMBOL(pci_fixup_cardbus);
+
static int __init pci_setup(char *str)
{
while (str) {
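The new pci_dev_d3_sleep() lets each device carry its own d3_delay (initialised to PCI_PM_D3_WAIT in pci_pm_init() and raised by quirks where needed), with the global pci_pm_d3_delay acting as a floor. The effective delay reduces to a simple maximum, roughly (field names illustrative):

/* Sketch of the effective D3 transition delay used above. */
static unsigned int effective_d3_delay_ms(unsigned int dev_d3_delay,
					  unsigned int global_floor)
{
	/* A device may lengthen the wait, never shorten the global floor. */
	return dev_d3_delay < global_floor ? global_floor : dev_d3_delay;
}

With the global default now 0 and each device defaulting to PCI_PM_D3_WAIT, behaviour is unchanged unless a quirk or the user raises one of the two values.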
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 33ed8e0..fbd0e3a 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -313,4 +313,12 @@ static inline int pci_resource_alignment(struct pci_dev *dev,
extern void pci_enable_acs(struct pci_dev *dev);
+struct pci_dev_reset_methods {
+ u16 vendor;
+ u16 device;
+ int (*reset)(struct pci_dev *dev, int probe);
+};
+
+extern int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
index b8c925c..9142949 100644
--- a/drivers/pci/pcie/aer/Kconfig.debug
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -3,14 +3,14 @@
#
config PCIEAER_INJECT
- tristate "PCIE AER error injector support"
+ tristate "PCIe AER error injector support"
depends on PCIEAER
default n
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) software error injector.
- Debuging PCIE AER code is quite difficult because it is hard
+ Debugging PCIe AER code is quite difficult because it is hard
to trigger various real hardware errors. Software based
error injection can fake almost all kinds of errors with the
help of a user space helper tool aer-inject, which can be
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 7fcd5331..223052b 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -1,7 +1,7 @@
/*
- * PCIE AER software error injection support.
+ * PCIe AER software error injection support.
*
- * Debuging PCIE AER code is quite difficult because it is hard to
+ * Debugging PCIe AER code is quite difficult because it is hard to
* trigger various real hardware errors. Software based error
* injection can fake almost all kinds of errors with the help of a
* user space helper tool aer-inject, which can be gotten from:
@@ -321,7 +321,7 @@ static int aer_inject(struct aer_error_inj *einj)
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
- u32 sever;
+ u32 sever, cor_mask, uncor_mask;
int ret = 0;
dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
@@ -339,6 +339,9 @@ static int aer_inject(struct aer_error_inj *einj)
goto out_put;
}
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
+ &uncor_mask);
rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
if (!rp_pos_cap_err) {
@@ -374,6 +377,21 @@ static int aer_inject(struct aer_error_inj *einj)
err->header_log2 = einj->header_log2;
err->header_log3 = einj->header_log3;
+ if (einj->cor_status && !(einj->cor_status & ~cor_mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The correctable error(s) is masked "
+ "by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+ if (einj->uncor_status && !(einj->uncor_status & ~uncor_mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The uncorrectable error(s) is masked "
+ "by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+
rperr = __find_aer_error_by_dev(rpdev);
if (!rperr) {
rperr = rperr_alloc;
@@ -413,8 +431,14 @@ static int aer_inject(struct aer_error_inj *einj)
if (ret)
goto out_put;
- if (find_aer_device(rpdev, &edev))
+ if (find_aer_device(rpdev, &edev)) {
+ if (!get_service_data(edev)) {
+ printk(KERN_WARNING "AER service is not initialized\n");
+ ret = -EINVAL;
+ goto out_put;
+ }
aer_irq(-1, edev);
+ }
else
ret = -EINVAL;
out_put:
@@ -484,5 +508,5 @@ static void __exit aer_inject_exit(void)
module_init(aer_inject_init);
module_exit(aer_inject_exit);
-MODULE_DESCRIPTION("PCIE AER software error injector");
+MODULE_DESCRIPTION("PCIe AER software error injector");
MODULE_LICENSE("GPL");
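The new mask checks above refuse an injection when every requested error bit is masked in the device's AER mask registers. The test reads a little backwards at first glance: einj->cor_status & ~cor_mask is the set of requested bits that are *not* masked, so the injection is rejected only when something was requested and that set is empty. A small standalone illustration of the same bit logic:

/* Standalone illustration of the AER mask test used above. */
#include <stdio.h>

static int injection_allowed(unsigned int requested, unsigned int mask)
{
	/* Reject only if something was requested and all of it is masked. */
	if (requested && !(requested & ~mask))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", injection_allowed(0x01, 0x01));	/* 0: fully masked   */
	printf("%d\n", injection_allowed(0x03, 0x01));	/* 1: bit 1 unmasked */
	printf("%d\n", injection_allowed(0x00, 0x01));	/* 1: nothing asked  */
	return 0;
}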
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 97a3459..21f215f 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -155,7 +155,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
mutex_init(&rpc->rpc_mutex);
init_waitqueue_head(&rpc->wait_release);
- /* Use PCIE bus function to store rpc into PCIE device */
+ /* Use PCIe bus function to store rpc into PCIe device */
set_service_data(dev, rpc);
return rpc;
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 8edb2f3..0481408 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -24,7 +24,7 @@
*
* @return: Zero on success. Nonzero otherwise.
*
- * Invoked when PCIE bus loads AER service driver. To avoid conflict with
+ * Invoked when PCIe bus loads AER service driver. To avoid conflict with
* BIOS AER support requires BIOS to yield AER control to OS native driver.
**/
int aer_osc_setup(struct pcie_device *pciedev)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index ae672ca..c843a79 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -587,7 +587,7 @@ static void handle_error_source(struct pcie_device *aerdev,
* aer_enable_rootport - enable Root Port's interrupts when receiving messages
* @rpc: pointer to a Root Port data structure
*
- * Invoked when PCIE bus loads AER service driver.
+ * Invoked when PCIe bus loads AER service driver.
*/
void aer_enable_rootport(struct aer_rpc *rpc)
{
@@ -597,7 +597,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
u32 reg32;
pos = pci_pcie_cap(pdev);
- /* Clear PCIE Capability's Device Status */
+ /* Clear PCIe Capability's Device Status */
pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
@@ -631,7 +631,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
* disable_root_aer - disable Root Port's interrupts when receiving messages
* @rpc: pointer to a Root Port data structure
*
- * Invoked when PCIE bus unloads AER service driver.
+ * Invoked when PCIe bus unloads AER service driver.
*/
static void disable_root_aer(struct aer_rpc *rpc)
{
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 44acde7..9d3e4c8 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -184,7 +184,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
if (info->status == 0) {
AER_PR(info, dev,
- "PCIE Bus Error: severity=%s, type=Unaccessible, "
+ "PCIe Bus Error: severity=%s, type=Unaccessible, "
"id=%04x(Unregistered Agent ID)\n",
aer_error_severity_string[info->severity], id);
} else {
@@ -194,7 +194,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
agent = AER_GET_AGENT(info->severity, info->status);
AER_PR(info, dev,
- "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], id, aer_agent_string[agent]);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5a01fc7..be53d98 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1,6 +1,6 @@
/*
* File: drivers/pci/pcie/aspm.c
- * Enabling PCIE link L0s/L1 state and Clock Power Management
+ * Enabling PCIe link L0s/L1 state and Clock Power Management
*
* Copyright (C) 2007 Intel
* Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
@@ -499,7 +499,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
int pos;
u32 reg32;
/*
- * Some functions in a slot might not all be PCIE functions,
+ * Some functions in a slot might not all be PCIe functions,
* very strange. Disable ASPM for the whole slot
*/
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 413262e..b174188 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -27,7 +27,7 @@
*/
static void release_pcie_device(struct device *dev)
{
- kfree(to_pcie_device(dev));
+ kfree(to_pcie_device(dev));
}
/**
@@ -346,12 +346,11 @@ static int suspend_iter(struct device *dev, void *data)
{
struct pcie_port_service_driver *service_driver;
- if ((dev->bus == &pcie_port_bus_type) &&
- (dev->driver)) {
- service_driver = to_service_driver(dev->driver);
- if (service_driver->suspend)
- service_driver->suspend(to_pcie_device(dev));
- }
+ if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
+ service_driver = to_service_driver(dev->driver);
+ if (service_driver->suspend)
+ service_driver->suspend(to_pcie_device(dev));
+ }
return 0;
}
@@ -494,6 +493,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
return driver_register(&new->driver);
}
+EXPORT_SYMBOL(pcie_port_service_register);
/**
* pcie_port_service_unregister - unregister PCI Express port service driver
@@ -503,6 +503,4 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
{
driver_unregister(&drv->driver);
}
-
-EXPORT_SYMBOL(pcie_port_service_register);
EXPORT_SYMBOL(pcie_port_service_unregister);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index a49452e..13c8972 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -24,7 +24,7 @@
*/
#define DRIVER_VERSION "v1.0"
#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
-#define DRIVER_DESC "PCIE Port Bus Driver"
+#define DRIVER_DESC "PCIe Port Bus Driver"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
@@ -63,7 +63,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
* pcie_portdrv_probe - Probe PCI-Express port devices
* @dev: PCI-Express port device being probed
*
- * If detected invokes the pcie_port_device_register() method for
+ * If detected invokes the pcie_port_device_register() method for
* this port device.
*
*/
@@ -78,7 +78,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
(dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
return -ENODEV;
- if (!dev->irq && dev->pin) {
+ if (!dev->irq && dev->pin) {
dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
"check vendor BIOS\n", dev->vendor, dev->device);
}
@@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
return 0;
}
-static void pcie_portdrv_remove (struct pci_dev *dev)
+static void pcie_portdrv_remove(struct pci_dev *dev)
{
pcie_port_device_remove(dev);
pci_disable_device(dev);
@@ -129,14 +129,13 @@ static int error_detected_iter(struct device *device, void *data)
static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
- struct aer_broadcast_data result_data =
- {error, PCI_ERS_RESULT_CAN_RECOVER};
- int retval;
+ struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};
+ int ret;
/* can not fail */
- retval = device_for_each_child(&dev->dev, &result_data, error_detected_iter);
+ ret = device_for_each_child(&dev->dev, &data, error_detected_iter);
- return result_data.result;
+ return data.result;
}
static int mmio_enabled_iter(struct device *device, void *data)
@@ -290,7 +289,7 @@ static int __init pcie_portdrv_init(void)
return retval;
}
-static void __exit pcie_portdrv_exit(void)
+static void __exit pcie_portdrv_exit(void)
{
pci_unregister_driver(&pcie_portdriver);
pcie_port_bus_unregister();
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 98ffb2d..446e4a9 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -681,7 +681,7 @@ static void pci_read_irq(struct pci_dev *dev)
dev->irq = irq;
}
-static void set_pcie_port_type(struct pci_dev *pdev)
+void set_pcie_port_type(struct pci_dev *pdev)
{
int pos;
u16 reg16;
@@ -695,7 +695,7 @@ static void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
}
-static void set_pcie_hotplug_bridge(struct pci_dev *pdev)
+void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
int pos;
u16 reg16;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7cfa7c3..d58b940 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
+/*
+ * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
+ * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
+ * BAR0 should be 8 bytes; instead, it may be set to something like 8k
+ * (which conflicts w/ BAR1's memory range).
+ */
+static void __devinit quirk_cs5536_vsa(struct pci_dev *dev)
+{
+ if (pci_resource_len(dev, 0) != 8) {
+ struct resource *res = &dev->resource[0];
+ res->end = res->start + 8 - 1;
+ dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
+ "(incorrect header); workaround applied.\n");
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
+
static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
unsigned size, int nr, const char *name)
{
@@ -2629,14 +2646,86 @@ static int __init pci_apply_final_quirks(void)
if (!pci_cache_line_size) {
printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
cls << 2, pci_dfl_cache_line_size << 2);
- pci_cache_line_size = cls;
+ pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
}
return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
+
+/*
+ * The following are device-specific reset methods which can be used to
+ * reset a single function if other methods (e.g. FLR, PM D0->D3) are
+ * not available.
+ */
+static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
+{
+ int pos;
+
+ /* only implement PCI_CLASS_SERIAL_USB at present */
+ if (dev->class == PCI_CLASS_SERIAL_USB) {
+ pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+ if (!pos)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_write_config_byte(dev, pos + 0x4, 1);
+ msleep(100);
+
+ return 0;
+ } else {
+ return -ENOTTY;
+ }
+}
+
+static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
+{
+ int pos;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR);
+ msleep(100);
+
+ return 0;
+}
+
+#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
+
+static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
+ reset_intel_82599_sfp_virtfn },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ reset_intel_generic_dev },
+ { 0 }
+};
+
+int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+{
+ const struct pci_dev_reset_methods *i;
+
+ for (i = pci_dev_reset_methods; i->reset; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID))
+ return i->reset(dev, probe);
+ }
+
+ return -ENOTTY;
+}
+
#else
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
+int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; }
#endif
EXPORT_SYMBOL(pci_fixup_device);
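pci_dev_specific_reset() walks the pci_dev_reset_methods table until a vendor/device pair matches (PCI_ANY_ID acting as a wildcard) and returns -ENOTTY when no entry applies, which lets pci_dev_reset() above fall through to FLR and the other generic methods. A reduced, self-contained model of that lookup (not the kernel structs; the string stands in for the reset callback):

/* Reduced model of the table-driven reset lookup. */
#include <stdio.h>

#define ANY_ID 0xffff

struct reset_entry {
	unsigned short vendor, device;
	const char *method;		/* stands in for the reset callback */
};

static const struct reset_entry table[] = {
	{ 0x8086, 0x10ed, "82599 VF FLR" },
	{ 0x8086, ANY_ID, "Intel generic" },
	{ 0 }
};

static const char *lookup(unsigned short vendor, unsigned short device)
{
	const struct reset_entry *i;

	for (i = table; i->method; i++)
		if ((i->vendor == vendor || i->vendor == ANY_ID) &&
		    (i->device == device || i->device == ANY_ID))
			return i->method;
	return NULL;			/* caller falls back to generic resets */
}

int main(void)
{
	printf("%s\n", lookup(0x8086, 0x10ed));
	printf("%s\n", lookup(0x8086, 0x1234));
	printf("%s\n", lookup(0x10de, 0x0001) ? "found" : "none");
	return 0;
}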
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 6dae871..4a471dc 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -15,9 +15,9 @@
DECLARE_RWSEM(pci_bus_sem);
/*
- * find the upstream PCIE-to-PCI bridge of a PCI device
+ * find the upstream PCIe-to-PCI bridge of a PCI device
* if the device is PCIE, return NULL
- * if the device isn't connected to a PCIE bridge (that is its parent is a
+ * if the device isn't connected to a PCIe bridge (that is its parent is a
* legacy PCI bridge and the bridge is directly connected to bus 0), return its
* parent
*/
@@ -37,7 +37,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
tmp = pdev;
continue;
}
- /* PCI device should connect to a PCIE bridge */
+ /* PCI device should connect to a PCIe bridge */
if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
/* Busted hardware? */
WARN_ON_ONCE(1);
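The comment above describes pci_find_upstream_pcie_bridge(): walk upwards through pdev->bus->self; a PCIe device needs no translation (NULL), a conventional-PCI device behind a PCIe-to-PCI bridge resolves to that bridge, and a legacy-only path resolves to the topmost legacy bridge. A simplified model of the walk, with an illustrative node type instead of struct pci_dev (the pcie_type sanity check and WARN_ON_ONCE are omitted):

/* Simplified model of the upstream PCIe-to-PCI bridge walk. */
struct topo_dev {
	struct topo_dev *upstream;	/* bridge we sit behind, NULL at the root */
	int is_pcie;
};

static struct topo_dev *find_upstream_pcie_bridge_sketch(struct topo_dev *dev)
{
	struct topo_dev *last_pci = NULL;

	if (dev->is_pcie)
		return NULL;			/* PCIe devices need no proxy */

	while (dev->upstream) {
		dev = dev->upstream;
		if (dev->is_pcie)
			return dev;		/* first PCIe bridge above us */
		last_pci = dev;			/* remember the legacy bridge */
	}
	return last_pci;			/* legacy-only path up to bus 0 */
}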
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index cdf50f3..d99f846 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -222,7 +222,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
unsigned int max, pass;
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
-/* pcibios_fixup_bus(bus); */
+ pci_fixup_cardbus(bus);
max = bus->secondary;
for (pass = 0; pass < 2; pass++)
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3aabf1e..76e640b 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -291,7 +291,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
skt->nr = ops->first + i;
skt->ops = ops;
skt->socket.owner = ops->owner;
- skt->socket.dev.parent = dev;
+ skt->socket.dev.parent = &dev->dev;
skt->socket.pci_irq = NO_IRQ;
ret = pxa2xx_drv_pcmcia_add_one(skt);
@@ -304,8 +304,8 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
soc_pcmcia_remove_one(&sinfo->skt[i]);
kfree(sinfo);
} else {
- pxa2xx_configure_sockets(dev);
- dev_set_drvdata(dev, sinfo);
+ pxa2xx_configure_sockets(&dev->dev);
+ dev_set_drvdata(&dev->dev, sinfo);
}
return ret;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 55ca39d..f526e73 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -231,8 +231,36 @@ config THINKPAD_ACPI
This driver was formerly known as ibm-acpi.
+ Extra functionality will be available if the rfkill (CONFIG_RFKILL)
+ and/or ALSA (CONFIG_SND) subsystems are available in the kernel.
+ Note that if you want ThinkPad-ACPI to be built-in instead of
+ modular, ALSA and rfkill will also have to be built-in.
+
If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
+config THINKPAD_ACPI_ALSA_SUPPORT
+ bool "Console audio control ALSA interface"
+ depends on THINKPAD_ACPI
+ depends on SND
+ depends on SND = y || THINKPAD_ACPI = SND
+ default y
+ ---help---
+ Enables monitoring of the built-in console audio output control
+ (headphone and speakers), which is operated by the mute and (in
+ some ThinkPad models) volume hotkeys.
+
+ If this option is enabled, ThinkPad-ACPI will export an ALSA card
+ with a single read-only mixer control, which should be used for
+ on-screen-display feedback purposes by the Desktop Environment.
+
+ Optionally, the driver will also allow software control (the
+ ALSA mixer will be made read-write). Please refer to the driver
+ documentation for details.
+
+ All IBM models have both volume and mute control. Newer Lenovo
+ models only have mute control (the volume hotkeys are just normal
+ keys and volume control is done through the main HDA mixer).
+
config THINKPAD_ACPI_DEBUGFACILITIES
bool "Maintainer debug facilities"
depends on THINKPAD_ACPI
@@ -334,6 +362,9 @@ config EEEPC_LAPTOP
depends on HOTPLUG_PCI
select BACKLIGHT_CLASS_DEVICE
select HWMON
+ select LEDS_CLASS
+ select NEW_LEDS
+ select INPUT_SPARSEKMAP
---help---
This driver supports the Fn-Fx keys on Eee PC laptops.
@@ -365,6 +396,18 @@ config ACPI_WMI
It is safe to enable this driver even if your DSDT doesn't define
any ACPI-WMI devices.
+config MSI_WMI
+ tristate "MSI WMI extras"
+ depends on ACPI_WMI
+ depends on INPUT
+ depends on BACKLIGHT_CLASS_DEVICE
+ select INPUT_SPARSEKMAP
+ help
+ Say Y here if you want to support WMI-based hotkeys on MSI laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called msi-wmi.
+
config ACPI_ASUS
tristate "ASUS/Medion Laptop Extras (DEPRECATED)"
depends on ACPI
@@ -435,4 +478,31 @@ config ACPI_TOSHIBA
If you have a legacy free Toshiba laptop (such as the Libretto L1
series), say Y.
+
+config TOSHIBA_BT_RFKILL
+ tristate "Toshiba Bluetooth RFKill switch support"
+ depends on ACPI
+ ---help---
+ This driver adds support for Bluetooth events for the RFKill
+ switch on modern Toshiba laptops with full ACPI support and
+ an RFKill switch.
+
+ This driver handles RFKill events for the TOS6205 Bluetooth,
+ and re-enables it when the switch is set back to the 'on'
+ position.
+
+ If you have a modern Toshiba laptop with a Bluetooth and an
+ RFKill switch (such as the Portege R500), say Y.
+
+config ACPI_CMPC
+ tristate "CMPC Laptop Extras"
+ depends on X86 && ACPI
+ select INPUT
+ select BACKLIGHT_CLASS_DEVICE
+ default n
+ help
	  Support for Intel Classmate PC ACPI devices, including hotkeys
	  (exposed as an input device), a backlight device, and tablet and
	  accelerometer devices.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index d1c1621..9cd9fa0 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
+obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
@@ -18,6 +19,8 @@ obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ACPI_WMI) += wmi.o
+obj-$(CONFIG_MSI_WMI) += msi-wmi.o
obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
+obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 454970d..226b3e9 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -96,9 +96,6 @@ struct acer_quirks {
MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3");
-/* Temporary workaround until the WMI sysfs interface goes in */
-MODULE_ALIAS("dmi:*:*Acer*:*:");
-
/*
* Interface capability flags
*/
@@ -937,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev)
acer_backlight_device = bd;
bd->props.power = FB_BLANK_UNBLANK;
- bd->props.brightness = max_brightness;
+ bd->props.brightness = read_brightness(bd);
bd->props.max_brightness = max_brightness;
backlight_update_status(bd);
return 0;
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index be27aa4..7b2384d 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -52,7 +52,7 @@
*/
#undef START_IN_KERNEL_MODE
-#define DRV_VER "0.5.18"
+#define DRV_VER "0.5.22"
/*
* According to the Atom N270 datasheet,
@@ -112,12 +112,14 @@ module_param_string(force_product, force_product, 16, 0);
MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check");
/*
- * cmd_off: to switch the fan completely off / to check if the fan is off
+ * cmd_off: to switch the fan completely off
+ * chk_off: to check if the fan is off
* cmd_auto: to set the BIOS in control of the fan. The BIOS regulates then
* the fan speed depending on the temperature
*/
struct fancmd {
u8 cmd_off;
+ u8 chk_off;
u8 cmd_auto;
};
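Splitting cmd_off into cmd_off/chk_off lets the value written to switch the fan off differ from the value read back when checking its state, which the new Gateway LT31 entries below need (write 0x10, read back 0x0f). The state check then compares the EC register against chk_off rather than cmd_off, roughly (a sketch; the EC value is a parameter here instead of an ec_read() call):

/* Sketch of the fan-state check with a separate read-back value. */
enum { FAN_OFF_SKETCH, FAN_AUTO_SKETCH };

struct fancmd_sketch {
	unsigned char cmd_off;	/* value written to turn the fan off     */
	unsigned char chk_off;	/* value read back when the fan is off   */
	unsigned char cmd_auto;	/* value written for BIOS-controlled fan */
};

static int fan_state_sketch(const struct fancmd_sketch *cmd, unsigned char ec_val)
{
	return ec_val == cmd->chk_off ? FAN_OFF_SKETCH : FAN_AUTO_SKETCH;
}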
@@ -134,32 +136,47 @@ struct bios_settings_t {
/* Register addresses and values for different BIOS versions */
static const struct bios_settings_t bios_tbl[] = {
/* AOA110 */
- {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x1f, 0x00} },
+ {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x1f, 0x00} },
+ {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x21, 0x00} },
/* AOA150 */
- {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
- /* special BIOS / other */
- {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
- {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
- {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ /* Acer 1410 */
+ {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
+ {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
+ /* Acer 1810xx */
+ {"Acer", "Aspire 1810TZ", "v0.3120", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
+ {"Acer", "Aspire 1810T", "v0.3120", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
+ {"Acer", "Aspire 1810T", "v1.3303", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
+ {"Acer", "Aspire 1810TZ", "v1.3303", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
+ /* Gateway */
+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x10, 0x0f, 0x00} },
+ {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x10, 0x0f, 0x00} },
+ {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x10, 0x0f, 0x00} },
+ /* Packard Bell */
+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
+ {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
/* pewpew-terminator */
- {"", "", "", 0, 0, {0, 0} }
+ {"", "", "", 0, 0, {0, 0, 0} }
};
static const struct bios_settings_t *bios_cfg __read_mostly;
@@ -183,7 +200,7 @@ static int acerhdf_get_fanstate(int *state)
if (ec_read(bios_cfg->fanreg, &fan))
return -EINVAL;
- if (fan != bios_cfg->cmd.cmd_off)
+ if (fan != bios_cfg->cmd.chk_off)
*state = ACERHDF_FAN_AUTO;
else
*state = ACERHDF_FAN_OFF;
@@ -475,13 +492,26 @@ static struct platform_driver acerhdf_driver = {
.remove = acerhdf_remove,
};
+/* checks if str begins with start */
+static int str_starts_with(const char *str, const char *start)
+{
+ unsigned long str_len = 0, start_len = 0;
+
+ str_len = strlen(str);
+ start_len = strlen(start);
+
+ if (str_len >= start_len &&
+ !strncmp(str, start, start_len))
+ return 1;
+
+ return 0;
+}
/* check hardware */
static int acerhdf_check_hardware(void)
{
char const *vendor, *version, *product;
- int i;
- unsigned long prod_len = 0;
+ const struct bios_settings_t *bt = NULL;
/* get BIOS data */
vendor = dmi_get_system_info(DMI_SYS_VENDOR);
@@ -503,20 +533,20 @@ static int acerhdf_check_hardware(void)
kernelmode = 0;
}
- prod_len = strlen(product);
-
if (verbose)
pr_info("BIOS info: %s %s, product: %s\n",
vendor, version, product);
/* search BIOS version and vendor in BIOS settings table */
- for (i = 0; bios_tbl[i].version[0]; i++) {
- if (strlen(bios_tbl[i].product) >= prod_len &&
- !strncmp(bios_tbl[i].product, product,
- strlen(bios_tbl[i].product)) &&
- !strcmp(bios_tbl[i].vendor, vendor) &&
- !strcmp(bios_tbl[i].version, version)) {
- bios_cfg = &bios_tbl[i];
+ for (bt = bios_tbl; bt->vendor[0]; bt++) {
+ /*
+ * check if actual hardware BIOS vendor, product and version
+ * IDs start with the strings of BIOS table entry
+ */
+ if (str_starts_with(vendor, bt->vendor) &&
+ str_starts_with(product, bt->product) &&
+ str_starts_with(version, bt->version)) {
+ bios_cfg = bt;
break;
}
}
@@ -629,9 +659,14 @@ static void __exit acerhdf_exit(void)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Feuerer");
MODULE_DESCRIPTION("Aspire One temperature and fan driver");
-MODULE_ALIAS("dmi:*:*Acer*:*:");
-MODULE_ALIAS("dmi:*:*Gateway*:*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1410*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1810*:");
+MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
+MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
+MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMU*:");
module_init(acerhdf_init);
module_exit(acerhdf_exit);
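The table search above now does prefix matching on vendor, product and version via str_starts_with(), so one entry such as {"Acer", "Aspire 1810T", "v1.3303", ...} also covers longer DMI strings. A self-contained version of the matcher (simplified: the explicit length check of the driver's helper is folded into strncmp()):

/* Self-contained version of the prefix matching used for the BIOS table. */
#include <stdio.h>
#include <string.h>

static int str_starts_with(const char *str, const char *start)
{
	return strncmp(str, start, strlen(start)) == 0;
}

struct bios_entry_sketch {
	const char *vendor, *product, *version;
};

static const struct bios_entry_sketch table[] = {
	{ "Acer", "Aspire 1810T", "v1.3303" },
	{ "Gateway", "LT31", "v1.3103" },
	{ NULL, NULL, NULL }
};

static const struct bios_entry_sketch *match(const char *vendor,
					     const char *product,
					     const char *version)
{
	const struct bios_entry_sketch *bt;

	for (bt = table; bt->vendor; bt++)
		if (str_starts_with(vendor, bt->vendor) &&
		    str_starts_with(product, bt->product) &&
		    str_starts_with(version, bt->version))
			return bt;
	return NULL;
}

int main(void)
{
	const struct bios_entry_sketch *bt =
		match("Acer", "Aspire 1810TZ", "v1.3303");

	printf("%s\n", bt ? bt->product : "no match");
	return 0;
}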
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index b39d2bb..61a1c75 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -221,6 +221,7 @@ static struct asus_hotk *hotk;
*/
static const struct acpi_device_id asus_device_ids[] = {
{"ATK0100", 0},
+ {"ATK0101", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, asus_device_ids);
@@ -232,6 +233,7 @@ static void asus_hotk_notify(struct acpi_device *device, u32 event);
static struct acpi_driver asus_hotk_driver = {
.name = ASUS_HOTK_NAME,
.class = ASUS_HOTK_CLASS,
+ .owner = THIS_MODULE,
.ids = asus_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
@@ -293,6 +295,11 @@ struct key_entry {
enum { KE_KEY, KE_END };
static struct key_entry asus_keymap[] = {
+ {KE_KEY, 0x02, KEY_SCREENLOCK},
+ {KE_KEY, 0x05, KEY_WLAN},
+ {KE_KEY, 0x08, KEY_F13},
+ {KE_KEY, 0x17, KEY_ZOOM},
+ {KE_KEY, 0x1f, KEY_BATTERY},
{KE_KEY, 0x30, KEY_VOLUMEUP},
{KE_KEY, 0x31, KEY_VOLUMEDOWN},
{KE_KEY, 0x32, KEY_MUTE},
@@ -312,8 +319,11 @@ static struct key_entry asus_keymap[] = {
{KE_KEY, 0x5F, KEY_WLAN},
{KE_KEY, 0x60, KEY_SWITCHVIDEOMODE},
{KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */
+ {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE},
+ {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE},
+ {KE_KEY, 0x6B, KEY_F13}, /* Lock Touchpad */
{KE_KEY, 0x82, KEY_CAMERA},
+ {KE_KEY, 0x88, KEY_WLAN },
{KE_KEY, 0x8A, KEY_PROG1},
{KE_KEY, 0x95, KEY_MEDIA},
{KE_KEY, 0x99, KEY_PHONE},
@@ -1240,9 +1250,6 @@ static int asus_hotk_add(struct acpi_device *device)
{
int result;
- if (!device)
- return -EINVAL;
-
pr_notice("Asus Laptop Support version %s\n",
ASUS_LAPTOP_VERSION);
@@ -1283,8 +1290,8 @@ static int asus_hotk_add(struct acpi_device *device)
hotk->ledd_status = 0xFFF;
/* Set initial values of light sensor and level */
- hotk->light_switch = 1; /* Default to light sensor disabled */
- hotk->light_level = 0; /* level 5 for sensor sensitivity */
+ hotk->light_switch = 0; /* Default to light sensor disabled */
+ hotk->light_level = 5; /* level 5 for sensor sensitivity */
if (ls_switch_handle)
set_light_sens_switch(hotk->light_switch);
@@ -1306,9 +1313,6 @@ end:
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
kfree(hotk->name);
kfree(hotk);
@@ -1444,9 +1448,6 @@ static int __init asus_laptop_init(void)
{
int result;
- if (acpi_disabled)
- return -ENODEV;
-
result = acpi_bus_register_driver(&asus_hotk_driver);
if (result < 0)
return result;
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index ddf5240..c1d2aee 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/backlight.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
@@ -466,6 +467,7 @@ MODULE_DEVICE_TABLE(acpi, asus_device_ids);
static struct acpi_driver asus_hotk_driver = {
.name = "asus_acpi",
.class = ACPI_HOTK_CLASS,
+ .owner = THIS_MODULE,
.ids = asus_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
@@ -512,26 +514,12 @@ static int read_acpi_int(acpi_handle handle, const char *method, int *val)
return (status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER);
}
-/*
- * We write our info in page, we begin at offset off and cannot write more
- * than count bytes. We set eof to 1 if we handle those 2 values. We return the
- * number of bytes written in page
- */
-static int
-proc_read_info(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int asus_info_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
int temp;
- char buf[16]; /* enough for all info */
- /*
- * We use the easy way, we don't care of off and count,
- * so we don't set eof to 1
- */
- len += sprintf(page, ACPI_HOTK_NAME " " ASUS_ACPI_VERSION "\n");
- len += sprintf(page + len, "Model reference : %s\n",
- hotk->methods->name);
+ seq_printf(m, ACPI_HOTK_NAME " " ASUS_ACPI_VERSION "\n");
+ seq_printf(m, "Model reference : %s\n", hotk->methods->name);
/*
* The SFUN method probably allows the original driver to get the list
* of features supported by a given model. For now, 0x0100 or 0x0800
@@ -539,8 +527,7 @@ proc_read_info(char *page, char **start, off_t off, int count, int *eof,
* The significance of others is yet to be found.
*/
if (read_acpi_int(hotk->handle, "SFUN", &temp))
- len +=
- sprintf(page + len, "SFUN value : 0x%04x\n", temp);
+ seq_printf(m, "SFUN value : 0x%04x\n", temp);
/*
* Another value for userspace: the ASYM method returns 0x02 for
* battery low and 0x04 for battery critical, its readings tend to be
@@ -549,30 +536,34 @@ proc_read_info(char *page, char **start, off_t off, int count, int *eof,
* silently ignored.
*/
if (read_acpi_int(hotk->handle, "ASYM", &temp))
- len +=
- sprintf(page + len, "ASYM value : 0x%04x\n", temp);
+ seq_printf(m, "ASYM value : 0x%04x\n", temp);
if (asus_info) {
- snprintf(buf, 16, "%d", asus_info->length);
- len += sprintf(page + len, "DSDT length : %s\n", buf);
- snprintf(buf, 16, "%d", asus_info->checksum);
- len += sprintf(page + len, "DSDT checksum : %s\n", buf);
- snprintf(buf, 16, "%d", asus_info->revision);
- len += sprintf(page + len, "DSDT revision : %s\n", buf);
- snprintf(buf, 7, "%s", asus_info->oem_id);
- len += sprintf(page + len, "OEM id : %s\n", buf);
- snprintf(buf, 9, "%s", asus_info->oem_table_id);
- len += sprintf(page + len, "OEM table id : %s\n", buf);
- snprintf(buf, 16, "%x", asus_info->oem_revision);
- len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
- snprintf(buf, 5, "%s", asus_info->asl_compiler_id);
- len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
- snprintf(buf, 16, "%x", asus_info->asl_compiler_revision);
- len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
+ seq_printf(m, "DSDT length : %d\n", asus_info->length);
+ seq_printf(m, "DSDT checksum : %d\n", asus_info->checksum);
+ seq_printf(m, "DSDT revision : %d\n", asus_info->revision);
+ seq_printf(m, "OEM id : %.*s\n", ACPI_OEM_ID_SIZE, asus_info->oem_id);
+ seq_printf(m, "OEM table id : %.*s\n", ACPI_OEM_TABLE_ID_SIZE, asus_info->oem_table_id);
+ seq_printf(m, "OEM revision : 0x%x\n", asus_info->oem_revision);
+ seq_printf(m, "ASL comp vendor id : %.*s\n", ACPI_NAME_SIZE, asus_info->asl_compiler_id);
+ seq_printf(m, "ASL comp revision : 0x%x\n", asus_info->asl_compiler_revision);
}
- return len;
+ return 0;
+}
+
+static int asus_info_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, asus_info_proc_show, NULL);
}
+static const struct file_operations asus_info_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = asus_info_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* /proc handlers
* We write our info in page, we begin at offset off and cannot write more
@@ -638,34 +629,48 @@ write_led(const char __user *buffer, unsigned long count,
/*
* Proc handlers for MLED
*/
-static int
-proc_read_mled(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int mled_proc_show(struct seq_file *m, void *v)
{
- return sprintf(page, "%d\n",
- read_led(hotk->methods->mled_status, MLED_ON));
+ seq_printf(m, "%d\n", read_led(hotk->methods->mled_status, MLED_ON));
+ return 0;
}
-static int
-proc_write_mled(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int mled_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mled_proc_show, NULL);
+}
+
+static ssize_t mled_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
return write_led(buffer, count, hotk->methods->mt_mled, MLED_ON, 1);
}
+static const struct file_operations mled_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mled_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = mled_proc_write,
+};
+
/*
* Proc handlers for LED display
*/
-static int
-proc_read_ledd(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int ledd_proc_show(struct seq_file *m, void *v)
{
- return sprintf(page, "0x%08x\n", hotk->ledd_status);
+ seq_printf(m, "0x%08x\n", hotk->ledd_status);
+ return 0;
}
-static int
-proc_write_ledd(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int ledd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ledd_proc_show, NULL);
+}
+
+static ssize_t ledd_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
int rv, value;
@@ -681,61 +686,104 @@ proc_write_ledd(struct file *file, const char __user *buffer,
return rv;
}
+static const struct file_operations ledd_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = ledd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = ledd_proc_write,
+};
+
/*
* Proc handlers for WLED
*/
-static int
-proc_read_wled(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int wled_proc_show(struct seq_file *m, void *v)
{
- return sprintf(page, "%d\n",
- read_led(hotk->methods->wled_status, WLED_ON));
+ seq_printf(m, "%d\n", read_led(hotk->methods->wled_status, WLED_ON));
+ return 0;
}
-static int
-proc_write_wled(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int wled_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wled_proc_show, NULL);
+}
+
+static ssize_t wled_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
return write_led(buffer, count, hotk->methods->mt_wled, WLED_ON, 0);
}
+static const struct file_operations wled_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = wled_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = wled_proc_write,
+};
+
/*
* Proc handlers for Bluetooth
*/
-static int
-proc_read_bluetooth(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int bluetooth_proc_show(struct seq_file *m, void *v)
{
- return sprintf(page, "%d\n", read_led(hotk->methods->bt_status, BT_ON));
+ seq_printf(m, "%d\n", read_led(hotk->methods->bt_status, BT_ON));
+ return 0;
}
-static int
-proc_write_bluetooth(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int bluetooth_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bluetooth_proc_show, NULL);
+}
+
+static ssize_t bluetooth_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
/* Note: mt_bt_switch controls both internal Bluetooth adapter's
presence and its LED */
return write_led(buffer, count, hotk->methods->mt_bt_switch, BT_ON, 0);
}
+static const struct file_operations bluetooth_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = bluetooth_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = bluetooth_proc_write,
+};
+
/*
* Proc handlers for TLED
*/
-static int
-proc_read_tled(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int tled_proc_show(struct seq_file *m, void *v)
{
- return sprintf(page, "%d\n",
- read_led(hotk->methods->tled_status, TLED_ON));
+ seq_printf(m, "%d\n", read_led(hotk->methods->tled_status, TLED_ON));
+ return 0;
}
-static int
-proc_write_tled(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int tled_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tled_proc_show, NULL);
+}
+
+static ssize_t tled_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
return write_led(buffer, count, hotk->methods->mt_tled, TLED_ON, 0);
}
+static const struct file_operations tled_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = tled_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = tled_proc_write,
+};
+
static int get_lcd_state(void)
{
int lcd = 0;
@@ -828,16 +876,19 @@ static int set_lcd_state(int value)
}
-static int
-proc_read_lcd(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int lcd_proc_show(struct seq_file *m, void *v)
{
- return sprintf(page, "%d\n", get_lcd_state());
+ seq_printf(m, "%d\n", get_lcd_state());
+ return 0;
}
-static int
-proc_write_lcd(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int lcd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lcd_proc_show, NULL);
+}
+
+static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
int rv, value;
@@ -847,6 +898,15 @@ proc_write_lcd(struct file *file, const char __user *buffer,
return rv;
}
+static const struct file_operations lcd_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = lcd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = lcd_proc_write,
+};
+
static int read_brightness(struct backlight_device *bd)
{
int value;
@@ -906,16 +966,19 @@ static int set_brightness_status(struct backlight_device *bd)
return set_brightness(bd->props.brightness);
}
-static int
-proc_read_brn(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int brn_proc_show(struct seq_file *m, void *v)
{
- return sprintf(page, "%d\n", read_brightness(NULL));
+ seq_printf(m, "%d\n", read_brightness(NULL));
+ return 0;
}
-static int
-proc_write_brn(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int brn_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, brn_proc_show, NULL);
+}
+
+static ssize_t brn_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
int rv, value;
@@ -928,6 +991,15 @@ proc_write_brn(struct file *file, const char __user *buffer,
return rv;
}
+static const struct file_operations brn_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = brn_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = brn_proc_write,
+};
+
static void set_display(int value)
{
/* no sanity check needed for now */
@@ -941,9 +1013,7 @@ static void set_display(int value)
* Now, *this* one could be more user-friendly, but so far, no-one has
* complained. The significance of bits is the same as in proc_write_disp()
*/
-static int
-proc_read_disp(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int disp_proc_show(struct seq_file *m, void *v)
{
int value = 0;
@@ -951,7 +1021,13 @@ proc_read_disp(char *page, char **start, off_t off, int count, int *eof,
printk(KERN_WARNING
"Asus ACPI: Error reading display status\n");
value &= 0x07; /* needed for some models, shouldn't hurt others */
- return sprintf(page, "%d\n", value);
+ seq_printf(m, "%d\n", value);
+ return 0;
+}
+
+static int disp_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, disp_proc_show, NULL);
}
/*
@@ -960,9 +1036,8 @@ proc_read_disp(char *page, char **start, off_t off, int count, int *eof,
* (bitwise) of these will suffice. I never actually tested 3 displays hooked
* up simultaneously, so be warned. See the acpi4asus README for more info.
*/
-static int
-proc_write_disp(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t disp_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
int rv, value;
@@ -972,25 +1047,27 @@ proc_write_disp(struct file *file, const char __user *buffer,
return rv;
}
-typedef int (proc_readfunc) (char *page, char **start, off_t off, int count,
- int *eof, void *data);
-typedef int (proc_writefunc) (struct file *file, const char __user *buffer,
- unsigned long count, void *data);
+static const struct file_operations disp_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = disp_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = disp_proc_write,
+};
static int
-asus_proc_add(char *name, proc_writefunc *writefunc,
- proc_readfunc *readfunc, mode_t mode,
+asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
struct acpi_device *device)
{
- struct proc_dir_entry *proc =
- create_proc_entry(name, mode, acpi_device_dir(device));
+ struct proc_dir_entry *proc;
+
+ proc = proc_create_data(name, mode, acpi_device_dir(device),
+ proc_fops, acpi_driver_data(device));
if (!proc) {
printk(KERN_WARNING " Unable to create %s fs entry\n", name);
return -1;
}
- proc->write_proc = writefunc;
- proc->read_proc = readfunc;
- proc->data = acpi_driver_data(device);
proc->uid = asus_uid;
proc->gid = asus_gid;
return 0;
@@ -1019,10 +1096,9 @@ static int asus_hotk_add_fs(struct acpi_device *device)
if (!acpi_device_dir(device))
return -ENODEV;
- proc = create_proc_entry(PROC_INFO, mode, acpi_device_dir(device));
+ proc = proc_create(PROC_INFO, mode, acpi_device_dir(device),
+ &asus_info_proc_fops);
if (proc) {
- proc->read_proc = proc_read_info;
- proc->data = acpi_driver_data(device);
proc->uid = asus_uid;
proc->gid = asus_gid;
} else {
@@ -1031,28 +1107,23 @@ static int asus_hotk_add_fs(struct acpi_device *device)
}
if (hotk->methods->mt_wled) {
- asus_proc_add(PROC_WLED, &proc_write_wled, &proc_read_wled,
- mode, device);
+ asus_proc_add(PROC_WLED, &wled_proc_fops, mode, device);
}
if (hotk->methods->mt_ledd) {
- asus_proc_add(PROC_LEDD, &proc_write_ledd, &proc_read_ledd,
- mode, device);
+ asus_proc_add(PROC_LEDD, &ledd_proc_fops, mode, device);
}
if (hotk->methods->mt_mled) {
- asus_proc_add(PROC_MLED, &proc_write_mled, &proc_read_mled,
- mode, device);
+ asus_proc_add(PROC_MLED, &mled_proc_fops, mode, device);
}
if (hotk->methods->mt_tled) {
- asus_proc_add(PROC_TLED, &proc_write_tled, &proc_read_tled,
- mode, device);
+ asus_proc_add(PROC_TLED, &tled_proc_fops, mode, device);
}
if (hotk->methods->mt_bt_switch) {
- asus_proc_add(PROC_BT, &proc_write_bluetooth,
- &proc_read_bluetooth, mode, device);
+ asus_proc_add(PROC_BT, &bluetooth_proc_fops, mode, device);
}
/*
@@ -1060,19 +1131,16 @@ static int asus_hotk_add_fs(struct acpi_device *device)
* accessible from the keyboard
*/
if (hotk->methods->mt_lcd_switch && hotk->methods->lcd_status) {
- asus_proc_add(PROC_LCD, &proc_write_lcd, &proc_read_lcd, mode,
- device);
+ asus_proc_add(PROC_LCD, &lcd_proc_fops, mode, device);
}
if ((hotk->methods->brightness_up && hotk->methods->brightness_down) ||
(hotk->methods->brightness_get && hotk->methods->brightness_set)) {
- asus_proc_add(PROC_BRN, &proc_write_brn, &proc_read_brn, mode,
- device);
+ asus_proc_add(PROC_BRN, &brn_proc_fops, mode, device);
}
if (hotk->methods->display_set) {
- asus_proc_add(PROC_DISP, &proc_write_disp, &proc_read_disp,
- mode, device);
+ asus_proc_add(PROC_DISP, &disp_proc_fops, mode, device);
}
return 0;
@@ -1334,9 +1402,6 @@ static int asus_hotk_add(struct acpi_device *device)
acpi_status status = AE_OK;
int result;
- if (!device)
- return -EINVAL;
-
printk(KERN_NOTICE "Asus Laptop ACPI Extras version %s\n",
ASUS_ACPI_VERSION);
@@ -1392,9 +1457,6 @@ end:
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
asus_hotk_remove_fs(device);
kfree(hotk);
@@ -1422,21 +1484,17 @@ static int __init asus_acpi_init(void)
{
int result;
- if (acpi_disabled)
- return -ENODEV;
+ result = acpi_bus_register_driver(&asus_hotk_driver);
+ if (result < 0)
+ return result;
asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir);
if (!asus_proc_dir) {
printk(KERN_ERR "Asus ACPI: Unable to create /proc entry\n");
+ acpi_bus_unregister_driver(&asus_hotk_driver);
return -ENODEV;
}
- result = acpi_bus_register_driver(&asus_hotk_driver);
- if (result < 0) {
- remove_proc_entry(PROC_ASUS, acpi_root_dir);
- return result;
- }
-
/*
* This is a bit of a kludge. We only want this module loaded
* for ASUS systems, but there's currently no way to probe the
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
new file mode 100644
index 0000000..ed90082
--- /dev/null
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -0,0 +1,609 @@
+/*
+ * Copyright (C) 2009 Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/backlight.h>
+#include <linux/input.h>
+
+MODULE_LICENSE("GPL");
+
+
+struct cmpc_accel {
+ int sensitivity;
+};
+
+#define CMPC_ACCEL_SENSITIVITY_DEFAULT 5
+
+
+/*
+ * Generic input device code.
+ */
+
+typedef void (*input_device_init)(struct input_dev *dev);
+
+static int cmpc_add_acpi_notify_device(struct acpi_device *acpi, char *name,
+ input_device_init idev_init)
+{
+ struct input_dev *inputdev;
+ int error;
+
+ inputdev = input_allocate_device();
+ if (!inputdev)
+ return -ENOMEM;
+ inputdev->name = name;
+ inputdev->dev.parent = &acpi->dev;
+ idev_init(inputdev);
+ error = input_register_device(inputdev);
+ if (error) {
+ input_free_device(inputdev);
+ return error;
+ }
+ dev_set_drvdata(&acpi->dev, inputdev);
+ return 0;
+}
+
+static int cmpc_remove_acpi_notify_device(struct acpi_device *acpi)
+{
+ struct input_dev *inputdev = dev_get_drvdata(&acpi->dev);
+ input_unregister_device(inputdev);
+ return 0;
+}
+
+/*
+ * Accelerometer code.
+ */
+static acpi_status cmpc_start_accel(acpi_handle handle)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x3;
+ param[1].type = ACPI_TYPE_INTEGER;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
+ return status;
+}
+
+static acpi_status cmpc_stop_accel(acpi_handle handle)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x4;
+ param[1].type = ACPI_TYPE_INTEGER;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
+ return status;
+}
+
+static acpi_status cmpc_accel_set_sensitivity(acpi_handle handle, int val)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x02;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = val;
+ input.count = 2;
+ input.pointer = param;
+ return acpi_evaluate_object(handle, "ACMD", &input, NULL);
+}
+
+static acpi_status cmpc_get_accel(acpi_handle handle,
+ unsigned char *x,
+ unsigned char *y,
+ unsigned char *z)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, 0 };
+ unsigned char *locs;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x01;
+ param[1].type = ACPI_TYPE_INTEGER;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, &output);
+ if (ACPI_SUCCESS(status)) {
+ union acpi_object *obj;
+ obj = output.pointer;
+ locs = obj->buffer.pointer;
+ *x = locs[0];
+ *y = locs[1];
+ *z = locs[2];
+ kfree(output.pointer);
+ }
+ return status;
+}
+
+static void cmpc_accel_handler(struct acpi_device *dev, u32 event)
+{
+ if (event == 0x81) {
+ unsigned char x, y, z;
+ acpi_status status;
+
+ status = cmpc_get_accel(dev->handle, &x, &y, &z);
+ if (ACPI_SUCCESS(status)) {
+ struct input_dev *inputdev = dev_get_drvdata(&dev->dev);
+
+ input_report_abs(inputdev, ABS_X, x);
+ input_report_abs(inputdev, ABS_Y, y);
+ input_report_abs(inputdev, ABS_Z, z);
+ input_sync(inputdev);
+ }
+ }
+}
+
+static ssize_t cmpc_accel_sensitivity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ return sprintf(buf, "%d\n", accel->sensitivity);
+}
+
+static ssize_t cmpc_accel_sensitivity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+ unsigned long sensitivity;
+ int r;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ r = strict_strtoul(buf, 0, &sensitivity);
+ if (r)
+ return r;
+
+ accel->sensitivity = sensitivity;
+ cmpc_accel_set_sensitivity(acpi->handle, sensitivity);
+
+ return strnlen(buf, count);
+}
+
+struct device_attribute cmpc_accel_sensitivity_attr = {
+ .attr = { .name = "sensitivity", .mode = 0660 },
+ .show = cmpc_accel_sensitivity_show,
+ .store = cmpc_accel_sensitivity_store
+};
+
+static int cmpc_accel_open(struct input_dev *input)
+{
+ struct acpi_device *acpi;
+
+ acpi = to_acpi_device(input->dev.parent);
+ if (ACPI_SUCCESS(cmpc_start_accel(acpi->handle)))
+ return 0;
+ return -EIO;
+}
+
+static void cmpc_accel_close(struct input_dev *input)
+{
+ struct acpi_device *acpi;
+
+ acpi = to_acpi_device(input->dev.parent);
+ cmpc_stop_accel(acpi->handle);
+}
+
+static void cmpc_accel_idev_init(struct input_dev *inputdev)
+{
+ set_bit(EV_ABS, inputdev->evbit);
+ input_set_abs_params(inputdev, ABS_X, 0, 255, 8, 0);
+ input_set_abs_params(inputdev, ABS_Y, 0, 255, 8, 0);
+ input_set_abs_params(inputdev, ABS_Z, 0, 255, 8, 0);
+ inputdev->open = cmpc_accel_open;
+ inputdev->close = cmpc_accel_close;
+}
+
+static int cmpc_accel_add(struct acpi_device *acpi)
+{
+ int error;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ accel = kmalloc(sizeof(*accel), GFP_KERNEL);
+ if (!accel)
+ return -ENOMEM;
+
+ accel->sensitivity = CMPC_ACCEL_SENSITIVITY_DEFAULT;
+ cmpc_accel_set_sensitivity(acpi->handle, accel->sensitivity);
+
+ error = device_create_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
+ if (error)
+ goto failed_file;
+
+ error = cmpc_add_acpi_notify_device(acpi, "cmpc_accel",
+ cmpc_accel_idev_init);
+ if (error)
+ goto failed_input;
+
+ inputdev = dev_get_drvdata(&acpi->dev);
+ dev_set_drvdata(&inputdev->dev, accel);
+
+ return 0;
+
+failed_input:
+ device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
+failed_file:
+ kfree(accel);
+ return error;
+}
+
+static int cmpc_accel_remove(struct acpi_device *acpi, int type)
+{
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
+ return cmpc_remove_acpi_notify_device(acpi);
+}
+
+static const struct acpi_device_id cmpc_accel_device_ids[] = {
+ {"ACCE0000", 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, cmpc_accel_device_ids);
+
+static struct acpi_driver cmpc_accel_acpi_driver = {
+ .owner = THIS_MODULE,
+ .name = "cmpc_accel",
+ .class = "cmpc_accel",
+ .ids = cmpc_accel_device_ids,
+ .ops = {
+ .add = cmpc_accel_add,
+ .remove = cmpc_accel_remove,
+ .notify = cmpc_accel_handler,
+ }
+};
+
+
+/*
+ * Tablet mode code.
+ */
+static acpi_status cmpc_get_tablet(acpi_handle handle,
+ unsigned long long *value)
+{
+ union acpi_object param;
+ struct acpi_object_list input;
+ unsigned long long output;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 0x01;
+ input.count = 1;
+ input.pointer = &param;
+ status = acpi_evaluate_integer(handle, "TCMD", &input, &output);
+ if (ACPI_SUCCESS(status))
+ *value = output;
+ return status;
+}
+
+static void cmpc_tablet_handler(struct acpi_device *dev, u32 event)
+{
+ unsigned long long val = 0;
+ struct input_dev *inputdev = dev_get_drvdata(&dev->dev);
+
+ if (event == 0x81) {
+ if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val)))
+ input_report_switch(inputdev, SW_TABLET_MODE, !val);
+ }
+}
+
+static void cmpc_tablet_idev_init(struct input_dev *inputdev)
+{
+ unsigned long long val = 0;
+ struct acpi_device *acpi;
+
+ set_bit(EV_SW, inputdev->evbit);
+ set_bit(SW_TABLET_MODE, inputdev->swbit);
+
+ acpi = to_acpi_device(inputdev->dev.parent);
+ if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val)))
+ input_report_switch(inputdev, SW_TABLET_MODE, !val);
+}
+
+static int cmpc_tablet_add(struct acpi_device *acpi)
+{
+ return cmpc_add_acpi_notify_device(acpi, "cmpc_tablet",
+ cmpc_tablet_idev_init);
+}
+
+static int cmpc_tablet_remove(struct acpi_device *acpi, int type)
+{
+ return cmpc_remove_acpi_notify_device(acpi);
+}
+
+static int cmpc_tablet_resume(struct acpi_device *acpi)
+{
+ struct input_dev *inputdev = dev_get_drvdata(&acpi->dev);
+ unsigned long long val = 0;
+ if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val)))
+ input_report_switch(inputdev, SW_TABLET_MODE, !val);
+ return 0;
+}
+
+static const struct acpi_device_id cmpc_tablet_device_ids[] = {
+ {"TBLT0000", 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, cmpc_tablet_device_ids);
+
+static struct acpi_driver cmpc_tablet_acpi_driver = {
+ .owner = THIS_MODULE,
+ .name = "cmpc_tablet",
+ .class = "cmpc_tablet",
+ .ids = cmpc_tablet_device_ids,
+ .ops = {
+ .add = cmpc_tablet_add,
+ .remove = cmpc_tablet_remove,
+ .resume = cmpc_tablet_resume,
+ .notify = cmpc_tablet_handler,
+ }
+};
+
+
+/*
+ * Backlight code.
+ */
+
+static acpi_status cmpc_get_brightness(acpi_handle handle,
+ unsigned long long *value)
+{
+ union acpi_object param;
+ struct acpi_object_list input;
+ unsigned long long output;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 0xC0;
+ input.count = 1;
+ input.pointer = &param;
+ status = acpi_evaluate_integer(handle, "GRDI", &input, &output);
+ if (ACPI_SUCCESS(status))
+ *value = output;
+ return status;
+}
+
+static acpi_status cmpc_set_brightness(acpi_handle handle,
+ unsigned long long value)
+{
+ union acpi_object param[2];
+ struct acpi_object_list input;
+ acpi_status status;
+ unsigned long long output;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0xC0;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = value;
+ input.count = 2;
+ input.pointer = param;
+ status = acpi_evaluate_integer(handle, "GWRI", &input, &output);
+ return status;
+}
+
+static int cmpc_bl_get_brightness(struct backlight_device *bd)
+{
+ acpi_status status;
+ acpi_handle handle;
+ unsigned long long brightness;
+
+ handle = bl_get_data(bd);
+ status = cmpc_get_brightness(handle, &brightness);
+ if (ACPI_SUCCESS(status))
+ return brightness;
+ else
+ return -1;
+}
+
+static int cmpc_bl_update_status(struct backlight_device *bd)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = bl_get_data(bd);
+ status = cmpc_set_brightness(handle, bd->props.brightness);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ else
+ return -1;
+}
+
+static struct backlight_ops cmpc_bl_ops = {
+ .get_brightness = cmpc_bl_get_brightness,
+ .update_status = cmpc_bl_update_status
+};
+
+static int cmpc_bl_add(struct acpi_device *acpi)
+{
+ struct backlight_device *bd;
+
+ bd = backlight_device_register("cmpc_bl", &acpi->dev,
+ acpi->handle, &cmpc_bl_ops);
+ bd->props.max_brightness = 7;
+ dev_set_drvdata(&acpi->dev, bd);
+ return 0;
+}
+
+static int cmpc_bl_remove(struct acpi_device *acpi, int type)
+{
+ struct backlight_device *bd;
+
+ bd = dev_get_drvdata(&acpi->dev);
+ backlight_device_unregister(bd);
+ return 0;
+}
+
+static const struct acpi_device_id cmpc_device_ids[] = {
+ {"IPML200", 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, cmpc_device_ids);
+
+static struct acpi_driver cmpc_bl_acpi_driver = {
+ .owner = THIS_MODULE,
+ .name = "cmpc",
+ .class = "cmpc",
+ .ids = cmpc_device_ids,
+ .ops = {
+ .add = cmpc_bl_add,
+ .remove = cmpc_bl_remove
+ }
+};
+
+
+/*
+ * Extra keys code.
+ */
+static int cmpc_keys_codes[] = {
+ KEY_UNKNOWN,
+ KEY_WLAN,
+ KEY_SWITCHVIDEOMODE,
+ KEY_BRIGHTNESSDOWN,
+ KEY_BRIGHTNESSUP,
+ KEY_VENDOR,
+ KEY_MAX
+};
+
+static void cmpc_keys_handler(struct acpi_device *dev, u32 event)
+{
+ struct input_dev *inputdev;
+ int code = KEY_MAX;
+
+ if ((event & 0x0F) < ARRAY_SIZE(cmpc_keys_codes))
+ code = cmpc_keys_codes[event & 0x0F];
+ inputdev = dev_get_drvdata(&dev->dev);
+ input_report_key(inputdev, code, !(event & 0x10));
+}
+
+static void cmpc_keys_idev_init(struct input_dev *inputdev)
+{
+ int i;
+
+ set_bit(EV_KEY, inputdev->evbit);
+ for (i = 0; cmpc_keys_codes[i] != KEY_MAX; i++)
+ set_bit(cmpc_keys_codes[i], inputdev->keybit);
+}
+
+static int cmpc_keys_add(struct acpi_device *acpi)
+{
+ return cmpc_add_acpi_notify_device(acpi, "cmpc_keys",
+ cmpc_keys_idev_init);
+}
+
+static int cmpc_keys_remove(struct acpi_device *acpi, int type)
+{
+ return cmpc_remove_acpi_notify_device(acpi);
+}
+
+static const struct acpi_device_id cmpc_keys_device_ids[] = {
+ {"FnBT0000", 0},
+ {"", 0}
+};
+MODULE_DEVICE_TABLE(acpi, cmpc_keys_device_ids);
+
+static struct acpi_driver cmpc_keys_acpi_driver = {
+ .owner = THIS_MODULE,
+ .name = "cmpc_keys",
+ .class = "cmpc_keys",
+ .ids = cmpc_keys_device_ids,
+ .ops = {
+ .add = cmpc_keys_add,
+ .remove = cmpc_keys_remove,
+ .notify = cmpc_keys_handler,
+ }
+};
+
+
+/*
+ * General init/exit code.
+ */
+
+static int cmpc_init(void)
+{
+ int r;
+
+ r = acpi_bus_register_driver(&cmpc_keys_acpi_driver);
+ if (r)
+ goto failed_keys;
+
+ r = acpi_bus_register_driver(&cmpc_bl_acpi_driver);
+ if (r)
+ goto failed_bl;
+
+ r = acpi_bus_register_driver(&cmpc_tablet_acpi_driver);
+ if (r)
+ goto failed_tablet;
+
+ r = acpi_bus_register_driver(&cmpc_accel_acpi_driver);
+ if (r)
+ goto failed_accel;
+
+ return r;
+
+failed_accel:
+ acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver);
+
+failed_tablet:
+ acpi_bus_unregister_driver(&cmpc_bl_acpi_driver);
+
+failed_bl:
+ acpi_bus_unregister_driver(&cmpc_keys_acpi_driver);
+
+failed_keys:
+ return r;
+}
+
+static void cmpc_exit(void)
+{
+ acpi_bus_unregister_driver(&cmpc_accel_acpi_driver);
+ acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver);
+ acpi_bus_unregister_driver(&cmpc_bl_acpi_driver);
+ acpi_bus_unregister_driver(&cmpc_keys_acpi_driver);
+}
+
+module_init(cmpc_init);
+module_exit(cmpc_exit);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 11003bb..1a387e7 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -51,7 +51,6 @@
#include <linux/dmi.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
-#include <linux/autoconf.h>
#define COMPAL_DRIVER_VERSION "0.2.6"
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 74909c4..3780994 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -58,6 +58,14 @@ static int da_command_code;
static int da_num_tokens;
static struct calling_interface_token *da_tokens;
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "dell-laptop",
+ .owner = THIS_MODULE,
+ }
+};
+
+static struct platform_device *platform_device;
static struct backlight_device *dell_backlight_device;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
@@ -74,7 +82,7 @@ static const struct dmi_system_id __initdata dell_device_table[] = {
{ }
};
-static void parse_da_table(const struct dmi_header *dm)
+static void __init parse_da_table(const struct dmi_header *dm)
{
/* Final token is a terminator, so we don't want to copy it */
int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
@@ -103,7 +111,7 @@ static void parse_da_table(const struct dmi_header *dm)
da_num_tokens += tokens;
}
-static void find_tokens(const struct dmi_header *dm, void *dummy)
+static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
switch (dm->type) {
case 0xd4: /* Indexed IO */
@@ -197,8 +205,8 @@ static void dell_rfkill_query(struct rfkill *rfkill, void *data)
dell_send_request(&buffer, 17, 11);
status = buffer.output[1];
- if (status & BIT(bit))
- rfkill_set_hw_state(rfkill, !!(status & BIT(16)));
+ rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
+ rfkill_set_hw_state(rfkill, !(status & BIT(16)));
}
static const struct rfkill_ops dell_rfkill_ops = {
@@ -206,7 +214,7 @@ static const struct rfkill_ops dell_rfkill_ops = {
.query = dell_rfkill_query,
};
-static int dell_setup_rfkill(void)
+static int __init dell_setup_rfkill(void)
{
struct calling_interface_buffer buffer;
int status;
@@ -217,7 +225,8 @@ static int dell_setup_rfkill(void)
status = buffer.output[1];
if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
- wifi_rfkill = rfkill_alloc("dell-wifi", NULL, RFKILL_TYPE_WLAN,
+ wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
+ RFKILL_TYPE_WLAN,
&dell_rfkill_ops, (void *) 1);
if (!wifi_rfkill) {
ret = -ENOMEM;
@@ -229,7 +238,8 @@ static int dell_setup_rfkill(void)
}
if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
- bluetooth_rfkill = rfkill_alloc("dell-bluetooth", NULL,
+ bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
+ &platform_device->dev,
RFKILL_TYPE_BLUETOOTH,
&dell_rfkill_ops, (void *) 2);
if (!bluetooth_rfkill) {
@@ -242,7 +252,9 @@ static int dell_setup_rfkill(void)
}
if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
- wwan_rfkill = rfkill_alloc("dell-wwan", NULL, RFKILL_TYPE_WWAN,
+ wwan_rfkill = rfkill_alloc("dell-wwan",
+ &platform_device->dev,
+ RFKILL_TYPE_WWAN,
&dell_rfkill_ops, (void *) 3);
if (!wwan_rfkill) {
ret = -ENOMEM;
@@ -268,6 +280,22 @@ err_wifi:
return ret;
}
+static void dell_cleanup_rfkill(void)
+{
+ if (wifi_rfkill) {
+ rfkill_unregister(wifi_rfkill);
+ rfkill_destroy(wifi_rfkill);
+ }
+ if (bluetooth_rfkill) {
+ rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
+ }
+ if (wwan_rfkill) {
+ rfkill_unregister(wwan_rfkill);
+ rfkill_destroy(wwan_rfkill);
+ }
+}
+
static int dell_send_intensity(struct backlight_device *bd)
{
struct calling_interface_buffer buffer;
@@ -326,11 +354,23 @@ static int __init dell_init(void)
return -ENODEV;
}
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ goto fail_platform_driver;
+ platform_device = platform_device_alloc("dell-laptop", -1);
+ if (!platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device1;
+ }
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device2;
+
ret = dell_setup_rfkill();
if (ret) {
printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n");
- goto out;
+ goto fail_rfkill;
}
#ifdef CONFIG_ACPI
@@ -352,13 +392,13 @@ static int __init dell_init(void)
if (max_intensity) {
dell_backlight_device = backlight_device_register(
"dell_backlight",
- NULL, NULL,
+ &platform_device->dev, NULL,
&dell_ops);
if (IS_ERR(dell_backlight_device)) {
ret = PTR_ERR(dell_backlight_device);
dell_backlight_device = NULL;
- goto out;
+ goto fail_backlight;
}
dell_backlight_device->props.max_brightness = max_intensity;
@@ -368,13 +408,16 @@ static int __init dell_init(void)
}
return 0;
-out:
- if (wifi_rfkill)
- rfkill_unregister(wifi_rfkill);
- if (bluetooth_rfkill)
- rfkill_unregister(bluetooth_rfkill);
- if (wwan_rfkill)
- rfkill_unregister(wwan_rfkill);
+
+fail_backlight:
+ dell_cleanup_rfkill();
+fail_rfkill:
+ platform_device_del(platform_device);
+fail_platform_device2:
+ platform_device_put(platform_device);
+fail_platform_device1:
+ platform_driver_unregister(&platform_driver);
+fail_platform_driver:
kfree(da_tokens);
return ret;
}
@@ -382,12 +425,7 @@ out:
static void __exit dell_exit(void)
{
backlight_device_unregister(dell_backlight_device);
- if (wifi_rfkill)
- rfkill_unregister(wifi_rfkill);
- if (bluetooth_rfkill)
- rfkill_unregister(bluetooth_rfkill);
- if (wwan_rfkill)
- rfkill_unregister(wwan_rfkill);
+ dell_cleanup_rfkill();
}
module_init(dell_init);
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 0f900cc..1b1dddb 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -31,6 +31,7 @@
#include <acpi/acpi_drivers.h>
#include <linux/acpi.h>
#include <linux/string.h>
+#include <linux/dmi.h>
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver");
@@ -38,6 +39,8 @@ MODULE_LICENSE("GPL");
#define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492"
+static int acpi_video;
+
MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
struct key_entry {
@@ -54,7 +57,7 @@ enum { KE_KEY, KE_SW, KE_IGNORE, KE_END };
* via the keyboard controller so should not be sent again.
*/
-static struct key_entry dell_wmi_keymap[] = {
+static struct key_entry dell_legacy_wmi_keymap[] = {
{KE_KEY, 0xe045, KEY_PROG1},
{KE_KEY, 0xe009, KEY_EJECTCD},
@@ -72,7 +75,7 @@ static struct key_entry dell_wmi_keymap[] = {
/* The next device is at offset 6, the active devices are at
offset 8 and the attached devices at offset 10 */
- {KE_KEY, 0xe00b, KEY_DISPLAYTOGGLE},
+ {KE_KEY, 0xe00b, KEY_SWITCHVIDEOMODE},
{KE_IGNORE, 0xe00c, KEY_KBDILLUMTOGGLE},
@@ -96,6 +99,47 @@ static struct key_entry dell_wmi_keymap[] = {
{KE_END, 0}
};
+static bool dell_new_hk_type;
+
+struct dell_new_keymap_entry {
+ u16 scancode;
+ u16 keycode;
+};
+
+struct dell_hotkey_table {
+ struct dmi_header header;
+ struct dell_new_keymap_entry keymap[];
+
+};
+
+static struct key_entry *dell_new_wmi_keymap;
+
+static u16 bios_to_linux_keycode[256] = {
+
+ KEY_MEDIA, KEY_NEXTSONG, KEY_PLAYPAUSE, KEY_PREVIOUSSONG,
+ KEY_STOPCD, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_WWW, KEY_UNKNOWN, KEY_VOLUMEDOWN, KEY_MUTE,
+ KEY_VOLUMEUP, KEY_UNKNOWN, KEY_BATTERY, KEY_EJECTCD,
+ KEY_UNKNOWN, KEY_SLEEP, KEY_PROG1, KEY_BRIGHTNESSDOWN,
+ KEY_BRIGHTNESSUP, KEY_UNKNOWN, KEY_KBDILLUMTOGGLE,
+ KEY_UNKNOWN, KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, KEY_PROG2,
+ KEY_UNKNOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ KEY_PROG3
+};
+
+
+static struct key_entry *dell_wmi_keymap = dell_legacy_wmi_keymap;
+
static struct input_dev *dell_wmi_input_dev;
static struct key_entry *dell_wmi_get_entry_by_scancode(int code)
@@ -158,30 +202,90 @@ static void dell_wmi_notify(u32 value, void *context)
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
static struct key_entry *key;
union acpi_object *obj;
+ acpi_status status;
- wmi_get_event_data(value, &response);
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
+ return;
+ }
obj = (union acpi_object *)response.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER) {
- int *buffer = (int *)obj->buffer.pointer;
- /*
- * The upper bytes of the event may contain
- * additional information, so mask them off for the
- * scancode lookup
- */
- key = dell_wmi_get_entry_by_scancode(buffer[1] & 0xFFFF);
- if (key) {
+ int reported_key;
+ u16 *buffer_entry = (u16 *)obj->buffer.pointer;
+ if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
+ printk(KERN_INFO "dell-wmi: Received unknown WMI event"
+ " (0x%x)\n", buffer_entry[1]);
+ return;
+ }
+
+ if (dell_new_hk_type)
+ reported_key = (int)buffer_entry[2];
+ else
+ reported_key = (int)buffer_entry[1] & 0xffff;
+
+ key = dell_wmi_get_entry_by_scancode(reported_key);
+
+ if (!key) {
+ printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
+ reported_key);
+ } else if ((key->keycode == KEY_BRIGHTNESSUP ||
+ key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
+ /* Don't report brightness notifications that will also
+ * come via ACPI */
+ return;
+ } else {
input_report_key(dell_wmi_input_dev, key->keycode, 1);
input_sync(dell_wmi_input_dev);
input_report_key(dell_wmi_input_dev, key->keycode, 0);
input_sync(dell_wmi_input_dev);
- } else if (buffer[1] & 0xFFFF)
- printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
- buffer[1] & 0xFFFF);
+ }
+ }
+ kfree(obj);
+}
+
+
+static void setup_new_hk_map(const struct dmi_header *dm)
+{
+
+ int i;
+ int hotkey_num = (dm->length-4)/sizeof(struct dell_new_keymap_entry);
+ struct dell_hotkey_table *table =
+ container_of(dm, struct dell_hotkey_table, header);
+
+ dell_new_wmi_keymap = kzalloc((hotkey_num+1) *
+ sizeof(struct key_entry), GFP_KERNEL);
+
+ for (i = 0; i < hotkey_num; i++) {
+ dell_new_wmi_keymap[i].type = KE_KEY;
+ dell_new_wmi_keymap[i].code = table->keymap[i].scancode;
+ dell_new_wmi_keymap[i].keycode =
+ (table->keymap[i].keycode > 255) ? 0 :
+ bios_to_linux_keycode[table->keymap[i].keycode];
+ }
+
+ dell_new_wmi_keymap[i].type = KE_END;
+ dell_new_wmi_keymap[i].code = 0;
+ dell_new_wmi_keymap[i].keycode = 0;
+
+ dell_wmi_keymap = dell_new_wmi_keymap;
+
+}
+
+
+static void find_hk_type(const struct dmi_header *dm, void *dummy)
+{
+
+ if ((dm->type == 0xb2) && (dm->length > 6)) {
+ dell_new_hk_type = true;
+ setup_new_hk_map(dm);
}
+
}
+
static int __init dell_wmi_input_setup(void)
{
struct key_entry *key;
@@ -224,34 +328,37 @@ static int __init dell_wmi_input_setup(void)
static int __init dell_wmi_init(void)
{
int err;
+ acpi_status status;
- if (wmi_has_guid(DELL_EVENT_GUID)) {
- err = dell_wmi_input_setup();
+ if (!wmi_has_guid(DELL_EVENT_GUID)) {
+ printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
+ return -ENODEV;
+ }
- if (err)
- return err;
+ dmi_walk(find_hk_type, NULL);
+ acpi_video = acpi_video_backlight_support();
- err = wmi_install_notify_handler(DELL_EVENT_GUID,
- dell_wmi_notify, NULL);
- if (err) {
- input_unregister_device(dell_wmi_input_dev);
- printk(KERN_ERR "dell-wmi: Unable to register"
- " notify handler - %d\n", err);
- return err;
- }
+ err = dell_wmi_input_setup();
+ if (err)
+ return err;
- } else
- printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
+ status = wmi_install_notify_handler(DELL_EVENT_GUID,
+ dell_wmi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ input_unregister_device(dell_wmi_input_dev);
+ printk(KERN_ERR
+ "dell-wmi: Unable to register notify handler - %d\n",
+ status);
+ return -ENODEV;
+ }
return 0;
}
static void __exit dell_wmi_exit(void)
{
- if (wmi_has_guid(DELL_EVENT_GUID)) {
- wmi_remove_notify_handler(DELL_EVENT_GUID);
- input_unregister_device(dell_wmi_input_dev);
- }
+ wmi_remove_notify_handler(DELL_EVENT_GUID);
+ input_unregister_device(dell_wmi_input_dev);
}
module_init(dell_wmi_init);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index e647a85..e2be6bb 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1,5 +1,5 @@
/*
- * eepc-laptop.c - Asus Eee PC extras
+ * eeepc-laptop.c - Asus Eee PC extras
*
* Based on asus_acpi.c as patched for the Eee PC by Asus:
* ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar
@@ -31,23 +31,36 @@
#include <acpi/acpi_bus.h>
#include <linux/uaccess.h>
#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
#include <linux/rfkill.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/leds.h>
+#include <linux/dmi.h>
#define EEEPC_LAPTOP_VERSION "0.1"
+#define EEEPC_LAPTOP_NAME "Eee PC Hotkey Driver"
+#define EEEPC_LAPTOP_FILE "eeepc"
-#define EEEPC_HOTK_NAME "Eee PC Hotkey Driver"
-#define EEEPC_HOTK_FILE "eeepc"
-#define EEEPC_HOTK_CLASS "hotkey"
-#define EEEPC_HOTK_DEVICE_NAME "Hotkey"
-#define EEEPC_HOTK_HID "ASUS010"
+#define EEEPC_ACPI_CLASS "hotkey"
+#define EEEPC_ACPI_DEVICE_NAME "Hotkey"
+#define EEEPC_ACPI_HID "ASUS010"
+MODULE_AUTHOR("Corentin Chary, Eric Cooper");
+MODULE_DESCRIPTION(EEEPC_LAPTOP_NAME);
+MODULE_LICENSE("GPL");
+
+static bool hotplug_disabled;
+
+module_param(hotplug_disabled, bool, 0644);
+MODULE_PARM_DESC(hotplug_disabled,
+ "Disable hotplug for wireless device. "
+ "If your laptop need that, please report to "
+ "acpi4asus-user@lists.sourceforge.net.");
/*
* Definitions for Asus EeePC
*/
-#define NOTIFY_WLAN_ON 0x10
#define NOTIFY_BRN_MIN 0x20
#define NOTIFY_BRN_MAX 0x2f
@@ -117,145 +130,64 @@ static const char *cm_setv[] = {
NULL, NULL, "PBPS", "TPDS"
};
-#define EEEPC_EC "\\_SB.PCI0.SBRG.EC0."
-
-#define EEEPC_EC_FAN_PWM EEEPC_EC "SC02" /* Fan PWM duty cycle (%) */
-#define EEEPC_EC_SC02 0x63
-#define EEEPC_EC_FAN_HRPM EEEPC_EC "SC05" /* High byte, fan speed (RPM) */
-#define EEEPC_EC_FAN_LRPM EEEPC_EC "SC06" /* Low byte, fan speed (RPM) */
-#define EEEPC_EC_FAN_CTRL EEEPC_EC "SFB3" /* Byte containing SF25 */
-#define EEEPC_EC_SFB3 0xD3
+static const struct key_entry eeepc_keymap[] = {
+ { KE_KEY, 0x10, { KEY_WLAN } },
+ { KE_KEY, 0x11, { KEY_WLAN } },
+ { KE_KEY, 0x12, { KEY_PROG1 } },
+ { KE_KEY, 0x13, { KEY_MUTE } },
+ { KE_KEY, 0x14, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x15, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x16, { KEY_DISPLAY_OFF } },
+ { KE_KEY, 0x1a, { KEY_COFFEE } },
+ { KE_KEY, 0x1b, { KEY_ZOOM } },
+ { KE_KEY, 0x1c, { KEY_PROG2 } },
+ { KE_KEY, 0x1d, { KEY_PROG3 } },
+ { KE_KEY, NOTIFY_BRN_MIN, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, NOTIFY_BRN_MAX, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x30, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x31, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x32, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x37, { KEY_F13 } }, /* Disable Touchpad */
+ { KE_KEY, 0x38, { KEY_F14 } },
+ { KE_END, 0 },
+};
/*
* This is the main structure, we can use it to store useful information
- * about the hotk device
*/
-struct eeepc_hotk {
- struct acpi_device *device; /* the device we are in */
- acpi_handle handle; /* the handle of the hotk device */
+struct eeepc_laptop {
+ acpi_handle handle; /* the handle of the acpi device */
u32 cm_supported; /* the control methods supported
by this BIOS */
- uint init_flag; /* Init flags */
+ bool cpufv_disabled;
+ bool hotplug_disabled;
u16 event_count[128]; /* count for each event */
+
+ struct platform_device *platform_device;
+ struct device *hwmon_device;
+ struct backlight_device *backlight_device;
+
struct input_dev *inputdev;
- u16 *keycode_map;
+ struct key_entry *keymap;
+
struct rfkill *wlan_rfkill;
struct rfkill *bluetooth_rfkill;
struct rfkill *wwan3g_rfkill;
struct rfkill *wimax_rfkill;
+
struct hotplug_slot *hotplug_slot;
struct mutex hotplug_lock;
-};
-
-/* The actual device the driver binds to */
-static struct eeepc_hotk *ehotk;
-
-/* Platform device/driver */
-static int eeepc_hotk_thaw(struct device *device);
-static int eeepc_hotk_restore(struct device *device);
-
-static const struct dev_pm_ops eeepc_pm_ops = {
- .thaw = eeepc_hotk_thaw,
- .restore = eeepc_hotk_restore,
-};
-
-static struct platform_driver platform_driver = {
- .driver = {
- .name = EEEPC_HOTK_FILE,
- .owner = THIS_MODULE,
- .pm = &eeepc_pm_ops,
- }
-};
-
-static struct platform_device *platform_device;
-
-struct key_entry {
- char type;
- u8 code;
- u16 keycode;
-};
-
-enum { KE_KEY, KE_END };
-
-static struct key_entry eeepc_keymap[] = {
- /* Sleep already handled via generic ACPI code */
- {KE_KEY, 0x10, KEY_WLAN },
- {KE_KEY, 0x11, KEY_WLAN },
- {KE_KEY, 0x12, KEY_PROG1 },
- {KE_KEY, 0x13, KEY_MUTE },
- {KE_KEY, 0x14, KEY_VOLUMEDOWN },
- {KE_KEY, 0x15, KEY_VOLUMEUP },
- {KE_KEY, 0x1a, KEY_COFFEE },
- {KE_KEY, 0x1b, KEY_ZOOM },
- {KE_KEY, 0x1c, KEY_PROG2 },
- {KE_KEY, 0x1d, KEY_PROG3 },
- {KE_KEY, NOTIFY_BRN_MIN, KEY_BRIGHTNESSDOWN },
- {KE_KEY, NOTIFY_BRN_MIN + 2, KEY_BRIGHTNESSUP },
- {KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
- {KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
- {KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
- {KE_END, 0},
-};
-
-/*
- * The hotkey driver declaration
- */
-static int eeepc_hotk_add(struct acpi_device *device);
-static int eeepc_hotk_remove(struct acpi_device *device, int type);
-static void eeepc_hotk_notify(struct acpi_device *device, u32 event);
-
-static const struct acpi_device_id eeepc_device_ids[] = {
- {EEEPC_HOTK_HID, 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
-
-static struct acpi_driver eeepc_hotk_driver = {
- .name = EEEPC_HOTK_NAME,
- .class = EEEPC_HOTK_CLASS,
- .ids = eeepc_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
- .ops = {
- .add = eeepc_hotk_add,
- .remove = eeepc_hotk_remove,
- .notify = eeepc_hotk_notify,
- },
-};
-/* PCI hotplug ops */
-static int eeepc_get_adapter_status(struct hotplug_slot *slot, u8 *value);
-
-static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
- .owner = THIS_MODULE,
- .get_adapter_status = eeepc_get_adapter_status,
- .get_power_status = eeepc_get_adapter_status,
+ struct led_classdev tpd_led;
+ int tpd_led_wk;
+ struct workqueue_struct *led_workqueue;
+ struct work_struct tpd_led_work;
};
-/* The backlight device /sys/class/backlight */
-static struct backlight_device *eeepc_backlight_device;
-
-/* The hwmon device */
-static struct device *eeepc_hwmon_device;
-
-/*
- * The backlight class declaration
- */
-static int read_brightness(struct backlight_device *bd);
-static int update_bl_status(struct backlight_device *bd);
-static struct backlight_ops eeepcbl_ops = {
- .get_brightness = read_brightness,
- .update_status = update_bl_status,
-};
-
-MODULE_AUTHOR("Corentin Chary, Eric Cooper");
-MODULE_DESCRIPTION(EEEPC_HOTK_NAME);
-MODULE_LICENSE("GPL");
-
/*
* ACPI Helpers
*/
-static int write_acpi_int(acpi_handle handle, const char *method, int val,
- struct acpi_buffer *output)
+static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
struct acpi_object_list params;
union acpi_object in_obj;
@@ -266,7 +198,7 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = val;
- status = acpi_evaluate_object(handle, (char *)method, &params, output);
+ status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
return (status == AE_OK ? 0 : -1);
}
@@ -285,81 +217,56 @@ static int read_acpi_int(acpi_handle handle, const char *method, int *val)
}
}
-static int set_acpi(int cm, int value)
+static int set_acpi(struct eeepc_laptop *eeepc, int cm, int value)
{
- if (ehotk->cm_supported & (0x1 << cm)) {
- const char *method = cm_setv[cm];
- if (method == NULL)
- return -ENODEV;
- if (write_acpi_int(ehotk->handle, method, value, NULL))
- pr_warning("Error writing %s\n", method);
- }
- return 0;
-}
-
-static int get_acpi(int cm)
-{
- int value = -ENODEV;
- if ((ehotk->cm_supported & (0x1 << cm))) {
- const char *method = cm_getv[cm];
- if (method == NULL)
- return -ENODEV;
- if (read_acpi_int(ehotk->handle, method, &value))
- pr_warning("Error reading %s\n", method);
- }
- return value;
-}
+ const char *method = cm_setv[cm];
-/*
- * Backlight
- */
-static int read_brightness(struct backlight_device *bd)
-{
- return get_acpi(CM_ASL_PANELBRIGHT);
-}
+ if (method == NULL)
+ return -ENODEV;
+ if ((eeepc->cm_supported & (0x1 << cm)) == 0)
+ return -ENODEV;
-static int set_brightness(struct backlight_device *bd, int value)
-{
- value = max(0, min(15, value));
- return set_acpi(CM_ASL_PANELBRIGHT, value);
+ if (write_acpi_int(eeepc->handle, method, value))
+ pr_warning("Error writing %s\n", method);
+ return 0;
}
-static int update_bl_status(struct backlight_device *bd)
+static int get_acpi(struct eeepc_laptop *eeepc, int cm)
{
- return set_brightness(bd, bd->props.brightness);
-}
+ const char *method = cm_getv[cm];
+ int value;
-/*
- * Rfkill helpers
- */
+ if (method == NULL)
+ return -ENODEV;
+ if ((eeepc->cm_supported & (0x1 << cm)) == 0)
+ return -ENODEV;
-static bool eeepc_wlan_rfkill_blocked(void)
-{
- if (get_acpi(CM_ASL_WLAN) == 1)
- return false;
- return true;
+ if (read_acpi_int(eeepc->handle, method, &value))
+ pr_warning("Error reading %s\n", method);
+ return value;
}
-static int eeepc_rfkill_set(void *data, bool blocked)
+static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
+ acpi_handle *handle)
{
- unsigned long asl = (unsigned long)data;
- return set_acpi(asl, !blocked);
-}
+ const char *method = cm_setv[cm];
+ acpi_status status;
-static const struct rfkill_ops eeepc_rfkill_ops = {
- .set_block = eeepc_rfkill_set,
-};
+ if (method == NULL)
+ return -ENODEV;
+ if ((eeepc->cm_supported & (0x1 << cm)) == 0)
+ return -ENODEV;
-static void __devinit eeepc_enable_camera(void)
-{
- /*
- * If the following call to set_acpi() fails, it's because there's no
- * camera so we can ignore the error.
- */
- if (get_acpi(CM_ASL_CAMERA) == 0)
- set_acpi(CM_ASL_CAMERA, 1);
+ status = acpi_get_handle(eeepc->handle, (char *)method,
+ handle);
+ if (status != AE_OK) {
+ pr_warning("Error finding %s\n", method);
+ return -ENODEV;
+ }
+ return 0;
}
+
/*
* Sys helpers
*/
@@ -372,60 +279,63 @@ static int parse_arg(const char *buf, unsigned long count, int *val)
return count;
}
-static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
+static ssize_t store_sys_acpi(struct device *dev, int cm,
+ const char *buf, size_t count)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
- value = set_acpi(cm, value);
+ value = set_acpi(eeepc, cm, value);
if (value < 0)
- return value;
+ return -EIO;
return rv;
}
-static ssize_t show_sys_acpi(int cm, char *buf)
+static ssize_t show_sys_acpi(struct device *dev, int cm, char *buf)
{
- int value = get_acpi(cm);
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+ int value = get_acpi(eeepc, cm);
if (value < 0)
- return value;
+ return -EIO;
return sprintf(buf, "%d\n", value);
}
-#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \
+#define EEEPC_CREATE_DEVICE_ATTR(_name, _mode, _cm) \
static ssize_t show_##_name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- return show_sys_acpi(_cm, buf); \
+ return show_sys_acpi(dev, _cm, buf); \
} \
static ssize_t store_##_name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- return store_sys_acpi(_cm, buf, count); \
+ return store_sys_acpi(dev, _cm, buf, count); \
} \
static struct device_attribute dev_attr_##_name = { \
.attr = { \
.name = __stringify(_name), \
- .mode = 0644 }, \
+ .mode = _mode }, \
.show = show_##_name, \
.store = store_##_name, \
}
-EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
-EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
-EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
+EEEPC_CREATE_DEVICE_ATTR(camera, 0644, CM_ASL_CAMERA);
+EEEPC_CREATE_DEVICE_ATTR(cardr, 0644, CM_ASL_CARDREADER);
+EEEPC_CREATE_DEVICE_ATTR(disp, 0200, CM_ASL_DISPLAYSWITCH);
struct eeepc_cpufv {
int num;
int cur;
};
-static int get_cpufv(struct eeepc_cpufv *c)
+static int get_cpufv(struct eeepc_laptop *eeepc, struct eeepc_cpufv *c)
{
- c->cur = get_acpi(CM_ASL_CPUFV);
+ c->cur = get_acpi(eeepc, CM_ASL_CPUFV);
c->num = (c->cur >> 8) & 0xff;
c->cur &= 0xff;
if (c->cur < 0 || c->num <= 0 || c->num > 12)
@@ -437,11 +347,12 @@ static ssize_t show_available_cpufv(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
int i;
ssize_t len = 0;
- if (get_cpufv(&c))
+ if (get_cpufv(eeepc, &c))
return -ENODEV;
for (i = 0; i < c.num; i++)
len += sprintf(buf + len, "%d ", i);
@@ -453,9 +364,10 @@ static ssize_t show_cpufv(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
- if (get_cpufv(&c))
+ if (get_cpufv(eeepc, &c))
return -ENODEV;
return sprintf(buf, "%#x\n", (c.num << 8) | c.cur);
}
@@ -464,20 +376,58 @@ static ssize_t store_cpufv(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
int rv, value;
- if (get_cpufv(&c))
+ if (eeepc->cpufv_disabled)
+ return -EPERM;
+ if (get_cpufv(eeepc, &c))
return -ENODEV;
rv = parse_arg(buf, count, &value);
if (rv < 0)
return rv;
if (!rv || value < 0 || value >= c.num)
return -EINVAL;
- set_acpi(CM_ASL_CPUFV, value);
+ set_acpi(eeepc, CM_ASL_CPUFV, value);
return rv;
}
+static ssize_t show_cpufv_disabled(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", eeepc->cpufv_disabled);
+}
+
+static ssize_t store_cpufv_disabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+ int rv, value;
+
+ rv = parse_arg(buf, count, &value);
+ if (rv < 0)
+ return rv;
+
+ switch (value) {
+ case 0:
+ if (eeepc->cpufv_disabled)
+ pr_warning("cpufv enabled (not officially supported "
+ "on this model)\n");
+ eeepc->cpufv_disabled = false;
+ return rv;
+ case 1:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+}
+
+
static struct device_attribute dev_attr_cpufv = {
.attr = {
.name = "cpufv",
@@ -493,12 +443,22 @@ static struct device_attribute dev_attr_available_cpufv = {
.show = show_available_cpufv
};
+static struct device_attribute dev_attr_cpufv_disabled = {
+ .attr = {
+ .name = "cpufv_disabled",
+ .mode = 0644 },
+ .show = show_cpufv_disabled,
+ .store = store_cpufv_disabled
+};
+
+
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_disp.attr,
&dev_attr_cpufv.attr,
&dev_attr_available_cpufv.attr,
+ &dev_attr_cpufv_disabled.attr,
NULL
};
@@ -506,156 +466,125 @@ static struct attribute_group platform_attribute_group = {
.attrs = platform_attributes
};
-/*
- * Hotkey functions
- */
-static struct key_entry *eepc_get_entry_by_scancode(int code)
+static int eeepc_platform_init(struct eeepc_laptop *eeepc)
{
- struct key_entry *key;
-
- for (key = eeepc_keymap; key->type != KE_END; key++)
- if (code == key->code)
- return key;
+ int result;
- return NULL;
-}
+ eeepc->platform_device = platform_device_alloc(EEEPC_LAPTOP_FILE, -1);
+ if (!eeepc->platform_device)
+ return -ENOMEM;
+ platform_set_drvdata(eeepc->platform_device, eeepc);
-static struct key_entry *eepc_get_entry_by_keycode(int code)
-{
- struct key_entry *key;
+ result = platform_device_add(eeepc->platform_device);
+ if (result)
+ goto fail_platform_device;
- for (key = eeepc_keymap; key->type != KE_END; key++)
- if (code == key->keycode && key->type == KE_KEY)
- return key;
+ result = sysfs_create_group(&eeepc->platform_device->dev.kobj,
+ &platform_attribute_group);
+ if (result)
+ goto fail_sysfs;
+ return 0;
- return NULL;
+fail_sysfs:
+ platform_device_del(eeepc->platform_device);
+fail_platform_device:
+ platform_device_put(eeepc->platform_device);
+ return result;
}
-static int eeepc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+static void eeepc_platform_exit(struct eeepc_laptop *eeepc)
{
- struct key_entry *key = eepc_get_entry_by_scancode(scancode);
+ sysfs_remove_group(&eeepc->platform_device->dev.kobj,
+ &platform_attribute_group);
+ platform_device_unregister(eeepc->platform_device);
+}
- if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
- return 0;
- }
+/*
+ * LEDs
+ */
+/*
+ * These functions actually update the LEDs, and are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Asus ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+static void tpd_led_update(struct work_struct *work)
+ {
+ struct eeepc_laptop *eeepc;
- return -EINVAL;
+ eeepc = container_of(work, struct eeepc_laptop, tpd_led_work);
+
+ set_acpi(eeepc, CM_ASL_TPD, eeepc->tpd_led_wk);
}
-static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
+static void tpd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
{
- struct key_entry *key;
- int old_keycode;
+ struct eeepc_laptop *eeepc;
- if (keycode < 0 || keycode > KEY_MAX)
- return -EINVAL;
+ eeepc = container_of(led_cdev, struct eeepc_laptop, tpd_led);
- key = eepc_get_entry_by_scancode(scancode);
- if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!eepc_get_entry_by_keycode(old_keycode))
- clear_bit(old_keycode, dev->keybit);
- return 0;
- }
-
- return -EINVAL;
+ eeepc->tpd_led_wk = (value > 0) ? 1 : 0;
+ queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
}
-static void cmsg_quirk(int cm, const char *name)
+static int eeepc_led_init(struct eeepc_laptop *eeepc)
{
- int dummy;
+ int rv;
- /* Some BIOSes do not report cm although it is avaliable.
- Check if cm_getv[cm] works and, if yes, assume cm should be set. */
- if (!(ehotk->cm_supported & (1 << cm))
- && !read_acpi_int(ehotk->handle, cm_getv[cm], &dummy)) {
- pr_info("%s (%x) not reported by BIOS,"
- " enabling anyway\n", name, 1 << cm);
- ehotk->cm_supported |= 1 << cm;
- }
-}
+ if (get_acpi(eeepc, CM_ASL_TPD) == -ENODEV)
+ return 0;
-static void cmsg_quirks(void)
-{
- cmsg_quirk(CM_ASL_LID, "LID");
- cmsg_quirk(CM_ASL_TYPE, "TYPE");
- cmsg_quirk(CM_ASL_PANELPOWER, "PANELPOWER");
- cmsg_quirk(CM_ASL_TPD, "TPD");
-}
+ eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!eeepc->led_workqueue)
+ return -ENOMEM;
+ INIT_WORK(&eeepc->tpd_led_work, tpd_led_update);
-static int eeepc_hotk_check(void)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- int result;
+ eeepc->tpd_led.name = "eeepc::touchpad";
+ eeepc->tpd_led.brightness_set = tpd_led_set;
+ eeepc->tpd_led.max_brightness = 1;
- result = acpi_bus_get_status(ehotk->device);
- if (result)
- return result;
- if (ehotk->device->status.present) {
- if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
- &buffer)) {
- pr_err("Hotkey initialization failed\n");
- return -ENODEV;
- } else {
- pr_notice("Hotkey init flags 0x%x\n", ehotk->init_flag);
- }
- /* get control methods supported */
- if (read_acpi_int(ehotk->handle, "CMSG"
- , &ehotk->cm_supported)) {
- pr_err("Get control methods supported failed\n");
- return -ENODEV;
- } else {
- cmsg_quirks();
- pr_info("Get control methods supported: 0x%x\n",
- ehotk->cm_supported);
- }
- } else {
- pr_err("Hotkey device not present, aborting\n");
- return -EINVAL;
+ rv = led_classdev_register(&eeepc->platform_device->dev,
+ &eeepc->tpd_led);
+ if (rv) {
+ destroy_workqueue(eeepc->led_workqueue);
+ return rv;
}
+
return 0;
}
-static int notify_brn(void)
+static void eeepc_led_exit(struct eeepc_laptop *eeepc)
{
- /* returns the *previous* brightness, or -1 */
- struct backlight_device *bd = eeepc_backlight_device;
- if (bd) {
- int old = bd->props.brightness;
- backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
- return old;
- }
- return -1;
+ if (eeepc->tpd_led.dev)
+ led_classdev_unregister(&eeepc->tpd_led);
+ if (eeepc->led_workqueue)
+ destroy_workqueue(eeepc->led_workqueue);
}
-static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
- u8 *value)
-{
- int val = get_acpi(CM_ASL_WLAN);
-
- if (val == 1 || val == 0)
- *value = val;
- else
- return -EINVAL;
- return 0;
+/*
+ * PCI hotplug (for wlan rfkill)
+ */
+static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc)
+{
+ if (get_acpi(eeepc, CM_ASL_WLAN) == 1)
+ return false;
+ return true;
}
-static void eeepc_rfkill_hotplug(void)
+static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
{
struct pci_dev *dev;
struct pci_bus *bus;
- bool blocked = eeepc_wlan_rfkill_blocked();
+ bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
- if (ehotk->wlan_rfkill)
- rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
+ if (eeepc->wlan_rfkill)
+ rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
- mutex_lock(&ehotk->hotplug_lock);
+ mutex_lock(&eeepc->hotplug_lock);
- if (ehotk->hotplug_slot) {
+ if (eeepc->hotplug_slot) {
bus = pci_find_bus(0, 1);
if (!bus) {
pr_warning("Unable to find PCI bus 1?\n");
@@ -685,69 +614,23 @@ static void eeepc_rfkill_hotplug(void)
}
out_unlock:
- mutex_unlock(&ehotk->hotplug_lock);
+ mutex_unlock(&eeepc->hotplug_lock);
}
static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
{
+ struct eeepc_laptop *eeepc = data;
+
if (event != ACPI_NOTIFY_BUS_CHECK)
return;
- eeepc_rfkill_hotplug();
-}
-
-static void eeepc_hotk_notify(struct acpi_device *device, u32 event)
-{
- static struct key_entry *key;
- u16 count;
- int brn = -ENODEV;
-
- if (!ehotk)
- return;
- if (event > ACPI_MAX_SYS_NOTIFY)
- return;
- if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
- brn = notify_brn();
- count = ehotk->event_count[event % 128]++;
- acpi_bus_generate_proc_event(ehotk->device, event, count);
- acpi_bus_generate_netlink_event(ehotk->device->pnp.device_class,
- dev_name(&ehotk->device->dev), event,
- count);
- if (ehotk->inputdev) {
- if (brn != -ENODEV) {
- /* brightness-change events need special
- * handling for conversion to key events
- */
- if (brn < 0)
- brn = event;
- else
- brn += NOTIFY_BRN_MIN;
- if (event < brn)
- event = NOTIFY_BRN_MIN; /* brightness down */
- else if (event > brn)
- event = NOTIFY_BRN_MIN + 2; /* ... up */
- else
- event = NOTIFY_BRN_MIN + 1; /* ... unchanged */
- }
- key = eepc_get_entry_by_scancode(event);
- if (key) {
- switch (key->type) {
- case KE_KEY:
- input_report_key(ehotk->inputdev, key->keycode,
- 1);
- input_sync(ehotk->inputdev);
- input_report_key(ehotk->inputdev, key->keycode,
- 0);
- input_sync(ehotk->inputdev);
- break;
- }
- }
- }
+ eeepc_rfkill_hotplug(eeepc);
}
-static int eeepc_register_rfkill_notifier(char *node)
+static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
+ char *node)
{
- acpi_status status = AE_OK;
+ acpi_status status;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
@@ -756,7 +639,7 @@ static int eeepc_register_rfkill_notifier(char *node)
status = acpi_install_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify,
- NULL);
+ eeepc);
if (ACPI_FAILURE(status))
pr_warning("Failed to register notify on %s\n", node);
} else
@@ -765,7 +648,8 @@ static int eeepc_register_rfkill_notifier(char *node)
return 0;
}
-static void eeepc_unregister_rfkill_notifier(char *node)
+static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
+ char *node)
{
acpi_status status = AE_OK;
acpi_handle handle;
@@ -782,13 +666,33 @@ static void eeepc_unregister_rfkill_notifier(char *node)
}
}
+static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
+ u8 *value)
+{
+ struct eeepc_laptop *eeepc = hotplug_slot->private;
+ int val = get_acpi(eeepc, CM_ASL_WLAN);
+
+ if (val == 1 || val == 0)
+ *value = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
{
kfree(hotplug_slot->info);
kfree(hotplug_slot);
}
-static int eeepc_setup_pci_hotplug(void)
+static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
+ .owner = THIS_MODULE,
+ .get_adapter_status = eeepc_get_adapter_status,
+ .get_power_status = eeepc_get_adapter_status,
+};
+
+static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
{
int ret = -ENOMEM;
struct pci_bus *bus = pci_find_bus(0, 1);
@@ -798,22 +702,22 @@ static int eeepc_setup_pci_hotplug(void)
return -ENODEV;
}
- ehotk->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!ehotk->hotplug_slot)
+ eeepc->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
+ if (!eeepc->hotplug_slot)
goto error_slot;
- ehotk->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
+ eeepc->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
GFP_KERNEL);
- if (!ehotk->hotplug_slot->info)
+ if (!eeepc->hotplug_slot->info)
goto error_info;
- ehotk->hotplug_slot->private = ehotk;
- ehotk->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
- ehotk->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
- eeepc_get_adapter_status(ehotk->hotplug_slot,
- &ehotk->hotplug_slot->info->adapter_status);
+ eeepc->hotplug_slot->private = eeepc;
+ eeepc->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
+ eeepc->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
+ eeepc_get_adapter_status(eeepc->hotplug_slot,
+ &eeepc->hotplug_slot->info->adapter_status);
- ret = pci_hp_register(ehotk->hotplug_slot, bus, 0, "eeepc-wifi");
+ ret = pci_hp_register(eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
@@ -822,17 +726,159 @@ static int eeepc_setup_pci_hotplug(void)
return 0;
error_register:
- kfree(ehotk->hotplug_slot->info);
+ kfree(eeepc->hotplug_slot->info);
error_info:
- kfree(ehotk->hotplug_slot);
- ehotk->hotplug_slot = NULL;
+ kfree(eeepc->hotplug_slot);
+ eeepc->hotplug_slot = NULL;
error_slot:
return ret;
}
+/*
+ * Rfkill devices
+ */
+static int eeepc_rfkill_set(void *data, bool blocked)
+{
+ acpi_handle handle = data;
+
+ return write_acpi_int(handle, NULL, !blocked);
+}
+
+static const struct rfkill_ops eeepc_rfkill_ops = {
+ .set_block = eeepc_rfkill_set,
+};
+
+static int eeepc_new_rfkill(struct eeepc_laptop *eeepc,
+ struct rfkill **rfkill,
+ const char *name,
+ enum rfkill_type type, int cm)
+{
+ acpi_handle handle;
+ int result;
+
+ result = acpi_setter_handle(eeepc, cm, &handle);
+ if (result < 0)
+ return result;
+
+ *rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type,
+ &eeepc_rfkill_ops, handle);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ rfkill_init_sw_state(*rfkill, get_acpi(eeepc, cm) != 1);
+ result = rfkill_register(*rfkill);
+ if (result) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return result;
+ }
+ return 0;
+}
+
+static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
+{
+ eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
+ eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
+ eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
+ if (eeepc->wlan_rfkill) {
+ rfkill_unregister(eeepc->wlan_rfkill);
+ rfkill_destroy(eeepc->wlan_rfkill);
+ eeepc->wlan_rfkill = NULL;
+ }
+ /*
+ * Refresh pci hotplug in case the rfkill state was changed after
+ * eeepc_unregister_rfkill_notifier()
+ */
+ eeepc_rfkill_hotplug(eeepc);
+ if (eeepc->hotplug_slot)
+ pci_hp_deregister(eeepc->hotplug_slot);
+
+ if (eeepc->bluetooth_rfkill) {
+ rfkill_unregister(eeepc->bluetooth_rfkill);
+ rfkill_destroy(eeepc->bluetooth_rfkill);
+ eeepc->bluetooth_rfkill = NULL;
+ }
+ if (eeepc->wwan3g_rfkill) {
+ rfkill_unregister(eeepc->wwan3g_rfkill);
+ rfkill_destroy(eeepc->wwan3g_rfkill);
+ eeepc->wwan3g_rfkill = NULL;
+ }
+ if (eeepc->wimax_rfkill) {
+ rfkill_unregister(eeepc->wimax_rfkill);
+ rfkill_destroy(eeepc->wimax_rfkill);
+ eeepc->wimax_rfkill = NULL;
+ }
+}
+
+static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
+{
+ int result = 0;
+
+ mutex_init(&eeepc->hotplug_lock);
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill,
+ "eeepc-wlan", RFKILL_TYPE_WLAN,
+ CM_ASL_WLAN);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill,
+ "eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH,
+ CM_ASL_BLUETOOTH);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill,
+ "eeepc-wwan3g", RFKILL_TYPE_WWAN,
+ CM_ASL_3G);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wimax_rfkill,
+ "eeepc-wimax", RFKILL_TYPE_WIMAX,
+ CM_ASL_WIMAX);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ if (eeepc->hotplug_disabled)
+ return 0;
+
+ result = eeepc_setup_pci_hotplug(eeepc);
+ /*
+ * If we get -EBUSY then something else is handling the PCI hotplug -
+ * don't fail in this case
+ */
+ if (result == -EBUSY)
+ result = 0;
+
+ eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
+ eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
+ eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
+ /*
+ * Refresh pci hotplug in case the rfkill state was changed during
+ * setup.
+ */
+ eeepc_rfkill_hotplug(eeepc);
+
+exit:
+ if (result && result != -ENODEV)
+ eeepc_rfkill_exit(eeepc);
+ return result;
+}
+
+/*
+ * Platform driver - hibernate/resume callbacks
+ */
static int eeepc_hotk_thaw(struct device *device)
{
- if (ehotk->wlan_rfkill) {
+ struct eeepc_laptop *eeepc = dev_get_drvdata(device);
+
+ if (eeepc->wlan_rfkill) {
bool wlan;
/*
@@ -840,8 +886,8 @@ static int eeepc_hotk_thaw(struct device *device)
* during suspend. Normally it restores it on resume, but
* we should kick it ourselves in case hibernation is aborted.
*/
- wlan = get_acpi(CM_ASL_WLAN);
- set_acpi(CM_ASL_WLAN, wlan);
+ wlan = get_acpi(eeepc, CM_ASL_WLAN);
+ set_acpi(eeepc, CM_ASL_WLAN, wlan);
}
return 0;
@@ -849,70 +895,96 @@ static int eeepc_hotk_thaw(struct device *device)
static int eeepc_hotk_restore(struct device *device)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(device);
+
/* Refresh both wlan rfkill state and pci hotplug */
- if (ehotk->wlan_rfkill)
- eeepc_rfkill_hotplug();
-
- if (ehotk->bluetooth_rfkill)
- rfkill_set_sw_state(ehotk->bluetooth_rfkill,
- get_acpi(CM_ASL_BLUETOOTH) != 1);
- if (ehotk->wwan3g_rfkill)
- rfkill_set_sw_state(ehotk->wwan3g_rfkill,
- get_acpi(CM_ASL_3G) != 1);
- if (ehotk->wimax_rfkill)
- rfkill_set_sw_state(ehotk->wimax_rfkill,
- get_acpi(CM_ASL_WIMAX) != 1);
+ if (eeepc->wlan_rfkill)
+ eeepc_rfkill_hotplug(eeepc);
+
+ if (eeepc->bluetooth_rfkill)
+ rfkill_set_sw_state(eeepc->bluetooth_rfkill,
+ get_acpi(eeepc, CM_ASL_BLUETOOTH) != 1);
+ if (eeepc->wwan3g_rfkill)
+ rfkill_set_sw_state(eeepc->wwan3g_rfkill,
+ get_acpi(eeepc, CM_ASL_3G) != 1);
+ if (eeepc->wimax_rfkill)
+ rfkill_set_sw_state(eeepc->wimax_rfkill,
+ get_acpi(eeepc, CM_ASL_WIMAX) != 1);
return 0;
}
+static const struct dev_pm_ops eeepc_pm_ops = {
+ .thaw = eeepc_hotk_thaw,
+ .restore = eeepc_hotk_restore,
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = EEEPC_LAPTOP_FILE,
+ .owner = THIS_MODULE,
+ .pm = &eeepc_pm_ops,
+ }
+};
+
/*
- * Hwmon
+ * Hwmon device
*/
+
+#define EEEPC_EC_SC00 0x61
+#define EEEPC_EC_FAN_PWM (EEEPC_EC_SC00 + 2) /* Fan PWM duty cycle (%) */
+#define EEEPC_EC_FAN_HRPM (EEEPC_EC_SC00 + 5) /* High byte, fan speed (RPM) */
+#define EEEPC_EC_FAN_LRPM (EEEPC_EC_SC00 + 6) /* Low byte, fan speed (RPM) */
+
+#define EEEPC_EC_SFB0 0xD0
+#define EEEPC_EC_FAN_CTRL (EEEPC_EC_SFB0 + 3) /* Byte containing SF25 */
+
static int eeepc_get_fan_pwm(void)
{
- int value = 0;
+ u8 value = 0;
- read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value);
- value = value * 255 / 100;
- return (value);
+ ec_read(EEEPC_EC_FAN_PWM, &value);
+ return value * 255 / 100;
}
static void eeepc_set_fan_pwm(int value)
{
value = SENSORS_LIMIT(value, 0, 255);
value = value * 100 / 255;
- ec_write(EEEPC_EC_SC02, value);
+ ec_write(EEEPC_EC_FAN_PWM, value);
}
static int eeepc_get_fan_rpm(void)
{
- int high = 0;
- int low = 0;
+ u8 high = 0;
+ u8 low = 0;
- read_acpi_int(NULL, EEEPC_EC_FAN_HRPM, &high);
- read_acpi_int(NULL, EEEPC_EC_FAN_LRPM, &low);
- return (high << 8 | low);
+ ec_read(EEEPC_EC_FAN_HRPM, &high);
+ ec_read(EEEPC_EC_FAN_LRPM, &low);
+ return high << 8 | low;
}
static int eeepc_get_fan_ctrl(void)
{
- int value = 0;
+ u8 value = 0;
- read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
- return ((value & 0x02 ? 1 : 0));
+ ec_read(EEEPC_EC_FAN_CTRL, &value);
+ if (value & 0x02)
+ return 1; /* manual */
+ else
+ return 2; /* automatic */
}
static void eeepc_set_fan_ctrl(int manual)
{
- int value = 0;
+ u8 value = 0;
- read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
- if (manual)
+ ec_read(EEEPC_EC_FAN_CTRL, &value);
+ if (manual == 1)
value |= 0x02;
else
value &= ~0x02;
- ec_write(EEEPC_EC_SFB3, value);
+ ec_write(EEEPC_EC_FAN_CTRL, value);
}
static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
@@ -970,348 +1042,461 @@ static struct attribute_group hwmon_attribute_group = {
.attrs = hwmon_attributes
};
-/*
- * exit/init
- */
-static void eeepc_backlight_exit(void)
+static void eeepc_hwmon_exit(struct eeepc_laptop *eeepc)
{
- if (eeepc_backlight_device)
- backlight_device_unregister(eeepc_backlight_device);
- eeepc_backlight_device = NULL;
+ struct device *hwmon;
+
+ hwmon = eeepc->hwmon_device;
+ if (!hwmon)
+ return;
+ sysfs_remove_group(&hwmon->kobj,
+ &hwmon_attribute_group);
+ hwmon_device_unregister(hwmon);
+ eeepc->hwmon_device = NULL;
}
-static void eeepc_rfkill_exit(void)
+static int eeepc_hwmon_init(struct eeepc_laptop *eeepc)
{
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P5");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
- if (ehotk->wlan_rfkill) {
- rfkill_unregister(ehotk->wlan_rfkill);
- rfkill_destroy(ehotk->wlan_rfkill);
- ehotk->wlan_rfkill = NULL;
- }
- /*
- * Refresh pci hotplug in case the rfkill state was changed after
- * eeepc_unregister_rfkill_notifier()
- */
- eeepc_rfkill_hotplug();
- if (ehotk->hotplug_slot)
- pci_hp_deregister(ehotk->hotplug_slot);
-
- if (ehotk->bluetooth_rfkill) {
- rfkill_unregister(ehotk->bluetooth_rfkill);
- rfkill_destroy(ehotk->bluetooth_rfkill);
- ehotk->bluetooth_rfkill = NULL;
- }
- if (ehotk->wwan3g_rfkill) {
- rfkill_unregister(ehotk->wwan3g_rfkill);
- rfkill_destroy(ehotk->wwan3g_rfkill);
- ehotk->wwan3g_rfkill = NULL;
- }
- if (ehotk->wimax_rfkill) {
- rfkill_unregister(ehotk->wimax_rfkill);
- rfkill_destroy(ehotk->wimax_rfkill);
- ehotk->wimax_rfkill = NULL;
+ struct device *hwmon;
+ int result;
+
+ hwmon = hwmon_device_register(&eeepc->platform_device->dev);
+ if (IS_ERR(hwmon)) {
+ pr_err("Could not register eeepc hwmon device\n");
+ eeepc->hwmon_device = NULL;
+ return PTR_ERR(hwmon);
}
+ eeepc->hwmon_device = hwmon;
+ result = sysfs_create_group(&hwmon->kobj,
+ &hwmon_attribute_group);
+ if (result)
+ eeepc_hwmon_exit(eeepc);
+ return result;
}
-static void eeepc_input_exit(void)
+/*
+ * Backlight device
+ */
+static int read_brightness(struct backlight_device *bd)
{
- if (ehotk->inputdev)
- input_unregister_device(ehotk->inputdev);
+ struct eeepc_laptop *eeepc = bl_get_data(bd);
+
+ return get_acpi(eeepc, CM_ASL_PANELBRIGHT);
}
-static void eeepc_hwmon_exit(void)
+static int set_brightness(struct backlight_device *bd, int value)
{
- struct device *hwmon;
+ struct eeepc_laptop *eeepc = bl_get_data(bd);
- hwmon = eeepc_hwmon_device;
- if (!hwmon)
- return ;
- sysfs_remove_group(&hwmon->kobj,
- &hwmon_attribute_group);
- hwmon_device_unregister(hwmon);
- eeepc_hwmon_device = NULL;
+ return set_acpi(eeepc, CM_ASL_PANELBRIGHT, value);
}
-static int eeepc_new_rfkill(struct rfkill **rfkill,
- const char *name, struct device *dev,
- enum rfkill_type type, int cm)
+static int update_bl_status(struct backlight_device *bd)
{
- int result;
+ return set_brightness(bd, bd->props.brightness);
+}
- result = get_acpi(cm);
- if (result < 0)
- return result;
+static struct backlight_ops eeepcbl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+};
- *rfkill = rfkill_alloc(name, dev, type,
- &eeepc_rfkill_ops, (void *)(unsigned long)cm);
+static int eeepc_backlight_notify(struct eeepc_laptop *eeepc)
+{
+ struct backlight_device *bd = eeepc->backlight_device;
+ int old = bd->props.brightness;
- if (!*rfkill)
- return -EINVAL;
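+	/* Make the backlight core re-read the brightness from the firmware;
+	 * returning the old value lets the caller tell which way it changed. */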
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
- rfkill_init_sw_state(*rfkill, get_acpi(cm) != 1);
- result = rfkill_register(*rfkill);
- if (result) {
- rfkill_destroy(*rfkill);
- *rfkill = NULL;
- return result;
+ return old;
+}
+
+static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
+{
+ struct backlight_device *bd;
+
+ bd = backlight_device_register(EEEPC_LAPTOP_FILE,
+ &eeepc->platform_device->dev,
+ eeepc, &eeepcbl_ops);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register eeepc backlight device\n");
+ eeepc->backlight_device = NULL;
+ return PTR_ERR(bd);
}
+ eeepc->backlight_device = bd;
+ bd->props.max_brightness = 15;
+ bd->props.brightness = read_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
return 0;
}
+static void eeepc_backlight_exit(struct eeepc_laptop *eeepc)
+{
+ if (eeepc->backlight_device)
+ backlight_device_unregister(eeepc->backlight_device);
+ eeepc->backlight_device = NULL;
+}
+
-static int eeepc_rfkill_init(struct device *dev)
+/*
+ * Input device (i.e. hotkeys)
+ */
+static int eeepc_input_init(struct eeepc_laptop *eeepc)
{
- int result = 0;
+ struct input_dev *input;
+ int error;
+
+ input = input_allocate_device();
+ if (!input) {
+ pr_info("Unable to allocate input device\n");
+ return -ENOMEM;
+ }
- mutex_init(&ehotk->hotplug_lock);
+ input->name = "Asus EeePC extra buttons";
+ input->phys = EEEPC_LAPTOP_FILE "/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &eeepc->platform_device->dev;
- result = eeepc_new_rfkill(&ehotk->wlan_rfkill,
- "eeepc-wlan", dev,
- RFKILL_TYPE_WLAN, CM_ASL_WLAN);
+ error = sparse_keymap_setup(input, eeepc_keymap, NULL);
+ if (error) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_free_dev;
+ }
- if (result && result != -ENODEV)
- goto exit;
+ error = input_register_device(input);
+ if (error) {
+ pr_err("Unable to register input device\n");
+ goto err_free_keymap;
+ }
- result = eeepc_new_rfkill(&ehotk->bluetooth_rfkill,
- "eeepc-bluetooth", dev,
- RFKILL_TYPE_BLUETOOTH, CM_ASL_BLUETOOTH);
+ eeepc->inputdev = input;
+ return 0;
- if (result && result != -ENODEV)
- goto exit;
+ err_free_keymap:
+ sparse_keymap_free(input);
+ err_free_dev:
+ input_free_device(input);
+ return error;
+}
- result = eeepc_new_rfkill(&ehotk->wwan3g_rfkill,
- "eeepc-wwan3g", dev,
- RFKILL_TYPE_WWAN, CM_ASL_3G);
+static void eeepc_input_exit(struct eeepc_laptop *eeepc)
+{
+ if (eeepc->inputdev) {
+ input_unregister_device(eeepc->inputdev);
+ kfree(eeepc->keymap);
+ }
+}
- if (result && result != -ENODEV)
- goto exit;
+/*
+ * ACPI driver
+ */
+static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct eeepc_laptop *eeepc = acpi_driver_data(device);
+ u16 count;
- result = eeepc_new_rfkill(&ehotk->wimax_rfkill,
- "eeepc-wimax", dev,
- RFKILL_TYPE_WIMAX, CM_ASL_WIMAX);
+ if (event > ACPI_MAX_SYS_NOTIFY)
+ return;
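+	/* Keep a separate running count per event code; it is passed to
+	 * userspace along with the event. */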
+ count = eeepc->event_count[event % 128]++;
+ acpi_bus_generate_proc_event(device, event, count);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event,
+ count);
- if (result && result != -ENODEV)
- goto exit;
+ /* Brightness events are special */
+ if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) {
+
+ /* Ignore them completely if the acpi video driver is used */
+ if (eeepc->backlight_device != NULL) {
+ int old_brightness, new_brightness;
+
+ /* Update the backlight device. */
+ old_brightness = eeepc_backlight_notify(eeepc);
+
+ /* Convert event to keypress (obsolescent hack) */
+ new_brightness = event - NOTIFY_BRN_MIN;
+
+ if (new_brightness < old_brightness) {
+ event = NOTIFY_BRN_MIN; /* brightness down */
+ } else if (new_brightness > old_brightness) {
+ event = NOTIFY_BRN_MAX; /* brightness up */
+ } else {
+ /*
+ * no change in brightness - already at min/max,
+ * event will be desired value (or else ignored)
+ */
+ }
+ sparse_keymap_report_event(eeepc->inputdev, event,
+ 1, true);
+ }
+ } else {
+ /* Everything else is a bona fide keypress event */
+ sparse_keymap_report_event(eeepc->inputdev, event, 1, true);
+ }
+}
+
+static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
+{
+ const char *model;
+
+ model = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (!model)
+ return;
- result = eeepc_setup_pci_hotplug();
/*
- * If we get -EBUSY then something else is handling the PCI hotplug -
- * don't fail in this case
+ * Blacklist for setting cpufv (cpu speed).
+ *
+ * EeePC 4G ("701") implements CFVS, but it is not supported
+ * by the pre-installed OS, and the original option to change it
+ * in the BIOS setup screen was removed in later versions.
+ *
+ * Judging by the lack of "Super Hybrid Engine" on Asus product pages,
+ * this applies to all "701" models (4G/4G Surf/2G Surf).
+ *
+ * So Asus made a deliberate decision not to support it on this model.
+ * We have several reports that using it can cause the system to hang.
+ *
+ * The hang has also been reported on a "702" (Model name "8G"?).
+ *
+ * We avoid dmi_check_system() / dmi_match(), because they use
+ * substring matching. We don't want to affect the "701SD"
+ * and "701SDX" models, because they do support S.H.E.
*/
- if (result == -EBUSY)
- result = 0;
+ if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
+ eeepc->cpufv_disabled = true;
+ pr_info("model %s does not officially support setting cpu "
+ "speed\n", model);
+ pr_info("cpufv disabled to avoid instability\n");
+ }
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P5");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
/*
- * Refresh pci hotplug in case the rfkill state was changed during
- * setup.
+ * Blacklist for wlan hotplug
+ *
+ * The Eee PC 1005HA doesn't work like other models and doesn't need the
+ * hotplug code. In fact, the current hotplug code seems to unplug another
+ * device...
*/
- eeepc_rfkill_hotplug();
-
-exit:
- if (result && result != -ENODEV)
- eeepc_rfkill_exit();
- return result;
+ if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0) {
+ eeepc->hotplug_disabled = true;
+ pr_info("wlan hotplug disabled\n");
+ }
}
-static int eeepc_backlight_init(struct device *dev)
+static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name)
{
- struct backlight_device *bd;
+ int dummy;
- bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
- NULL, &eeepcbl_ops);
- if (IS_ERR(bd)) {
- pr_err("Could not register eeepc backlight device\n");
- eeepc_backlight_device = NULL;
- return PTR_ERR(bd);
+ /* Some BIOSes do not report cm although it is available.
+ Check if cm_getv[cm] works and, if yes, assume cm should be set. */
+ if (!(eeepc->cm_supported & (1 << cm))
+ && !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) {
+ pr_info("%s (%x) not reported by BIOS,"
+ " enabling anyway\n", name, 1 << cm);
+ eeepc->cm_supported |= 1 << cm;
}
- eeepc_backlight_device = bd;
- bd->props.max_brightness = 15;
- bd->props.brightness = read_brightness(NULL);
- bd->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(bd);
- return 0;
}
-static int eeepc_hwmon_init(struct device *dev)
+static void cmsg_quirks(struct eeepc_laptop *eeepc)
{
- struct device *hwmon;
- int result;
-
- hwmon = hwmon_device_register(dev);
- if (IS_ERR(hwmon)) {
- pr_err("Could not register eeepc hwmon device\n");
- eeepc_hwmon_device = NULL;
- return PTR_ERR(hwmon);
- }
- eeepc_hwmon_device = hwmon;
- result = sysfs_create_group(&hwmon->kobj,
- &hwmon_attribute_group);
- if (result)
- eeepc_hwmon_exit();
- return result;
+ cmsg_quirk(eeepc, CM_ASL_LID, "LID");
+ cmsg_quirk(eeepc, CM_ASL_TYPE, "TYPE");
+ cmsg_quirk(eeepc, CM_ASL_PANELPOWER, "PANELPOWER");
+ cmsg_quirk(eeepc, CM_ASL_TPD, "TPD");
}
-static int eeepc_input_init(struct device *dev)
+static int eeepc_acpi_init(struct eeepc_laptop *eeepc,
+ struct acpi_device *device)
{
- const struct key_entry *key;
+ unsigned int init_flags;
int result;
- ehotk->inputdev = input_allocate_device();
- if (!ehotk->inputdev) {
- pr_info("Unable to allocate input device\n");
- return -ENOMEM;
+ result = acpi_bus_get_status(device);
+ if (result)
+ return result;
+ if (!device->status.present) {
+ pr_err("Hotkey device not present, aborting\n");
+ return -ENODEV;
}
- ehotk->inputdev->name = "Asus EeePC extra buttons";
- ehotk->inputdev->dev.parent = dev;
- ehotk->inputdev->phys = EEEPC_HOTK_FILE "/input0";
- ehotk->inputdev->id.bustype = BUS_HOST;
- ehotk->inputdev->getkeycode = eeepc_getkeycode;
- ehotk->inputdev->setkeycode = eeepc_setkeycode;
-
- for (key = eeepc_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_KEY:
- set_bit(EV_KEY, ehotk->inputdev->evbit);
- set_bit(key->keycode, ehotk->inputdev->keybit);
- break;
- }
+
+ init_flags = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
+ pr_notice("Hotkey init flags 0x%x\n", init_flags);
+
+ if (write_acpi_int(eeepc->handle, "INIT", init_flags)) {
+ pr_err("Hotkey initialization failed\n");
+ return -ENODEV;
}
- result = input_register_device(ehotk->inputdev);
- if (result) {
- pr_info("Unable to register input device\n");
- input_free_device(ehotk->inputdev);
- return result;
+
+ /* get control methods supported */
+ if (read_acpi_int(eeepc->handle, "CMSG", &eeepc->cm_supported)) {
+ pr_err("Get control methods supported failed\n");
+ return -ENODEV;
}
+ cmsg_quirks(eeepc);
+ pr_info("Get control methods supported: 0x%x\n", eeepc->cm_supported);
+
return 0;
}
-static int __devinit eeepc_hotk_add(struct acpi_device *device)
+static void __devinit eeepc_enable_camera(struct eeepc_laptop *eeepc)
{
- struct device *dev;
+ /*
+ * If the following call to set_acpi() fails, it's because there's no
+ * camera, so we can ignore the error.
+ */
+ if (get_acpi(eeepc, CM_ASL_CAMERA) == 0)
+ set_acpi(eeepc, CM_ASL_CAMERA, 1);
+}
+
+static bool eeepc_device_present;
+
+static int __devinit eeepc_acpi_add(struct acpi_device *device)
+{
+ struct eeepc_laptop *eeepc;
int result;
- if (!device)
- return -EINVAL;
- pr_notice(EEEPC_HOTK_NAME "\n");
- ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
- if (!ehotk)
+ pr_notice(EEEPC_LAPTOP_NAME "\n");
+ eeepc = kzalloc(sizeof(struct eeepc_laptop), GFP_KERNEL);
+ if (!eeepc)
return -ENOMEM;
- ehotk->init_flag = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
- ehotk->handle = device->handle;
- strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
- strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
- device->driver_data = ehotk;
- ehotk->device = device;
-
- result = eeepc_hotk_check();
- if (result)
- goto fail_platform_driver;
- eeepc_enable_camera();
+ eeepc->handle = device->handle;
+ strcpy(acpi_device_name(device), EEEPC_ACPI_DEVICE_NAME);
+ strcpy(acpi_device_class(device), EEEPC_ACPI_CLASS);
+ device->driver_data = eeepc;
- /* Register platform stuff */
- result = platform_driver_register(&platform_driver);
- if (result)
- goto fail_platform_driver;
- platform_device = platform_device_alloc(EEEPC_HOTK_FILE, -1);
- if (!platform_device) {
- result = -ENOMEM;
- goto fail_platform_device1;
- }
- result = platform_device_add(platform_device);
- if (result)
- goto fail_platform_device2;
- result = sysfs_create_group(&platform_device->dev.kobj,
- &platform_attribute_group);
+ eeepc->hotplug_disabled = hotplug_disabled;
+
+ eeepc_dmi_check(eeepc);
+
+ result = eeepc_acpi_init(eeepc, device);
if (result)
- goto fail_sysfs;
+ goto fail_platform;
+ eeepc_enable_camera(eeepc);
- dev = &platform_device->dev;
+ /*
+ * Register the platform device first. It is used as a parent for the
+ * sub-devices below.
+ *
+ * Note that if there are multiple instances of this ACPI device it
+ * will bail out, because the platform device is registered with a
+ * fixed name. Of course it doesn't make sense to have more than one,
+ * and machine-specific scripts find the fixed name convenient. But
+ * it's also good for us to exclude multiple instances because both
+ * our hwmon and our wlan rfkill subdevice use global ACPI objects
+ * (the EC and the wlan PCI slot respectively).
+ */
+ result = eeepc_platform_init(eeepc);
+ if (result)
+ goto fail_platform;
if (!acpi_video_backlight_support()) {
- result = eeepc_backlight_init(dev);
+ result = eeepc_backlight_init(eeepc);
if (result)
goto fail_backlight;
} else
- pr_info("Backlight controlled by ACPI video "
- "driver\n");
+ pr_info("Backlight controlled by ACPI video driver\n");
- result = eeepc_input_init(dev);
+ result = eeepc_input_init(eeepc);
if (result)
goto fail_input;
- result = eeepc_hwmon_init(dev);
+ result = eeepc_hwmon_init(eeepc);
if (result)
goto fail_hwmon;
- result = eeepc_rfkill_init(dev);
+ result = eeepc_led_init(eeepc);
+ if (result)
+ goto fail_led;
+
+ result = eeepc_rfkill_init(eeepc);
if (result)
goto fail_rfkill;
+ eeepc_device_present = true;
return 0;
fail_rfkill:
- eeepc_hwmon_exit();
+ eeepc_led_exit(eeepc);
+fail_led:
+ eeepc_hwmon_exit(eeepc);
fail_hwmon:
- eeepc_input_exit();
+ eeepc_input_exit(eeepc);
fail_input:
- eeepc_backlight_exit();
+ eeepc_backlight_exit(eeepc);
fail_backlight:
- sysfs_remove_group(&platform_device->dev.kobj,
- &platform_attribute_group);
-fail_sysfs:
- platform_device_del(platform_device);
-fail_platform_device2:
- platform_device_put(platform_device);
-fail_platform_device1:
- platform_driver_unregister(&platform_driver);
-fail_platform_driver:
- kfree(ehotk);
+ eeepc_platform_exit(eeepc);
+fail_platform:
+ kfree(eeepc);
return result;
}
-static int eeepc_hotk_remove(struct acpi_device *device, int type)
+static int eeepc_acpi_remove(struct acpi_device *device, int type)
{
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
+ struct eeepc_laptop *eeepc = acpi_driver_data(device);
- eeepc_backlight_exit();
- eeepc_rfkill_exit();
- eeepc_input_exit();
- eeepc_hwmon_exit();
- sysfs_remove_group(&platform_device->dev.kobj,
- &platform_attribute_group);
- platform_device_unregister(platform_device);
- platform_driver_unregister(&platform_driver);
+ eeepc_backlight_exit(eeepc);
+ eeepc_rfkill_exit(eeepc);
+ eeepc_input_exit(eeepc);
+ eeepc_hwmon_exit(eeepc);
+ eeepc_led_exit(eeepc);
+ eeepc_platform_exit(eeepc);
- kfree(ehotk);
+ kfree(eeepc);
return 0;
}
+
+static const struct acpi_device_id eeepc_device_ids[] = {
+ {EEEPC_ACPI_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
+
+static struct acpi_driver eeepc_acpi_driver = {
+ .name = EEEPC_LAPTOP_NAME,
+ .class = EEEPC_ACPI_CLASS,
+ .owner = THIS_MODULE,
+ .ids = eeepc_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = eeepc_acpi_add,
+ .remove = eeepc_acpi_remove,
+ .notify = eeepc_acpi_notify,
+ },
+};
+
+
static int __init eeepc_laptop_init(void)
{
int result;
- if (acpi_disabled)
- return -ENODEV;
- result = acpi_bus_register_driver(&eeepc_hotk_driver);
+ result = platform_driver_register(&platform_driver);
if (result < 0)
return result;
- if (!ehotk) {
- acpi_bus_unregister_driver(&eeepc_hotk_driver);
- return -ENODEV;
+
+ result = acpi_bus_register_driver(&eeepc_acpi_driver);
+ if (result < 0)
+ goto fail_acpi_driver;
+
+ if (!eeepc_device_present) {
+ result = -ENODEV;
+ goto fail_no_device;
}
+
return 0;
+
+fail_no_device:
+ acpi_bus_unregister_driver(&eeepc_acpi_driver);
+fail_acpi_driver:
+ platform_driver_unregister(&platform_driver);
+ return result;
}
static void __exit eeepc_laptop_exit(void)
{
- acpi_bus_unregister_driver(&eeepc_hotk_driver);
+ acpi_bus_unregister_driver(&eeepc_acpi_driver);
+ platform_driver_unregister(&platform_driver);
}
module_init(eeepc_laptop_init);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index bcd4ba8..5f3320d 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -164,7 +164,7 @@ struct fujitsu_hotkey_t {
struct input_dev *input;
char phys[32];
struct platform_device *pf_device;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
int rfkill_supported;
int rfkill_state;
@@ -376,8 +376,8 @@ static int get_lcd_level(void)
status =
acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);
- if (status < 0)
- return status;
+ if (ACPI_FAILURE(status))
+ return 0;
fujitsu->brightness_level = state & 0x0fffffff;
@@ -398,8 +398,8 @@ static int get_max_brightness(void)
status =
acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state);
- if (status < 0)
- return status;
+ if (ACPI_FAILURE(status))
+ return -1;
fujitsu->max_brightness = state;
@@ -824,12 +824,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
/* kfifo */
spin_lock_init(&fujitsu_hotkey->fifo_lock);
- fujitsu_hotkey->fifo =
- kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL,
- &fujitsu_hotkey->fifo_lock);
- if (IS_ERR(fujitsu_hotkey->fifo)) {
+ error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
+ GFP_KERNEL);
+ if (error) {
printk(KERN_ERR "kfifo_alloc failed\n");
- error = PTR_ERR(fujitsu_hotkey->fifo);
goto err_stop;
}
@@ -934,7 +932,7 @@ err_unregister_input_dev:
err_free_input_dev:
input_free_device(input);
err_free_fifo:
- kfifo_free(fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_hotkey->fifo);
err_stop:
return result;
}
@@ -956,7 +954,7 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
input_free_device(input);
- kfifo_free(fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_hotkey->fifo);
fujitsu_hotkey->acpi_handle = NULL;
@@ -1008,9 +1006,10 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
vdbg_printk(FUJLAPTOP_DBG_TRACE,
"Push keycode into ringbuffer [%d]\n",
keycode);
- status = kfifo_put(fujitsu_hotkey->fifo,
+ status = kfifo_in_locked(&fujitsu_hotkey->fifo,
(unsigned char *)&keycode,
- sizeof(keycode));
+ sizeof(keycode),
+ &fujitsu_hotkey->fifo_lock);
if (status != sizeof(keycode)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
"Could not push keycode [0x%x]\n",
@@ -1021,11 +1020,12 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
}
} else if (keycode == 0) {
while ((status =
- kfifo_get
- (fujitsu_hotkey->fifo, (unsigned char *)
- &keycode_r,
- sizeof
- (keycode_r))) == sizeof(keycode_r)) {
+ kfifo_out_locked(
+ &fujitsu_hotkey->fifo,
+ (unsigned char *) &keycode_r,
+ sizeof(keycode_r),
+ &fujitsu_hotkey->fifo_lock))
+ == sizeof(keycode_r)) {
input_report_key(input, keycode_r, 0);
input_sync(input);
vdbg_printk(FUJLAPTOP_DBG_TRACE,
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index f00a71c..ad4c414d 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -51,6 +51,12 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_HOTKEY_QUERY 0xc
+enum hp_wmi_radio {
+ HPWMI_WIFI = 0,
+ HPWMI_BLUETOOTH = 1,
+ HPWMI_WWAN = 2,
+};
+
static int __init hp_wmi_bios_setup(struct platform_device *device);
static int __exit hp_wmi_bios_remove(struct platform_device *device);
static int hp_wmi_resume_handler(struct device *device);
@@ -128,10 +134,15 @@ static int hp_wmi_perform_query(int query, int write, int value)
obj = output.pointer;
- if (!obj || obj->type != ACPI_TYPE_BUFFER)
+ if (!obj)
return -EINVAL;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return -EINVAL;
+ }
bios_return = *((struct bios_return *)obj->buffer.pointer);
+ kfree(obj);
if (bios_return.return_code > 0)
return bios_return.return_code * -1;
else
@@ -175,8 +186,8 @@ static int hp_wmi_tablet_state(void)
static int hp_wmi_set_block(void *data, bool blocked)
{
- unsigned long b = (unsigned long) data;
- int query = BIT(b + 8) | ((!blocked) << b);
+ enum hp_wmi_radio r = (enum hp_wmi_radio) data;
+ int query = BIT(r + 8) | ((!blocked) << r);
return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, query);
}
@@ -185,31 +196,23 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = {
.set_block = hp_wmi_set_block,
};
-static bool hp_wmi_wifi_state(void)
-{
- int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
-
- if (wireless & 0x100)
- return false;
- else
- return true;
-}
-
-static bool hp_wmi_bluetooth_state(void)
+static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
{
int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
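+	/* The wireless query packs one status byte per radio; the radio is
+	 * reported as soft-blocked when this bit in its byte is clear. */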
+ int mask = 0x200 << (r * 8);
- if (wireless & 0x10000)
+ if (wireless & mask)
return false;
else
return true;
}
-static bool hp_wmi_wwan_state(void)
+static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
{
int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
+ int mask = 0x800 << (r * 8);
- if (wireless & 0x1000000)
+ if (wireless & mask)
return false;
else
return true;
@@ -334,49 +337,62 @@ static void hp_wmi_notify(u32 value, void *context)
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
static struct key_entry *key;
union acpi_object *obj;
+ int eventcode;
+ acpi_status status;
- wmi_get_event_data(value, &response);
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status);
+ return;
+ }
obj = (union acpi_object *)response.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 8) {
- int eventcode = *((u8 *) obj->buffer.pointer);
- if (eventcode == 0x4)
- eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
- 0);
- key = hp_wmi_get_entry_by_scancode(eventcode);
- if (key) {
- switch (key->type) {
- case KE_KEY:
- input_report_key(hp_wmi_input_dev,
- key->keycode, 1);
- input_sync(hp_wmi_input_dev);
- input_report_key(hp_wmi_input_dev,
- key->keycode, 0);
- input_sync(hp_wmi_input_dev);
- break;
- }
- } else if (eventcode == 0x1) {
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8) {
+ printk(KERN_INFO "HP WMI: Unknown response received\n");
+ kfree(obj);
+ return;
+ }
+
+ eventcode = *((u8 *) obj->buffer.pointer);
+ kfree(obj);
+ if (eventcode == 0x4)
+ eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
+ 0);
+ key = hp_wmi_get_entry_by_scancode(eventcode);
+ if (key) {
+ switch (key->type) {
+ case KE_KEY:
+ input_report_key(hp_wmi_input_dev,
+ key->keycode, 1);
+ input_sync(hp_wmi_input_dev);
+ input_report_key(hp_wmi_input_dev,
+ key->keycode, 0);
input_sync(hp_wmi_input_dev);
- } else if (eventcode == 0x5) {
- if (wifi_rfkill)
- rfkill_set_sw_state(wifi_rfkill,
- hp_wmi_wifi_state());
- if (bluetooth_rfkill)
- rfkill_set_sw_state(bluetooth_rfkill,
- hp_wmi_bluetooth_state());
- if (wwan_rfkill)
- rfkill_set_sw_state(wwan_rfkill,
- hp_wmi_wwan_state());
- } else
- printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
- eventcode);
+ break;
+ }
+ } else if (eventcode == 0x1) {
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
+ } else if (eventcode == 0x5) {
+ if (wifi_rfkill)
+ rfkill_set_states(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI),
+ hp_wmi_get_hw_state(HPWMI_WIFI));
+ if (bluetooth_rfkill)
+ rfkill_set_states(bluetooth_rfkill,
+ hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
+ hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+ if (wwan_rfkill)
+ rfkill_set_states(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN),
+ hp_wmi_get_hw_state(HPWMI_WWAN));
} else
- printk(KERN_INFO "HP WMI: Unknown response received\n");
+ printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
+ eventcode);
}
static int __init hp_wmi_input_setup(void)
@@ -455,7 +471,11 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
RFKILL_TYPE_WLAN,
&hp_wmi_rfkill_ops,
- (void *) 0);
+ (void *) HPWMI_WIFI);
+ rfkill_init_sw_state(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI));
+ rfkill_set_hw_state(wifi_rfkill,
+ hp_wmi_get_hw_state(HPWMI_WIFI));
err = rfkill_register(wifi_rfkill);
if (err)
goto register_wifi_error;
@@ -465,7 +485,11 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev,
RFKILL_TYPE_BLUETOOTH,
&hp_wmi_rfkill_ops,
- (void *) 1);
+ (void *) HPWMI_BLUETOOTH);
+ rfkill_init_sw_state(bluetooth_rfkill,
+ hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
+ rfkill_set_hw_state(bluetooth_rfkill,
+ hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
err = rfkill_register(bluetooth_rfkill);
if (err)
goto register_bluetooth_error;
@@ -475,7 +499,11 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev,
RFKILL_TYPE_WWAN,
&hp_wmi_rfkill_ops,
- (void *) 2);
+ (void *) HPWMI_WWAN);
+ rfkill_init_sw_state(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN));
+ rfkill_set_hw_state(wwan_rfkill,
+ hp_wmi_get_hw_state(HPWMI_WWAN));
err = rfkill_register(wwan_rfkill);
if (err)
goto register_wwan_err;
@@ -533,6 +561,19 @@ static int hp_wmi_resume_handler(struct device *device)
input_sync(hp_wmi_input_dev);
}
+ if (wifi_rfkill)
+ rfkill_set_states(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI),
+ hp_wmi_get_hw_state(HPWMI_WIFI));
+ if (bluetooth_rfkill)
+ rfkill_set_states(bluetooth_rfkill,
+ hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
+ hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+ if (wwan_rfkill)
+ rfkill_set_states(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN),
+ hp_wmi_get_hw_state(HPWMI_WWAN));
+
return 0;
}
@@ -543,7 +584,7 @@ static int __init hp_wmi_init(void)
if (wmi_has_guid(HPWMI_EVENT_GUID)) {
err = wmi_install_notify_handler(HPWMI_EVENT_GUID,
hp_wmi_notify, NULL);
- if (!err)
+ if (ACPI_SUCCESS(err))
hp_wmi_input_setup();
}
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
new file mode 100644
index 0000000..f5f70d4
--- /dev/null
+++ b/drivers/platform/x86/msi-wmi.c
@@ -0,0 +1,288 @@
+/*
+ * MSI WMI hotkeys
+ *
+ * Copyright (C) 2009 Novell <trenn@suse.de>
+ *
+ * Most stuff taken over from hp-wmi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+
+MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
+MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("wmi:551A1F84-FBDD-4125-91DB-3EA8F44F1D45");
+MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
+
+#define DRV_NAME "msi-wmi"
+#define DRV_PFX DRV_NAME ": "
+
+#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
+#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
+
+#define dprintk(msg...) pr_debug(DRV_PFX msg)
+
+#define KEYCODE_BASE 0xD0
+#define MSI_WMI_BRIGHTNESSUP KEYCODE_BASE
+#define MSI_WMI_BRIGHTNESSDOWN (KEYCODE_BASE + 1)
+#define MSI_WMI_VOLUMEUP (KEYCODE_BASE + 2)
+#define MSI_WMI_VOLUMEDOWN (KEYCODE_BASE + 3)
+static struct key_entry msi_wmi_keymap[] = {
+ { KE_KEY, MSI_WMI_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
+ { KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
+ { KE_KEY, MSI_WMI_VOLUMEUP, {KEY_VOLUMEUP} },
+ { KE_KEY, MSI_WMI_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
+ { KE_END, 0}
+};
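+/* Timestamp of the last event seen for each hotkey, used to filter the
+ * burst of duplicate WMI events a single key press can generate. */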
+static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1];
+
+static struct backlight_device *backlight;
+
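+/* Raw hardware values for the six brightness levels exposed through the
+ * backlight device (index == brightness property). */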
+static int backlight_map[] = { 0x00, 0x33, 0x66, 0x99, 0xCC, 0xFF };
+
+static struct input_dev *msi_wmi_input_dev;
+
+static int msi_wmi_query_block(int instance, int *ret)
+{
+ acpi_status status;
+ union acpi_object *obj;
+
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_query_block(MSIWMI_BIOS_GUID, instance, &output);
+
+ obj = output.pointer;
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj) {
+ printk(KERN_ERR DRV_PFX "query block returned object "
+ "type: %d - buffer length:%d\n", obj->type,
+ obj->type == ACPI_TYPE_BUFFER ?
+ obj->buffer.length : 0);
+ }
+ kfree(obj);
+ return -EINVAL;
+ }
+ *ret = obj->integer.value;
+ kfree(obj);
+ return 0;
+}
+
+static int msi_wmi_set_block(int instance, int value)
+{
+ acpi_status status;
+
+ struct acpi_buffer input = { sizeof(int), &value };
+
+ dprintk("Going to set block of instance: %d - value: %d\n",
+ instance, value);
+
+ status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input);
+
+ return ACPI_SUCCESS(status) ? 0 : 1;
+}
+
+static int bl_get(struct backlight_device *bd)
+{
+ int level, err, ret;
+
+ /* Instance 1 is "get backlight", cmp with DSDT */
+ err = msi_wmi_query_block(1, &ret);
+ if (err) {
+ printk(KERN_ERR DRV_PFX "Could not query backlight: %d\n", err);
+ return -EINVAL;
+ }
+ dprintk("Get: Query block returned: %d\n", ret);
+ for (level = 0; level < ARRAY_SIZE(backlight_map); level++) {
+ if (backlight_map[level] == ret) {
+ dprintk("Current backlight level: 0x%X - index: %d\n",
+ backlight_map[level], level);
+ break;
+ }
+ }
+ if (level == ARRAY_SIZE(backlight_map)) {
+ printk(KERN_ERR DRV_PFX "get: Invalid brightness value: 0x%X\n",
+ ret);
+ return -EINVAL;
+ }
+ return level;
+}
+
+static int bl_set_status(struct backlight_device *bd)
+{
+ int bright = bd->props.brightness;
+ if (bright >= ARRAY_SIZE(backlight_map) || bright < 0)
+ return -EINVAL;
+
+ /* Instance 0 is "set backlight" */
+ return msi_wmi_set_block(0, backlight_map[bright]);
+}
+
+static struct backlight_ops msi_backlight_ops = {
+ .get_brightness = bl_get,
+ .update_status = bl_set_status,
+};
+
+static void msi_wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ static struct key_entry *key;
+ union acpi_object *obj;
+ ktime_t cur;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ printk(KERN_INFO DRV_PFX "bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+
+ if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ int eventcode = obj->integer.value;
+ dprintk("Eventcode: 0x%x\n", eventcode);
+ key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
+ eventcode);
+ if (key) {
+ ktime_t diff;
+ cur = ktime_get_real();
+ diff = ktime_sub(cur, last_pressed[key->code -
+ KEYCODE_BASE]);
+ /* Ignore the event if the same event occurred within the last
+ 50 ms; a single key press may result in 10-20 GPEs */
+ if (ktime_to_us(diff) < 1000 * 50) {
+ dprintk("Suppressed key event 0x%X - "
+ "Last press was %lld us ago\n",
+ key->code, ktime_to_us(diff));
+ return;
+ }
+ last_pressed[key->code - KEYCODE_BASE] = cur;
+
+ if (key->type == KE_KEY &&
+ /* Brightness is served via acpi video driver */
+ (!acpi_video_backlight_support() ||
+ (key->code != MSI_WMI_BRIGHTNESSUP &&
+ key->code != MSI_WMI_BRIGHTNESSDOWN))) {
+ dprintk("Send key: 0x%X - "
+ "Input layer keycode: %d\n", key->code,
+ key->keycode);
+ sparse_keymap_report_entry(msi_wmi_input_dev,
+ key, 1, true);
+ }
+ } else
+ printk(KERN_INFO "Unknown key pressed - %x\n",
+ eventcode);
+ } else
+ printk(KERN_INFO DRV_PFX "Unknown event received\n");
+ kfree(response.pointer);
+}
+
+static int __init msi_wmi_input_setup(void)
+{
+ int err;
+
+ msi_wmi_input_dev = input_allocate_device();
+ if (!msi_wmi_input_dev)
+ return -ENOMEM;
+
+ msi_wmi_input_dev->name = "MSI WMI hotkeys";
+ msi_wmi_input_dev->phys = "wmi/input0";
+ msi_wmi_input_dev->id.bustype = BUS_HOST;
+
+ err = sparse_keymap_setup(msi_wmi_input_dev, msi_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ err = input_register_device(msi_wmi_input_dev);
+
+ if (err)
+ goto err_free_keymap;
+
+ memset(last_pressed, 0, sizeof(last_pressed));
+
+ return 0;
+
+err_free_keymap:
+ sparse_keymap_free(msi_wmi_input_dev);
+err_free_dev:
+ input_free_device(msi_wmi_input_dev);
+ return err;
+}
+
+static int __init msi_wmi_init(void)
+{
+ int err;
+
+ if (!wmi_has_guid(MSIWMI_EVENT_GUID)) {
+ printk(KERN_ERR
+ "This machine doesn't have MSI-hotkeys through WMI\n");
+ return -ENODEV;
+ }
+ err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
+ msi_wmi_notify, NULL);
+ if (ACPI_FAILURE(err))
+ return -EINVAL;
+
+ err = msi_wmi_input_setup();
+ if (err)
+ goto err_uninstall_notifier;
+
+ if (!acpi_video_backlight_support()) {
+ backlight = backlight_device_register(DRV_NAME,
+ NULL, NULL, &msi_backlight_ops);
+ if (IS_ERR(backlight)) {
+ err = PTR_ERR(backlight);
+ goto err_free_input;
+ }
+
+ backlight->props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
+ err = bl_get(NULL);
+ if (err < 0)
+ goto err_free_backlight;
+
+ backlight->props.brightness = err;
+ }
+ dprintk("Event handler installed\n");
+
+ return 0;
+
+err_free_backlight:
+ backlight_device_unregister(backlight);
+err_free_input:
+ input_unregister_device(msi_wmi_input_dev);
+err_uninstall_notifier:
+ wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+ return err;
+}
+
+static void __exit msi_wmi_exit(void)
+{
+ if (wmi_has_guid(MSIWMI_EVENT_GUID)) {
+ wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+ sparse_keymap_free(msi_wmi_input_dev);
+ input_unregister_device(msi_wmi_input_dev);
+ backlight_device_unregister(backlight);
+ }
+}
+
+module_init(msi_wmi_init);
+module_exit(msi_wmi_exit);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 7a2cc8a..3f71a60 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -131,6 +131,7 @@ enum sony_nc_rfkill {
N_SONY_RFKILL,
};
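+/* SNC handle used for rfkill control; discovered at setup time
+ * (0x124 or 0x135 depending on the model). */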
+static int sony_rfkill_handle;
static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
static void sony_nc_rfkill_update(void);
@@ -142,7 +143,7 @@ struct sony_laptop_input_s {
atomic_t users;
struct input_dev *jog_dev;
struct input_dev *key_dev;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
struct workqueue_struct *wq;
};
@@ -232,6 +233,7 @@ static int sony_laptop_input_index[] = {
56, /* 69 SONYPI_EVENT_VOLUME_INC_PRESSED */
57, /* 70 SONYPI_EVENT_VOLUME_DEC_PRESSED */
-1, /* 71 SONYPI_EVENT_BRIGHTNESS_PRESSED */
+ 58, /* 72 SONYPI_EVENT_MEDIA_PRESSED */
};
static int sony_laptop_input_keycode_map[] = {
@@ -293,6 +295,7 @@ static int sony_laptop_input_keycode_map[] = {
KEY_F15, /* 55 SONYPI_EVENT_SETTINGKEY_PRESSED */
KEY_VOLUMEUP, /* 56 SONYPI_EVENT_VOLUME_INC_PRESSED */
KEY_VOLUMEDOWN, /* 57 SONYPI_EVENT_VOLUME_DEC_PRESSED */
+ KEY_MEDIA, /* 58 SONYPI_EVENT_MEDIA_PRESSED */
};
/* release buttons after a short delay if pressed */
@@ -300,8 +303,9 @@ static void do_sony_laptop_release_key(struct work_struct *work)
{
struct sony_laptop_keypress kp;
- while (kfifo_get(sony_laptop_input.fifo, (unsigned char *)&kp,
- sizeof(kp)) == sizeof(kp)) {
+ while (kfifo_out_locked(&sony_laptop_input.fifo, (unsigned char *)&kp,
+ sizeof(kp), &sony_laptop_input.fifo_lock)
+ == sizeof(kp)) {
msleep(10);
input_report_key(kp.dev, kp.key, 0);
input_sync(kp.dev);
@@ -362,8 +366,9 @@ static void sony_laptop_report_input_event(u8 event)
/* we emit the scancode so we can always remap the key */
input_event(kp.dev, EV_MSC, MSC_SCAN, event);
input_sync(kp.dev);
- kfifo_put(sony_laptop_input.fifo,
- (unsigned char *)&kp, sizeof(kp));
+ kfifo_in_locked(&sony_laptop_input.fifo,
+ (unsigned char *)&kp, sizeof(kp),
+ &sony_laptop_input.fifo_lock);
if (!work_pending(&sony_laptop_release_key_work))
queue_work(sony_laptop_input.wq,
@@ -385,12 +390,10 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
/* kfifo */
spin_lock_init(&sony_laptop_input.fifo_lock);
- sony_laptop_input.fifo =
- kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
- &sony_laptop_input.fifo_lock);
- if (IS_ERR(sony_laptop_input.fifo)) {
+ error =
+ kfifo_alloc(&sony_laptop_input.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
+ if (error) {
printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
- error = PTR_ERR(sony_laptop_input.fifo);
goto err_dec_users;
}
@@ -474,7 +477,7 @@ err_destroy_wq:
destroy_workqueue(sony_laptop_input.wq);
err_free_kfifo:
- kfifo_free(sony_laptop_input.fifo);
+ kfifo_free(&sony_laptop_input.fifo);
err_dec_users:
atomic_dec(&sony_laptop_input.users);
@@ -500,7 +503,7 @@ static void sony_laptop_remove_input(void)
}
destroy_workqueue(sony_laptop_input.wq);
- kfifo_free(sony_laptop_input.fifo);
+ kfifo_free(&sony_laptop_input.fifo);
}
/*********** Platform Device ***********/
@@ -890,6 +893,8 @@ static struct sony_nc_event sony_100_events[] = {
{ 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x9f, SONYPI_EVENT_CD_EJECT_PRESSED },
{ 0x1f, SONYPI_EVENT_ANYBUTTON_RELEASED },
+ { 0xa1, SONYPI_EVENT_MEDIA_PRESSED },
+ { 0x21, SONYPI_EVENT_ANYBUTTON_RELEASED },
{ 0, 0 },
};
@@ -961,7 +966,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
else
sony_laptop_report_input_event(ev);
}
- } else if (sony_find_snc_handle(0x124) == ev) {
+ } else if (sony_find_snc_handle(sony_rfkill_handle) == ev) {
sony_nc_rfkill_update();
return;
}
@@ -1067,7 +1072,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
if (!blocked)
argument |= 0xff0000;
- return sony_call_snc_handle(0x124, argument, &result);
+ return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
}
static const struct rfkill_ops sony_rfkill_ops = {
@@ -1110,7 +1115,7 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
if (!rfk)
return -ENOMEM;
- sony_call_snc_handle(0x124, 0x200, &result);
+ sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
hwblock = !(result & 0x1);
rfkill_set_hw_state(rfk, hwblock);
@@ -1129,7 +1134,7 @@ static void sony_nc_rfkill_update()
int result;
bool hwblock;
- sony_call_snc_handle(0x124, 0x200, &result);
+ sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
hwblock = !(result & 0x1);
for (i = 0; i < N_SONY_RFKILL; i++) {
@@ -1145,36 +1150,82 @@ static void sony_nc_rfkill_update()
continue;
}
- sony_call_snc_handle(0x124, argument, &result);
+ sony_call_snc_handle(sony_rfkill_handle, argument, &result);
rfkill_set_states(sony_rfkill_devices[i],
!(result & 0xf), false);
}
}
-static int sony_nc_rfkill_setup(struct acpi_device *device)
+static void sony_nc_rfkill_setup(struct acpi_device *device)
{
- int result, ret;
+ int offset;
+ u8 dev_code, i;
+ acpi_status status;
+ struct acpi_object_list params;
+ union acpi_object in_obj;
+ union acpi_object *device_enum;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- if (sony_find_snc_handle(0x124) == -1)
- return -1;
+ offset = sony_find_snc_handle(0x124);
+ if (offset == -1) {
+ offset = sony_find_snc_handle(0x135);
+ if (offset == -1)
+ return;
+ else
+ sony_rfkill_handle = 0x135;
+ } else
+ sony_rfkill_handle = 0x124;
+ dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle);
- ret = sony_call_snc_handle(0x124, 0xb00, &result);
- if (ret) {
- printk(KERN_INFO DRV_PFX
- "Unable to enumerate rfkill devices: %x\n", ret);
- return ret;
+ /* We need to read the whole buffer returned by the ACPI call to SN06
+ * here, otherwise we may miss some features
+ */
+ params.count = 1;
+ params.pointer = &in_obj;
+ in_obj.type = ACPI_TYPE_INTEGER;
+ in_obj.integer.value = offset;
+ status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ dprintk("Radio device enumeration failed\n");
+ return;
}
- if (result & 0x1)
- sony_nc_setup_rfkill(device, SONY_WIFI);
- if (result & 0x2)
- sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
- if (result & 0x1c)
- sony_nc_setup_rfkill(device, SONY_WWAN);
- if (result & 0x20)
- sony_nc_setup_rfkill(device, SONY_WIMAX);
+ device_enum = (union acpi_object *) buffer.pointer;
+ if (!device_enum || device_enum->type != ACPI_TYPE_BUFFER) {
+ printk(KERN_ERR "Invalid SN06 return object 0x%.2x\n",
+ device_enum->type);
+ goto out_no_enum;
+ }
- return 0;
+ /* The buffer is filled with magic numbers describing the devices
+ * available; 0xff terminates the enumeration.
+ */
+ for (i = 0; i < device_enum->buffer.length; i++) {
+
+ dev_code = *(device_enum->buffer.pointer + i);
+ if (dev_code == 0xff)
+ break;
+
+ dprintk("Radio devices, looking at 0x%.2x\n", dev_code);
+
+ if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI])
+ sony_nc_setup_rfkill(device, SONY_WIFI);
+
+ if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
+ sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
+
+ if ((0xf0 & dev_code) == 0x20 &&
+ !sony_rfkill_devices[SONY_WWAN])
+ sony_nc_setup_rfkill(device, SONY_WWAN);
+
+ if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
+ sony_nc_setup_rfkill(device, SONY_WIMAX);
+ }
+
+out_no_enum:
+ kfree(buffer.pointer);
+ return;
}
static int sony_nc_add(struct acpi_device *device)
@@ -2079,7 +2130,7 @@ static struct attribute_group spic_attribute_group = {
struct sonypi_compat_s {
struct fasync_struct *fifo_async;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
wait_queue_head_t fifo_proc_list;
atomic_t open_count;
@@ -2104,12 +2155,12 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
/* Flush input queue on first open */
unsigned long flags;
- spin_lock_irqsave(sonypi_compat.fifo->lock, flags);
+ spin_lock_irqsave(&sonypi_compat.fifo_lock, flags);
if (atomic_inc_return(&sonypi_compat.open_count) == 1)
- __kfifo_reset(sonypi_compat.fifo);
+ kfifo_reset(&sonypi_compat.fifo);
- spin_unlock_irqrestore(sonypi_compat.fifo->lock, flags);
+ spin_unlock_irqrestore(&sonypi_compat.fifo_lock, flags);
return 0;
}
@@ -2120,17 +2171,18 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
ssize_t ret;
unsigned char c;
- if ((kfifo_len(sonypi_compat.fifo) == 0) &&
+ if ((kfifo_len(&sonypi_compat.fifo) == 0) &&
(file->f_flags & O_NONBLOCK))
return -EAGAIN;
ret = wait_event_interruptible(sonypi_compat.fifo_proc_list,
- kfifo_len(sonypi_compat.fifo) != 0);
+ kfifo_len(&sonypi_compat.fifo) != 0);
if (ret)
return ret;
while (ret < count &&
- (kfifo_get(sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) {
+ (kfifo_out_locked(&sonypi_compat.fifo, &c, sizeof(c),
+ &sonypi_compat.fifo_lock) == sizeof(c))) {
if (put_user(c, buf++))
return -EFAULT;
ret++;
@@ -2147,7 +2199,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
- if (kfifo_len(sonypi_compat.fifo))
+ if (kfifo_len(&sonypi_compat.fifo))
return POLLIN | POLLRDNORM;
return 0;
}
@@ -2309,7 +2361,8 @@ static struct miscdevice sonypi_misc_device = {
static void sonypi_compat_report_event(u8 event)
{
- kfifo_put(sonypi_compat.fifo, (unsigned char *)&event, sizeof(event));
+ kfifo_in_locked(&sonypi_compat.fifo, (unsigned char *)&event,
+ sizeof(event), &sonypi_compat.fifo_lock);
kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN);
wake_up_interruptible(&sonypi_compat.fifo_proc_list);
}
@@ -2319,11 +2372,11 @@ static int sonypi_compat_init(void)
int error;
spin_lock_init(&sonypi_compat.fifo_lock);
- sonypi_compat.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
- &sonypi_compat.fifo_lock);
- if (IS_ERR(sonypi_compat.fifo)) {
+ error =
+ kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
+ if (error) {
printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
- return PTR_ERR(sonypi_compat.fifo);
+ return error;
}
init_waitqueue_head(&sonypi_compat.fifo_proc_list);
@@ -2342,14 +2395,14 @@ static int sonypi_compat_init(void)
return 0;
err_free_kfifo:
- kfifo_free(sonypi_compat.fifo);
+ kfifo_free(&sonypi_compat.fifo);
return error;
}
static void sonypi_compat_exit(void)
{
misc_deregister(&sonypi_misc_device);
- kfifo_free(sonypi_compat.fifo);
+ kfifo_free(&sonypi_compat.fifo);
}
#else
static int sonypi_compat_init(void) { return 0; }
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 4416600..dd33b51 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -47,22 +47,6 @@ MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");
-static int tc1100_probe(struct platform_device *device);
-static int tc1100_remove(struct platform_device *device);
-static int tc1100_suspend(struct platform_device *device, pm_message_t state);
-static int tc1100_resume(struct platform_device *device);
-
-static struct platform_driver tc1100_driver = {
- .driver = {
- .name = "tc1100-wmi",
- .owner = THIS_MODULE,
- },
- .probe = tc1100_probe,
- .remove = tc1100_remove,
- .suspend = tc1100_suspend,
- .resume = tc1100_resume,
-};
-
static struct platform_device *tc1100_device;
struct tc1100_data {
@@ -183,51 +167,35 @@ static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);
-static void remove_fs(void)
-{
- device_remove_file(&tc1100_device->dev, &dev_attr_wireless);
- device_remove_file(&tc1100_device->dev, &dev_attr_jogdial);
-}
-
-static int add_fs(void)
-{
- int ret;
-
- ret = device_create_file(&tc1100_device->dev, &dev_attr_wireless);
- if (ret)
- goto add_sysfs_error;
-
- ret = device_create_file(&tc1100_device->dev, &dev_attr_jogdial);
- if (ret)
- goto add_sysfs_error;
-
- return ret;
+static struct attribute *tc1100_attributes[] = {
+ &dev_attr_wireless.attr,
+ &dev_attr_jogdial.attr,
+ NULL
+};
-add_sysfs_error:
- remove_fs();
- return ret;
-}
+static struct attribute_group tc1100_attribute_group = {
+ .attrs = tc1100_attributes,
+};
/* --------------------------------------------------------------------------
Driver Model
-------------------------------------------------------------------------- */
-static int tc1100_probe(struct platform_device *device)
+static int __init tc1100_probe(struct platform_device *device)
{
- int result = 0;
-
- result = add_fs();
- return result;
+ return sysfs_create_group(&device->dev.kobj, &tc1100_attribute_group);
}
-static int tc1100_remove(struct platform_device *device)
+static int __devexit tc1100_remove(struct platform_device *device)
{
- remove_fs();
+ sysfs_remove_group(&device->dev.kobj, &tc1100_attribute_group);
+
return 0;
}
-static int tc1100_suspend(struct platform_device *dev, pm_message_t state)
+#ifdef CONFIG_PM
+static int tc1100_suspend(struct device *dev)
{
int ret;
@@ -239,10 +207,10 @@ static int tc1100_suspend(struct platform_device *dev, pm_message_t state)
if (ret)
return ret;
- return ret;
+ return 0;
}
-static int tc1100_resume(struct platform_device *dev)
+static int tc1100_resume(struct device *dev)
{
int ret;
@@ -254,34 +222,61 @@ static int tc1100_resume(struct platform_device *dev)
if (ret)
return ret;
- return ret;
+ return 0;
}
+static const struct dev_pm_ops tc1100_pm_ops = {
+ .suspend = tc1100_suspend,
+ .resume = tc1100_resume,
+ .freeze = tc1100_suspend,
+ .restore = tc1100_resume,
+};
+#endif
+
+static struct platform_driver tc1100_driver = {
+ .driver = {
+ .name = "tc1100-wmi",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &tc1100_pm_ops,
+#endif
+ },
+ .remove = __devexit_p(tc1100_remove),
+};
+
static int __init tc1100_init(void)
{
- int result = 0;
+ int error;
if (!wmi_has_guid(GUID))
return -ENODEV;
- result = platform_driver_register(&tc1100_driver);
- if (result)
- return result;
-
tc1100_device = platform_device_alloc("tc1100-wmi", -1);
- platform_device_add(tc1100_device);
+ if (!tc1100_device)
+ return -ENOMEM;
+
+ error = platform_device_add(tc1100_device);
+ if (error)
+ goto err_device_put;
+
+ error = platform_driver_probe(&tc1100_driver, tc1100_probe);
+ if (error)
+ goto err_device_del;
printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n");
+ return 0;
- return result;
+ err_device_del:
+ platform_device_del(tc1100_device);
+ err_device_put:
+ platform_device_put(tc1100_device);
+ return error;
}
static void __exit tc1100_exit(void)
{
- platform_device_del(tc1100_device);
+ platform_device_unregister(tc1100_device);
platform_driver_unregister(&tc1100_driver);
-
- printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras unloaded\n");
}
module_init(tc1100_init);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index cf61d6a..eb603f1d5 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -21,8 +21,8 @@
* 02110-1301, USA.
*/
-#define TPACPI_VERSION "0.23"
-#define TPACPI_SYSFS_VERSION 0x020500
+#define TPACPI_VERSION "0.24"
+#define TPACPI_SYSFS_VERSION 0x020700
/*
* Changelog:
@@ -61,6 +61,7 @@
#include <linux/nvram.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/sysfs.h>
#include <linux/backlight.h>
#include <linux/fb.h>
@@ -76,6 +77,10 @@
#include <linux/jiffies.h>
#include <linux/workqueue.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/initval.h>
+
#include <acpi/acpi_drivers.h>
#include <linux/pci_ids.h>
@@ -231,6 +236,7 @@ enum tpacpi_hkey_event_t {
#define TPACPI_DBG_HKEY 0x0008
#define TPACPI_DBG_FAN 0x0010
#define TPACPI_DBG_BRGHT 0x0020
+#define TPACPI_DBG_MIXER 0x0040
#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off")
#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
@@ -256,7 +262,7 @@ struct tp_acpi_drv_struct {
struct ibm_struct {
char *name;
- int (*read) (char *);
+ int (*read) (struct seq_file *);
int (*write) (char *);
void (*exit) (void);
void (*resume) (void);
@@ -298,6 +304,7 @@ static struct {
u32 fan_ctrl_status_undef:1;
u32 second_fan:1;
u32 beep_needs_two_args:1;
+ u32 mixer_no_level_control:1;
u32 input_device_registered:1;
u32 platform_drv_registered:1;
u32 platform_drv_attrs_registered:1;
@@ -309,6 +316,7 @@ static struct {
static struct {
u16 hotkey_mask_ff:1;
+ u16 volume_ctrl_forbidden:1;
} tp_warned;
struct thinkpad_id_data {
@@ -425,6 +433,12 @@ static void tpacpi_log_usertask(const char * const what)
.ec = TPACPI_MATCH_ANY, \
.quirks = (__quirk) }
+#define TPACPI_QEC_LNV(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = (__quirk) }
+
struct tpacpi_quirk {
unsigned int vendor;
u16 bios;
@@ -776,36 +790,25 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
****************************************************************************
****************************************************************************/
-static int dispatch_procfs_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int dispatch_proc_show(struct seq_file *m, void *v)
{
- struct ibm_struct *ibm = data;
- int len;
+ struct ibm_struct *ibm = m->private;
if (!ibm || !ibm->read)
return -EINVAL;
+ return ibm->read(m);
+}
- len = ibm->read(page);
- if (len < 0)
- return len;
-
- if (len <= off + count)
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
-
- return len;
+static int dispatch_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dispatch_proc_show, PDE(inode)->data);
}
-static int dispatch_procfs_write(struct file *file,
+static ssize_t dispatch_proc_write(struct file *file,
const char __user *userbuf,
- unsigned long count, void *data)
+ size_t count, loff_t *pos)
{
- struct ibm_struct *ibm = data;
+ struct ibm_struct *ibm = PDE(file->f_path.dentry->d_inode)->data;
char *kernbuf;
int ret;
@@ -834,6 +837,15 @@ static int dispatch_procfs_write(struct file *file,
return ret;
}
+static const struct file_operations dispatch_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = dispatch_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = dispatch_proc_write,
+};
+
static char *next_cmd(char **cmds)
{
char *start = *cmds;
@@ -1261,6 +1273,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
struct tpacpi_rfk *atp_rfk;
int res;
bool sw_state = false;
+ bool hw_state;
int sw_status;
BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
@@ -1295,7 +1308,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
}
}
- rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
+ hw_state = tpacpi_rfk_check_hwblock_state();
+ rfkill_set_hw_state(atp_rfk->rfkill, hw_state);
res = rfkill_register(atp_rfk->rfkill);
if (res < 0) {
@@ -1308,6 +1322,9 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
}
tpacpi_rfkill_switches[id] = atp_rfk;
+
+ printk(TPACPI_INFO "rfkill switch %s: radio is %sblocked\n",
+ name, (sw_state || hw_state) ? "" : "un");
return 0;
}
@@ -1380,12 +1397,10 @@ static ssize_t tpacpi_rfk_sysfs_enable_store(const enum tpacpi_rfk_id id,
}
/* procfs -------------------------------------------------------------- */
-static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, char *p)
+static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, struct seq_file *m)
{
- int len = 0;
-
if (id >= TPACPI_RFK_SW_MAX)
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
else {
int status;
@@ -1399,13 +1414,13 @@ static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, char *p)
return status;
}
- len += sprintf(p + len, "status:\t\t%s\n",
+ seq_printf(m, "status:\t\t%s\n",
(status == TPACPI_RFK_RADIO_ON) ?
"enabled" : "disabled");
- len += sprintf(p + len, "commands:\tenable, disable\n");
+ seq_printf(m, "commands:\tenable, disable\n");
}
- return len;
+ return 0;
}
static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
@@ -1776,7 +1791,7 @@ static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
TPV_QL1('7', '9', 'E', '3', '5', '0'), /* T60/p */
TPV_QL1('7', 'C', 'D', '2', '2', '2'), /* R60, R60i */
- TPV_QL0('7', 'E', 'D', '0'), /* R60e, R60i */
+ TPV_QL1('7', 'E', 'D', '0', '1', '5'), /* R60e, R60i */
/* BIOS FW BIOS VERS EC FW EC VERS */
TPV_QI2('1', 'W', '9', '0', '1', 'V', '2', '8'), /* R50e (1) */
@@ -1792,8 +1807,8 @@ static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
TPV_QI1('7', '4', '6', '4', '2', '7'), /* X41 (0) */
TPV_QI1('7', '5', '6', '0', '2', '0'), /* X41t (0) */
- TPV_QL0('7', 'B', 'D', '7'), /* X60/s */
- TPV_QL0('7', 'J', '3', '0'), /* X60t */
+ TPV_QL1('7', 'B', 'D', '7', '4', '0'), /* X60/s */
+ TPV_QL1('7', 'J', '3', '0', '1', '3'), /* X60t */
/* (0) - older versions lack DMI EC fw string and functionality */
/* (1) - older versions known to lack functionality */
@@ -1883,14 +1898,11 @@ static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
return 0;
}
-static int thinkpad_acpi_driver_read(char *p)
+static int thinkpad_acpi_driver_read(struct seq_file *m)
{
- int len = 0;
-
- len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC);
- len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION);
-
- return len;
+ seq_printf(m, "driver:\t\t%s\n", TPACPI_DESC);
+ seq_printf(m, "version:\t%s\n", TPACPI_VERSION);
+ return 0;
}
static struct ibm_struct thinkpad_acpi_driver_data = {
@@ -2186,7 +2198,8 @@ static int hotkey_mask_set(u32 mask)
fwmask, hotkey_acpi_mask);
}
- hotkey_mask_warn_incomplete_mask();
+ if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+ hotkey_mask_warn_incomplete_mask();
return rc;
}
@@ -3182,6 +3195,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
int res, i;
int status;
int hkeyv;
+ bool radiosw_state = false;
+ bool tabletsw_state = false;
unsigned long quirks;
@@ -3287,6 +3302,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wlswemul) {
tp_features.hotkey_wlsw = 1;
+ radiosw_state = !!tpacpi_wlsw_emulstate;
printk(TPACPI_INFO
"radio switch emulation enabled\n");
} else
@@ -3294,6 +3310,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Not all thinkpads have a hardware radio switch */
if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
+ radiosw_state = !!status;
printk(TPACPI_INFO
"radio switch found; radios are %s\n",
enabled(status, 0));
@@ -3305,11 +3322,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* For X41t, X60t, X61t Tablets... */
if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
tp_features.hotkey_tablet = 1;
+ tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
printk(TPACPI_INFO
"possible tablet mode switch found; "
"ThinkPad in %s mode\n",
- (status & TP_HOTKEY_TABLET_MASK)?
- "tablet" : "laptop");
+ (tabletsw_state) ? "tablet" : "laptop");
res = add_to_attr_set(hotkey_dev_attributes,
&dev_attr_hotkey_tablet_mode.attr);
}
@@ -3344,16 +3361,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
TPACPI_HOTKEY_MAP_SIZE);
}
- set_bit(EV_KEY, tpacpi_inputdev->evbit);
- set_bit(EV_MSC, tpacpi_inputdev->evbit);
- set_bit(MSC_SCAN, tpacpi_inputdev->mscbit);
+ input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
tpacpi_inputdev->keycode = hotkey_keycode_map;
for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
if (hotkey_keycode_map[i] != KEY_RESERVED) {
- set_bit(hotkey_keycode_map[i],
- tpacpi_inputdev->keybit);
+ input_set_capability(tpacpi_inputdev, EV_KEY,
+ hotkey_keycode_map[i]);
} else {
if (i < sizeof(hotkey_reserved_mask)*8)
hotkey_reserved_mask |= 1 << i;
@@ -3361,12 +3376,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
}
if (tp_features.hotkey_wlsw) {
- set_bit(EV_SW, tpacpi_inputdev->evbit);
- set_bit(SW_RFKILL_ALL, tpacpi_inputdev->swbit);
+ input_set_capability(tpacpi_inputdev, EV_SW, SW_RFKILL_ALL);
+ input_report_switch(tpacpi_inputdev,
+ SW_RFKILL_ALL, radiosw_state);
}
if (tp_features.hotkey_tablet) {
- set_bit(EV_SW, tpacpi_inputdev->evbit);
- set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
+ input_set_capability(tpacpi_inputdev, EV_SW, SW_TABLET_MODE);
+ input_report_switch(tpacpi_inputdev,
+ SW_TABLET_MODE, tabletsw_state);
}
/* Do not issue duplicate brightness change events to
@@ -3433,8 +3450,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
tpacpi_inputdev->close = &hotkey_inputdev_close;
hotkey_poll_setup_safe(true);
- tpacpi_send_radiosw_update();
- tpacpi_input_send_tabletsw();
return 0;
@@ -3542,49 +3557,57 @@ static bool hotkey_notify_usrevent(const u32 hkey,
}
}
+static void thermal_dump_all_sensors(void);
+
static bool hotkey_notify_thermal(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
{
+ bool known = true;
+
/* 0x6000-0x6FFF: thermal alarms */
*send_acpi_ev = true;
*ignore_acpi_ev = false;
switch (hkey) {
+ case TP_HKEY_EV_THM_TABLE_CHANGED:
+ printk(TPACPI_INFO
+ "EC reports that Thermal Table has changed\n");
+ /* recommended action: do nothing, we don't have
+ * Lenovo ATM information */
+ return true;
case TP_HKEY_EV_ALARM_BAT_HOT:
printk(TPACPI_CRIT
"THERMAL ALARM: battery is too hot!\n");
/* recommended action: warn user through gui */
- return true;
+ break;
case TP_HKEY_EV_ALARM_BAT_XHOT:
printk(TPACPI_ALERT
"THERMAL EMERGENCY: battery is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
- return true;
+ break;
case TP_HKEY_EV_ALARM_SENSOR_HOT:
printk(TPACPI_CRIT
"THERMAL ALARM: "
"a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
/* some internal component is too hot */
- return true;
+ break;
case TP_HKEY_EV_ALARM_SENSOR_XHOT:
printk(TPACPI_ALERT
"THERMAL EMERGENCY: "
"a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
- return true;
- case TP_HKEY_EV_THM_TABLE_CHANGED:
- printk(TPACPI_INFO
- "EC reports that Thermal Table has changed\n");
- /* recommended action: do nothing, we don't have
- * Lenovo ATM information */
- return true;
+ break;
default:
printk(TPACPI_ALERT
"THERMAL ALERT: unknown thermal alarm received\n");
- return false;
+ known = false;
}
+
+ thermal_dump_all_sensors();
+
+ return known;
}
static void hotkey_notify(struct ibm_struct *ibm, u32 event)
@@ -3727,14 +3750,13 @@ static void hotkey_resume(void)
}
/* procfs -------------------------------------------------------------- */
-static int hotkey_read(char *p)
+static int hotkey_read(struct seq_file *m)
{
int res, status;
- int len = 0;
if (!tp_features.hotkey) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
- return len;
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
}
if (mutex_lock_killable(&hotkey_mutex))
@@ -3746,17 +3768,16 @@ static int hotkey_read(char *p)
if (res)
return res;
- len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
+ seq_printf(m, "status:\t\t%s\n", enabled(status, 0));
if (hotkey_all_mask) {
- len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
- len += sprintf(p + len,
- "commands:\tenable, disable, reset, <mask>\n");
+ seq_printf(m, "mask:\t\t0x%08x\n", hotkey_user_mask);
+ seq_printf(m, "commands:\tenable, disable, reset, <mask>\n");
} else {
- len += sprintf(p + len, "mask:\t\tnot supported\n");
- len += sprintf(p + len, "commands:\tenable, disable, reset\n");
+ seq_printf(m, "mask:\t\tnot supported\n");
+ seq_printf(m, "commands:\tenable, disable, reset\n");
}
- return len;
+ return 0;
}
static void hotkey_enabledisable_warn(bool enable)
@@ -3863,15 +3884,6 @@ enum {
#define TPACPI_RFK_BLUETOOTH_SW_NAME "tpacpi_bluetooth_sw"
-static void bluetooth_suspend(pm_message_t state)
-{
- /* Try to make sure radio will resume powered off */
- if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
- TP_ACPI_BLTH_PWR_OFF_ON_RESUME))
- vdbg_printk(TPACPI_DBG_RFKILL,
- "bluetooth power down on resume request failed\n");
-}
-
static int bluetooth_get_status(void)
{
int status;
@@ -3905,10 +3917,9 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
#endif
/* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
+ status = TP_ACPI_BLUETOOTH_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status = TP_ACPI_BLUETOOTH_RADIOSSW;
- else
- status = 0;
+ status |= TP_ACPI_BLUETOOTH_RADIOSSW;
if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
return -EIO;
@@ -4032,9 +4043,9 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
}
/* procfs -------------------------------------------------------------- */
-static int bluetooth_read(char *p)
+static int bluetooth_read(struct seq_file *m)
{
- return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, p);
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, m);
}
static int bluetooth_write(char *buf)
@@ -4047,7 +4058,6 @@ static struct ibm_struct bluetooth_driver_data = {
.read = bluetooth_read,
.write = bluetooth_write,
.exit = bluetooth_exit,
- .suspend = bluetooth_suspend,
.shutdown = bluetooth_shutdown,
};
@@ -4065,15 +4075,6 @@ enum {
#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
-static void wan_suspend(pm_message_t state)
-{
- /* Try to make sure radio will resume powered off */
- if (!acpi_evalf(NULL, NULL, "\\WGSV", "qvd",
- TP_ACPI_WGSV_PWR_OFF_ON_RESUME))
- vdbg_printk(TPACPI_DBG_RFKILL,
- "WWAN power down on resume request failed\n");
-}
-
static int wan_get_status(void)
{
int status;
@@ -4106,11 +4107,10 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
}
#endif
- /* We make sure to keep TP_ACPI_WANCARD_RESUMECTRL off */
+ /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
+ status = TP_ACPI_WANCARD_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status = TP_ACPI_WANCARD_RADIOSSW;
- else
- status = 0;
+ status |= TP_ACPI_WANCARD_RADIOSSW;
if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
return -EIO;
@@ -4233,9 +4233,9 @@ static int __init wan_init(struct ibm_init_struct *iibm)
}
/* procfs -------------------------------------------------------------- */
-static int wan_read(char *p)
+static int wan_read(struct seq_file *m)
{
- return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, p);
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, m);
}
static int wan_write(char *buf)
@@ -4248,7 +4248,6 @@ static struct ibm_struct wan_driver_data = {
.read = wan_read,
.write = wan_write,
.exit = wan_exit,
- .suspend = wan_suspend,
.shutdown = wan_shutdown,
};
@@ -4611,14 +4610,13 @@ static int video_expand_toggle(void)
/* not reached */
}
-static int video_read(char *p)
+static int video_read(struct seq_file *m)
{
int status, autosw;
- int len = 0;
if (video_supported == TPACPI_VIDEO_NONE) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
- return len;
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
}
status = video_outputsw_get();
@@ -4629,20 +4627,20 @@ static int video_read(char *p)
if (autosw < 0)
return autosw;
- len += sprintf(p + len, "status:\t\tsupported\n");
- len += sprintf(p + len, "lcd:\t\t%s\n", enabled(status, 0));
- len += sprintf(p + len, "crt:\t\t%s\n", enabled(status, 1));
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "lcd:\t\t%s\n", enabled(status, 0));
+ seq_printf(m, "crt:\t\t%s\n", enabled(status, 1));
if (video_supported == TPACPI_VIDEO_NEW)
- len += sprintf(p + len, "dvi:\t\t%s\n", enabled(status, 3));
- len += sprintf(p + len, "auto:\t\t%s\n", enabled(autosw, 0));
- len += sprintf(p + len, "commands:\tlcd_enable, lcd_disable\n");
- len += sprintf(p + len, "commands:\tcrt_enable, crt_disable\n");
+ seq_printf(m, "dvi:\t\t%s\n", enabled(status, 3));
+ seq_printf(m, "auto:\t\t%s\n", enabled(autosw, 0));
+ seq_printf(m, "commands:\tlcd_enable, lcd_disable\n");
+ seq_printf(m, "commands:\tcrt_enable, crt_disable\n");
if (video_supported == TPACPI_VIDEO_NEW)
- len += sprintf(p + len, "commands:\tdvi_enable, dvi_disable\n");
- len += sprintf(p + len, "commands:\tauto_enable, auto_disable\n");
- len += sprintf(p + len, "commands:\tvideo_switch, expand_toggle\n");
+ seq_printf(m, "commands:\tdvi_enable, dvi_disable\n");
+ seq_printf(m, "commands:\tauto_enable, auto_disable\n");
+ seq_printf(m, "commands:\tvideo_switch, expand_toggle\n");
- return len;
+ return 0;
}
static int video_write(char *buf)
@@ -4834,25 +4832,24 @@ static void light_exit(void)
flush_workqueue(tpacpi_wq);
}
-static int light_read(char *p)
+static int light_read(struct seq_file *m)
{
- int len = 0;
int status;
if (!tp_features.light) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
} else if (!tp_features.light_status) {
- len += sprintf(p + len, "status:\t\tunknown\n");
- len += sprintf(p + len, "commands:\ton, off\n");
+ seq_printf(m, "status:\t\tunknown\n");
+ seq_printf(m, "commands:\ton, off\n");
} else {
status = light_get_status();
if (status < 0)
return status;
- len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0));
- len += sprintf(p + len, "commands:\ton, off\n");
+ seq_printf(m, "status:\t\t%s\n", onoff(status, 0));
+ seq_printf(m, "commands:\ton, off\n");
}
- return len;
+ return 0;
}
static int light_write(char *buf)
@@ -4930,20 +4927,18 @@ static void cmos_exit(void)
device_remove_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
}
-static int cmos_read(char *p)
+static int cmos_read(struct seq_file *m)
{
- int len = 0;
-
/* cmos not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
R30, R31, T20-22, X20-21 */
if (!cmos_handle)
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
else {
- len += sprintf(p + len, "status:\t\tsupported\n");
- len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-21)\n");
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "commands:\t<cmd> (<cmd> is 0-21)\n");
}
- return len;
+ return 0;
}
static int cmos_write(char *buf)
@@ -5318,15 +5313,13 @@ static int __init led_init(struct ibm_init_struct *iibm)
((s) == TPACPI_LED_OFF ? "off" : \
((s) == TPACPI_LED_ON ? "on" : "blinking"))
-static int led_read(char *p)
+static int led_read(struct seq_file *m)
{
- int len = 0;
-
if (!led_supported) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
- return len;
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
}
- len += sprintf(p + len, "status:\t\tsupported\n");
+ seq_printf(m, "status:\t\tsupported\n");
if (led_supported == TPACPI_LED_570) {
/* 570 */
@@ -5335,15 +5328,15 @@ static int led_read(char *p)
status = led_get_status(i);
if (status < 0)
return -EIO;
- len += sprintf(p + len, "%d:\t\t%s\n",
+ seq_printf(m, "%d:\t\t%s\n",
i, str_led_status(status));
}
}
- len += sprintf(p + len, "commands:\t"
+ seq_printf(m, "commands:\t"
"<led> on, <led> off, <led> blink (<led> is 0-15)\n");
- return len;
+ return 0;
}
static int led_write(char *buf)
@@ -5416,18 +5409,16 @@ static int __init beep_init(struct ibm_init_struct *iibm)
return (beep_handle)? 0 : 1;
}
-static int beep_read(char *p)
+static int beep_read(struct seq_file *m)
{
- int len = 0;
-
if (!beep_handle)
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
else {
- len += sprintf(p + len, "status:\t\tsupported\n");
- len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-17)\n");
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "commands:\t<cmd> (<cmd> is 0-17)\n");
}
- return len;
+ return 0;
}
static int beep_write(char *buf)
@@ -5480,8 +5471,11 @@ enum { /* TPACPI_THERMAL_TPEC_* */
TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
+
+ TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
};
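For reference, the new sentinel is simply the EC "not available" byte scaled to the millidegree units returned by thermal_get_sensor(); a standalone check of that relationship (illustrative only, not part of the patch):

    /* Illustrative consistency check: -128 degC from the EC, reported in
     * millidegrees, is the -128000 sentinel used by the sysfs code. */
    enum { EC_TMP_NA = -128, SENSOR_NA_MDEG = -128000 };
    _Static_assert(EC_TMP_NA * 1000 == SENSOR_NA_MDEG, "sentinel scaling");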
+
#define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */
struct ibm_thermal_sensors_struct {
s32 temp[TPACPI_MAX_THERMAL_SENSORS];
@@ -5571,6 +5565,28 @@ static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
return n;
}
+static void thermal_dump_all_sensors(void)
+{
+ int n, i;
+ struct ibm_thermal_sensors_struct t;
+
+ n = thermal_get_sensors(&t);
+ if (n <= 0)
+ return;
+
+ printk(TPACPI_NOTICE
+ "temperatures (Celsius):");
+
+ for (i = 0; i < n; i++) {
+ if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
+ printk(KERN_CONT " %d", (int)(t.temp[i] / 1000));
+ else
+ printk(KERN_CONT " N/A");
+ }
+
+ printk(KERN_CONT "\n");
+}
+
/* sysfs temp##_input -------------------------------------------------- */
static ssize_t thermal_temp_input_show(struct device *dev,
@@ -5586,7 +5602,7 @@ static ssize_t thermal_temp_input_show(struct device *dev,
res = thermal_get_sensor(idx, &value);
if (res)
return res;
- if (value == TP_EC_THERMAL_TMP_NA * 1000)
+ if (value == TPACPI_THERMAL_SENSOR_NA)
return -ENXIO;
return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -5755,7 +5771,7 @@ static void thermal_exit(void)
case TPACPI_THERMAL_ACPI_TMP07:
case TPACPI_THERMAL_ACPI_UPDT:
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
- &thermal_temp_input16_group);
+ &thermal_temp_input8_group);
break;
case TPACPI_THERMAL_NONE:
default:
@@ -5763,9 +5779,8 @@ static void thermal_exit(void)
}
}
-static int thermal_read(char *p)
+static int thermal_read(struct seq_file *m)
{
- int len = 0;
int n, i;
struct ibm_thermal_sensors_struct t;
@@ -5773,16 +5788,16 @@ static int thermal_read(char *p)
if (unlikely(n < 0))
return n;
- len += sprintf(p + len, "temperatures:\t");
+ seq_printf(m, "temperatures:\t");
if (n > 0) {
for (i = 0; i < (n - 1); i++)
- len += sprintf(p + len, "%d ", t.temp[i] / 1000);
- len += sprintf(p + len, "%d\n", t.temp[i] / 1000);
+ seq_printf(m, "%d ", t.temp[i] / 1000);
+ seq_printf(m, "%d\n", t.temp[i] / 1000);
} else
- len += sprintf(p + len, "not supported\n");
+ seq_printf(m, "not supported\n");
- return len;
+ return 0;
}
static struct ibm_struct thermal_driver_data = {
@@ -5797,39 +5812,38 @@ static struct ibm_struct thermal_driver_data = {
static u8 ecdump_regs[256];
-static int ecdump_read(char *p)
+static int ecdump_read(struct seq_file *m)
{
- int len = 0;
int i, j;
u8 v;
- len += sprintf(p + len, "EC "
+ seq_printf(m, "EC "
" +00 +01 +02 +03 +04 +05 +06 +07"
" +08 +09 +0a +0b +0c +0d +0e +0f\n");
for (i = 0; i < 256; i += 16) {
- len += sprintf(p + len, "EC 0x%02x:", i);
+ seq_printf(m, "EC 0x%02x:", i);
for (j = 0; j < 16; j++) {
if (!acpi_ec_read(i + j, &v))
break;
if (v != ecdump_regs[i + j])
- len += sprintf(p + len, " *%02x", v);
+ seq_printf(m, " *%02x", v);
else
- len += sprintf(p + len, " %02x", v);
+ seq_printf(m, " %02x", v);
ecdump_regs[i + j] = v;
}
- len += sprintf(p + len, "\n");
+ seq_putc(m, '\n');
if (j != 16)
break;
}
/* These are way too dangerous to advertise openly... */
#if 0
- len += sprintf(p + len, "commands:\t0x<offset> 0x<value>"
+ seq_printf(m, "commands:\t0x<offset> 0x<value>"
" (<offset> is 00-ff, <value> is 00-ff)\n");
- len += sprintf(p + len, "commands:\t0x<offset> <value> "
+ seq_printf(m, "commands:\t0x<offset> <value> "
" (<offset> is 00-ff, <value> is 0-255)\n");
#endif
- return len;
+ return 0;
}
static int ecdump_write(char *buf)
@@ -6092,6 +6106,12 @@ static int brightness_get(struct backlight_device *bd)
return status & TP_EC_BACKLIGHT_LVLMSK;
}
+static void tpacpi_brightness_notify_change(void)
+{
+ backlight_force_update(ibm_backlight_device,
+ BACKLIGHT_UPDATE_HOTKEY);
+}
+
static struct backlight_ops ibm_backlight_data = {
.get_brightness = brightness_get,
.update_status = brightness_update_status,
@@ -6120,8 +6140,8 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
/* Models with Intel Extreme Graphics 2 */
TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
- TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
- TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
+ TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
/* Models with Intel GMA900 */
TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */
@@ -6246,6 +6266,12 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
backlight_update_status(ibm_backlight_device);
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
+ "brightness: registering brightness hotkeys "
+ "as change notification\n");
+ tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
+ | TP_ACPI_HKEY_BRGHTUP_MASK
+			| TP_ACPI_HKEY_BRGHTDWN_MASK);
return 0;
}
@@ -6270,23 +6296,22 @@ static void brightness_exit(void)
tpacpi_brightness_checkpoint_nvram();
}
-static int brightness_read(char *p)
+static int brightness_read(struct seq_file *m)
{
- int len = 0;
int level;
level = brightness_get(NULL);
if (level < 0) {
- len += sprintf(p + len, "level:\t\tunreadable\n");
+ seq_printf(m, "level:\t\tunreadable\n");
} else {
- len += sprintf(p + len, "level:\t\t%d\n", level);
- len += sprintf(p + len, "commands:\tup, down\n");
- len += sprintf(p + len, "commands:\tlevel <level>"
+ seq_printf(m, "level:\t\t%d\n", level);
+ seq_printf(m, "commands:\tup, down\n");
+ seq_printf(m, "commands:\tlevel <level>"
" (<level> is 0-%d)\n",
(tp_features.bright_16levels) ? 15 : 7);
}
- return len;
+ return 0;
}
static int brightness_write(char *buf)
@@ -6322,6 +6347,9 @@ static int brightness_write(char *buf)
* Doing it this way makes the syscall restartable in case of EINTR
*/
rc = brightness_set(level);
+ if (!rc && ibm_backlight_device)
+ backlight_force_update(ibm_backlight_device,
+ BACKLIGHT_UPDATE_SYSFS);
return (rc == -EINTR)? -ERESTARTSYS : rc;
}
@@ -6338,101 +6366,685 @@ static struct ibm_struct brightness_driver_data = {
* Volume subdriver
*/
-static int volume_offset = 0x30;
+/*
+ * IBM ThinkPads have a simple volume controller with MUTE gating.
+ * Very early Lenovo ThinkPads follow the IBM ThinkPad spec.
+ *
+ * Since the *61 series (and probably also the later *60 series), Lenovo
+ * ThinkPads only implement the MUTE gate.
+ *
+ * EC register 0x30
+ * Bit 6: MUTE (1 mutes sound)
+ * Bit 3-0: Volume
+ * Other bits should be zero as far as we know.
+ *
+ * This is also stored in CMOS NVRAM, byte 0x60, bit 6 (MUTE), and
+ * bits 3-0 (volume). Other bits in NVRAM may have other functions,
+ * such as bit 7 which is used to detect repeated presses of MUTE,
+ * and we leave them unchanged.
+ */
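A minimal userspace sketch of the byte layout described above, assuming bit 6 is the mute gate and bits 3-0 the level; the helper names are made up for illustration and are not part of the driver:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ec_audio_state {
    	bool mute;	/* bit 6 */
    	uint8_t level;	/* bits 3-0; 0..14 on models with level control */
    };

    struct ec_audio_state ec_audio_decode(uint8_t reg)
    {
    	struct ec_audio_state s = {
    		.mute  = (reg >> 6) & 1,
    		.level = reg & 0x0f,
    	};
    	return s;
    }

    uint8_t ec_audio_encode(struct ec_audio_state s)
    {
    	return (uint8_t)((s.mute ? 0x40 : 0x00) | (s.level & 0x0f));
    }

    int main(void)
    {
    	struct ec_audio_state s = ec_audio_decode(0x4e);	/* muted, level 14 */
    	printf("mute=%d level=%d\n", (int)s.mute, (int)s.level);
    	return 0;
    }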
+
+#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
+
+#define TPACPI_ALSA_DRVNAME "ThinkPad EC"
+#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control"
+#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME
+
+static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */
+static char *alsa_id = "ThinkPadEC";
+static int alsa_enable = SNDRV_DEFAULT_ENABLE1;
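The index expression above restricts the card to the last slots; a quick worked example of the mask, assuming the common SNDRV_CARDS default of 8 (the value is larger when dynamic minors are enabled):

    #include <stdio.h>

    int main(void)
    {
    	const unsigned int sndrv_cards = 8;	/* assumed default */
    	unsigned int mask = ~((1u << (sndrv_cards - 3)) - 1);

    	/* prints 0xffffffe0: of the 8 card slots, only bits 5..7 are set,
    	 * i.e. only the last three indices are eligible for this card */
    	printf("index mask: 0x%08x\n", mask);
    	return 0;
    }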
+
+struct tpacpi_alsa_data {
+ struct snd_card *card;
+ struct snd_ctl_elem_id *ctl_mute_id;
+ struct snd_ctl_elem_id *ctl_vol_id;
+};
+
+static struct snd_card *alsa_card;
+
+enum {
+ TP_EC_AUDIO = 0x30,
+
+ /* TP_EC_AUDIO bits */
+ TP_EC_AUDIO_MUTESW = 6,
+
+ /* TP_EC_AUDIO bitmasks */
+ TP_EC_AUDIO_LVL_MSK = 0x0F,
+ TP_EC_AUDIO_MUTESW_MSK = (1 << TP_EC_AUDIO_MUTESW),
-static int volume_read(char *p)
+ /* Maximum volume */
+ TP_EC_VOLUME_MAX = 14,
+};
+
+enum tpacpi_volume_access_mode {
+ TPACPI_VOL_MODE_AUTO = 0, /* Not implemented yet */
+ TPACPI_VOL_MODE_EC, /* Pure EC control */
+ TPACPI_VOL_MODE_UCMS_STEP, /* UCMS step-based control: N/A */
+ TPACPI_VOL_MODE_ECNVRAM, /* EC control w/ NVRAM store */
+ TPACPI_VOL_MODE_MAX
+};
+
+enum tpacpi_volume_capabilities {
+ TPACPI_VOL_CAP_AUTO = 0, /* Use white/blacklist */
+ TPACPI_VOL_CAP_VOLMUTE, /* Output vol and mute */
+ TPACPI_VOL_CAP_MUTEONLY, /* Output mute only */
+ TPACPI_VOL_CAP_MAX
+};
+
+static enum tpacpi_volume_access_mode volume_mode =
+ TPACPI_VOL_MODE_MAX;
+
+static enum tpacpi_volume_capabilities volume_capabilities;
+static int volume_control_allowed;
+
+/*
+ * Used to synchronize writers to TP_EC_AUDIO and
+ * TP_NVRAM_ADDR_MIXER, as we need to do read-modify-write
+ */
+static struct mutex volume_mutex;
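The mutex exists because both the EC byte and the NVRAM byte are updated with read-modify-write sequences; a minimal userspace sketch of that discipline, using pthreads purely for illustration (the driver uses the kernel mutex API):

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint8_t shadow_reg;	/* stands in for the shared EC/NVRAM byte */

    /* Update one field without clobbering concurrent updates to the others. */
    void rmw_field(uint8_t mask, uint8_t value)
    {
    	pthread_mutex_lock(&reg_lock);
    	shadow_reg = (uint8_t)((shadow_reg & ~mask) | (value & mask));
    	pthread_mutex_unlock(&reg_lock);
    }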
+
+static void tpacpi_volume_checkpoint_nvram(void)
{
- int len = 0;
- u8 level;
+ u8 lec = 0;
+ u8 b_nvram;
+ u8 ec_mask;
+
+ if (volume_mode != TPACPI_VOL_MODE_ECNVRAM)
+ return;
+ if (!volume_control_allowed)
+ return;
+
+ vdbg_printk(TPACPI_DBG_MIXER,
+ "trying to checkpoint mixer state to NVRAM...\n");
+
+ if (tp_features.mixer_no_level_control)
+ ec_mask = TP_EC_AUDIO_MUTESW_MSK;
+ else
+ ec_mask = TP_EC_AUDIO_MUTESW_MSK | TP_EC_AUDIO_LVL_MSK;
- if (!acpi_ec_read(volume_offset, &level)) {
- len += sprintf(p + len, "level:\t\tunreadable\n");
+ if (mutex_lock_killable(&volume_mutex) < 0)
+ return;
+
+ if (unlikely(!acpi_ec_read(TP_EC_AUDIO, &lec)))
+ goto unlock;
+ lec &= ec_mask;
+ b_nvram = nvram_read_byte(TP_NVRAM_ADDR_MIXER);
+
+ if (lec != (b_nvram & ec_mask)) {
+ /* NVRAM needs update */
+ b_nvram &= ~ec_mask;
+ b_nvram |= lec;
+ nvram_write_byte(b_nvram, TP_NVRAM_ADDR_MIXER);
+ dbg_printk(TPACPI_DBG_MIXER,
+ "updated NVRAM mixer status to 0x%02x (0x%02x)\n",
+ (unsigned int) lec, (unsigned int) b_nvram);
} else {
- len += sprintf(p + len, "level:\t\t%d\n", level & 0xf);
- len += sprintf(p + len, "mute:\t\t%s\n", onoff(level, 6));
- len += sprintf(p + len, "commands:\tup, down, mute\n");
- len += sprintf(p + len, "commands:\tlevel <level>"
- " (<level> is 0-15)\n");
+ vdbg_printk(TPACPI_DBG_MIXER,
+ "NVRAM mixer status already is 0x%02x (0x%02x)\n",
+ (unsigned int) lec, (unsigned int) b_nvram);
}
- return len;
+unlock:
+ mutex_unlock(&volume_mutex);
}
-static int volume_write(char *buf)
+static int volume_get_status_ec(u8 *status)
{
- int cmos_cmd, inc, i;
- u8 level, mute;
- int new_level, new_mute;
- char *cmd;
+ u8 s;
- while ((cmd = next_cmd(&buf))) {
- if (!acpi_ec_read(volume_offset, &level))
- return -EIO;
- new_mute = mute = level & 0x40;
- new_level = level = level & 0xf;
+ if (!acpi_ec_read(TP_EC_AUDIO, &s))
+ return -EIO;
- if (strlencmp(cmd, "up") == 0) {
- if (mute)
- new_mute = 0;
- else
- new_level = level == 15 ? 15 : level + 1;
- } else if (strlencmp(cmd, "down") == 0) {
- if (mute)
- new_mute = 0;
- else
- new_level = level == 0 ? 0 : level - 1;
- } else if (sscanf(cmd, "level %d", &new_level) == 1 &&
- new_level >= 0 && new_level <= 15) {
- /* new_level set */
- } else if (strlencmp(cmd, "mute") == 0) {
- new_mute = 0x40;
- } else
- return -EINVAL;
+ *status = s;
- if (new_level != level) {
- /* mute doesn't change */
+ dbg_printk(TPACPI_DBG_MIXER, "status 0x%02x\n", s);
- cmos_cmd = (new_level > level) ?
- TP_CMOS_VOLUME_UP : TP_CMOS_VOLUME_DOWN;
- inc = new_level > level ? 1 : -1;
+ return 0;
+}
- if (mute && (issue_thinkpad_cmos_command(cmos_cmd) ||
- !acpi_ec_write(volume_offset, level)))
- return -EIO;
+static int volume_get_status(u8 *status)
+{
+ return volume_get_status_ec(status);
+}
+
+static int volume_set_status_ec(const u8 status)
+{
+ if (!acpi_ec_write(TP_EC_AUDIO, status))
+ return -EIO;
- for (i = level; i != new_level; i += inc)
- if (issue_thinkpad_cmos_command(cmos_cmd) ||
- !acpi_ec_write(volume_offset, i + inc))
- return -EIO;
+ dbg_printk(TPACPI_DBG_MIXER, "set EC mixer to 0x%02x\n", status);
- if (mute &&
- (issue_thinkpad_cmos_command(TP_CMOS_VOLUME_MUTE) ||
- !acpi_ec_write(volume_offset, new_level + mute))) {
- return -EIO;
- }
+ return 0;
+}
+
+static int volume_set_status(const u8 status)
+{
+ return volume_set_status_ec(status);
+}
+
+static int volume_set_mute_ec(const bool mute)
+{
+ int rc;
+ u8 s, n;
+
+ if (mutex_lock_killable(&volume_mutex) < 0)
+ return -EINTR;
+
+ rc = volume_get_status_ec(&s);
+ if (rc)
+ goto unlock;
+
+ n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK :
+ s & ~TP_EC_AUDIO_MUTESW_MSK;
+
+ if (n != s)
+ rc = volume_set_status_ec(n);
+
+unlock:
+ mutex_unlock(&volume_mutex);
+ return rc;
+}
+
+static int volume_set_mute(const bool mute)
+{
+ dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
+ (mute) ? "" : "un");
+ return volume_set_mute_ec(mute);
+}
+
+static int volume_set_volume_ec(const u8 vol)
+{
+ int rc;
+ u8 s, n;
+
+ if (vol > TP_EC_VOLUME_MAX)
+ return -EINVAL;
+
+ if (mutex_lock_killable(&volume_mutex) < 0)
+ return -EINTR;
+
+ rc = volume_get_status_ec(&s);
+ if (rc)
+ goto unlock;
+
+ n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol;
+
+ if (n != s)
+ rc = volume_set_status_ec(n);
+
+unlock:
+ mutex_unlock(&volume_mutex);
+ return rc;
+}
+
+static int volume_set_volume(const u8 vol)
+{
+ dbg_printk(TPACPI_DBG_MIXER,
+ "trying to set volume level to %hu\n", vol);
+ return volume_set_volume_ec(vol);
+}
+
+static void volume_alsa_notify_change(void)
+{
+ struct tpacpi_alsa_data *d;
+
+ if (alsa_card && alsa_card->private_data) {
+ d = alsa_card->private_data;
+ if (d->ctl_mute_id)
+ snd_ctl_notify(alsa_card,
+ SNDRV_CTL_EVENT_MASK_VALUE,
+ d->ctl_mute_id);
+ if (d->ctl_vol_id)
+ snd_ctl_notify(alsa_card,
+ SNDRV_CTL_EVENT_MASK_VALUE,
+ d->ctl_vol_id);
+ }
+}
+
+static int volume_alsa_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = TP_EC_VOLUME_MAX;
+ return 0;
+}
+
+static int volume_alsa_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 s;
+ int rc;
+
+ rc = volume_get_status(&s);
+ if (rc < 0)
+ return rc;
+
+ ucontrol->value.integer.value[0] = s & TP_EC_AUDIO_LVL_MSK;
+ return 0;
+}
+
+static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return volume_set_volume(ucontrol->value.integer.value[0]);
+}
+
+#define volume_alsa_mute_info snd_ctl_boolean_mono_info
+
+static int volume_alsa_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 s;
+ int rc;
+
+ rc = volume_get_status(&s);
+ if (rc < 0)
+ return rc;
+
+ ucontrol->value.integer.value[0] =
+ (s & TP_EC_AUDIO_MUTESW_MSK) ? 0 : 1;
+ return 0;
+}
+
+static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return volume_set_mute(!ucontrol->value.integer.value[0]);
+}
+
+static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Console Playback Volume",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .info = volume_alsa_vol_info,
+ .get = volume_alsa_vol_get,
+};
+
+static struct snd_kcontrol_new volume_alsa_control_mute __devinitdata = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Console Playback Switch",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .info = volume_alsa_mute_info,
+ .get = volume_alsa_mute_get,
+};
+
+static void volume_suspend(pm_message_t state)
+{
+ tpacpi_volume_checkpoint_nvram();
+}
+
+static void volume_resume(void)
+{
+ volume_alsa_notify_change();
+}
+
+static void volume_shutdown(void)
+{
+ tpacpi_volume_checkpoint_nvram();
+}
+
+static void volume_exit(void)
+{
+ if (alsa_card) {
+ snd_card_free(alsa_card);
+ alsa_card = NULL;
+ }
+
+ tpacpi_volume_checkpoint_nvram();
+}
+
+static int __init volume_create_alsa_mixer(void)
+{
+ struct snd_card *card;
+ struct tpacpi_alsa_data *data;
+ struct snd_kcontrol *ctl_vol;
+ struct snd_kcontrol *ctl_mute;
+ int rc;
+
+ rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
+ sizeof(struct tpacpi_alsa_data), &card);
+ if (rc < 0 || !card) {
+ printk(TPACPI_ERR
+ "Failed to create ALSA card structures: %d\n", rc);
+ return 1;
+ }
+
+ BUG_ON(!card->private_data);
+ data = card->private_data;
+ data->card = card;
+
+ strlcpy(card->driver, TPACPI_ALSA_DRVNAME,
+ sizeof(card->driver));
+ strlcpy(card->shortname, TPACPI_ALSA_SHRTNAME,
+ sizeof(card->shortname));
+ snprintf(card->mixername, sizeof(card->mixername), "ThinkPad EC %s",
+ (thinkpad_id.ec_version_str) ?
+ thinkpad_id.ec_version_str : "(unknown)");
+ snprintf(card->longname, sizeof(card->longname),
+ "%s at EC reg 0x%02x, fw %s", card->shortname, TP_EC_AUDIO,
+ (thinkpad_id.ec_version_str) ?
+ thinkpad_id.ec_version_str : "unknown");
+
+ if (volume_control_allowed) {
+ volume_alsa_control_vol.put = volume_alsa_vol_put;
+ volume_alsa_control_vol.access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE;
+
+ volume_alsa_control_mute.put = volume_alsa_mute_put;
+ volume_alsa_control_mute.access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ }
+
+ if (!tp_features.mixer_no_level_control) {
+ ctl_vol = snd_ctl_new1(&volume_alsa_control_vol, NULL);
+ rc = snd_ctl_add(card, ctl_vol);
+ if (rc < 0) {
+ printk(TPACPI_ERR
+ "Failed to create ALSA volume control: %d\n",
+ rc);
+ goto err_exit;
}
+ data->ctl_vol_id = &ctl_vol->id;
+ }
- if (new_mute != mute) {
- /* level doesn't change */
+ ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL);
+ rc = snd_ctl_add(card, ctl_mute);
+ if (rc < 0) {
+ printk(TPACPI_ERR "Failed to create ALSA mute control: %d\n",
+ rc);
+ goto err_exit;
+ }
+ data->ctl_mute_id = &ctl_mute->id;
- cmos_cmd = (new_mute) ?
- TP_CMOS_VOLUME_MUTE : TP_CMOS_VOLUME_UP;
+ snd_card_set_dev(card, &tpacpi_pdev->dev);
+ rc = snd_card_register(card);
+ if (rc < 0) {
+ printk(TPACPI_ERR "Failed to register ALSA card: %d\n", rc);
+ goto err_exit;
+ }
- if (issue_thinkpad_cmos_command(cmos_cmd) ||
- !acpi_ec_write(volume_offset, level + new_mute))
- return -EIO;
+ alsa_card = card;
+ return 0;
+
+err_exit:
+ snd_card_free(card);
+ return 1;
+}
+
+#define TPACPI_VOL_Q_MUTEONLY 0x0001 /* Mute-only control available */
+#define TPACPI_VOL_Q_LEVEL 0x0002 /* Volume control available */
+
+static const struct tpacpi_quirk volume_quirk_table[] __initconst = {
+ /* Whitelist volume level on all IBM by default */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY,
+ .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_VOL_Q_LEVEL },
+
+ /* Lenovo models with volume control (needs confirmation) */
+ TPACPI_QEC_LNV('7', 'C', TPACPI_VOL_Q_LEVEL), /* R60/i */
+ TPACPI_QEC_LNV('7', 'E', TPACPI_VOL_Q_LEVEL), /* R60e/i */
+ TPACPI_QEC_LNV('7', '9', TPACPI_VOL_Q_LEVEL), /* T60/p */
+ TPACPI_QEC_LNV('7', 'B', TPACPI_VOL_Q_LEVEL), /* X60/s */
+ TPACPI_QEC_LNV('7', 'J', TPACPI_VOL_Q_LEVEL), /* X60t */
+ TPACPI_QEC_LNV('7', '7', TPACPI_VOL_Q_LEVEL), /* Z60 */
+ TPACPI_QEC_LNV('7', 'F', TPACPI_VOL_Q_LEVEL), /* Z61 */
+
+ /* Whitelist mute-only on all Lenovo by default */
+ { .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY,
+ .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_VOL_Q_MUTEONLY }
+};
+
+static int __init volume_init(struct ibm_init_struct *iibm)
+{
+ unsigned long quirks;
+ int rc;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing volume subdriver\n");
+
+ mutex_init(&volume_mutex);
+
+ /*
+ * Check for module parameter bogosity, note that we
+ * init volume_mode to TPACPI_VOL_MODE_MAX in order to be
+ * able to detect "unspecified"
+ */
+ if (volume_mode > TPACPI_VOL_MODE_MAX)
+ return -EINVAL;
+
+ if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) {
+ printk(TPACPI_ERR
+ "UCMS step volume mode not implemented, "
+ "please contact %s\n", TPACPI_MAIL);
+ return 1;
+ }
+
+ if (volume_capabilities >= TPACPI_VOL_CAP_MAX)
+ return -EINVAL;
+
+ /*
+ * The ALSA mixer is our primary interface.
+ * When disabled, don't install the subdriver at all
+ */
+ if (!alsa_enable) {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "ALSA mixer disabled by parameter, "
+ "not loading volume subdriver...\n");
+ return 1;
+ }
+
+ quirks = tpacpi_check_quirks(volume_quirk_table,
+ ARRAY_SIZE(volume_quirk_table));
+
+ switch (volume_capabilities) {
+ case TPACPI_VOL_CAP_AUTO:
+ if (quirks & TPACPI_VOL_Q_MUTEONLY)
+ tp_features.mixer_no_level_control = 1;
+ else if (quirks & TPACPI_VOL_Q_LEVEL)
+ tp_features.mixer_no_level_control = 0;
+ else
+ return 1; /* no mixer */
+ break;
+ case TPACPI_VOL_CAP_VOLMUTE:
+ tp_features.mixer_no_level_control = 0;
+ break;
+ case TPACPI_VOL_CAP_MUTEONLY:
+ tp_features.mixer_no_level_control = 1;
+ break;
+ default:
+ return 1;
+ }
+
+ if (volume_capabilities != TPACPI_VOL_CAP_AUTO)
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "using user-supplied volume_capabilities=%d\n",
+ volume_capabilities);
+
+ if (volume_mode == TPACPI_VOL_MODE_AUTO ||
+ volume_mode == TPACPI_VOL_MODE_MAX) {
+ volume_mode = TPACPI_VOL_MODE_ECNVRAM;
+
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "driver auto-selected volume_mode=%d\n",
+ volume_mode);
+ } else {
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "using user-supplied volume_mode=%d\n",
+ volume_mode);
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "mute is supported, volume control is %s\n",
+ str_supported(!tp_features.mixer_no_level_control));
+
+ rc = volume_create_alsa_mixer();
+ if (rc) {
+ printk(TPACPI_ERR
+ "Could not create the ALSA mixer interface\n");
+ return rc;
+ }
+
+ printk(TPACPI_INFO
+ "Console audio control enabled, mode: %s\n",
+ (volume_control_allowed) ?
+ "override (read/write)" :
+ "monitor (read only)");
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "registering volume hotkeys as change notification\n");
+ tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
+ | TP_ACPI_HKEY_VOLUP_MASK
+ | TP_ACPI_HKEY_VOLDWN_MASK
+ | TP_ACPI_HKEY_MUTE_MASK);
+
+ return 0;
+}
+
+static int volume_read(struct seq_file *m)
+{
+ u8 status;
+
+ if (volume_get_status(&status) < 0) {
+ seq_printf(m, "level:\t\tunreadable\n");
+ } else {
+ if (tp_features.mixer_no_level_control)
+ seq_printf(m, "level:\t\tunsupported\n");
+ else
+ seq_printf(m, "level:\t\t%d\n",
+ status & TP_EC_AUDIO_LVL_MSK);
+
+ seq_printf(m, "mute:\t\t%s\n",
+ onoff(status, TP_EC_AUDIO_MUTESW));
+
+ if (volume_control_allowed) {
+ seq_printf(m, "commands:\tunmute, mute\n");
+ if (!tp_features.mixer_no_level_control) {
+ seq_printf(m,
+ "commands:\tup, down\n");
+ seq_printf(m,
+ "commands:\tlevel <level>"
+ " (<level> is 0-%d)\n",
+ TP_EC_VOLUME_MAX);
+ }
}
}
return 0;
}
+static int volume_write(char *buf)
+{
+ u8 s;
+ u8 new_level, new_mute;
+ int l;
+ char *cmd;
+ int rc;
+
+ /*
+ * We do allow volume control at driver startup, so that the
+ * user can set initial state through the volume=... parameter hack.
+ */
+ if (!volume_control_allowed && tpacpi_lifecycle != TPACPI_LIFE_INIT) {
+ if (unlikely(!tp_warned.volume_ctrl_forbidden)) {
+ tp_warned.volume_ctrl_forbidden = 1;
+ printk(TPACPI_NOTICE
+ "Console audio control in monitor mode, "
+ "changes are not allowed.\n");
+ printk(TPACPI_NOTICE
+ "Use the volume_control=1 module parameter "
+ "to enable volume control\n");
+ }
+ return -EPERM;
+ }
+
+ rc = volume_get_status(&s);
+ if (rc < 0)
+ return rc;
+
+ new_level = s & TP_EC_AUDIO_LVL_MSK;
+ new_mute = s & TP_EC_AUDIO_MUTESW_MSK;
+
+ while ((cmd = next_cmd(&buf))) {
+ if (!tp_features.mixer_no_level_control) {
+ if (strlencmp(cmd, "up") == 0) {
+ if (new_mute)
+ new_mute = 0;
+ else if (new_level < TP_EC_VOLUME_MAX)
+ new_level++;
+ continue;
+ } else if (strlencmp(cmd, "down") == 0) {
+ if (new_mute)
+ new_mute = 0;
+ else if (new_level > 0)
+ new_level--;
+ continue;
+ } else if (sscanf(cmd, "level %u", &l) == 1 &&
+ l >= 0 && l <= TP_EC_VOLUME_MAX) {
+ new_level = l;
+ continue;
+ }
+ }
+ if (strlencmp(cmd, "mute") == 0)
+ new_mute = TP_EC_AUDIO_MUTESW_MSK;
+ else if (strlencmp(cmd, "unmute") == 0)
+ new_mute = 0;
+ else
+ return -EINVAL;
+ }
+
+ if (tp_features.mixer_no_level_control) {
+ tpacpi_disclose_usertask("procfs volume", "%smute\n",
+ new_mute ? "" : "un");
+ rc = volume_set_mute(!!new_mute);
+ } else {
+ tpacpi_disclose_usertask("procfs volume",
+ "%smute and set level to %d\n",
+ new_mute ? "" : "un", new_level);
+ rc = volume_set_status(new_mute | new_level);
+ }
+ volume_alsa_notify_change();
+
+ return (rc == -EINTR) ? -ERESTARTSYS : rc;
+}
+
static struct ibm_struct volume_driver_data = {
.name = "volume",
.read = volume_read,
.write = volume_write,
+ .exit = volume_exit,
+ .suspend = volume_suspend,
+ .resume = volume_resume,
+ .shutdown = volume_shutdown,
};
+#else /* !CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
+
+#define alsa_card NULL
+
+static inline void volume_alsa_notify_change(void)
+{
+}
+
+static int __init volume_init(struct ibm_init_struct *iibm)
+{
+ printk(TPACPI_INFO
+ "volume: disabled as there is no ALSA support in this kernel\n");
+
+ return 1;
+}
+
+static struct ibm_struct volume_driver_data = {
+ .name = "volume",
+};
+
+#endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
+
/*************************************************************************
* Fan subdriver
*/
@@ -7507,9 +8119,8 @@ static void fan_resume(void)
}
}
-static int fan_read(char *p)
+static int fan_read(struct seq_file *m)
{
- int len = 0;
int rc;
u8 status;
unsigned int speed = 0;
@@ -7521,7 +8132,7 @@ static int fan_read(char *p)
if (rc < 0)
return rc;
- len += sprintf(p + len, "status:\t\t%s\n"
+ seq_printf(m, "status:\t\t%s\n"
"level:\t\t%d\n",
(status != 0) ? "enabled" : "disabled", status);
break;
@@ -7532,54 +8143,54 @@ static int fan_read(char *p)
if (rc < 0)
return rc;
- len += sprintf(p + len, "status:\t\t%s\n",
+ seq_printf(m, "status:\t\t%s\n",
(status != 0) ? "enabled" : "disabled");
rc = fan_get_speed(&speed);
if (rc < 0)
return rc;
- len += sprintf(p + len, "speed:\t\t%d\n", speed);
+ seq_printf(m, "speed:\t\t%d\n", speed);
if (status & TP_EC_FAN_FULLSPEED)
/* Disengaged mode takes precedence */
- len += sprintf(p + len, "level:\t\tdisengaged\n");
+ seq_printf(m, "level:\t\tdisengaged\n");
else if (status & TP_EC_FAN_AUTO)
- len += sprintf(p + len, "level:\t\tauto\n");
+ seq_printf(m, "level:\t\tauto\n");
else
- len += sprintf(p + len, "level:\t\t%d\n", status);
+ seq_printf(m, "level:\t\t%d\n", status);
break;
case TPACPI_FAN_NONE:
default:
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
}
if (fan_control_commands & TPACPI_FAN_CMD_LEVEL) {
- len += sprintf(p + len, "commands:\tlevel <level>");
+ seq_printf(m, "commands:\tlevel <level>");
switch (fan_control_access_mode) {
case TPACPI_FAN_WR_ACPI_SFAN:
- len += sprintf(p + len, " (<level> is 0-7)\n");
+ seq_printf(m, " (<level> is 0-7)\n");
break;
default:
- len += sprintf(p + len, " (<level> is 0-7, "
+ seq_printf(m, " (<level> is 0-7, "
"auto, disengaged, full-speed)\n");
break;
}
}
if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
- len += sprintf(p + len, "commands:\tenable, disable\n"
+ seq_printf(m, "commands:\tenable, disable\n"
"commands:\twatchdog <timeout> (<timeout> "
"is 0 (off), 1-120 (seconds))\n");
if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
- len += sprintf(p + len, "commands:\tspeed <speed>"
+ seq_printf(m, "commands:\tspeed <speed>"
" (<speed> is 0-65535)\n");
- return len;
+ return 0;
}
static int fan_write_cmd_level(const char *cmd, int *rc)
@@ -7721,10 +8332,23 @@ static struct ibm_struct fan_driver_data = {
*/
static void tpacpi_driver_event(const unsigned int hkey_event)
{
+ if (ibm_backlight_device) {
+ switch (hkey_event) {
+ case TP_HKEY_EV_BRGHT_UP:
+ case TP_HKEY_EV_BRGHT_DOWN:
+ tpacpi_brightness_notify_change();
+ }
+ }
+ if (alsa_card) {
+ switch (hkey_event) {
+ case TP_HKEY_EV_VOL_UP:
+ case TP_HKEY_EV_VOL_DOWN:
+ case TP_HKEY_EV_VOL_MUTE:
+ volume_alsa_notify_change();
+ }
+ }
}
-
-
static void hotkey_driver_event(const unsigned int scancode)
{
tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode);
@@ -7853,19 +8477,19 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
"%s installed\n", ibm->name);
if (ibm->read) {
- entry = create_proc_entry(ibm->name,
- S_IFREG | S_IRUGO | S_IWUSR,
- proc_dir);
+ mode_t mode;
+
+ mode = S_IRUGO;
+ if (ibm->write)
+ mode |= S_IWUSR;
+ entry = proc_create_data(ibm->name, mode, proc_dir,
+ &dispatch_proc_fops, ibm);
if (!entry) {
printk(TPACPI_ERR "unable to create proc entry %s\n",
ibm->name);
ret = -ENODEV;
goto err_out;
}
- entry->data = ibm;
- entry->read_proc = &dispatch_procfs_read;
- if (ibm->write)
- entry->write_proc = &dispatch_procfs_write;
ibm->flags.proc_created = 1;
}
@@ -8077,6 +8701,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
.data = &brightness_driver_data,
},
{
+ .init = volume_init,
.data = &volume_driver_data,
},
{
@@ -8112,36 +8737,61 @@ static int __init set_ibm_param(const char *val, struct kernel_param *kp)
return -EINVAL;
}
-module_param(experimental, int, 0);
+module_param(experimental, int, 0444);
MODULE_PARM_DESC(experimental,
"Enables experimental features when non-zero");
module_param_named(debug, dbg_level, uint, 0);
MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
-module_param(force_load, bool, 0);
+module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load,
"Attempts to load the driver even on a "
"mis-identified ThinkPad when true");
-module_param_named(fan_control, fan_control_allowed, bool, 0);
+module_param_named(fan_control, fan_control_allowed, bool, 0444);
MODULE_PARM_DESC(fan_control,
"Enables setting fan parameters features when true");
-module_param_named(brightness_mode, brightness_mode, uint, 0);
+module_param_named(brightness_mode, brightness_mode, uint, 0444);
MODULE_PARM_DESC(brightness_mode,
"Selects brightness control strategy: "
"0=auto, 1=EC, 2=UCMS, 3=EC+NVRAM");
-module_param(brightness_enable, uint, 0);
+module_param(brightness_enable, uint, 0444);
MODULE_PARM_DESC(brightness_enable,
"Enables backlight control when 1, disables when 0");
-module_param(hotkey_report_mode, uint, 0);
+module_param(hotkey_report_mode, uint, 0444);
MODULE_PARM_DESC(hotkey_report_mode,
"used for backwards compatibility with userspace, "
"see documentation");
+#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
+module_param_named(volume_mode, volume_mode, uint, 0444);
+MODULE_PARM_DESC(volume_mode,
+ "Selects volume control strategy: "
+ "0=auto, 1=EC, 2=N/A, 3=EC+NVRAM");
+
+module_param_named(volume_capabilities, volume_capabilities, uint, 0444);
+MODULE_PARM_DESC(volume_capabilities,
+ "Selects the mixer capabilites: "
+ "0=auto, 1=volume and mute, 2=mute only");
+
+module_param_named(volume_control, volume_control_allowed, bool, 0444);
+MODULE_PARM_DESC(volume_control,
+ "Enables software override for the console audio "
+ "control when true");
+
+/* ALSA module API parameters */
+module_param_named(index, alsa_index, int, 0444);
+MODULE_PARM_DESC(index, "ALSA index for the ACPI EC Mixer");
+module_param_named(id, alsa_id, charp, 0444);
+MODULE_PARM_DESC(id, "ALSA id for the ACPI EC Mixer");
+module_param_named(enable, alsa_enable, bool, 0444);
+MODULE_PARM_DESC(enable, "Enable the ALSA interface for the ACPI EC Mixer");
+#endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
+
#define TPACPI_PARAM(feature) \
module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command " \
@@ -8160,25 +8810,25 @@ TPACPI_PARAM(volume);
TPACPI_PARAM(fan);
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
-module_param(dbg_wlswemul, uint, 0);
+module_param(dbg_wlswemul, uint, 0444);
MODULE_PARM_DESC(dbg_wlswemul, "Enables WLSW emulation");
module_param_named(wlsw_state, tpacpi_wlsw_emulstate, bool, 0);
MODULE_PARM_DESC(wlsw_state,
"Initial state of the emulated WLSW switch");
-module_param(dbg_bluetoothemul, uint, 0);
+module_param(dbg_bluetoothemul, uint, 0444);
MODULE_PARM_DESC(dbg_bluetoothemul, "Enables bluetooth switch emulation");
module_param_named(bluetooth_state, tpacpi_bluetooth_emulstate, bool, 0);
MODULE_PARM_DESC(bluetooth_state,
"Initial state of the emulated bluetooth switch");
-module_param(dbg_wwanemul, uint, 0);
+module_param(dbg_wwanemul, uint, 0444);
MODULE_PARM_DESC(dbg_wwanemul, "Enables WWAN switch emulation");
module_param_named(wwan_state, tpacpi_wwan_emulstate, bool, 0);
MODULE_PARM_DESC(wwan_state,
"Initial state of the emulated WWAN switch");
-module_param(dbg_uwbemul, uint, 0);
+module_param(dbg_uwbemul, uint, 0444);
MODULE_PARM_DESC(dbg_uwbemul, "Enables UWB switch emulation");
module_param_named(uwb_state, tpacpi_uwb_emulstate, bool, 0);
MODULE_PARM_DESC(uwb_state,
@@ -8371,6 +9021,7 @@ static int __init thinkpad_acpi_module_init(void)
PCI_VENDOR_ID_IBM;
tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
+ tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
}
for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
ret = ibm_init(&ibms_init[i]);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 51c0a8b..77bf5d8 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -42,6 +42,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
@@ -357,63 +358,6 @@ static int force_fan;
static int last_key_event;
static int key_event_valid;
-typedef struct _ProcItem {
- const char *name;
- char *(*read_func) (char *);
- unsigned long (*write_func) (const char *, unsigned long);
-} ProcItem;
-
-/* proc file handlers
- */
-
-static int
-dispatch_read(char *page, char **start, off_t off, int count, int *eof,
- ProcItem * item)
-{
- char *p = page;
- int len;
-
- if (off == 0)
- p = item->read_func(p);
-
- /* ISSUE: I don't understand this code */
- len = (p - page);
- if (len <= off + count)
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
- return len;
-}
-
-static int
-dispatch_write(struct file *file, const char __user * buffer,
- unsigned long count, ProcItem * item)
-{
- int result;
- char *tmp_buffer;
-
- /* Arg buffer points to userspace memory, which can't be accessed
- * directly. Since we're making a copy, zero-terminate the
- * destination so that sscanf can be used on it safely.
- */
- tmp_buffer = kmalloc(count + 1, GFP_KERNEL);
- if (!tmp_buffer)
- return -ENOMEM;
-
- if (copy_from_user(tmp_buffer, buffer, count)) {
- result = -EFAULT;
- } else {
- tmp_buffer[count] = 0;
- result = item->write_func(tmp_buffer, count);
- }
- kfree(tmp_buffer);
- return result;
-}
-
static int get_lcd(struct backlight_device *bd)
{
u32 hci_result;
@@ -426,19 +370,24 @@ static int get_lcd(struct backlight_device *bd)
return -EFAULT;
}
-static char *read_lcd(char *p)
+static int lcd_proc_show(struct seq_file *m, void *v)
{
int value = get_lcd(NULL);
if (value >= 0) {
- p += sprintf(p, "brightness: %d\n", value);
- p += sprintf(p, "brightness_levels: %d\n",
+ seq_printf(m, "brightness: %d\n", value);
+ seq_printf(m, "brightness_levels: %d\n",
HCI_LCD_BRIGHTNESS_LEVELS);
} else {
printk(MY_ERR "Error reading LCD brightness\n");
}
- return p;
+ return 0;
+}
+
+static int lcd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lcd_proc_show, NULL);
}
static int set_lcd(int value)
@@ -458,12 +407,20 @@ static int set_lcd_status(struct backlight_device *bd)
return set_lcd(bd->props.brightness);
}
-static unsigned long write_lcd(const char *buffer, unsigned long count)
+static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
+ char cmd[42];
+ size_t len;
int value;
int ret;
- if (sscanf(buffer, " brightness : %i", &value) == 1 &&
+ len = min(count, sizeof(cmd) - 1);
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+ cmd[len] = '\0';
+
+ if (sscanf(cmd, " brightness : %i", &value) == 1 &&
value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
ret = set_lcd(value);
if (ret == 0)
@@ -474,7 +431,16 @@ static unsigned long write_lcd(const char *buffer, unsigned long count)
return ret;
}
-static char *read_video(char *p)
+static const struct file_operations lcd_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = lcd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = lcd_proc_write,
+};
+
+static int video_proc_show(struct seq_file *m, void *v)
{
u32 hci_result;
u32 value;
@@ -484,18 +450,25 @@ static char *read_video(char *p)
int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
- p += sprintf(p, "lcd_out: %d\n", is_lcd);
- p += sprintf(p, "crt_out: %d\n", is_crt);
- p += sprintf(p, "tv_out: %d\n", is_tv);
+ seq_printf(m, "lcd_out: %d\n", is_lcd);
+ seq_printf(m, "crt_out: %d\n", is_crt);
+ seq_printf(m, "tv_out: %d\n", is_tv);
} else {
printk(MY_ERR "Error reading video out status\n");
}
- return p;
+ return 0;
}
-static unsigned long write_video(const char *buffer, unsigned long count)
+static int video_proc_open(struct inode *inode, struct file *file)
{
+ return single_open(file, video_proc_show, NULL);
+}
+
+static ssize_t video_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ char *cmd, *buffer;
int value;
int remain = count;
int lcd_out = -1;
@@ -504,6 +477,17 @@ static unsigned long write_video(const char *buffer, unsigned long count)
u32 hci_result;
u32 video_out;
+ cmd = kmalloc(count + 1, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+ if (copy_from_user(cmd, buf, count)) {
+ kfree(cmd);
+ return -EFAULT;
+ }
+ cmd[count] = '\0';
+
+ buffer = cmd;
+
/* scan expression. Multiple expressions may be delimited with ;
*
* NOTE: to keep scanning simple, invalid fields are ignored
@@ -523,6 +507,8 @@ static unsigned long write_video(const char *buffer, unsigned long count)
while (remain && *(buffer - 1) != ';');
}
+ kfree(cmd);
+
hci_read1(HCI_VIDEO_OUT, &video_out, &hci_result);
if (hci_result == HCI_SUCCESS) {
unsigned int new_video_out = video_out;
@@ -543,28 +529,50 @@ static unsigned long write_video(const char *buffer, unsigned long count)
return count;
}
-static char *read_fan(char *p)
+static const struct file_operations video_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = video_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = video_proc_write,
+};
+
+static int fan_proc_show(struct seq_file *m, void *v)
{
u32 hci_result;
u32 value;
hci_read1(HCI_FAN, &value, &hci_result);
if (hci_result == HCI_SUCCESS) {
- p += sprintf(p, "running: %d\n", (value > 0));
- p += sprintf(p, "force_on: %d\n", force_fan);
+ seq_printf(m, "running: %d\n", (value > 0));
+ seq_printf(m, "force_on: %d\n", force_fan);
} else {
printk(MY_ERR "Error reading fan status\n");
}
- return p;
+ return 0;
+}
+
+static int fan_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fan_proc_show, NULL);
}
-static unsigned long write_fan(const char *buffer, unsigned long count)
+static ssize_t fan_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
+ char cmd[42];
+ size_t len;
int value;
u32 hci_result;
- if (sscanf(buffer, " force_on : %i", &value) == 1 &&
+ len = min(count, sizeof(cmd) - 1);
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+ cmd[len] = '\0';
+
+ if (sscanf(cmd, " force_on : %i", &value) == 1 &&
value >= 0 && value <= 1) {
hci_write1(HCI_FAN, value, &hci_result);
if (hci_result != HCI_SUCCESS)
@@ -578,7 +586,16 @@ static unsigned long write_fan(const char *buffer, unsigned long count)
return count;
}
-static char *read_keys(char *p)
+static const struct file_operations fan_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = fan_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = fan_proc_write,
+};
+
+static int keys_proc_show(struct seq_file *m, void *v)
{
u32 hci_result;
u32 value;
@@ -602,18 +619,30 @@ static char *read_keys(char *p)
}
}
- p += sprintf(p, "hotkey_ready: %d\n", key_event_valid);
- p += sprintf(p, "hotkey: 0x%04x\n", last_key_event);
+ seq_printf(m, "hotkey_ready: %d\n", key_event_valid);
+ seq_printf(m, "hotkey: 0x%04x\n", last_key_event);
+end:
+ return 0;
+}
- end:
- return p;
+static int keys_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, keys_proc_show, NULL);
}
-static unsigned long write_keys(const char *buffer, unsigned long count)
+static ssize_t keys_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
+ char cmd[42];
+ size_t len;
int value;
- if (sscanf(buffer, " hotkey_ready : %i", &value) == 1 && value == 0) {
+ len = min(count, sizeof(cmd) - 1);
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+ cmd[len] = '\0';
+
+ if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0) {
key_event_valid = 0;
} else {
return -EINVAL;
@@ -622,52 +651,58 @@ static unsigned long write_keys(const char *buffer, unsigned long count)
return count;
}
-static char *read_version(char *p)
+static const struct file_operations keys_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = keys_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = keys_proc_write,
+};
+
+static int version_proc_show(struct seq_file *m, void *v)
{
- p += sprintf(p, "driver: %s\n", TOSHIBA_ACPI_VERSION);
- p += sprintf(p, "proc_interface: %d\n",
- PROC_INTERFACE_VERSION);
- return p;
+ seq_printf(m, "driver: %s\n", TOSHIBA_ACPI_VERSION);
+ seq_printf(m, "proc_interface: %d\n", PROC_INTERFACE_VERSION);
+ return 0;
}
+static int version_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, version_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations version_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = version_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/* proc and module init
*/
#define PROC_TOSHIBA "toshiba"
-static ProcItem proc_items[] = {
- {"lcd", read_lcd, write_lcd},
- {"video", read_video, write_video},
- {"fan", read_fan, write_fan},
- {"keys", read_keys, write_keys},
- {"version", read_version, NULL},
- {NULL}
-};
-
static acpi_status __init add_device(void)
{
- struct proc_dir_entry *proc;
- ProcItem *item;
-
- for (item = proc_items; item->name; ++item) {
- proc = create_proc_read_entry(item->name,
- S_IFREG | S_IRUGO | S_IWUSR,
- toshiba_proc_dir,
- (read_proc_t *) dispatch_read,
- item);
- if (proc && item->write_func)
- proc->write_proc = (write_proc_t *) dispatch_write;
- }
+ proc_create("lcd", S_IRUGO | S_IWUSR, toshiba_proc_dir, &lcd_proc_fops);
+ proc_create("video", S_IRUGO | S_IWUSR, toshiba_proc_dir, &video_proc_fops);
+ proc_create("fan", S_IRUGO | S_IWUSR, toshiba_proc_dir, &fan_proc_fops);
+ proc_create("keys", S_IRUGO | S_IWUSR, toshiba_proc_dir, &keys_proc_fops);
+ proc_create("version", S_IRUGO, toshiba_proc_dir, &version_proc_fops);
return AE_OK;
}
static acpi_status remove_device(void)
{
- ProcItem *item;
-
- for (item = proc_items; item->name; ++item)
- remove_proc_entry(item->name, toshiba_proc_dir);
+ remove_proc_entry("lcd", toshiba_proc_dir);
+ remove_proc_entry("video", toshiba_proc_dir);
+ remove_proc_entry("fan", toshiba_proc_dir);
+ remove_proc_entry("keys", toshiba_proc_dir);
+ remove_proc_entry("version", toshiba_proc_dir);
return AE_OK;
}
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
new file mode 100644
index 0000000..a350418
--- /dev/null
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -0,0 +1,144 @@
+/*
+ * Toshiba Bluetooth Enable Driver
+ *
+ * Copyright (C) 2009 Jes Sorensen <Jes.Sorensen@gmail.com>
+ *
+ * Thanks to Matthew Garrett for background info on ACPI innards which
+ * normal people aren't meant to understand :-)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Note the Toshiba Bluetooth RFKill switch seems to be a strange
+ * fish. It only provides a BT event when the switch is flipped to
+ * the 'on' position. When flipping it to 'off', the USB device is
+ * simply pulled away underneath us, without any BT event being
+ * delivered.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
+MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver");
+MODULE_LICENSE("GPL");
+
+
+static int toshiba_bt_rfkill_add(struct acpi_device *device);
+static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type);
+static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
+static int toshiba_bt_resume(struct acpi_device *device);
+
+static const struct acpi_device_id bt_device_ids[] = {
+ { "TOS6205", 0},
+ { "", 0},
+};
+MODULE_DEVICE_TABLE(acpi, bt_device_ids);
+
+static struct acpi_driver toshiba_bt_rfkill_driver = {
+ .name = "Toshiba BT",
+ .class = "Toshiba",
+ .ids = bt_device_ids,
+ .ops = {
+ .add = toshiba_bt_rfkill_add,
+ .remove = toshiba_bt_rfkill_remove,
+ .notify = toshiba_bt_rfkill_notify,
+ .resume = toshiba_bt_resume,
+ },
+ .owner = THIS_MODULE,
+};
+
+
+static int toshiba_bluetooth_enable(acpi_handle handle)
+{
+ acpi_status res1, res2;
+ acpi_integer result;
+
+ /*
+ * Query ACPI to verify RFKill switch is set to 'on'.
+ * If not, we return silently, no need to report it as
+ * an error.
+ */
+ res1 = acpi_evaluate_integer(handle, "BTST", NULL, &result);
+ if (ACPI_FAILURE(res1))
+ return res1;
+ if (!(result & 0x01))
+ return 0;
+
+ printk(KERN_INFO "toshiba_bluetooth: Re-enabling Toshiba Bluetooth\n");
+ res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
+ res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
+ if (!ACPI_FAILURE(res1) || !ACPI_FAILURE(res2))
+ return 0;
+
+ printk(KERN_WARNING "toshiba_bluetooth: Failed to re-enable "
+ "Toshiba Bluetooth\n");
+
+ return -ENODEV;
+}
+
+static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
+{
+ toshiba_bluetooth_enable(device->handle);
+}
+
+static int toshiba_bt_resume(struct acpi_device *device)
+{
+ return toshiba_bluetooth_enable(device->handle);
+}
+
+static int toshiba_bt_rfkill_add(struct acpi_device *device)
+{
+ acpi_status status;
+ acpi_integer bt_present;
+ int result = -ENODEV;
+
+ /*
+ * Some Toshiba laptops may have a fake TOS6205 device in
+ * their ACPI BIOS, so query the _STA method to see if there
+ * is really anything there, before trying to enable it.
+ */
+ status = acpi_evaluate_integer(device->handle, "_STA", NULL,
+ &bt_present);
+
+ if (!ACPI_FAILURE(status) && bt_present) {
+ printk(KERN_INFO "Detected Toshiba ACPI Bluetooth device - "
+ "installing RFKill handler\n");
+ result = toshiba_bluetooth_enable(device->handle);
+ }
+
+ return result;
+}
+
+static int __init toshiba_bt_rfkill_init(void)
+{
+ int result;
+
+ result = acpi_bus_register_driver(&toshiba_bt_rfkill_driver);
+ if (result < 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error registering driver\n"));
+ return result;
+ }
+
+ return 0;
+}
+
+static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type)
+{
+ /* clean up */
+ return 0;
+}
+
+static void __exit toshiba_bt_rfkill_exit(void)
+{
+ acpi_bus_unregister_driver(&toshiba_bt_rfkill_driver);
+}
+
+module_init(toshiba_bt_rfkill_init);
+module_exit(toshiba_bt_rfkill_exit);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 177f8d7..b104302 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
@@ -65,6 +66,7 @@ struct wmi_block {
acpi_handle handle;
wmi_notify_handler handler;
void *handler_data;
+ struct device *dev;
};
static struct wmi_block wmi_blocks;
@@ -195,6 +197,34 @@ static bool wmi_parse_guid(const u8 *src, u8 *dest)
return true;
}
+/*
+ * Convert a raw GUID to the ASCII string representation
+ */
+static int wmi_gtoa(const char *in, char *out)
+{
+ int i;
+
+ for (i = 3; i >= 0; i--)
+ out += sprintf(out, "%02X", in[i] & 0xFF);
+
+ out += sprintf(out, "-");
+ out += sprintf(out, "%02X", in[5] & 0xFF);
+ out += sprintf(out, "%02X", in[4] & 0xFF);
+ out += sprintf(out, "-");
+ out += sprintf(out, "%02X", in[7] & 0xFF);
+ out += sprintf(out, "%02X", in[6] & 0xFF);
+ out += sprintf(out, "-");
+ out += sprintf(out, "%02X", in[8] & 0xFF);
+ out += sprintf(out, "%02X", in[9] & 0xFF);
+ out += sprintf(out, "-");
+
+ for (i = 10; i <= 15; i++)
+ out += sprintf(out, "%02X", in[i] & 0xFF);
+
+	*out = '\0';
+ return 0;
+}
+
static bool find_guid(const char *guid_string, struct wmi_block **out)
{
char tmp[16], guid_input[16];
@@ -462,8 +492,7 @@ wmi_notify_handler handler, void *data)
if (!guid || !handler)
return AE_BAD_PARAMETER;
- find_guid(guid, &block);
- if (!block)
+ if (!find_guid(guid, &block))
return AE_NOT_EXIST;
if (block->handler)
@@ -491,8 +520,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
if (!guid)
return AE_BAD_PARAMETER;
- find_guid(guid, &block);
- if (!block)
+ if (!find_guid(guid, &block))
return AE_NOT_EXIST;
if (!block->handler)
@@ -510,8 +538,8 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
/**
* wmi_get_event_data - Get WMI data associated with an event
*
- * @event - Event to find
- * &out - Buffer to hold event data
+ * @event: Event to find
+ * @out: Buffer to hold event data. out->pointer should be freed with kfree()
*
* Returns extra data associated with an event in WMI.
*/
@@ -555,6 +583,154 @@ bool wmi_has_guid(const char *guid_string)
EXPORT_SYMBOL_GPL(wmi_has_guid);
/*
+ * sysfs interface
+ */
+static ssize_t show_modalias(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char guid_string[37];
+ struct wmi_block *wblock;
+
+ wblock = dev_get_drvdata(dev);
+ if (!wblock)
+ return -ENOMEM;
+
+ wmi_gtoa(wblock->gblock.guid, guid_string);
+
+ return sprintf(buf, "wmi:%s\n", guid_string);
+}
+static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
+
+static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ char guid_string[37];
+
+ struct wmi_block *wblock;
+
+ if (add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+
+ wblock = dev_get_drvdata(dev);
+ if (!wblock)
+ return -ENOMEM;
+
+ wmi_gtoa(wblock->gblock.guid, guid_string);
+
+ strcpy(&env->buf[env->buflen - 1], "wmi:");
+ memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36);
+ env->buflen += 40;
+
+ return 0;
+}
+
+static void wmi_dev_free(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct class wmi_class = {
+ .name = "wmi",
+ .dev_release = wmi_dev_free,
+ .dev_uevent = wmi_dev_uevent,
+};
+
+static int wmi_create_devs(void)
+{
+ int result;
+ char guid_string[37];
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ struct list_head *p;
+ struct device *guid_dev;
+
+ /* Create devices for all the GUIDs */
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+
+ guid_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!guid_dev)
+ return -ENOMEM;
+
+ wblock->dev = guid_dev;
+
+ guid_dev->class = &wmi_class;
+ dev_set_drvdata(guid_dev, wblock);
+
+ gblock = &wblock->gblock;
+
+ wmi_gtoa(gblock->guid, guid_string);
+		dev_set_name(guid_dev, "%s", guid_string);
+
+ result = device_register(guid_dev);
+ if (result)
+ return result;
+
+ result = device_create_file(guid_dev, &dev_attr_modalias);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
+
+static void wmi_remove_devs(void)
+{
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ struct list_head *p;
+ struct device *guid_dev;
+
+ /* Delete devices for all the GUIDs */
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+
+ guid_dev = wblock->dev;
+ gblock = &wblock->gblock;
+
+ device_remove_file(guid_dev, &dev_attr_modalias);
+
+ device_unregister(guid_dev);
+ }
+}
+
+static void wmi_class_exit(void)
+{
+ wmi_remove_devs();
+ class_unregister(&wmi_class);
+}
+
+static int wmi_class_init(void)
+{
+ int ret;
+
+ ret = class_register(&wmi_class);
+ if (ret)
+ return ret;
+
+ ret = wmi_create_devs();
+ if (ret)
+ wmi_class_exit();
+
+ return ret;
+}
+
+static bool guid_already_parsed(const char *guid_string)
+{
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ struct list_head *p;
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ gblock = &wblock->gblock;
+
+ if (strncmp(gblock->guid, guid_string, 16) == 0)
+ return true;
+ }
+ return false;
+}
+
+/*
* Parse the _WDG method for the GUID data blocks
*/
static __init acpi_status parse_wdg(acpi_handle handle)
@@ -563,6 +739,7 @@ static __init acpi_status parse_wdg(acpi_handle handle)
union acpi_object *obj;
struct guid_block *gblock;
struct wmi_block *wblock;
+ char guid_string[37];
acpi_status status;
u32 i, total;
@@ -585,6 +762,19 @@ static __init acpi_status parse_wdg(acpi_handle handle)
memcpy(gblock, obj->buffer.pointer, obj->buffer.length);
for (i = 0; i < total; i++) {
+		/*
+		 * Some WMI devices, like those for nVidia hooks, have a
+		 * duplicate GUID. It's not clear what we should do in this
+		 * case yet, so for now we just skip the duplicate; anyone
+		 * who wants to support such a device will need a better
+		 * workaround.
+		 */
+ if (guid_already_parsed(gblock[i].guid) == true) {
+ wmi_gtoa(gblock[i].guid, guid_string);
+ printk(KERN_INFO PREFIX "Skipping duplicate GUID %s\n",
+ guid_string);
+ continue;
+ }
wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
if (!wblock)
return AE_NO_MEMORY;
@@ -709,10 +899,17 @@ static int __init acpi_wmi_init(void)
if (result < 0) {
printk(KERN_INFO PREFIX "Error loading mapper\n");
- } else {
- printk(KERN_INFO PREFIX "Mapper loaded\n");
+ return -ENODEV;
+ }
+
+ result = wmi_class_init();
+ if (result) {
+ acpi_bus_unregister_driver(&acpi_wmi_driver);
+ return result;
}
+ printk(KERN_INFO PREFIX "Mapper loaded\n");
+
return result;
}
@@ -721,6 +918,8 @@ static void __exit acpi_wmi_exit(void)
struct list_head *p, *tmp;
struct wmi_block *wblock;
+ wmi_class_exit();
+
acpi_bus_unregister_driver(&acpi_wmi_driver);
list_for_each_safe(p, tmp, &wmi_blocks.list) {
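
wmi_gtoa() above prints the first three GUID fields byte-reversed (the usual mixed-endian GUID layout) and the last two fields in storage order, producing the 36-character text form used for both the sysfs device name and the modalias. A small illustration with made-up byte values, not taken from any real _WDG table:

	/* Illustrative only: a fabricated raw GUID as it sits in the _WDG buffer. */
	static const char raw_guid[16] = {
		0x12, 0x34, 0x56, 0x78,			/* data1 - printed byte-reversed */
		0x1a, 0x2b,				/* data2 - printed byte-reversed */
		0x3c, 0x4d,				/* data3 - printed byte-reversed */
		0x11, 0x22,				/* data4 - printed in storage order */
		0x33, 0x44, 0x55, 0x66, 0x77, 0x08	/* data5 - storage order */
	};
	char guid_string[37];	/* 36 characters plus the terminating NUL */

	wmi_gtoa(raw_guid, guid_string);
	/*
	 * guid_string now reads "78563412-2B1A-4D3C-1122-334455667708"; the
	 * same text is what show_modalias() and wmi_dev_uevent() export as
	 * "wmi:<guid>" so client drivers can be matched and autoloaded on it.
	 */
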
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 83b8b5a..5314bf6 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -80,7 +80,8 @@ static int pnpacpi_get_resources(struct pnp_dev *dev)
static int pnpacpi_set_resources(struct pnp_dev *dev)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
struct acpi_buffer buffer;
int ret;
@@ -103,7 +104,8 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
static int pnpacpi_disable_resources(struct pnp_dev *dev)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
int ret;
dev_dbg(&dev->dev, "disable resources\n");
@@ -121,6 +123,8 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
#ifdef CONFIG_ACPI_SLEEP
static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
{
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
int power_state;
power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
@@ -128,16 +132,19 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
power_state = (state.event == PM_EVENT_ON) ?
ACPI_STATE_D0 : ACPI_STATE_D3;
- return acpi_bus_set_power((acpi_handle) dev->data, power_state);
+ return acpi_bus_set_power(handle, power_state);
}
static int pnpacpi_resume(struct pnp_dev *dev)
{
- return acpi_bus_set_power((acpi_handle) dev->data, ACPI_STATE_D0);
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
+
+ return acpi_bus_set_power(handle, ACPI_STATE_D0);
}
#endif
-static struct pnp_protocol pnpacpi_protocol = {
+struct pnp_protocol pnpacpi_protocol = {
.name = "Plug and Play ACPI",
.get = pnpacpi_get_resources,
.set = pnpacpi_set_resources,
@@ -147,6 +154,7 @@ static struct pnp_protocol pnpacpi_protocol = {
.resume = pnpacpi_resume,
#endif
};
+EXPORT_SYMBOL(pnpacpi_protocol);
static int __init pnpacpi_add_device(struct acpi_device *device)
{
@@ -168,7 +176,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
if (!dev)
return -ENOMEM;
- dev->data = device->handle;
+ dev->data = device;
/* .enabled means the device can decode the resources */
dev->active = device->status.enabled;
status = acpi_get_handle(device->handle, "_SRS", &temp);
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index ef3a2cd3a..5702b2c 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -465,7 +465,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
int pnpacpi_parse_allocated_resource(struct pnp_dev *dev)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
acpi_status status;
pnp_dbg(&dev->dev, "parse allocated resources\n");
@@ -773,7 +774,8 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
acpi_status status;
struct acpipnp_parse_option_s parse_data;
@@ -845,7 +847,8 @@ static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data)
int pnpacpi_build_resource_template(struct pnp_dev *dev,
struct acpi_buffer *buffer)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
struct acpi_resource *resource;
int res_cnt = 0;
acpi_status status;
diff --git a/drivers/power/pmu_battery.c b/drivers/power/pmu_battery.c
index 9346a86..9c87ad5 100644
--- a/drivers/power/pmu_battery.c
+++ b/drivers/power/pmu_battery.c
@@ -89,6 +89,8 @@ static int pmu_bat_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_STATUS:
if (pbi->flags & PMU_BATT_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (pmu_power_flags & PMU_PWR_AC_PRESENT)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
else
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index fa39e75..6ea3cb5 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
dev_err(&dev->dev, "Do not pass platform_data through "
"wm97xx_bat_set_pdata!\n");
return -EINVAL;
- } else
- pdata = wmdata->batt_pdata;
+ }
+
+ if (!wmdata) {
+ dev_err(&dev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ pdata = wmdata->batt_pdata;
if (dev->id != -1)
return -EINVAL;
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
new file mode 100644
index 0000000..0471955
--- /dev/null
+++ b/drivers/regulator/88pm8607.c
@@ -0,0 +1,685 @@
+/*
+ * Regulators driver for Marvell 88PM8607
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/88pm8607.h>
+
+struct pm8607_regulator_info {
+ struct regulator_desc desc;
+ struct pm8607_chip *chip;
+ struct regulator_dev *regulator;
+
+ int min_uV;
+ int max_uV;
+ int step_uV;
+ int vol_reg;
+ int vol_shift;
+ int vol_nbits;
+ int update_reg;
+ int update_bit;
+ int enable_reg;
+ int enable_bit;
+ int slope_double;
+};
+
+static inline int check_range(struct pm8607_regulator_info *info,
+ int min_uV, int max_uV)
+{
+ if (max_uV < info->min_uV || min_uV > info->max_uV || min_uV > max_uV)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ uint8_t chip_id = info->chip->chip_id;
+ int ret = -EINVAL;
+
+ switch (info->desc.id) {
+ case PM8607_ID_BUCK1:
+ ret = (index < 0x1d) ? (index * 25000 + 800000) :
+ ((index < 0x20) ? 1500000 :
+ ((index < 0x40) ? ((index - 0x20) * 25000) :
+ -EINVAL));
+ break;
+ case PM8607_ID_BUCK3:
+ ret = (index < 0x3d) ? (index * 25000) :
+ ((index < 0x40) ? 1500000 : -EINVAL);
+ if (ret < 0)
+ break;
+ if (info->slope_double)
+ ret <<= 1;
+ break;
+ case PM8607_ID_LDO1:
+ ret = (index == 0) ? 1800000 :
+ ((index == 1) ? 1200000 :
+ ((index == 2) ? 2800000 : -EINVAL));
+ break;
+ case PM8607_ID_LDO5:
+ ret = (index == 0) ? 2900000 :
+ ((index == 1) ? 3000000 :
+ ((index == 2) ? 3100000 : 3300000));
+ break;
+ case PM8607_ID_LDO7:
+ case PM8607_ID_LDO8:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ -EINVAL);
+ break;
+ case PM8607_ID_LDO12:
+ ret = (index < 2) ? (index * 100000 + 1800000) :
+ ((index < 7) ? (index * 100000 + 2500000) :
+ ((index == 7) ? 3300000 : 1200000));
+ break;
+ case PM8607_ID_LDO2:
+ case PM8607_ID_LDO3:
+ case PM8607_ID_LDO9:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ -EINVAL);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2550000) :
+ 3300000);
+ break;
+ }
+ break;
+ case PM8607_ID_LDO4:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ -EINVAL);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 6) ? (index * 50000 + 2550000) :
+ ((index == 6) ? 2900000 : 3300000));
+ break;
+ }
+ break;
+ case PM8607_ID_LDO6:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2450000) :
+ -EINVAL);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 2) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2500000) :
+ 3300000);
+ break;
+ }
+ break;
+ case PM8607_ID_LDO10:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ 1200000);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2550000) :
+ ((index == 7) ? 3300000 : 1200000));
+ break;
+ }
+ break;
+ case PM8607_ID_LDO14:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ -EINVAL);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 2) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2600000) :
+ 3300000);
+ break;
+ }
+ break;
+ }
+ return ret;
+}
+
+static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ uint8_t chip_id = info->chip->chip_id;
+ int val = -ENOENT;
+ int ret;
+
+ switch (info->desc.id) {
+ case PM8607_ID_BUCK1:
+ if (min_uV >= 800000) /* 800mV ~ 1500mV / 25mV */
+ val = (min_uV - 775001) / 25000;
+ else { /* 25mV ~ 775mV / 25mV */
+ val = (min_uV + 249999) / 25000;
+ val += 32;
+ }
+ break;
+ case PM8607_ID_BUCK3:
+ if (info->slope_double)
+ min_uV = min_uV >> 1;
+ val = (min_uV + 249999) / 25000; /* 0mV ~ 1500mV / 25mV */
+
+ break;
+ case PM8607_ID_LDO1:
+ if (min_uV > 1800000)
+ val = 2;
+ else if (min_uV > 1200000)
+ val = 0;
+ else
+ val = 1;
+ break;
+ case PM8607_ID_LDO5:
+ if (min_uV > 3100000)
+ val = 3;
+ else /* 2900mV ~ 3100mV / 100mV */
+ val = (min_uV - 2800001) / 100000;
+ break;
+ case PM8607_ID_LDO7:
+ case PM8607_ID_LDO8:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0; /* 1800mv */
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_ID_LDO10:
+ if (min_uV > 2850000)
+ val = 7;
+ else if (min_uV <= 1200000)
+ val = 8;
+ else if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
+ val = (min_uV - 1750001) / 50000;
+ else { /* 2700mV ~ 2850mV / 50mV */
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ }
+ break;
+ case PM8607_ID_LDO12:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 100mV */
+ if (min_uV <= 1200000)
+ val = 8; /* 1200mV */
+ else if (min_uV <= 1800000)
+ val = 0; /* 1800mV */
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1700001) / 100000;
+ else
+ val = 2; /* 2700mV */
+ } else { /* 2700mV ~ 3100mV / 100mV */
+ if (min_uV <= 3100000) {
+ val = (min_uV - 2600001) / 100000;
+ val += 2;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_ID_LDO2:
+ case PM8607_ID_LDO3:
+ case PM8607_ID_LDO9:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_CHIP_B0:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2850mV / 50mV */
+ if (min_uV <= 2850000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ }
+ break;
+ case PM8607_ID_LDO4:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_CHIP_B0:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2800mV / 50mV */
+ if (min_uV <= 2850000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else if (min_uV <= 2900000)
+ val = 6;
+ else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ }
+ break;
+ case PM8607_ID_LDO6:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ if (min_uV < 2600000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2600mV */
+ } else { /* 2600mV ~ 2800mV / 50mV */
+ if (min_uV <= 2800000) {
+ val = (min_uV - 2550001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_CHIP_B0:
+ if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1850000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 2; /* 2600mV */
+ } else { /* 2600mV ~ 2800mV / 50mV */
+ if (min_uV <= 2800000) {
+ val = (min_uV - 2550001) / 50000;
+ val += 2;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ }
+ break;
+ case PM8607_ID_LDO14:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_CHIP_B0:
+ if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1850000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 2; /* 2700mV */
+ } else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 2;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ }
+ break;
+ }
+ if (val >= 0) {
+ ret = pm8607_list_voltage(rdev, val);
+ if (ret > max_uV) {
+			pr_err("exceeds voltage range (%d, %d) uV\n",
+					min_uV, max_uV);
+ return -EINVAL;
+ }
+ } else
+		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
+ return val;
+}
+
+static int pm8607_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+ uint8_t val, mask;
+ int ret;
+
+ if (check_range(info, min_uV, max_uV)) {
+ pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ ret = choose_voltage(rdev, min_uV, max_uV);
+ if (ret < 0)
+ return -EINVAL;
+ val = (uint8_t)(ret << info->vol_shift);
+ mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
+
+ ret = pm8607_set_bits(chip, info->vol_reg, mask, val);
+ if (ret)
+ return ret;
+ switch (info->desc.id) {
+ case PM8607_ID_BUCK1:
+ case PM8607_ID_BUCK3:
+ ret = pm8607_set_bits(chip, info->update_reg,
+ 1 << info->update_bit,
+ 1 << info->update_bit);
+ break;
+ }
+ return ret;
+}
+
+static int pm8607_get_voltage(struct regulator_dev *rdev)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+ uint8_t val, mask;
+ int ret;
+
+ ret = pm8607_reg_read(chip, info->vol_reg);
+ if (ret < 0)
+ return ret;
+
+ mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
+ val = ((unsigned char)ret & mask) >> info->vol_shift;
+
+ return pm8607_list_voltage(rdev, val);
+}
+
+static int pm8607_enable(struct regulator_dev *rdev)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+
+ return pm8607_set_bits(chip, info->enable_reg,
+ 1 << info->enable_bit,
+ 1 << info->enable_bit);
+}
+
+static int pm8607_disable(struct regulator_dev *rdev)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+
+ return pm8607_set_bits(chip, info->enable_reg,
+ 1 << info->enable_bit, 0);
+}
+
+static int pm8607_is_enabled(struct regulator_dev *rdev)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+ int ret;
+
+ ret = pm8607_reg_read(chip, info->enable_reg);
+ if (ret < 0)
+ return ret;
+
+ return !!((unsigned char)ret & (1 << info->enable_bit));
+}
+
+static struct regulator_ops pm8607_regulator_ops = {
+ .set_voltage = pm8607_set_voltage,
+ .get_voltage = pm8607_get_voltage,
+ .enable = pm8607_enable,
+ .disable = pm8607_disable,
+ .is_enabled = pm8607_is_enabled,
+};
+
+#define PM8607_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
+{ \
+ .desc = { \
+ .name = "BUCK" #_id, \
+ .ops = &pm8607_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM8607_ID_BUCK##_id, \
+ .owner = THIS_MODULE, \
+ }, \
+ .min_uV = (min) * 1000, \
+ .max_uV = (max) * 1000, \
+ .step_uV = (step) * 1000, \
+ .vol_reg = PM8607_##vreg, \
+ .vol_shift = (0), \
+ .vol_nbits = (nbits), \
+ .update_reg = PM8607_##ureg, \
+ .update_bit = (ubit), \
+ .enable_reg = PM8607_##ereg, \
+ .enable_bit = (ebit), \
+ .slope_double = (0), \
+}
+
+#define PM8607_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \
+{ \
+ .desc = { \
+ .name = "LDO" #_id, \
+ .ops = &pm8607_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM8607_ID_LDO##_id, \
+ .owner = THIS_MODULE, \
+ }, \
+ .min_uV = (min) * 1000, \
+ .max_uV = (max) * 1000, \
+ .step_uV = (step) * 1000, \
+ .vol_reg = PM8607_##vreg, \
+ .vol_shift = (shift), \
+ .vol_nbits = (nbits), \
+ .enable_reg = PM8607_##ereg, \
+ .enable_bit = (ebit), \
+ .slope_double = (0), \
+}
+
+static struct pm8607_regulator_info pm8607_regulator_info[] = {
+ PM8607_DVC(1, 0, 1500, 25, BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
+ PM8607_DVC(3, 0, 1500, 25, BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
+
+ PM8607_LDO(1 , 1200, 2800, 0, LDO1 , 0, 2, SUPPLIES_EN11, 3),
+ PM8607_LDO(2 , 1800, 3300, 0, LDO2 , 0, 3, SUPPLIES_EN11, 4),
+ PM8607_LDO(3 , 1800, 3300, 0, LDO3 , 0, 3, SUPPLIES_EN11, 5),
+ PM8607_LDO(4 , 1800, 3300, 0, LDO4 , 0, 3, SUPPLIES_EN11, 6),
+ PM8607_LDO(5 , 2900, 3300, 0, LDO5 , 0, 2, SUPPLIES_EN11, 7),
+ PM8607_LDO(6 , 1800, 3300, 0, LDO6 , 0, 3, SUPPLIES_EN12, 0),
+ PM8607_LDO(7 , 1800, 2900, 0, LDO7 , 0, 3, SUPPLIES_EN12, 1),
+ PM8607_LDO(8 , 1800, 2900, 0, LDO8 , 0, 3, SUPPLIES_EN12, 2),
+ PM8607_LDO(9 , 1800, 3300, 0, LDO9 , 0, 3, SUPPLIES_EN12, 3),
+ PM8607_LDO(10, 1200, 3300, 0, LDO10, 0, 4, SUPPLIES_EN11, 4),
+ PM8607_LDO(12, 1200, 3300, 0, LDO12, 0, 4, SUPPLIES_EN11, 5),
+ PM8607_LDO(14, 1800, 3300, 0, LDO14, 0, 3, SUPPLIES_EN11, 6),
+};
+
+static inline struct pm8607_regulator_info *find_regulator_info(int id)
+{
+ struct pm8607_regulator_info *info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) {
+ info = &pm8607_regulator_info[i];
+ if (info->desc.id == id)
+ return info;
+ }
+ return NULL;
+}
+
+static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
+{
+ struct pm8607_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm8607_platform_data *pdata = chip->dev->platform_data;
+ struct pm8607_regulator_info *info = NULL;
+
+ info = find_regulator_info(pdev->id);
+ if (info == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ return -EINVAL;
+ }
+
+ info->chip = chip;
+
+ info->regulator = regulator_register(&info->desc, &pdev->dev,
+ pdata->regulator[pdev->id], info);
+ if (IS_ERR(info->regulator)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ return PTR_ERR(info->regulator);
+ }
+
+ /* check DVC ramp slope double */
+ if (info->desc.id == PM8607_ID_BUCK3)
+ if (info->chip->buck3_double)
+ info->slope_double = 1;
+
+ platform_set_drvdata(pdev, info);
+ return 0;
+}
+
+static int __devexit pm8607_regulator_remove(struct platform_device *pdev)
+{
+ struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
+
+ regulator_unregister(info->regulator);
+ return 0;
+}
+
+#define PM8607_REGULATOR_DRIVER(_name) \
+{ \
+ .driver = { \
+ .name = "88pm8607-" #_name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .probe = pm8607_regulator_probe, \
+ .remove = __devexit_p(pm8607_regulator_remove), \
+}
+
+static struct platform_driver pm8607_regulator_driver[] = {
+ PM8607_REGULATOR_DRIVER(buck1),
+ PM8607_REGULATOR_DRIVER(buck2),
+ PM8607_REGULATOR_DRIVER(buck3),
+ PM8607_REGULATOR_DRIVER(ldo1),
+ PM8607_REGULATOR_DRIVER(ldo2),
+ PM8607_REGULATOR_DRIVER(ldo3),
+ PM8607_REGULATOR_DRIVER(ldo4),
+ PM8607_REGULATOR_DRIVER(ldo5),
+ PM8607_REGULATOR_DRIVER(ldo6),
+ PM8607_REGULATOR_DRIVER(ldo7),
+ PM8607_REGULATOR_DRIVER(ldo8),
+ PM8607_REGULATOR_DRIVER(ldo9),
+ PM8607_REGULATOR_DRIVER(ldo10),
+ PM8607_REGULATOR_DRIVER(ldo12),
+ PM8607_REGULATOR_DRIVER(ldo14),
+};
+
+static int __init pm8607_regulator_init(void)
+{
+ int i, count, ret;
+
+ count = ARRAY_SIZE(pm8607_regulator_driver);
+ for (i = 0; i < count; i++) {
+ ret = platform_driver_register(&pm8607_regulator_driver[i]);
+ if (ret != 0)
+ pr_err("Failed to register regulator driver: %d\n",
+ ret);
+ }
+ return 0;
+}
+subsys_initcall(pm8607_regulator_init);
+
+static void __exit pm8607_regulator_exit(void)
+{
+ int i, count;
+
+ count = ARRAY_SIZE(pm8607_regulator_driver);
+ for (i = 0; i < count; i++)
+ platform_driver_unregister(&pm8607_regulator_driver[i]);
+}
+module_exit(pm8607_regulator_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM8607 PMIC");
+MODULE_ALIAS("platform:88pm8607-regulator");
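
choose_voltage() above picks the smallest selector whose voltage is still at or above min_uV, and pm8607_list_voltage() maps that selector back to microvolts so the result can be checked against max_uV. A worked BUCK1 example with made-up request values (not taken from any board file):

	/* Illustrative request on BUCK1: min_uV = 1210000, max_uV = 1300000 */
	int val = (1210000 - 775001) / 25000;	/* = 17; the -775001 bias makes the
						 * integer division round up from the
						 * 800 mV base of the selector table */
	int uV  = val * 25000 + 800000;		/* = 1225000, i.e. pm8607_list_voltage(rdev, 17) */
	/*
	 * 1210000 <= 1225000 <= 1300000, so the selector is accepted.  An exact
	 * request of 800000 uV gives (800000 - 775001) / 25000 = 0, which lists
	 * back as exactly 800000 uV.
	 */
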
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 7cfdd65..262f62e 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -69,6 +69,13 @@ config REGULATOR_MAX1586
regulator via I2C bus. The provided regulator is suitable
for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX8660
+ tristate "Maxim 8660/8661 voltage regulator"
+ depends on I2C
+ help
+ This driver controls a Maxim 8660/8661 voltage output
+ regulator via I2C bus.
+
config REGULATOR_TWL4030
bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC"
depends on TWL4030_CORE
@@ -157,5 +164,11 @@ config REGULATOR_TPS6507X
three step-down converters and two general-purpose LDO voltage regulators.
It supports TI's software based Class-2 SmartReflex implementation.
+config REGULATOR_88PM8607
+ bool "Marvell 88PM8607 Power regulators"
+ depends on MFD_88PM8607=y
+ help
+ This driver supports 88PM8607 voltage regulator chips.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 9ae3cc4..b3c806c 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
+obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
@@ -20,10 +21,11 @@ obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
-obj-$(CONFIG_REGULATOR_MC13783) += mc13783.o
+obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
+obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 49aeee8..b349db4 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -81,7 +81,7 @@ static const u8 ab3100_reg_init_order[AB3100_NUM_REGULATORS+2] = {
#define LDO_C_VOLTAGE 2650000
#define LDO_D_VOLTAGE 2650000
-static const int const ldo_e_buck_typ_voltages[] = {
+static const int ldo_e_buck_typ_voltages[] = {
1800000,
1400000,
1300000,
@@ -91,7 +91,7 @@ static const int const ldo_e_buck_typ_voltages[] = {
900000,
};
-static const int const ldo_f_typ_voltages[] = {
+static const int ldo_f_typ_voltages[] = {
1800000,
1400000,
1300000,
@@ -102,21 +102,21 @@ static const int const ldo_f_typ_voltages[] = {
2650000,
};
-static const int const ldo_g_typ_voltages[] = {
+static const int ldo_g_typ_voltages[] = {
2850000,
2750000,
1800000,
1500000,
};
-static const int const ldo_h_typ_voltages[] = {
+static const int ldo_h_typ_voltages[] = {
2750000,
1800000,
1500000,
1200000,
};
-static const int const ldo_k_typ_voltages[] = {
+static const int ldo_k_typ_voltages[] = {
2750000,
1800000,
};
@@ -241,24 +241,12 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
* LDO D is a special regulator. When it is disabled, the entire
* system is shut down. So this is handled specially.
*/
+ pr_info("Called ab3100_disable_regulator\n");
if (abreg->regreg == AB3100_LDO_D) {
- int i;
-
dev_info(&reg->dev, "disabling LDO D - shut down system\n");
- /*
- * Set regulators to default values, ignore any errors,
- * we're going DOWN
- */
- for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
- (void) ab3100_set_register_interruptible(abreg->ab3100,
- ab3100_reg_init_order[i],
- abreg->plfdata->reg_initvals[i]);
- }
-
/* Setting LDO D to 0x00 cuts the power to the SoC */
return ab3100_set_register_interruptible(abreg->ab3100,
AB3100_LDO_D, 0x00U);
-
}
/*
@@ -607,13 +595,6 @@ static int __init ab3100_regulators_probe(struct platform_device *pdev)
}
}
- if (err) {
- dev_err(&pdev->dev,
- "LDO D regulator initialization failed with error %d\n",
- err);
- return err;
- }
-
/* Register the regulators */
for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
struct ab3100_regulator *reg = &ab3100_regulators[i];
@@ -688,7 +669,7 @@ static __init int ab3100_regulators_init(void)
static __exit void ab3100_regulators_exit(void)
{
- platform_driver_register(&ab3100_regulators_driver);
+ platform_driver_unregister(&ab3100_regulators_driver);
}
subsys_initcall(ab3100_regulators_init);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index efe568d..b60a4c9 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -66,6 +66,16 @@ static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
+static const char *rdev_get_name(struct regulator_dev *rdev)
+{
+ if (rdev->constraints && rdev->constraints->name)
+ return rdev->constraints->name;
+ else if (rdev->desc->name)
+ return rdev->desc->name;
+ else
+ return "";
+}
+
/* gets the regulator for a given consumer device */
static struct regulator *get_device_regulator(struct device *dev)
{
@@ -96,12 +106,12 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
if (!rdev->constraints) {
printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev->desc->name);
+ rdev_get_name(rdev));
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return -EPERM;
}
@@ -124,12 +134,12 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
if (!rdev->constraints) {
printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev->desc->name);
+ rdev_get_name(rdev));
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return -EPERM;
}
@@ -159,17 +169,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
if (!rdev->constraints) {
printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev->desc->name);
+ rdev_get_name(rdev));
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return -EPERM;
}
if (!(rdev->constraints->valid_modes_mask & mode)) {
printk(KERN_ERR "%s: invalid mode %x for %s\n",
- __func__, mode, rdev->desc->name);
+ __func__, mode, rdev_get_name(rdev));
return -EINVAL;
}
return 0;
@@ -180,12 +190,12 @@ static int regulator_check_drms(struct regulator_dev *rdev)
{
if (!rdev->constraints) {
printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev->desc->name);
+ rdev_get_name(rdev));
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return -EPERM;
}
return 0;
@@ -230,16 +240,8 @@ static ssize_t regulator_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
- const char *name;
- if (rdev->constraints && rdev->constraints->name)
- name = rdev->constraints->name;
- else if (rdev->desc->name)
- name = rdev->desc->name;
- else
- name = "";
-
- return sprintf(buf, "%s\n", name);
+ return sprintf(buf, "%s\n", rdev_get_name(rdev));
}
static ssize_t regulator_print_opmode(char *buf, int mode)
@@ -388,7 +390,7 @@ static ssize_t regulator_total_uA_show(struct device *dev,
mutex_lock(&rdev->mutex);
list_for_each_entry(regulator, &rdev->consumer_list, list)
- uA += regulator->uA_load;
+ uA += regulator->uA_load;
mutex_unlock(&rdev->mutex);
return sprintf(buf, "%d\n", uA);
}
@@ -563,7 +565,7 @@ static void drms_uA_update(struct regulator_dev *rdev)
/* calc total requested load */
list_for_each_entry(sibling, &rdev->consumer_list, list)
- current_uA += sibling->uA_load;
+ current_uA += sibling->uA_load;
/* now get the optimum mode for our new total regulator load */
mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
@@ -579,10 +581,29 @@ static int suspend_set_state(struct regulator_dev *rdev,
struct regulator_state *rstate)
{
int ret = 0;
+ bool can_set_state;
- /* enable & disable are mandatory for suspend control */
- if (!rdev->desc->ops->set_suspend_enable ||
- !rdev->desc->ops->set_suspend_disable) {
+ can_set_state = rdev->desc->ops->set_suspend_enable &&
+ rdev->desc->ops->set_suspend_disable;
+
+	/* If we have no suspend mode configuration, don't set anything;
+ * only warn if the driver actually makes the suspend mode
+ * configurable.
+ */
+ if (!rstate->enabled && !rstate->disabled) {
+ if (can_set_state)
+ printk(KERN_WARNING "%s: No configuration for %s\n",
+ __func__, rdev_get_name(rdev));
+ return 0;
+ }
+
+ if (rstate->enabled && rstate->disabled) {
+ printk(KERN_ERR "%s: invalid configuration for %s\n",
+ __func__, rdev_get_name(rdev));
+ return -EINVAL;
+ }
+
+ if (!can_set_state) {
printk(KERN_ERR "%s: no way to set suspend state\n",
__func__);
return -EINVAL;
@@ -640,26 +661,44 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
static void print_constraints(struct regulator_dev *rdev)
{
struct regulation_constraints *constraints = rdev->constraints;
- char buf[80];
- int count;
+ char buf[80] = "";
+ int count = 0;
+ int ret;
- if (rdev->desc->type == REGULATOR_VOLTAGE) {
+ if (constraints->min_uV && constraints->max_uV) {
if (constraints->min_uV == constraints->max_uV)
- count = sprintf(buf, "%d mV ",
- constraints->min_uV / 1000);
+ count += sprintf(buf + count, "%d mV ",
+ constraints->min_uV / 1000);
else
- count = sprintf(buf, "%d <--> %d mV ",
- constraints->min_uV / 1000,
- constraints->max_uV / 1000);
- } else {
+ count += sprintf(buf + count, "%d <--> %d mV ",
+ constraints->min_uV / 1000,
+ constraints->max_uV / 1000);
+ }
+
+ if (!constraints->min_uV ||
+ constraints->min_uV != constraints->max_uV) {
+ ret = _regulator_get_voltage(rdev);
+ if (ret > 0)
+ count += sprintf(buf + count, "at %d mV ", ret / 1000);
+ }
+
+ if (constraints->min_uA && constraints->max_uA) {
if (constraints->min_uA == constraints->max_uA)
- count = sprintf(buf, "%d mA ",
- constraints->min_uA / 1000);
+ count += sprintf(buf + count, "%d mA ",
+ constraints->min_uA / 1000);
else
- count = sprintf(buf, "%d <--> %d mA ",
- constraints->min_uA / 1000,
- constraints->max_uA / 1000);
+ count += sprintf(buf + count, "%d <--> %d mA ",
+ constraints->min_uA / 1000,
+ constraints->max_uA / 1000);
}
+
+ if (!constraints->min_uA ||
+ constraints->min_uA != constraints->max_uA) {
+ ret = _regulator_get_current_limit(rdev);
+ if (ret > 0)
+			count += sprintf(buf + count, "at %d mA ", ret / 1000);
+ }
+
if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
count += sprintf(buf + count, "fast ");
if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL)
@@ -669,33 +708,30 @@ static void print_constraints(struct regulator_dev *rdev)
if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
count += sprintf(buf + count, "standby");
- printk(KERN_INFO "regulator: %s: %s\n", rdev->desc->name, buf);
+ printk(KERN_INFO "regulator: %s: %s\n", rdev_get_name(rdev), buf);
}
-/**
- * set_machine_constraints - sets regulator constraints
- * @rdev: regulator source
- * @constraints: constraints to apply
- *
- * Allows platform initialisation code to define and constrain
- * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
- * Constraints *must* be set by platform code in order for some
- * regulator operations to proceed i.e. set_voltage, set_current_limit,
- * set_mode.
- */
-static int set_machine_constraints(struct regulator_dev *rdev,
+static int machine_constraints_voltage(struct regulator_dev *rdev,
struct regulation_constraints *constraints)
{
- int ret = 0;
- const char *name;
struct regulator_ops *ops = rdev->desc->ops;
+ const char *name = rdev_get_name(rdev);
+ int ret;
- if (constraints->name)
- name = constraints->name;
- else if (rdev->desc->name)
- name = rdev->desc->name;
- else
- name = "regulator";
+ /* do we need to apply the constraint voltage */
+ if (rdev->constraints->apply_uV &&
+ rdev->constraints->min_uV == rdev->constraints->max_uV &&
+ ops->set_voltage) {
+ ret = ops->set_voltage(rdev,
+ rdev->constraints->min_uV, rdev->constraints->max_uV);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
+ __func__,
+ rdev->constraints->min_uV, name);
+ rdev->constraints = NULL;
+ return ret;
+ }
+ }
/* constrain machine-level voltage specs to fit
* the actual range supported by this regulator.
@@ -719,14 +755,13 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* voltage constraints are optional */
if ((cmin == 0) && (cmax == 0))
- goto out;
+ return 0;
/* else require explicit machine-level constraints */
if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
pr_err("%s: %s '%s' voltage constraints\n",
__func__, "invalid", name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* initial: [cmin..cmax] valid, [min_uV..max_uV] not */
@@ -748,8 +783,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
if (max_uV < min_uV) {
pr_err("%s: %s '%s' voltage constraints\n",
__func__, "unsupportable", name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* use regulator's subset of machine constraints */
@@ -767,22 +801,34 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
+ return 0;
+}
+
+/**
+ * set_machine_constraints - sets regulator constraints
+ * @rdev: regulator source
+ * @constraints: constraints to apply
+ *
+ * Allows platform initialisation code to define and constrain
+ * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
+ * Constraints *must* be set by platform code in order for some
+ * regulator operations to proceed i.e. set_voltage, set_current_limit,
+ * set_mode.
+ */
+static int set_machine_constraints(struct regulator_dev *rdev,
+ struct regulation_constraints *constraints)
+{
+ int ret = 0;
+ const char *name;
+ struct regulator_ops *ops = rdev->desc->ops;
+
rdev->constraints = constraints;
- /* do we need to apply the constraint voltage */
- if (rdev->constraints->apply_uV &&
- rdev->constraints->min_uV == rdev->constraints->max_uV &&
- ops->set_voltage) {
- ret = ops->set_voltage(rdev,
- rdev->constraints->min_uV, rdev->constraints->max_uV);
- if (ret < 0) {
- printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
- __func__,
- rdev->constraints->min_uV, name);
- rdev->constraints = NULL;
- goto out;
- }
- }
+ name = rdev_get_name(rdev);
+
+ ret = machine_constraints_voltage(rdev, constraints);
+ if (ret != 0)
+ goto out;
/* do we need to setup our suspend state */
if (constraints->initial_state) {
@@ -903,7 +949,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
dev_name(&node->regulator->dev),
node->regulator->desc->name,
supply,
- dev_name(&rdev->dev), rdev->desc->name);
+ dev_name(&rdev->dev), rdev_get_name(rdev));
return -EBUSY;
}
@@ -1212,7 +1258,7 @@ static int _regulator_enable(struct regulator_dev *rdev)
ret = _regulator_enable(rdev->supply);
if (ret < 0) {
printk(KERN_ERR "%s: failed to enable %s: %d\n",
- __func__, rdev->desc->name, ret);
+ __func__, rdev_get_name(rdev), ret);
return ret;
}
}
@@ -1238,7 +1284,7 @@ static int _regulator_enable(struct regulator_dev *rdev)
}
} else if (ret < 0) {
printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
- __func__, rdev->desc->name, ret);
+ __func__, rdev_get_name(rdev), ret);
return ret;
}
/* Fallthrough on positive return values - already enabled */
@@ -1279,7 +1325,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
if (WARN(rdev->use_count <= 0,
"unbalanced disables for %s\n",
- rdev->desc->name))
+ rdev_get_name(rdev)))
return -EIO;
/* are we the last user and permitted to disable ? */
@@ -1292,7 +1338,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
ret = rdev->desc->ops->disable(rdev);
if (ret < 0) {
printk(KERN_ERR "%s: failed to disable %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return ret;
}
}
@@ -1349,7 +1395,7 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
ret = rdev->desc->ops->disable(rdev);
if (ret < 0) {
printk(KERN_ERR "%s: failed to force disable %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return ret;
}
/* notify other consumers that power has been forced off */
@@ -1766,7 +1812,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
output_uV = rdev->desc->ops->get_voltage(rdev);
if (output_uV <= 0) {
printk(KERN_ERR "%s: invalid output voltage found for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
goto out;
}
@@ -1777,13 +1823,13 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
printk(KERN_ERR "%s: invalid input voltage found for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
goto out;
}
/* calc total requested load for this regulator */
list_for_each_entry(consumer, &rdev->consumer_list, list)
- total_uA_load += consumer->uA_load;
+ total_uA_load += consumer->uA_load;
mode = rdev->desc->ops->get_optimum_mode(rdev,
input_uV, output_uV,
@@ -1791,7 +1837,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
ret = regulator_check_mode(rdev, mode);
if (ret < 0) {
printk(KERN_ERR "%s: failed to get optimum mode for %s @"
- " %d uA %d -> %d uV\n", __func__, rdev->desc->name,
+ " %d uA %d -> %d uV\n", __func__, rdev_get_name(rdev),
total_uA_load, input_uV, output_uV);
goto out;
}
@@ -1799,7 +1845,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
ret = rdev->desc->ops->set_mode(rdev, mode);
if (ret < 0) {
printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
- __func__, mode, rdev->desc->name);
+ __func__, mode, rdev_get_name(rdev));
goto out;
}
ret = mode;
@@ -1852,9 +1898,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
/* now notify regulator we supply */
list_for_each_entry(_rdev, &rdev->supply_list, slist) {
- mutex_lock(&_rdev->mutex);
- _notifier_call_chain(_rdev, event, data);
- mutex_unlock(&_rdev->mutex);
+ mutex_lock(&_rdev->mutex);
+ _notifier_call_chain(_rdev, event, data);
+ mutex_unlock(&_rdev->mutex);
}
}
@@ -1885,9 +1931,9 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = regulator_get(dev,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
- dev_err(dev, "Failed to get supply '%s'\n",
- consumers[i].supply);
ret = PTR_ERR(consumers[i].consumer);
+ dev_err(dev, "Failed to get supply '%s': %d\n",
+ consumers[i].supply, ret);
consumers[i].consumer = NULL;
goto err;
}
@@ -1930,8 +1976,8 @@ int regulator_bulk_enable(int num_consumers,
return 0;
err:
- printk(KERN_ERR "Failed to enable %s\n", consumers[i].supply);
- for (i = 0; i < num_consumers; i++)
+ printk(KERN_ERR "Failed to enable %s: %d\n", consumers[i].supply, ret);
+ for (--i; i >= 0; --i)
regulator_disable(consumers[i].consumer);
return ret;
@@ -1965,8 +2011,9 @@ int regulator_bulk_disable(int num_consumers,
return 0;
err:
- printk(KERN_ERR "Failed to disable %s\n", consumers[i].supply);
- for (i = 0; i < num_consumers; i++)
+ printk(KERN_ERR "Failed to disable %s: %d\n", consumers[i].supply,
+ ret);
+ for (--i; i >= 0; --i)
regulator_enable(consumers[i].consumer);
return ret;
@@ -2316,7 +2363,7 @@ int regulator_suspend_prepare(suspend_state_t state)
if (ret < 0) {
printk(KERN_ERR "%s: failed to prepare %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
goto out;
}
}
@@ -2429,12 +2476,7 @@ static int __init regulator_init_complete(void)
ops = rdev->desc->ops;
c = rdev->constraints;
- if (c && c->name)
- name = c->name;
- else if (rdev->desc->name)
- name = rdev->desc->name;
- else
- name = "regulator";
+ name = rdev_get_name(rdev);
if (!ops->disable || (c && c->always_on))
continue;
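
The bulk-consumer changes above now report the error code and unwind only the supplies that were actually switched before the failure. A minimal consumer-side sketch of how these helpers are typically used; the device name, supply names and function name are invented for illustration:

#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data my_supplies[] = {
	{ .supply = "AVDD" },	/* hypothetical supply names */
	{ .supply = "DVDD" },
};

static int my_driver_power_on(struct device *dev)
{
	int ret;

	ret = regulator_bulk_get(dev, ARRAY_SIZE(my_supplies), my_supplies);
	if (ret)
		return ret;

	/* On failure this already disables whatever it managed to enable. */
	ret = regulator_bulk_enable(ARRAY_SIZE(my_supplies), my_supplies);
	if (ret)
		regulator_bulk_free(ARRAY_SIZE(my_supplies), my_supplies);

	return ret;
}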
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index aa224d9..f8c4661 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -331,7 +331,7 @@ static int da9034_get_ldo12_voltage(struct regulator_dev *rdev)
static int da9034_list_ldo12_voltage(struct regulator_dev *rdev,
unsigned selector)
{
- if (selector > ARRAY_SIZE(da9034_ldo12_data))
+ if (selector >= ARRAY_SIZE(da9034_ldo12_data))
return -EINVAL;
return da9034_ldo12_data[selector] * 1000;
}
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 7803a32..4f33a0f 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
if (vol_map[val] >= min_vol)
break;
- if (vol_map[val] > max_vol)
+ if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
return -EINVAL;
return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
@@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
if (vol_map[val] >= min_vol)
break;
- if (vol_map[val] > max_vol)
+ if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
return -EINVAL;
ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
@@ -446,8 +446,8 @@ static int setup_regulators(struct lp3971 *lp3971,
lp3971->rdev[i] = regulator_register(&regulators[id],
lp3971->dev, pdata->regulators[i].initdata, lp3971);
- err = IS_ERR(lp3971->rdev[i]);
- if (err) {
+ if (IS_ERR(lp3971->rdev[i])) {
+ err = PTR_ERR(lp3971->rdev[i]);
dev_err(lp3971->dev, "regulator init failed: %d\n",
err);
goto error;
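
Both the da903x and lp3971 fixes above tighten range checks so a selector can never index past the end of a voltage table. A standalone sketch of the pattern, with a made-up table and function name:

#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical voltage table in mV; valid selectors are 0..ARRAY_SIZE-1. */
static const int demo_voltages[] = { 1800, 2500, 2800, 3300 };

static int demo_list_voltage(unsigned selector)
{
	if (selector >= ARRAY_SIZE(demo_voltages))	/* '>=' not '>' */
		return -EINVAL;
	return demo_voltages[selector] * 1000;		/* mV -> uV */
}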
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
new file mode 100644
index 0000000..acc2fb7
--- /dev/null
+++ b/drivers/regulator/max8660.c
@@ -0,0 +1,510 @@
+/*
+ * max8660.c -- Voltage regulation for the Maxim 8660/8661
+ *
+ * based on max1586.c and wm8400-regulator.c
+ *
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Some info:
+ *
+ * Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8660-MAX8661.pdf
+ *
+ * This chip is a bit nasty because it is a write-only device. Thus, the driver
+ * uses shadow registers to keep track of its values. The main problem appears
+ * to be the initialization: When Linux boots up, we cannot know if the chip is
+ * in the default state or not, so we would have to pass such information in
+ * platform_data. As this adds a bit of complexity to the driver, this is left
+ * out for now until it is really needed.
+ *
+ * Only the [A|S|M]DTV2 registers are used; [A|S|M]DTV1 are currently unused.
+ *
+ * Once the driver is feature complete, it might be worth checking if one set
+ * of functions for V3-V7 is sufficient. For maximum flexibility during
+ * development, they are kept separate for now.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/max8660.h>
+
+#define MAX8660_DCDC_MIN_UV 725000
+#define MAX8660_DCDC_MAX_UV 1800000
+#define MAX8660_DCDC_STEP 25000
+#define MAX8660_DCDC_MAX_SEL 0x2b
+
+#define MAX8660_LDO5_MIN_UV 1700000
+#define MAX8660_LDO5_MAX_UV 2000000
+#define MAX8660_LDO5_STEP 25000
+#define MAX8660_LDO5_MAX_SEL 0x0c
+
+#define MAX8660_LDO67_MIN_UV 1800000
+#define MAX8660_LDO67_MAX_UV 3300000
+#define MAX8660_LDO67_STEP 100000
+#define MAX8660_LDO67_MAX_SEL 0x0f
+
+enum {
+ MAX8660_OVER1,
+ MAX8660_OVER2,
+ MAX8660_VCC1,
+ MAX8660_ADTV1,
+ MAX8660_ADTV2,
+ MAX8660_SDTV1,
+ MAX8660_SDTV2,
+ MAX8660_MDTV1,
+ MAX8660_MDTV2,
+ MAX8660_L12VCR,
+ MAX8660_FPWM,
+ MAX8660_N_REGS, /* not a real register */
+};
+
+struct max8660 {
+ struct i2c_client *client;
+ u8 shadow_regs[MAX8660_N_REGS]; /* as chip is write only */
+ struct regulator_dev *rdev[];
+};
+
+static int max8660_write(struct max8660 *max8660, u8 reg, u8 mask, u8 val)
+{
+ static const u8 max8660_addresses[MAX8660_N_REGS] =
+ { 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80 };
+
+ int ret;
+ u8 reg_val = (max8660->shadow_regs[reg] & mask) | val;
+ dev_vdbg(&max8660->client->dev, "Writing reg %02x with %02x\n",
+ max8660_addresses[reg], reg_val);
+
+ ret = i2c_smbus_write_byte_data(max8660->client,
+ max8660_addresses[reg], reg_val);
+ if (ret == 0)
+ max8660->shadow_regs[reg] = reg_val;
+
+ return ret;
+}
+
+
+/*
+ * DCDC functions
+ */
+
+static int max8660_dcdc_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 val = max8660->shadow_regs[MAX8660_OVER1];
+ u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+ return !!(val & mask);
+}
+
+static int max8660_dcdc_enable(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 bit = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+ return max8660_write(max8660, MAX8660_OVER1, 0xff, bit);
+}
+
+static int max8660_dcdc_disable(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? ~1 : ~4;
+ return max8660_write(max8660, MAX8660_OVER1, mask, 0);
+}
+
+static int max8660_dcdc_list(struct regulator_dev *rdev, unsigned selector)
+{
+ if (selector > MAX8660_DCDC_MAX_SEL)
+ return -EINVAL;
+ return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
+}
+
+static int max8660_dcdc_get(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
+ u8 selector = max8660->shadow_regs[reg];
+ return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
+}
+
+static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 reg, selector, bits;
+ int ret;
+
+ if (min_uV < MAX8660_DCDC_MIN_UV || min_uV > MAX8660_DCDC_MAX_UV)
+ return -EINVAL;
+ if (max_uV < MAX8660_DCDC_MIN_UV || max_uV > MAX8660_DCDC_MAX_UV)
+ return -EINVAL;
+
+ selector = (min_uV - (MAX8660_DCDC_MIN_UV - MAX8660_DCDC_STEP + 1))
+ / MAX8660_DCDC_STEP;
+
+ ret = max8660_dcdc_list(rdev, selector);
+ if (ret < 0 || ret > max_uV)
+ return -EINVAL;
+
+ reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
+ ret = max8660_write(max8660, reg, 0, selector);
+ if (ret)
+ return ret;
+
+ /* Select target voltage register and activate regulation */
+ bits = (rdev_get_id(rdev) == MAX8660_V3) ? 0x03 : 0x30;
+ return max8660_write(max8660, MAX8660_VCC1, 0xff, bits);
+}
+
+static struct regulator_ops max8660_dcdc_ops = {
+ .is_enabled = max8660_dcdc_is_enabled,
+ .list_voltage = max8660_dcdc_list,
+ .set_voltage = max8660_dcdc_set,
+ .get_voltage = max8660_dcdc_get,
+};
+
+
+/*
+ * LDO5 functions
+ */
+
+static int max8660_ldo5_list(struct regulator_dev *rdev, unsigned selector)
+{
+ if (selector > MAX8660_LDO5_MAX_SEL)
+ return -EINVAL;
+ return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
+}
+
+static int max8660_ldo5_get(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 selector = max8660->shadow_regs[MAX8660_MDTV2];
+
+ return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
+}
+
+static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 selector;
+ int ret;
+
+ if (min_uV < MAX8660_LDO5_MIN_UV || min_uV > MAX8660_LDO5_MAX_UV)
+ return -EINVAL;
+ if (max_uV < MAX8660_LDO5_MIN_UV || max_uV > MAX8660_LDO5_MAX_UV)
+ return -EINVAL;
+
+ selector = (min_uV - (MAX8660_LDO5_MIN_UV - MAX8660_LDO5_STEP + 1))
+ / MAX8660_LDO5_STEP;
+ ret = max8660_ldo5_list(rdev, selector);
+ if (ret < 0 || ret > max_uV)
+ return -EINVAL;
+
+ ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector);
+ if (ret)
+ return ret;
+
+ /* Select target voltage register and activate regulation */
+ return max8660_write(max8660, MAX8660_VCC1, 0xff, 0xc0);
+}
+
+static struct regulator_ops max8660_ldo5_ops = {
+ .list_voltage = max8660_ldo5_list,
+ .set_voltage = max8660_ldo5_set,
+ .get_voltage = max8660_ldo5_get,
+};
+
+
+/*
+ * LDO67 functions
+ */
+
+static int max8660_ldo67_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 val = max8660->shadow_regs[MAX8660_OVER2];
+ u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+ return !!(val & mask);
+}
+
+static int max8660_ldo67_enable(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 bit = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+ return max8660_write(max8660, MAX8660_OVER2, 0xff, bit);
+}
+
+static int max8660_ldo67_disable(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? ~2 : ~4;
+ return max8660_write(max8660, MAX8660_OVER2, mask, 0);
+}
+
+static int max8660_ldo67_list(struct regulator_dev *rdev, unsigned selector)
+{
+ if (selector > MAX8660_LDO67_MAX_SEL)
+ return -EINVAL;
+ return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
+}
+
+static int max8660_ldo67_get(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4;
+ u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf;
+
+ return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
+}
+
+static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 selector;
+ int ret;
+
+ if (min_uV < MAX8660_LDO67_MIN_UV || min_uV > MAX8660_LDO67_MAX_UV)
+ return -EINVAL;
+ if (max_uV < MAX8660_LDO67_MIN_UV || max_uV > MAX8660_LDO67_MAX_UV)
+ return -EINVAL;
+
+ selector = (min_uV - (MAX8660_LDO67_MIN_UV - MAX8660_LDO67_STEP + 1))
+ / MAX8660_LDO67_STEP;
+
+ ret = max8660_ldo67_list(rdev, selector);
+ if (ret < 0 || ret > max_uV)
+ return -EINVAL;
+
+ if (rdev_get_id(rdev) == MAX8660_V6)
+ return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector);
+ else
+ return max8660_write(max8660, MAX8660_L12VCR, 0x0f, selector << 4);
+}
+
+static struct regulator_ops max8660_ldo67_ops = {
+ .is_enabled = max8660_ldo67_is_enabled,
+ .enable = max8660_ldo67_enable,
+ .disable = max8660_ldo67_disable,
+ .list_voltage = max8660_ldo67_list,
+ .get_voltage = max8660_ldo67_get,
+ .set_voltage = max8660_ldo67_set,
+};
+
+static struct regulator_desc max8660_reg[] = {
+ {
+ .name = "V3(DCDC)",
+ .id = MAX8660_V3,
+ .ops = &max8660_dcdc_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_DCDC_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "V4(DCDC)",
+ .id = MAX8660_V4,
+ .ops = &max8660_dcdc_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_DCDC_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "V5(LDO)",
+ .id = MAX8660_V5,
+ .ops = &max8660_ldo5_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_LDO5_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "V6(LDO)",
+ .id = MAX8660_V6,
+ .ops = &max8660_ldo67_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_LDO67_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "V7(LDO)",
+ .id = MAX8660_V7,
+ .ops = &max8660_ldo67_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_LDO67_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int max8660_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
+{
+ struct regulator_dev **rdev;
+ struct max8660_platform_data *pdata = client->dev.platform_data;
+ struct max8660 *max8660;
+ int boot_on, i, id, ret = -EINVAL;
+
+ if (pdata->num_subdevs > MAX8660_V_END) {
+ dev_err(&client->dev, "Too many regulators found!\n");
+ goto out;
+ }
+
+ max8660 = kzalloc(sizeof(struct max8660) +
+ sizeof(struct regulator_dev *) * MAX8660_V_END,
+ GFP_KERNEL);
+ if (!max8660) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ max8660->client = client;
+ rdev = max8660->rdev;
+
+ if (pdata->en34_is_high) {
+ /* Simulate always on */
+ max8660->shadow_regs[MAX8660_OVER1] = 5;
+ } else {
+ /* Otherwise devices can be toggled via software */
+ max8660_dcdc_ops.enable = max8660_dcdc_enable;
+ max8660_dcdc_ops.disable = max8660_dcdc_disable;
+ }
+
+ /*
+ * First, set up shadow registers to prevent glitches. As some
+ * registers are shared between regulators, everything must be properly
+ * set up for all regulators in advance.
+ */
+ max8660->shadow_regs[MAX8660_ADTV1] =
+ max8660->shadow_regs[MAX8660_ADTV2] =
+ max8660->shadow_regs[MAX8660_SDTV1] =
+ max8660->shadow_regs[MAX8660_SDTV2] = 0x1b;
+ max8660->shadow_regs[MAX8660_MDTV1] =
+ max8660->shadow_regs[MAX8660_MDTV2] = 0x04;
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+
+ if (!pdata->subdevs[i].platform_data)
+ goto err_free;
+
+ boot_on = pdata->subdevs[i].platform_data->constraints.boot_on;
+
+ switch (pdata->subdevs[i].id) {
+ case MAX8660_V3:
+ if (boot_on)
+ max8660->shadow_regs[MAX8660_OVER1] |= 1;
+ break;
+
+ case MAX8660_V4:
+ if (boot_on)
+ max8660->shadow_regs[MAX8660_OVER1] |= 4;
+ break;
+
+ case MAX8660_V5:
+ break;
+
+ case MAX8660_V6:
+ if (boot_on)
+ max8660->shadow_regs[MAX8660_OVER2] |= 2;
+ break;
+
+ case MAX8660_V7:
+ if (!strcmp(i2c_id->name, "max8661")) {
+ dev_err(&client->dev, "Regulator not on this chip!\n");
+ goto err_free;
+ }
+
+ if (boot_on)
+ max8660->shadow_regs[MAX8660_OVER2] |= 4;
+ break;
+
+ default:
+ dev_err(&client->dev, "invalid regulator %s\n",
+ pdata->subdevs[i].name);
+ goto err_free;
+ }
+ }
+
+ /* Finally register devices */
+ for (i = 0; i < pdata->num_subdevs; i++) {
+
+ id = pdata->subdevs[i].id;
+
+ rdev[i] = regulator_register(&max8660_reg[id], &client->dev,
+ pdata->subdevs[i].platform_data,
+ max8660);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(&client->dev, "failed to register %s\n",
+ max8660_reg[id].name);
+ goto err_unregister;
+ }
+ }
+
+ i2c_set_clientdata(client, rdev);
+ dev_info(&client->dev, "Maxim 8660/8661 regulator driver loaded\n");
+ return 0;
+
+err_unregister:
+ while (--i >= 0)
+ regulator_unregister(rdev[i]);
+err_free:
+ kfree(max8660);
+out:
+ return ret;
+}
+
+static int max8660_remove(struct i2c_client *client)
+{
+ struct regulator_dev **rdev = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < MAX8660_V_END; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+ kfree(rdev);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id max8660_id[] = {
+ { "max8660", 0 },
+ { "max8661", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max8660_id);
+
+static struct i2c_driver max8660_driver = {
+ .probe = max8660_probe,
+ .remove = max8660_remove,
+ .driver = {
+ .name = "max8660",
+ },
+ .id_table = max8660_id,
+};
+
+static int __init max8660_init(void)
+{
+ return i2c_add_driver(&max8660_driver);
+}
+subsys_initcall(max8660_init);
+
+static void __exit max8660_exit(void)
+{
+ i2c_del_driver(&max8660_driver);
+}
+module_exit(max8660_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MAXIM 8660/8661 voltage regulator driver");
+MODULE_AUTHOR("Wolfram Sang");
+MODULE_LICENSE("GPL v2");
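
The header comment of max8660.c explains why the driver mirrors the chip in shadow registers: the device is write-only, so the last value written is the only readable record of its state. A simplified standalone sketch of that read-modify-write-through-shadow technique; all "demo_" names and the register count are illustrative, not part of the driver:

#include <linux/types.h>

#define DEMO_NUM_REGS 11			/* hypothetical register count */

static u8 demo_shadow[DEMO_NUM_REGS];		/* last value written per register */

static int demo_bus_write(u8 reg, u8 val);	/* hypothetical bus accessor */

static int demo_update_bits(u8 reg, u8 mask, u8 val)
{
	u8 new_val = (demo_shadow[reg] & mask) | val;	/* mask keeps old bits */
	int ret = demo_bus_write(reg, new_val);

	if (ret == 0)
		demo_shadow[reg] = new_val;	/* commit the cache only on success */
	return ret;
}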
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
new file mode 100644
index 0000000..39c4953
--- /dev/null
+++ b/drivers/regulator/mc13783-regulator.c
@@ -0,0 +1,245 @@
+/*
+ * Regulator Driver for Freescale MC13783 PMIC
+ *
+ * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/mc13783.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#define MC13783_REG_SWITCHERS4 28
+#define MC13783_REG_SWITCHERS4_PLLEN (1 << 18)
+
+#define MC13783_REG_SWITCHERS5 29
+#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20)
+
+#define MC13783_REG_REGULATORMODE0 32
+#define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0)
+#define MC13783_REG_REGULATORMODE0_VIOHIEN (1 << 3)
+#define MC13783_REG_REGULATORMODE0_VIOLOEN (1 << 6)
+#define MC13783_REG_REGULATORMODE0_VDIGEN (1 << 9)
+#define MC13783_REG_REGULATORMODE0_VGENEN (1 << 12)
+#define MC13783_REG_REGULATORMODE0_VRFDIGEN (1 << 15)
+#define MC13783_REG_REGULATORMODE0_VRFREFEN (1 << 18)
+#define MC13783_REG_REGULATORMODE0_VRFCPEN (1 << 21)
+
+#define MC13783_REG_REGULATORMODE1 33
+#define MC13783_REG_REGULATORMODE1_VSIMEN (1 << 0)
+#define MC13783_REG_REGULATORMODE1_VESIMEN (1 << 3)
+#define MC13783_REG_REGULATORMODE1_VCAMEN (1 << 6)
+#define MC13783_REG_REGULATORMODE1_VRFBGEN (1 << 9)
+#define MC13783_REG_REGULATORMODE1_VVIBEN (1 << 11)
+#define MC13783_REG_REGULATORMODE1_VRF1EN (1 << 12)
+#define MC13783_REG_REGULATORMODE1_VRF2EN (1 << 15)
+#define MC13783_REG_REGULATORMODE1_VMMC1EN (1 << 18)
+#define MC13783_REG_REGULATORMODE1_VMMC2EN (1 << 21)
+
+#define MC13783_REG_POWERMISC 34
+#define MC13783_REG_POWERMISC_GPO1EN (1 << 6)
+#define MC13783_REG_POWERMISC_GPO2EN (1 << 8)
+#define MC13783_REG_POWERMISC_GPO3EN (1 << 10)
+#define MC13783_REG_POWERMISC_GPO4EN (1 << 12)
+
+struct mc13783_regulator {
+ struct regulator_desc desc;
+ int reg;
+ int enable_bit;
+};
+
+static struct regulator_ops mc13783_regulator_ops;
+
+#define MC13783_DEFINE(prefix, _name, _reg) \
+ [MC13783_ ## prefix ## _ ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .ops = &mc13783_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MC13783_ ## prefix ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = MC13783_REG_ ## _reg, \
+ .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ }
+
+#define MC13783_DEFINE_SW(_name, _reg) MC13783_DEFINE(SW, _name, _reg)
+#define MC13783_DEFINE_REGU(_name, _reg) MC13783_DEFINE(REGU, _name, _reg)
+
+static struct mc13783_regulator mc13783_regulators[] = {
+ MC13783_DEFINE_SW(SW3, SWITCHERS5),
+ MC13783_DEFINE_SW(PLL, SWITCHERS4),
+
+ MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VIOHI, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VDIG, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VGEN, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VSIM, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VESIM, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VCAM, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VRFBG, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VVIB, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VRF1, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VRF2, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1),
+ MC13783_DEFINE_REGU(GPO1, POWERMISC),
+ MC13783_DEFINE_REGU(GPO2, POWERMISC),
+ MC13783_DEFINE_REGU(GPO3, POWERMISC),
+ MC13783_DEFINE_REGU(GPO4, POWERMISC),
+};
+
+struct mc13783_regulator_priv {
+ struct mc13783 *mc13783;
+ struct regulator_dev *regulators[];
+};
+
+static int mc13783_regulator_enable(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
+ mc13783_regulators[id].enable_bit,
+ mc13783_regulators[id].enable_bit);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_regulator_disable(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
+ mc13783_regulators[id].enable_bit, 0);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ return (val & mc13783_regulators[id].enable_bit) != 0;
+}
+
+static struct regulator_ops mc13783_regulator_ops = {
+ .enable = mc13783_regulator_enable,
+ .disable = mc13783_regulator_disable,
+ .is_enabled = mc13783_regulator_is_enabled,
+};
+
+static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
+{
+ struct mc13783_regulator_priv *priv;
+ struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
+ struct mc13783_regulator_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct mc13783_regulator_init_data *init_data;
+ int i, ret;
+
+ dev_dbg(&pdev->dev, "mc13783_regulator_probe id %d\n", pdev->id);
+
+ priv = kzalloc(sizeof(*priv) +
+ pdata->num_regulators * sizeof(priv->regulators[0]),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mc13783 = mc13783;
+
+ for (i = 0; i < pdata->num_regulators; i++) {
+ init_data = &pdata->regulators[i];
+ priv->regulators[i] = regulator_register(
+ &mc13783_regulators[init_data->id].desc,
+ &pdev->dev, init_data->init_data, priv);
+
+ if (IS_ERR(priv->regulators[i])) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ mc13783_regulators[i].desc.name);
+ ret = PTR_ERR(priv->regulators[i]);
+ goto err;
+ }
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+err:
+ while (--i >= 0)
+ regulator_unregister(priv->regulators[i]);
+
+ kfree(priv);
+
+ return ret;
+}
+
+static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
+{
+ struct mc13783_regulator_priv *priv = platform_get_drvdata(pdev);
+ struct mc13783_regulator_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < pdata->num_regulators; i++)
+ regulator_unregister(priv->regulators[i]);
+
+ return 0;
+}
+
+static struct platform_driver mc13783_regulator_driver = {
+ .driver = {
+ .name = "mc13783-regulator",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(mc13783_regulator_remove),
+ .probe = mc13783_regulator_probe,
+};
+
+static int __init mc13783_regulator_init(void)
+{
+ return platform_driver_register(&mc13783_regulator_driver);
+}
+subsys_initcall(mc13783_regulator_init);
+
+static void __exit mc13783_regulator_exit(void)
+{
+ platform_driver_unregister(&mc13783_regulator_driver);
+}
+module_exit(mc13783_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
+MODULE_ALIAS("platform:mc13783-regulator");
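
The table above replaces the long hand-written descriptor list (deleted below) with the MC13783_DEFINE() macro. Expanding one entry by hand shows what the macro generates; this is just the preprocessor result of the definitions above, shown as an array-initializer fragment:

/* MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0) expands to roughly: */
[MC13783_REGU_VAUDIO] = {
	.desc = {
		.name	= "REGU_VAUDIO",
		.ops	= &mc13783_regulator_ops,
		.type	= REGULATOR_VOLTAGE,
		.id	= MC13783_REGU_VAUDIO,
		.owner	= THIS_MODULE,
	},
	.reg		= MC13783_REG_REGULATORMODE0,
	.enable_bit	= MC13783_REG_REGULATORMODE0_VAUDIOEN,
},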
diff --git a/drivers/regulator/mc13783.c b/drivers/regulator/mc13783.c
deleted file mode 100644
index 710211f..0000000
--- a/drivers/regulator/mc13783.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Regulator Driver for Freescale MC13783 PMIC
- *
- * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/mfd/mc13783-private.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/driver.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/mc13783.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/err.h>
-
-struct mc13783_regulator {
- struct regulator_desc desc;
- int reg;
- int enable_bit;
-};
-
-static struct regulator_ops mc13783_regulator_ops;
-
-static struct mc13783_regulator mc13783_regulators[] = {
- [MC13783_SW_SW3] = {
- .desc = {
- .name = "SW_SW3",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_SW_SW3,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_SWITCHERS_5,
- .enable_bit = MC13783_SWCTRL_SW3_EN,
- },
- [MC13783_SW_PLL] = {
- .desc = {
- .name = "SW_PLL",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_SW_PLL,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_SWITCHERS_4,
- .enable_bit = MC13783_SWCTRL_PLL_EN,
- },
- [MC13783_REGU_VAUDIO] = {
- .desc = {
- .name = "REGU_VAUDIO",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VAUDIO,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VAUDIO_EN,
- },
- [MC13783_REGU_VIOHI] = {
- .desc = {
- .name = "REGU_VIOHI",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VIOHI,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VIOHI_EN,
- },
- [MC13783_REGU_VIOLO] = {
- .desc = {
- .name = "REGU_VIOLO",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VIOLO,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VIOLO_EN,
- },
- [MC13783_REGU_VDIG] = {
- .desc = {
- .name = "REGU_VDIG",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VDIG,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VDIG_EN,
- },
- [MC13783_REGU_VGEN] = {
- .desc = {
- .name = "REGU_VGEN",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VGEN,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VGEN_EN,
- },
- [MC13783_REGU_VRFDIG] = {
- .desc = {
- .name = "REGU_VRFDIG",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRFDIG,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VRFDIG_EN,
- },
- [MC13783_REGU_VRFREF] = {
- .desc = {
- .name = "REGU_VRFREF",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRFREF,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VRFREF_EN,
- },
- [MC13783_REGU_VRFCP] = {
- .desc = {
- .name = "REGU_VRFCP",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRFCP,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VRFCP_EN,
- },
- [MC13783_REGU_VSIM] = {
- .desc = {
- .name = "REGU_VSIM",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VSIM,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VSIM_EN,
- },
- [MC13783_REGU_VESIM] = {
- .desc = {
- .name = "REGU_VESIM",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VESIM,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VESIM_EN,
- },
- [MC13783_REGU_VCAM] = {
- .desc = {
- .name = "REGU_VCAM",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VCAM,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VCAM_EN,
- },
- [MC13783_REGU_VRFBG] = {
- .desc = {
- .name = "REGU_VRFBG",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRFBG,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VRFBG_EN,
- },
- [MC13783_REGU_VVIB] = {
- .desc = {
- .name = "REGU_VVIB",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VVIB,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VVIB_EN,
- },
- [MC13783_REGU_VRF1] = {
- .desc = {
- .name = "REGU_VRF1",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRF1,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VRF1_EN,
- },
- [MC13783_REGU_VRF2] = {
- .desc = {
- .name = "REGU_VRF2",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRF2,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VRF2_EN,
- },
- [MC13783_REGU_VMMC1] = {
- .desc = {
- .name = "REGU_VMMC1",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VMMC1,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VMMC1_EN,
- },
- [MC13783_REGU_VMMC2] = {
- .desc = {
- .name = "REGU_VMMC2",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VMMC2,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VMMC2_EN,
- },
- [MC13783_REGU_GPO1] = {
- .desc = {
- .name = "REGU_GPO1",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_GPO1,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_POWER_MISCELLANEOUS,
- .enable_bit = MC13783_REGCTRL_GPO1_EN,
- },
- [MC13783_REGU_GPO2] = {
- .desc = {
- .name = "REGU_GPO2",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_GPO2,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_POWER_MISCELLANEOUS,
- .enable_bit = MC13783_REGCTRL_GPO2_EN,
- },
- [MC13783_REGU_GPO3] = {
- .desc = {
- .name = "REGU_GPO3",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_GPO3,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_POWER_MISCELLANEOUS,
- .enable_bit = MC13783_REGCTRL_GPO3_EN,
- },
- [MC13783_REGU_GPO4] = {
- .desc = {
- .name = "REGU_GPO4",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_GPO4,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_POWER_MISCELLANEOUS,
- .enable_bit = MC13783_REGCTRL_GPO4_EN,
- },
-};
-
-struct mc13783_priv {
- struct regulator_desc desc[ARRAY_SIZE(mc13783_regulators)];
- struct mc13783 *mc13783;
- struct regulator_dev *regulators[0];
-};
-
-static int mc13783_enable(struct regulator_dev *rdev)
-{
- struct mc13783_priv *priv = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
- return mc13783_set_bits(priv->mc13783, mc13783_regulators[id].reg,
- mc13783_regulators[id].enable_bit,
- mc13783_regulators[id].enable_bit);
-}
-
-static int mc13783_disable(struct regulator_dev *rdev)
-{
- struct mc13783_priv *priv = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
- return mc13783_set_bits(priv->mc13783, mc13783_regulators[id].reg,
- mc13783_regulators[id].enable_bit, 0);
-}
-
-static int mc13783_is_enabled(struct regulator_dev *rdev)
-{
- struct mc13783_priv *priv = rdev_get_drvdata(rdev);
- int ret, id = rdev_get_id(rdev);
- unsigned int val;
-
- ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
- if (ret)
- return ret;
-
- return (val & mc13783_regulators[id].enable_bit) != 0;
-}
-
-static struct regulator_ops mc13783_regulator_ops = {
- .enable = mc13783_enable,
- .disable = mc13783_disable,
- .is_enabled = mc13783_is_enabled,
-};
-
-static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
-{
- struct mc13783_priv *priv;
- struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
- struct mc13783_regulator_init_data *init_data;
- int i, ret;
-
- dev_dbg(&pdev->dev, "mc13783_regulator_probe id %d\n", pdev->id);
-
- priv = kzalloc(sizeof(*priv) + mc13783->num_regulators * sizeof(void *),
- GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->mc13783 = mc13783;
-
- for (i = 0; i < mc13783->num_regulators; i++) {
- init_data = &mc13783->regulators[i];
- priv->regulators[i] = regulator_register(
- &mc13783_regulators[init_data->id].desc,
- &pdev->dev, init_data->init_data, priv);
-
- if (IS_ERR(priv->regulators[i])) {
- dev_err(&pdev->dev, "failed to register regulator %s\n",
- mc13783_regulators[i].desc.name);
- ret = PTR_ERR(priv->regulators[i]);
- goto err;
- }
- }
-
- platform_set_drvdata(pdev, priv);
-
- return 0;
-err:
- while (--i >= 0)
- regulator_unregister(priv->regulators[i]);
-
- kfree(priv);
-
- return ret;
-}
-
-static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
-{
- struct mc13783_priv *priv = platform_get_drvdata(pdev);
- struct mc13783 *mc13783 = priv->mc13783;
- int i;
-
- for (i = 0; i < mc13783->num_regulators; i++)
- regulator_unregister(priv->regulators[i]);
-
- return 0;
-}
-
-static struct platform_driver mc13783_regulator_driver = {
- .driver = {
- .name = "mc13783-regulator",
- .owner = THIS_MODULE,
- },
- .remove = __devexit_p(mc13783_regulator_remove),
-};
-
-static int __init mc13783_regulator_init(void)
-{
- return platform_driver_probe(&mc13783_regulator_driver,
- mc13783_regulator_probe);
-}
-subsys_initcall(mc13783_regulator_init);
-
-static void __exit mc13783_regulator_exit(void)
-{
- platform_driver_unregister(&mc13783_regulator_driver);
-}
-module_exit(mc13783_regulator_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
-MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
-MODULE_ALIAS("platform:mc13783-regulator");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 7ea1c3a..7e67485 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -40,6 +41,12 @@ struct twlreg_info {
u8 table_len;
const u16 *table;
+ /* regulator specific turn-on delay */
+ u16 delay;
+
+ /* State REMAP default configuration */
+ u8 remap;
+
/* chip constraints on regulator behavior */
u16 min_mV;
@@ -128,6 +135,7 @@ static int twlreg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
+ int ret;
grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
if (grp < 0)
@@ -138,7 +146,11 @@ static int twlreg_enable(struct regulator_dev *rdev)
else
grp |= P1_GRP_6030;
- return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
+
+ udelay(info->delay);
+
+ return ret;
}
static int twlreg_disable(struct regulator_dev *rdev)
@@ -151,9 +163,9 @@ static int twlreg_disable(struct regulator_dev *rdev)
return grp;
if (twl_class_is_4030())
- grp &= ~P1_GRP_4030;
+ grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
else
- grp &= ~P1_GRP_6030;
+ grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030);
return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
}
@@ -294,6 +306,18 @@ static const u16 VSIM_VSEL_table[] = {
static const u16 VDAC_VSEL_table[] = {
1200, 1300, 1800, 1800,
};
+static const u16 VDD1_VSEL_table[] = {
+ 800, 1450,
+};
+static const u16 VDD2_VSEL_table[] = {
+ 800, 1450, 1500,
+};
+static const u16 VIO_VSEL_table[] = {
+ 1800, 1850,
+};
+static const u16 VINTANA2_VSEL_table[] = {
+ 2500, 2750,
+};
static const u16 VAUX1_6030_VSEL_table[] = {
1000, 1300, 1800, 2500,
2800, 2900, 3000, 3000,
@@ -414,20 +438,30 @@ static struct regulator_ops twlfixed_ops = {
/*----------------------------------------------------------------------*/
-#define TWL4030_ADJUSTABLE_LDO(label, offset, num) \
- TWL_ADJUSTABLE_LDO(label, offset, num, TWL4030)
-#define TWL4030_FIXED_LDO(label, offset, mVolts, num) \
- TWL_FIXED_LDO(label, offset, mVolts, num, TWL4030)
-#define TWL6030_ADJUSTABLE_LDO(label, offset, num) \
- TWL_ADJUSTABLE_LDO(label, offset, num, TWL6030)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, num) \
- TWL_FIXED_LDO(label, offset, mVolts, num, TWL6030)
-
-#define TWL_ADJUSTABLE_LDO(label, offset, num, family) { \
+#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
+ TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
+ remap_conf, TWL4030)
+#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+ remap_conf) \
+ TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+ remap_conf, TWL4030)
+#define TWL6030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
+ remap_conf) \
+ TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
+ remap_conf, TWL6030)
+#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+ remap_conf) \
+ TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+ remap_conf, TWL6030)
+
+#define TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf, \
+ family) { \
.base = offset, \
.id = num, \
.table_len = ARRAY_SIZE(label##_VSEL_table), \
.table = label##_VSEL_table, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
.desc = { \
.name = #label, \
.id = family##_REG_##label, \
@@ -438,10 +472,13 @@ static struct regulator_ops twlfixed_ops = {
}, \
}
-#define TWL_FIXED_LDO(label, offset, mVolts, num, family) { \
+#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
+ family) { \
.base = offset, \
.id = num, \
.min_mV = mVolts, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
.desc = { \
.name = #label, \
.id = family##_REG_##label, \
@@ -457,43 +494,41 @@ static struct regulator_ops twlfixed_ops = {
* software control over them after boot.
*/
static struct twlreg_info twl_regs[] = {
- TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1),
- TWL4030_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2),
- TWL4030_ADJUSTABLE_LDO(VAUX2, 0x1b, 2),
- TWL4030_ADJUSTABLE_LDO(VAUX3, 0x1f, 3),
- TWL4030_ADJUSTABLE_LDO(VAUX4, 0x23, 4),
- TWL4030_ADJUSTABLE_LDO(VMMC1, 0x27, 5),
- TWL4030_ADJUSTABLE_LDO(VMMC2, 0x2b, 6),
- /*
- TWL4030_ADJUSTABLE_LDO(VPLL1, 0x2f, 7),
- */
- TWL4030_ADJUSTABLE_LDO(VPLL2, 0x33, 8),
- TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9),
- TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10),
- /*
- TWL4030_ADJUSTABLE_LDO(VINTANA1, 0x3f, 11),
- TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12),
- TWL4030_ADJUSTABLE_LDO(VINTDIG, 0x47, 13),
- TWL4030_SMPS(VIO, 0x4b, 14),
- TWL4030_SMPS(VDD1, 0x55, 15),
- TWL4030_SMPS(VDD2, 0x63, 16),
- */
- TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17),
- TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18),
- TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19),
+ TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VAUX2, 0x1b, 2, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VAUX3, 0x1f, 3, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VAUX4, 0x23, 4, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VMMC1, 0x27, 5, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VMMC2, 0x2b, 6, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VPLL1, 0x2f, 7, 100, 0x00),
+ TWL4030_ADJUSTABLE_LDO(VPLL2, 0x33, 8, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00),
+ TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08),
+ TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
+ TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+ TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
+ TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
+ TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
/* VUSBCP is managed *only* by the USB subchip */
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7),
- TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15),
- TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16),
- TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18)
+ /* Turnon-delay and remap configuration values for 6030 are not
+ verified since the specification is not public */
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x08),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x08),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x08),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x08),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x08)
};
static int twlreg_probe(struct platform_device *pdev)
@@ -525,6 +560,19 @@ static int twlreg_probe(struct platform_device *pdev)
c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS;
+ switch (pdev->id) {
+ case TWL4030_REG_VIO:
+ case TWL4030_REG_VDD1:
+ case TWL4030_REG_VDD2:
+ case TWL4030_REG_VPLL1:
+ case TWL4030_REG_VINTANA1:
+ case TWL4030_REG_VINTANA2:
+ case TWL4030_REG_VINTDIG:
+ c->always_on = true;
+ break;
+ default:
+ break;
+ }
rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
if (IS_ERR(rdev)) {
@@ -534,6 +582,9 @@ static int twlreg_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rdev);
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
+ info->remap);
+
/* NOTE: many regulators support short-circuit IRQs (presentable
* as REGULATOR_OVER_CURRENT notifications?) configured via:
* - SC_CONFIG
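
The twl-regulator changes above thread a per-regulator turn-on delay and a REMAP value through the table macros. Expanding one table entry by hand, using only the fields visible in the macros above (the remaining desc members are elided), gives roughly:

/* TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1, 100, 0x08) becomes approximately: */
{
	.base		= 0x17,
	.id		= 1,
	.table_len	= ARRAY_SIZE(VAUX1_VSEL_table),
	.table		= VAUX1_VSEL_table,
	.delay		= 100,		/* used by udelay() after enable */
	.remap		= 0x08,		/* written to VREG_REMAP at probe */
	.desc = {
		.name	= "VAUX1",
		.id	= TWL4030_REG_VAUX1,
		/* ... remaining desc fields as in the macro ... */
	},
},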
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 2eefc1a..0a65775 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -19,6 +19,8 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/regulator.h>
@@ -39,6 +41,7 @@
#define WM831X_DCDC_CONTROL_2 1
#define WM831X_DCDC_ON_CONFIG 2
#define WM831X_DCDC_SLEEP_CONTROL 3
+#define WM831X_DCDC_DVS_CONTROL 4
/*
* Shared
@@ -50,6 +53,10 @@ struct wm831x_dcdc {
int base;
struct wm831x *wm831x;
struct regulator_dev *regulator;
+ int dvs_gpio;
+ int dvs_gpio_state;
+ int on_vsel;
+ int dvs_vsel;
};
static int wm831x_dcdc_is_enabled(struct regulator_dev *rdev)
@@ -240,11 +247,9 @@ static int wm831x_buckv_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static int wm831x_buckv_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV)
+static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = dcdc->wm831x;
u16 vsel;
if (min_uV < 600000)
@@ -257,39 +262,126 @@ static int wm831x_buckv_set_voltage_int(struct regulator_dev *rdev, int reg,
if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV)
return -EINVAL;
- return wm831x_set_bits(wm831x, reg, WM831X_DC1_ON_VSEL_MASK, vsel);
+ return vsel;
+}
+
+static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ u16 vsel;
+
+ if (max_uV < 600000 || max_uV > 1800000)
+ return -EINVAL;
+
+ vsel = ((max_uV - 600000) / 12500) + 8;
+
+ if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV ||
+ wm831x_buckv_list_voltage(rdev, vsel) < max_uV)
+ return -EINVAL;
+
+ return vsel;
+}
+
+static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
+{
+ struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
+
+ if (state == dcdc->dvs_gpio_state)
+ return 0;
+
+ dcdc->dvs_gpio_state = state;
+ gpio_set_value(dcdc->dvs_gpio, state);
+
+ /* Should wait for DVS state change to be asserted if we have
+ * a GPIO for it; for now assume the device is configured
+ * for the fastest possible transition.
+ */
+
+ return 0;
}
static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
+ struct wm831x *wm831x = dcdc->wm831x;
+ int on_reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
+ int dvs_reg = dcdc->base + WM831X_DCDC_DVS_CONTROL;
+ int vsel, ret;
+
+ vsel = wm831x_buckv_select_min_voltage(rdev, min_uV, max_uV);
+ if (vsel < 0)
+ return vsel;
+
+ /* If this value is already set then do a GPIO update if we can */
+ if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
+ return wm831x_buckv_set_dvs(rdev, 0);
+
+ if (dcdc->dvs_gpio && dcdc->dvs_vsel == vsel)
+ return wm831x_buckv_set_dvs(rdev, 1);
+
+ /* Always set the ON status to the minimum voltage */
+ ret = wm831x_set_bits(wm831x, on_reg, WM831X_DC1_ON_VSEL_MASK, vsel);
+ if (ret < 0)
+ return ret;
+ dcdc->on_vsel = vsel;
+
+ if (!dcdc->dvs_gpio)
+ return ret;
+
+ /* Kick the voltage transition now */
+ ret = wm831x_buckv_set_dvs(rdev, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Set the high voltage as the DVS voltage. This is optimised
+ * for CPUfreq usage; most processors will keep the maximum
+ * voltage constant and lower the minimum with the frequency. */
+ vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV);
+ if (vsel < 0) {
+ /* This should never happen - at worst the same vsel
+ * should be chosen */
+ WARN_ON(vsel < 0);
+ return 0;
+ }
+
+ /* Don't bother if it's the same VSEL we're already using */
+ if (vsel == dcdc->on_vsel)
+ return 0;
- return wm831x_buckv_set_voltage_int(rdev, reg, min_uV, max_uV);
+ ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = vsel;
+ else
+ dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n",
+ ret);
+
+ return 0;
}
static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev,
- int uV)
+ int uV)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
+ struct wm831x *wm831x = dcdc->wm831x;
u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
+ int vsel;
+
+ vsel = wm831x_buckv_select_min_voltage(rdev, uV, uV);
+ if (vsel < 0)
+ return vsel;
- return wm831x_buckv_set_voltage_int(rdev, reg, uV, uV);
+ return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel);
}
static int wm831x_buckv_get_voltage(struct regulator_dev *rdev)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = dcdc->wm831x;
- u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
- int val;
- val = wm831x_reg_read(wm831x, reg);
- if (val < 0)
- return val;
-
- return wm831x_buckv_list_voltage(rdev, val & WM831X_DC1_ON_VSEL_MASK);
+ if (dcdc->dvs_gpio && dcdc->dvs_gpio_state)
+ return wm831x_buckv_list_voltage(rdev, dcdc->dvs_vsel);
+ else
+ return wm831x_buckv_list_voltage(rdev, dcdc->on_vsel);
}
/* Current limit options */
@@ -346,6 +438,64 @@ static struct regulator_ops wm831x_buckv_ops = {
.set_suspend_mode = wm831x_dcdc_set_suspend_mode,
};
+/*
+ * Set up DVS control. We just log errors since we can still run
+ * (with reduced performance) if we fail.
+ */
+static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
+ struct wm831x_buckv_pdata *pdata)
+{
+ struct wm831x *wm831x = dcdc->wm831x;
+ int ret;
+ u16 ctrl;
+
+ if (!pdata || !pdata->dvs_gpio)
+ return;
+
+ switch (pdata->dvs_control_src) {
+ case 1:
+ ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ case 2:
+ ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
+ pdata->dvs_control_src, dcdc->name);
+ return;
+ }
+
+ ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
+ WM831X_DC1_DVS_SRC_MASK, ctrl);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
+ dcdc->name, ret);
+ return;
+ }
+
+ ret = gpio_request(pdata->dvs_gpio, "DCDC DVS");
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
+ dcdc->name, ret);
+ return;
+ }
+
+ /* gpiolib won't let us read the GPIO state back, so take the
+ * initial DVS state from platform data.
+ */
+ dcdc->dvs_gpio_state = pdata->dvs_init_state;
+
+ ret = gpio_direction_output(pdata->dvs_gpio, dcdc->dvs_gpio_state);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to enable %s DVS GPIO: %d\n",
+ dcdc->name, ret);
+ gpio_free(pdata->dvs_gpio);
+ return;
+ }
+
+ dcdc->dvs_gpio = pdata->dvs_gpio;
+}
+
static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
@@ -384,6 +534,23 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
dcdc->desc.ops = &wm831x_buckv_ops;
dcdc->desc.owner = THIS_MODULE;
+ ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to read ON VSEL: %d\n", ret);
+ goto err;
+ }
+ dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK;
+
+ ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret);
+ goto err;
+ }
+ dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK;
+
+ if (pdata->dcdc[id])
+ wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
+
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
pdata->dcdc[id], dcdc);
if (IS_ERR(dcdc->regulator)) {
@@ -422,6 +589,8 @@ err_uv:
err_regulator:
regulator_unregister(dcdc->regulator);
err:
+ if (dcdc->dvs_gpio)
+ gpio_free(dcdc->dvs_gpio);
kfree(dcdc);
return ret;
}
@@ -434,6 +603,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc);
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
+ if (dcdc->dvs_gpio)
+ gpio_free(dcdc->dvs_gpio);
kfree(dcdc);
return 0;
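
The DVS rework above keeps two selectors (on_vsel and dvs_vsel) and flips a GPIO between them instead of rewriting the ON register for every transition. As a quick arithmetic check of the selector encoding, a hypothetical helper mirroring the formula visible in wm831x_buckv_select_max_voltage() (600 mV base, 12.5 mV steps, selector offset 8):

/* 1.2 V -> ((1200000 - 600000) / 12500) + 8 = 48 + 8 = 56 */
static int demo_buckv_uV_to_vsel(int uV)
{
	return ((uV - 600000) / 12500) + 8;
}

Feeding 56 back through the driver's list_voltage callback should return 1,200,000 uV again, which is what the min/max range checks in the two select helpers rely on.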
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 902db56..61e02ac 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -470,7 +470,7 @@ static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
- unsigned int ret;
+ int ret;
ret = wm831x_reg_read(wm831x, on_reg);
if (ret < 0)
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 1bbff09..e7b89e7 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1504,7 +1504,8 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
led->isink_init.consumer_supplies = &led->isink_consumer;
led->isink_init.constraints.min_uA = 0;
led->isink_init.constraints.max_uA = pdata->max_uA;
- led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT;
+ led->isink_init.constraints.valid_ops_mask
+ = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS;
led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
ret = wm8350_register_regulator(wm8350, isink, &led->isink_init);
if (ret != 0) {
@@ -1517,6 +1518,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
led->dcdc_init.num_consumer_supplies = 1;
led->dcdc_init.consumer_supplies = &led->dcdc_consumer;
led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL;
+ led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init);
if (ret != 0) {
platform_device_put(pdev);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index eb154dc..e9aa814 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -686,7 +686,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
*/
#if defined(CONFIG_ATARI)
address_space = 64;
-#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__sparc__)
+#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
+ || defined(__sparc__) || defined(__mips__)
address_space = 128;
#else
#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
@@ -1095,9 +1096,9 @@ static int cmos_pnp_resume(struct pnp_dev *pnp)
#define cmos_pnp_resume NULL
#endif
-static void cmos_pnp_shutdown(struct device *pdev)
+static void cmos_pnp_shutdown(struct pnp_dev *pnp)
{
- if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(pdev))
+ if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev))
return;
cmos_do_shutdown();
@@ -1116,15 +1117,12 @@ static struct pnp_driver cmos_pnp_driver = {
.id_table = rtc_ids,
.probe = cmos_pnp_probe,
.remove = __exit_p(cmos_pnp_remove),
+ .shutdown = cmos_pnp_shutdown,
/* flag ensures resume() gets called, and stops syslog spam */
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.suspend = cmos_pnp_suspend,
.resume = cmos_pnp_resume,
- .driver = {
- .name = (char *)driver_name,
- .shutdown = cmos_pnp_shutdown,
- }
};
#endif /* CONFIG_PNP */
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 259db7f..9630e7d 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -778,6 +778,8 @@ static int __devinit ds1305_probe(struct spi_device *spi)
spi->irq, status);
goto fail1;
}
+
+ device_set_wakeup_capable(&spi->dev, 1);
}
/* export NVRAM */
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 8a99da6..c4ec5c1 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -881,6 +881,8 @@ read_rtc:
"unable to request IRQ!\n");
goto exit_irq;
}
+
+ device_set_wakeup_capable(&client->dev, 1);
set_bit(HAS_ALARM, &ds1307->flags);
dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
}
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 713f7bf..5317bbc 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -383,6 +383,8 @@ static int ds1374_probe(struct i2c_client *client,
dev_err(&client->dev, "unable to request IRQ\n");
goto out_free;
}
+
+ device_set_wakeup_capable(&client->dev, 1);
}
ds1374->rtc = rtc_device_register(client->name, &client->dev,
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 3a7be11..812c667 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -376,20 +376,22 @@ static int __devinit fm3130_probe(struct i2c_client *client,
}
/* Disabling calibration mode */
- if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL)
+ if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) {
i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
fm3130->regs[FM3130_RTC_CONTROL] &
~(FM3130_RTC_CONTROL_BIT_CAL));
dev_warn(&client->dev, "Disabling calibration mode!\n");
+ }
/* Disabling read and write modes */
if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE ||
- fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ)
+ fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) {
i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
fm3130->regs[FM3130_RTC_CONTROL] &
~(FM3130_RTC_CONTROL_BIT_READ |
FM3130_RTC_CONTROL_BIT_WRITE));
dev_warn(&client->dev, "Disabling READ or WRITE mode!\n");
+ }
/* oscillator off? turn it on, so clock can tick. */
if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fdb2e7c..5905936 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1004,8 +1004,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
if (device == NULL ||
device != dasd_device_from_cdev_locked(cdev) ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
- DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
- "bus_id %s", dev_name(&cdev->dev));
+ DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+ "invalid device in request");
return;
}
@@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
device = (struct dasd_device *) cqr->startdev;
if (!device ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
- DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
- "bus_id %s", dev_name(&cdev->dev));
+ DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
+ "invalid device in request");
return;
}
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index fd12317..148b1dd 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -218,7 +218,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
spin_unlock_irqrestore(&aliastree.lock, flags);
newlcu = _allocate_lcu(uid);
if (IS_ERR(newlcu))
- return PTR_ERR(lcu);
+ return PTR_ERR(newlcu);
spin_lock_irqsave(&aliastree.lock, flags);
lcu = _find_lcu(server, uid);
if (!lcu) {
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f64d0db8..6e14863 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,7 +8,7 @@
*
*/
-#define KMSG_COMPONENT "dasd-diag"
+#define KMSG_COMPONENT "dasd"
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -146,16 +146,16 @@ dasd_diag_erp(struct dasd_device *device)
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
if (rc == 4) {
if (!(device->features & DASD_FEATURE_READONLY)) {
- dev_warn(&device->cdev->dev,
- "The access mode of a DIAG device changed"
- " to read-only");
+ pr_warning("%s: The access mode of a DIAG device "
+ "changed to read-only\n",
+ dev_name(&device->cdev->dev));
device->features |= DASD_FEATURE_READONLY;
}
rc = 0;
}
if (rc)
- dev_warn(&device->cdev->dev, "DIAG ERP failed with "
- "rc=%d\n", rc);
+ pr_warning("%s: DIAG ERP failed with "
+ "rc=%d\n", dev_name(&device->cdev->dev), rc);
}
/* Start a given request at the device. Return zero on success, non-zero
@@ -371,8 +371,9 @@ dasd_diag_check_device(struct dasd_device *device)
private->pt_block = 2;
break;
default:
- dev_warn(&device->cdev->dev, "Device type %d is not supported "
- "in DIAG mode\n", private->rdc_data.vdev_class);
+ pr_warning("%s: Device type %d is not supported "
+ "in DIAG mode\n", dev_name(&device->cdev->dev),
+ private->rdc_data.vdev_class);
rc = -EOPNOTSUPP;
goto out;
}
@@ -413,8 +414,8 @@ dasd_diag_check_device(struct dasd_device *device)
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(&private->iob, RW_BIO);
if (rc == 3) {
- dev_warn(&device->cdev->dev,
- "A 64-bit DIAG call failed\n");
+ pr_warning("%s: A 64-bit DIAG call failed\n",
+ dev_name(&device->cdev->dev));
rc = -EOPNOTSUPP;
goto out_label;
}
@@ -423,8 +424,9 @@ dasd_diag_check_device(struct dasd_device *device)
break;
}
if (bsize > PAGE_SIZE) {
- dev_warn(&device->cdev->dev, "Accessing the DASD failed because"
- " of an incorrect format (rc=%d)\n", rc);
+ pr_warning("%s: Accessing the DASD failed because of an "
+ "incorrect format (rc=%d)\n",
+ dev_name(&device->cdev->dev), rc);
rc = -EIO;
goto out_label;
}
@@ -442,18 +444,18 @@ dasd_diag_check_device(struct dasd_device *device)
block->s2b_shift++;
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
if (rc && (rc != 4)) {
- dev_warn(&device->cdev->dev, "DIAG initialization "
- "failed with rc=%d\n", rc);
+ pr_warning("%s: DIAG initialization failed with rc=%d\n",
+ dev_name(&device->cdev->dev), rc);
rc = -EIO;
} else {
if (rc == 4)
device->features |= DASD_FEATURE_READONLY;
- dev_info(&device->cdev->dev,
- "New DASD with %ld byte/block, total size %ld KB%s\n",
- (unsigned long) block->bp_block,
- (unsigned long) (block->blocks <<
- block->s2b_shift) >> 1,
- (rc == 4) ? ", read-only device" : "");
+ pr_info("%s: New DASD with %ld byte/block, total size %ld "
+ "KB%s\n", dev_name(&device->cdev->dev),
+ (unsigned long) block->bp_block,
+ (unsigned long) (block->blocks <<
+ block->s2b_shift) >> 1,
+ (rc == 4) ? ", read-only device" : "");
rc = 0;
}
out_label:
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5819dc0..1cca21a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -23,6 +23,7 @@
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
+#include <asm/compat.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
@@ -2844,13 +2845,16 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
rc = -EFAULT;
if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
goto out;
-#ifndef CONFIG_64BIT
- /* Make sure pointers are sane even on 31 bit. */
- if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
+ if (is_compat_task() || sizeof(long) == 4) {
+ /* Make sure pointers are sane even on 31 bit. */
rc = -EINVAL;
- goto out;
+ if ((usrparm.psf_data >> 32) != 0)
+ goto out;
+ if ((usrparm.rssd_result >> 32) != 0)
+ goto out;
+ usrparm.psf_data &= 0x7fffffffULL;
+ usrparm.rssd_result &= 0x7fffffffULL;
}
-#endif
/* alloc I/O data area */
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
@@ -3029,7 +3033,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
- scsw_cc(&irb->scsw), req->intrc);
+ scsw_cc(&irb->scsw), req ? req->intrc : 0);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 478bcdb..7039d9c 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -17,7 +17,7 @@
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <linux/smp_lock.h>
-
+#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cmb.h>
#include <asm/uaccess.h>
@@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
struct ccw_dev_id dev_id;
base = block->base;
- if (!base->discipline->fill_info)
+ if (!base->discipline || !base->discipline->fill_info)
return -EINVAL;
dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
@@ -303,10 +303,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
dasd_info->features |=
((base->features & DASD_FEATURE_READONLY) != 0);
- if (base->discipline)
- memcpy(dasd_info->type, base->discipline->name, 4);
- else
- memcpy(dasd_info->type, "none", 4);
+ memcpy(dasd_info->type, base->discipline->name, 4);
if (block->request_queue->request_fn) {
struct list_head *l;
@@ -358,9 +355,8 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
}
static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
- unsigned long arg)
+ struct cmbdata __user *argp)
{
- struct cmbdata __user *argp = (void __user *) arg;
size_t size = _IOC_SIZE(cmd);
struct cmbdata data;
int ret;
@@ -376,7 +372,12 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct dasd_block *block = bdev->bd_disk->private_data;
- void __user *argp = (void __user *)arg;
+ void __user *argp;
+
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
if (!block)
return -ENODEV;
@@ -414,7 +415,7 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
case BIODASDCMFDISABLE:
return disable_cmf(block->base->cdev);
case BIODASDREADALLCMB:
- return dasd_ioctl_readall_cmb(block, cmd, arg);
+ return dasd_ioctl_readall_cmb(block, cmd, argp);
default:
/* if the discipline has an ioctl method try it. */
if (block->base->discipline->ioctl) {
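Several hunks in this series (dasd_ioctl, fs3270, vmcp, chsc_sch, zfcp_cfdc) follow one pattern: a single unlocked_ioctl handler is also wired up as compat_ioctl, so the user pointer must be rebuilt with compat_ptr() when the caller is a 31-bit task. A minimal sketch of that pattern, with hypothetical foo_* names and a made-up command number, assuming an s390-style is_compat_task() from asm/compat.h as used in these patches:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/compat.h>

static long foo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    void __user *argp;

    /* 31-bit callers pass a 31-bit address; compat_ptr() turns it back
     * into a valid __user pointer for this task */
    if (is_compat_task())
        argp = compat_ptr(arg);
    else
        argp = (void __user *)arg;

    switch (cmd) {
    case 0x1234:    /* hypothetical command number */
        return put_user(42, (int __user *)argp);
    default:
        return -ENOIOCTLCMD;
    }
}

static const struct file_operations foo_fops = {
    .owner          = THIS_MODULE,
    .unlocked_ioctl = foo_ioctl,
    .compat_ioctl   = foo_ioctl,    /* same handler serves both entry points */
};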
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 6315fbd..71f95f5 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -72,7 +72,7 @@ dasd_devices_show(struct seq_file *m, void *v)
/* Print device number. */
seq_printf(m, "%s", dev_name(&device->cdev->dev));
/* Print discipline string. */
- if (device != NULL && device->discipline != NULL)
+ if (device->discipline != NULL)
seq_printf(m, "(%s)", device->discipline->name);
else
seq_printf(m, "(none)");
@@ -92,10 +92,7 @@ dasd_devices_show(struct seq_file *m, void *v)
substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
seq_printf(m, "%4s: ", substr);
/* Print device status information. */
- switch ((device != NULL) ? device->state : -1) {
- case -1:
- seq_printf(m, "unknown");
- break;
+ switch (device->state) {
case DASD_STATE_NEW:
seq_printf(m, "new");
break;
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9d61683..59ec073 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1037,22 +1037,6 @@ static void tty3215_flush_buffer(struct tty_struct *tty)
}
/*
- * Currently we don't have any io controls for 3215 ttys
- */
-static int tty3215_ioctl(struct tty_struct *tty, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- switch (cmd) {
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-/*
* Disable reading from a 3215 tty
*/
static void tty3215_throttle(struct tty_struct * tty)
@@ -1117,7 +1101,6 @@ static const struct tty_operations tty3215_ops = {
.write_room = tty3215_write_room,
.chars_in_buffer = tty3215_chars_in_buffer,
.flush_buffer = tty3215_flush_buffer,
- .ioctl = tty3215_ioctl,
.throttle = tty3215_throttle,
.unthrottle = tty3215_unthrottle,
.stop = tty3215_stop,
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 28e4649..31c59b0 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/smp_lock.h>
+#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
@@ -322,6 +323,7 @@ fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *o
static long
fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ char __user *argp;
struct fs3270 *fp;
struct raw3270_iocb iocb;
int rc;
@@ -329,6 +331,10 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
fp = filp->private_data;
if (!fp)
return -ENODEV;
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (char __user *)arg;
rc = 0;
mutex_lock(&fs3270_mutex);
switch (cmd) {
@@ -339,10 +345,10 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
fp->write_command = arg;
break;
case TUBGETI:
- rc = put_user(fp->read_command, (char __user *) arg);
+ rc = put_user(fp->read_command, argp);
break;
case TUBGETO:
- rc = put_user(fp->write_command,(char __user *) arg);
+ rc = put_user(fp->write_command, argp);
break;
case TUBGETMOD:
iocb.model = fp->view.model;
@@ -351,8 +357,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
iocb.pf_cnt = 24;
iocb.re_cnt = 20;
iocb.map = 0;
- if (copy_to_user((char __user *) arg, &iocb,
- sizeof(struct raw3270_iocb)))
+ if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
rc = -EFAULT;
break;
}
@@ -467,7 +472,7 @@ fs3270_open(struct inode *inode, struct file *filp)
if (IS_ERR(ib)) {
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
- rc = PTR_ERR(fp);
+ rc = PTR_ERR(ib);
goto out;
}
fp->rdbuf = ib;
@@ -511,8 +516,8 @@ static const struct file_operations fs3270_fops = {
.write = fs3270_write, /* write */
.unlocked_ioctl = fs3270_ioctl, /* ioctl */
.compat_ioctl = fs3270_ioctl, /* ioctl */
- .open = fs3270_open, /* open */
- .release = fs3270_close, /* release */
+ .open = fs3270_open, /* open */
+ .release = fs3270_close, /* release */
};
/*
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index b9d2a00..3796ffd 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -495,6 +495,10 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp)
if (tty->driver_data == NULL)
return -ENOMEM;
tty->low_latency = 0;
+ if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
+ tty->winsize.ws_row = 24;
+ tty->winsize.ws_col = 80;
+ }
}
return 0;
}
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 3657fe1..cb70fa1 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -9,6 +9,7 @@
*/
#define KMSG_COMPONENT "tape_34xx"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
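The pr_fmt definition added above makes every pr_*() call in the file carry the component name automatically. A small sketch of the effect, using a hypothetical helper:

#define KMSG_COMPONENT "tape_34xx"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>

static void foo_report(int rc)
{
    /* printed as "tape_34xx: request failed, rc=-5" because pr_fmt()
     * is prepended to the format string at compile time */
    pr_warning("request failed, rc=%d\n", rc);
}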
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 0c72aad..9821c58 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -9,6 +9,7 @@
*/
#define KMSG_COMPONENT "tape_3590"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
@@ -136,7 +137,7 @@ static void int_to_ext_kekl(struct tape3592_kekl *in,
out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
memcpy(out->label, in->label, sizeof(in->label));
EBCASC(out->label, sizeof(in->label));
- strstrip(out->label);
+ strim(out->label);
}
static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 4799cc2..8d3d720 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -11,6 +11,7 @@
*/
#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/fs.h>
#include <linux/module.h>
@@ -45,8 +46,6 @@
*/
static int tapeblock_open(struct block_device *, fmode_t);
static int tapeblock_release(struct gendisk *, fmode_t);
-static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
- unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);
@@ -54,7 +53,6 @@ static const struct block_device_operations tapeblock_fops = {
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
- .ioctl = tapeblock_ioctl,
.media_changed = tapeblock_medium_changed,
.revalidate_disk = tapeblock_revalidate_disk,
};
@@ -415,42 +413,6 @@ tapeblock_release(struct gendisk *disk, fmode_t mode)
}
/*
- * Support of some generic block device IOCTLs.
- */
-static int
-tapeblock_ioctl(
- struct block_device * bdev,
- fmode_t mode,
- unsigned int command,
- unsigned long arg
-) {
- int rc;
- int minor;
- struct gendisk *disk = bdev->bd_disk;
- struct tape_device *device;
-
- rc = 0;
- BUG_ON(!disk);
- device = disk->private_data;
- BUG_ON(!device);
- minor = MINOR(bdev->bd_dev);
-
- DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
- DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
-
- switch (command) {
- /* Refuse some IOCTL calls without complaining (mount). */
- case 0x5310: /* CDROMMULTISESSION */
- rc = -EINVAL;
- break;
- default:
- rc = -EINVAL;
- }
-
- return rc;
-}
-
-/*
* Initialize block device frontend.
*/
int
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 23d773a..539045a 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -10,11 +10,15 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/mtio.h>
#include <linux/smp_lock.h>
+#include <linux/compat.h>
#include <asm/uaccess.h>
@@ -34,8 +38,9 @@ static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
-static long tapechar_compat_ioctl(struct file *, unsigned int,
- unsigned long);
+#ifdef CONFIG_COMPAT
+static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
+#endif
static const struct file_operations tape_fops =
{
@@ -43,7 +48,9 @@ static const struct file_operations tape_fops =
.read = tapechar_read,
.write = tapechar_write,
.unlocked_ioctl = tapechar_ioctl,
+#ifdef CONFIG_COMPAT
.compat_ioctl = tapechar_compat_ioctl,
+#endif
.open = tapechar_open,
.release = tapechar_release,
};
@@ -454,15 +461,22 @@ tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
return rc;
}
+#ifdef CONFIG_COMPAT
static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
int rval = -ENOIOCTLCMD;
+ unsigned long argp;
+ /* The 'arg' argument of any ioctl function may only be used for
+ * pointers because of the compat pointer conversion.
+ * Consider this when adding new ioctls.
+ */
+ argp = (unsigned long) compat_ptr(data);
if (device->discipline->ioctl_fn) {
mutex_lock(&device->mutex);
- rval = device->discipline->ioctl_fn(device, no, data);
+ rval = device->discipline->ioctl_fn(device, no, argp);
mutex_unlock(&device->mutex);
if (rval == -EINVAL)
rval = -ENOIOCTLCMD;
@@ -470,6 +484,7 @@ tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
return rval;
}
+#endif /* CONFIG_COMPAT */
/*
* Initialize character device frontend.
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index ddc914cc..b2864e3 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -7,6 +7,10 @@
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include "tape_class.h"
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index f5d6802..81b094e 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -12,6 +12,8 @@
*/
#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h> // for kernel parameters
#include <linux/kmod.h> // for requesting modules
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index ebd820c..0ceb379 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -11,6 +11,9 @@
* PROCFS Functions
*/
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 750354a..03f07e5 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -11,6 +11,9 @@
* Stefan Bader <shbader@de.ibm.com>
*/
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/bio.h>
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index a6087ce..921dcda 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
@@ -139,21 +140,26 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct vmcp_session *session;
+ int __user *argp;
int temp;
session = (struct vmcp_session *)file->private_data;
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (int __user *)arg;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
switch (cmd) {
case VMCP_GETCODE:
temp = session->resp_code;
mutex_unlock(&session->mutex);
- return put_user(temp, (int __user *)arg);
+ return put_user(temp, argp);
case VMCP_SETBUF:
free_pages((unsigned long)session->response,
get_order(session->bufsize));
session->response=NULL;
- temp = get_user(session->bufsize, (int __user *)arg);
+ temp = get_user(session->bufsize, argp);
if (get_order(session->bufsize) > 8) {
session->bufsize = PAGE_SIZE;
temp = -EINVAL;
@@ -163,7 +169,7 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VMCP_GETSIZE:
temp = session->resp_size;
mutex_unlock(&session->mutex);
- return put_user(temp, (int __user *)arg);
+ return put_user(temp, argp);
default:
mutex_unlock(&session->mutex);
return -ENOIOCTLCMD;
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index d033414..e1b700a 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -10,5 +10,5 @@ obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
-qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 9509e38..7a28a30 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -49,7 +49,6 @@ static u16 ccwreq_next_path(struct ccw_device *cdev)
*/
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
- struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
if (req->done)
@@ -57,7 +56,6 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
req->done = 1;
ccw_device_set_timeout(cdev, 0);
memset(&cdev->private->irb, 0, sizeof(struct irb));
- sch->lpm = sch->schib.pmcw.pam;
if (rc && rc != -ENODEV && req->drc)
rc = req->drc;
req->callback(cdev, req->data, rc);
@@ -80,7 +78,6 @@ static void ccwreq_do(struct ccw_device *cdev)
continue;
}
/* Perform start function. */
- sch->lpm = 0xff;
memset(&cdev->private->irb, 0, sizeof(struct irb));
rc = cio_start(sch, cp, (u8) req->mask);
if (rc == 0) {
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index cc5144b..c84ac94 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -12,6 +12,7 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
+#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>
@@ -770,24 +771,30 @@ out_free:
static long chsc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
+ void __user *argp;
+
CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
switch (cmd) {
case CHSC_START:
- return chsc_ioctl_start((void __user *)arg);
+ return chsc_ioctl_start(argp);
case CHSC_INFO_CHANNEL_PATH:
- return chsc_ioctl_info_channel_path((void __user *)arg);
+ return chsc_ioctl_info_channel_path(argp);
case CHSC_INFO_CU:
- return chsc_ioctl_info_cu((void __user *)arg);
+ return chsc_ioctl_info_cu(argp);
case CHSC_INFO_SCH_CU:
- return chsc_ioctl_info_sch_cu((void __user *)arg);
+ return chsc_ioctl_info_sch_cu(argp);
case CHSC_INFO_CI:
- return chsc_ioctl_conf_info((void __user *)arg);
+ return chsc_ioctl_conf_info(argp);
case CHSC_INFO_CCL:
- return chsc_ioctl_conf_comp_list((void __user *)arg);
+ return chsc_ioctl_conf_comp_list(argp);
case CHSC_INFO_CPD:
- return chsc_ioctl_chpd((void __user *)arg);
+ return chsc_ioctl_chpd(argp);
case CHSC_INFO_DCAL:
- return chsc_ioctl_dcal((void __user *)arg);
+ return chsc_ioctl_dcal(argp);
default: /* unknown ioctl number */
return -ENOIOCTLCMD;
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 73901c9..a6c7d542 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1519,6 +1519,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
sch->driver = &io_subchannel_driver;
/* Initialize the ccw_device structure. */
cdev->dev.parent= &sch->dev;
+ sch_set_cdev(sch, cdev);
io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index aad188e..6facb54 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -142,7 +142,7 @@ static void spid_do(struct ccw_device *cdev)
u8 fn;
/* Use next available path that is not already in correct state. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm);
+ req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
if (!req->lpm)
goto out_nopath;
/* Channel program setup. */
@@ -254,15 +254,15 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
*p = first;
}
-static u8 pgid_to_vpm(struct ccw_device *cdev)
+static u8 pgid_to_donepm(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct pgid *pgid;
int i;
int lpm;
- u8 vpm = 0;
+ u8 donepm = 0;
- /* Set VPM bits for paths which are already in the target state. */
+ /* Set bits for paths which are already in the target state. */
for (i = 0; i < 8; i++) {
lpm = 0x80 >> i;
if ((cdev->private->pgid_valid_mask & lpm) == 0)
@@ -282,10 +282,10 @@ static u8 pgid_to_vpm(struct ccw_device *cdev)
if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
continue;
}
- vpm |= lpm;
+ donepm |= lpm;
}
- return vpm;
+ return donepm;
}
static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
@@ -307,6 +307,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
int mismatch = 0;
int reserved = 0;
int reset = 0;
+ u8 donepm;
if (rc)
goto out;
@@ -316,18 +317,20 @@ static void snid_done(struct ccw_device *cdev, int rc)
else if (mismatch)
rc = -EOPNOTSUPP;
else {
- sch->vpm = pgid_to_vpm(cdev);
+ donepm = pgid_to_donepm(cdev);
+ sch->vpm = donepm & sch->opm;
+ cdev->private->pgid_todo_mask &= ~donepm;
pgid_fill(cdev, pgid);
}
out:
CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
- "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc,
- cdev->private->pgid_valid_mask, sch->vpm, mismatch,
- reserved, reset);
+ "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid,
+ id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+ cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
case 0:
/* Anything left to do? */
- if (sch->vpm == sch->schib.pmcw.pam) {
+ if (cdev->private->pgid_todo_mask == 0) {
verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
return;
}
@@ -411,6 +414,7 @@ static void verify_start(struct ccw_device *cdev)
struct ccw_dev_id *devid = &cdev->private->dev_id;
sch->vpm = 0;
+ sch->lpm = sch->schib.pmcw.pam;
/* Initialize request data. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
@@ -442,11 +446,14 @@ static void verify_start(struct ccw_device *cdev)
*/
void ccw_device_verify_start(struct ccw_device *cdev)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
CIO_TRACE_EVENT(4, "vrfy");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
/* Initialize PGID data. */
memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
/*
* Initialize pathgroup and multipath state with target values.
* They may change in the course of path verification.
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 61677df..ca5e9bb 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -163,7 +163,7 @@ void tcw_finalize(struct tcw *tcw, int num_tidaws)
/* Add tcat to tccb. */
tccb = tcw_get_tccb(tcw);
tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
- memset(tcat, 0, sizeof(tcat));
+ memset(tcat, 0, sizeof(*tcat));
/* Calculate tcw input/output count and tcat transport count. */
count = calc_dcw_count(tccb);
if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tccb_init);
*/
void tsb_init(struct tsb *tsb)
{
- memset(tsb, 0, sizeof(tsb));
+ memset(tsb, 0, sizeof(*tsb));
}
EXPORT_SYMBOL(tsb_init);
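The two fcx.c hunks above fix a classic mistake: sizeof(tcat) and sizeof(tsb) were taken on pointers, which yields the pointer size (4 or 8 bytes) rather than the size of the structure being cleared, leaving most of the object uninitialized. A tiny standalone illustration of the difference, using a hypothetical stand-in struct:

#include <stdio.h>
#include <string.h>

struct tsb_like {       /* stand-in for the real struct tsb */
    unsigned char data[256];
};

static void init_wrong(struct tsb_like *tsb)
{
    memset(tsb, 0, sizeof(tsb));    /* clears only 4 or 8 bytes: pointer size */
}

static void init_right(struct tsb_like *tsb)
{
    memset(tsb, 0, sizeof(*tsb));   /* clears the whole 256-byte object */
}

int main(void)
{
    struct tsb_like t;

    init_wrong(&t);     /* leaves bytes 8..255 untouched on a 64-bit build */
    init_right(&t);     /* now the whole structure is zeroed */
    printf("sizeof(ptr)=%zu sizeof(*ptr)=%zu\n", sizeof(&t), sizeof(t));
    return 0;
}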
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index d72ae4c..b9ce712 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -150,6 +150,7 @@ struct ccw_device_private {
struct ccw_request req; /* internal I/O request */
int iretry;
u8 pgid_valid_mask; /* mask of valid PGIDs */
+ u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ff7748a..44f2f6a 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -182,6 +182,34 @@ struct scssc_area {
u32:32;
} __attribute__ ((packed));
+struct qdio_dev_perf_stat {
+ unsigned int adapter_int;
+ unsigned int qdio_int;
+ unsigned int pci_request_int;
+
+ unsigned int tasklet_inbound;
+ unsigned int tasklet_inbound_resched;
+ unsigned int tasklet_inbound_resched2;
+ unsigned int tasklet_outbound;
+
+ unsigned int siga_read;
+ unsigned int siga_write;
+ unsigned int siga_sync;
+
+ unsigned int inbound_call;
+ unsigned int inbound_handler;
+ unsigned int stop_polling;
+ unsigned int inbound_queue_full;
+ unsigned int outbound_call;
+ unsigned int outbound_handler;
+ unsigned int fast_requeue;
+ unsigned int target_full;
+ unsigned int eqbs;
+ unsigned int eqbs_partial;
+ unsigned int sqbs;
+ unsigned int sqbs_partial;
+};
+
struct qdio_input_q {
/* input buffer acknowledgement flag */
int polling;
@@ -269,6 +297,7 @@ struct qdio_irq {
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
struct dentry *debugfs_dev;
+ struct dentry *debugfs_perf;
unsigned long int_parm;
struct subchannel_id schid;
@@ -286,9 +315,10 @@ struct qdio_irq {
struct ciw aqueue;
struct qdio_ssqd_desc ssqd_desc;
-
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+ struct qdio_dev_perf_stat perf_stat;
+ int perf_stat_enabled;
/*
* Warning: Leave these members together at the end so they won't be
* cleared in qdio_setup_irq.
@@ -311,6 +341,10 @@ struct qdio_irq {
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
css_general_characteristics.aif_osa)
+#define qperf(qdev,attr) qdev->perf_stat.attr
+#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \
+ q->irq_ptr->perf_stat.attr++
+
/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
{
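The qperf_inc() macro above replaces the old global atomic counters with plain per-device counters that are only bumped while statistics are enabled for that irq. A short sketch of a fast-path call site built on it; the surrounding helper is hypothetical and assumes it lives in a file that includes "qdio.h". Because the macro expands to an unbraced if-statement, call sites inside an if/else of their own should wrap it in braces:

/* hypothetical fast-path helper using the qperf_inc() macro above */
static void foo_handle_inbound(struct qdio_q *q, int count)
{
    qperf_inc(q, inbound_call);

    if (count > 0) {
        qperf_inc(q, inbound_handler);
    } else {
        /* braces matter: qperf_inc() expands to a bare if (...) counter++ */
        qperf_inc(q, inbound_queue_full);
    }
}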
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 7676997..f49761f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -55,13 +55,11 @@ static int qstat_show(struct seq_file *m, void *v)
if (!q)
return 0;
- seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci);
- seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
- seq_printf(m, "ftc: %d\n", q->first_to_check);
- seq_printf(m, "last_move: %d\n", q->last_move);
- seq_printf(m, "polling: %d\n", q->u.in.polling);
- seq_printf(m, "ack start: %d\n", q->u.in.ack_start);
- seq_printf(m, "ack count: %d\n", q->u.in.ack_count);
+ seq_printf(m, "DSCI: %d nr_used: %d\n",
+ *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
+ seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
+ seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
+ q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
seq_printf(m, "slsb buffer states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
@@ -110,7 +108,6 @@ static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
if (!q)
return 0;
-
if (q->is_input_q)
xchg(q->irq_ptr->dsci, 1);
local_bh_disable();
@@ -134,6 +131,98 @@ static const struct file_operations debugfs_fops = {
.release = single_release,
};
+static char *qperf_names[] = {
+ "Assumed adapter interrupts",
+ "QDIO interrupts",
+ "Requested PCIs",
+ "Inbound tasklet runs",
+ "Inbound tasklet resched",
+ "Inbound tasklet resched2",
+ "Outbound tasklet runs",
+ "SIGA read",
+ "SIGA write",
+ "SIGA sync",
+ "Inbound calls",
+ "Inbound handler",
+ "Inbound stop_polling",
+ "Inbound queue full",
+ "Outbound calls",
+ "Outbound handler",
+ "Outbound fast_requeue",
+ "Outbound target_full",
+ "QEBSM eqbs",
+ "QEBSM eqbs partial",
+ "QEBSM sqbs",
+ "QEBSM sqbs partial"
+};
+
+static int qperf_show(struct seq_file *m, void *v)
+{
+ struct qdio_irq *irq_ptr = m->private;
+ unsigned int *stat;
+ int i;
+
+ if (!irq_ptr)
+ return 0;
+ if (!irq_ptr->perf_stat_enabled) {
+ seq_printf(m, "disabled\n");
+ return 0;
+ }
+ stat = (unsigned int *)&irq_ptr->perf_stat;
+
+ for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
+ seq_printf(m, "%26s:\t%u\n",
+ qperf_names[i], *(stat + i));
+ return 0;
+}
+
+static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct qdio_irq *irq_ptr = seq->private;
+ unsigned long val;
+ char buf[8];
+ int ret;
+
+ if (!irq_ptr)
+ return 0;
+ if (count >= sizeof(buf))
+ return -EINVAL;
+ if (copy_from_user(&buf, ubuf, count))
+ return -EFAULT;
+ buf[count] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ case 0:
+ irq_ptr->perf_stat_enabled = 0;
+ memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+ break;
+ case 1:
+ irq_ptr->perf_stat_enabled = 1;
+ break;
+ }
+ return count;
+}
+
+static int qperf_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, qperf_show,
+ filp->f_path.dentry->d_inode->i_private);
+}
+
+static struct file_operations debugfs_perf_fops = {
+ .owner = THIS_MODULE,
+ .open = qperf_seq_open,
+ .read = seq_read,
+ .write = qperf_seq_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
char name[QDIO_DEBUGFS_NAME_LEN];
@@ -156,6 +245,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
debugfs_root);
if (IS_ERR(irq_ptr->debugfs_dev))
irq_ptr->debugfs_dev = NULL;
+
+ irq_ptr->debugfs_perf = debugfs_create_file("statistics",
+ S_IFREG | S_IRUGO | S_IWUSR,
+ irq_ptr->debugfs_dev, irq_ptr,
+ &debugfs_perf_fops);
+ if (IS_ERR(irq_ptr->debugfs_perf))
+ irq_ptr->debugfs_perf = NULL;
+
for_each_input_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
for_each_output_queue(irq_ptr, q, i)
@@ -171,6 +268,7 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
debugfs_remove(q->debugfs_q);
for_each_output_queue(irq_ptr, q, i)
debugfs_remove(q->debugfs_q);
+ debugfs_remove(irq_ptr->debugfs_perf);
debugfs_remove(irq_ptr->debugfs_dev);
}
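The new per-device "statistics" debugfs file is read/write: writing 1 enables counting, writing 0 disables it and clears the counters, and reading dumps the table keyed by qperf_names[]. A small userspace sketch that toggles and reads it; the path is an assumption based on the entries created here (debugfs mounted at /sys/kernel/debug, device directory named after the qdio subchannel):

#include <stdio.h>

/* hypothetical example path; the real directory name depends on the device */
#define QPERF_FILE "/sys/kernel/debug/qdio/0.0.4711/statistics"

int main(void)
{
    char line[128];
    FILE *f;

    f = fopen(QPERF_FILE, "w");     /* enable per-device statistics */
    if (!f) {
        perror(QPERF_FILE);
        return 1;
    }
    fputs("1", f);
    fclose(f);

    f = fopen(QPERF_FILE, "r");     /* dump the counter table */
    if (!f) {
        perror(QPERF_FILE);
        return 1;
    }
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}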
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4be6e84..62b654a 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -22,7 +22,6 @@
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
-#include "qdio_perf.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
"Jan Glauber <jang@linux.vnet.ibm.com>");
@@ -126,7 +125,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int rc;
BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);
+ qperf_inc(q, eqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
@@ -139,7 +138,7 @@ again:
* buffers later.
*/
if ((ccq == 96) && (count != tmp_count)) {
- qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
+ qperf_inc(q, eqbs_partial);
return (count - tmp_count);
}
@@ -182,7 +181,7 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
return 0;
BUG_ON(!q->irq_ptr->sch_token);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);
+ qperf_inc(q, sqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
@@ -191,7 +190,7 @@ again:
rc = qdio_check_ccq(q, ccq);
if (rc == 1) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
- qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
+ qperf_inc(q, sqbs_partial);
goto again;
}
if (rc < 0) {
@@ -285,7 +284,7 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_sync);
+ qperf_inc(q, siga_sync);
cc = do_siga_sync(q->irq_ptr->schid, output, input);
if (cc)
@@ -350,7 +349,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
int cc;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_in);
+ qperf_inc(q, siga_read);
cc = do_siga_input(q->irq_ptr->schid, q->mask);
if (cc)
@@ -382,7 +381,7 @@ static inline void qdio_stop_polling(struct qdio_q *q)
return;
q->u.in.polling = 0;
- qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
+ qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
if (is_qebsm(q)) {
@@ -400,7 +399,7 @@ static void announce_buffer_error(struct qdio_q *q, int count)
/* special handling for no target buffer empty */
if ((!q->is_input_q &&
(q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
- qdio_perf_stat_inc(&perf_stats.outbound_target_full);
+ qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
q->first_to_check);
return;
@@ -486,7 +485,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
- atomic_sub(count, &q->nr_buf_used);
+ if (atomic_sub(count, &q->nr_buf_used) == 0)
+ qperf_inc(q, inbound_queue_full);
break;
case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count);
@@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state, 0);
- if (state == SLSB_P_INPUT_PRIMED)
+ if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
return 0;
@@ -566,9 +566,10 @@ static void qdio_kick_handler(struct qdio_q *q)
count = sub_buf(end, start);
if (q->is_input_q) {
- qdio_perf_stat_inc(&perf_stats.inbound_handler);
+ qperf_inc(q, inbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
} else
+ qperf_inc(q, outbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
start, count);
@@ -582,24 +583,28 @@ static void qdio_kick_handler(struct qdio_q *q)
static void __qdio_inbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
+ qperf_inc(q, tasklet_inbound);
again:
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_handler(q);
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
+ qperf_inc(q, tasklet_inbound_resched);
goto again;
+ }
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q)) {
+ qperf_inc(q, tasklet_inbound_resched2);
goto again;
+ }
}
void qdio_inbound_processing(unsigned long data)
@@ -687,7 +692,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
- qdio_perf_stat_inc(&perf_stats.siga_out);
+ qperf_inc(q, siga_write);
cc = qdio_siga_output(q, &busy_bit);
switch (cc) {
@@ -710,7 +715,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
static void __qdio_outbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
+ qperf_inc(q, tasklet_outbound);
BUG_ON(atomic_read(&q->nr_buf_used) < 0);
if (qdio_outbound_q_moved(q))
@@ -738,12 +743,9 @@ static void __qdio_outbound_processing(struct qdio_q *q)
*/
if (qdio_outbound_q_done(q))
del_timer(&q->u.out.timer);
- else {
- if (!timer_pending(&q->u.out.timer)) {
+ else
+ if (!timer_pending(&q->u.out.timer))
mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
- qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
- }
- }
return;
sched:
@@ -783,7 +785,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
- qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+ qperf_inc(q, tasklet_inbound);
qdio_sync_after_thinint(q);
/*
@@ -798,7 +800,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
qdio_kick_handler(q);
if (!qdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+ qperf_inc(q, tasklet_inbound_resched);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
return;
@@ -811,7 +813,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
* resetting the ACK state.
*/
if (!qdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+ qperf_inc(q, tasklet_inbound_resched2);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
@@ -850,8 +852,6 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
return;
- qdio_perf_stat_inc(&perf_stats.pci_int);
-
for_each_input_queue(irq_ptr, q, i)
tasklet_schedule(&q->tasklet);
@@ -922,8 +922,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
int cstat, dstat;
- qdio_perf_stat_inc(&perf_stats.qdio_int);
-
if (!intparm || !irq_ptr) {
DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
return;
@@ -962,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_handle_activate_check(cdev, intparm, cstat,
dstat);
break;
+ case QDIO_IRQ_STATE_STOPPED:
+ break;
default:
WARN_ON(1);
}
@@ -1382,6 +1382,8 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
{
int used, diff;
+ qperf_inc(q, inbound_call);
+
if (!q->u.in.polling)
goto set;
@@ -1437,14 +1439,16 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
unsigned char state;
int used, rc = 0;
- qdio_perf_stat_inc(&perf_stats.outbound_handler);
+ qperf_inc(q, outbound_call);
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
- if (callflags & QDIO_FLAG_PCI_OUT)
+ if (callflags & QDIO_FLAG_PCI_OUT) {
q->u.out.pci_out_enabled = 1;
+ qperf_inc(q, pci_request_int);
+ }
else
q->u.out.pci_out_enabled = 0;
@@ -1483,7 +1487,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
if (state != SLSB_CU_OUTPUT_PRIMED)
rc = qdio_kick_outbound_q(q);
else
- qdio_perf_stat_inc(&perf_stats.fast_requeue);
+ qperf_inc(q, fast_requeue);
out:
tasklet_schedule(&q->tasklet);
@@ -1539,16 +1543,11 @@ static int __init init_QDIO(void)
rc = qdio_debug_init();
if (rc)
goto out_ti;
- rc = qdio_setup_perf_stats();
- if (rc)
- goto out_debug;
rc = tiqdio_register_thinints();
if (rc)
- goto out_perf;
+ goto out_debug;
return 0;
-out_perf:
- qdio_remove_perf_stats();
out_debug:
qdio_debug_exit();
out_ti:
@@ -1562,7 +1561,6 @@ static void __exit exit_QDIO(void)
{
tiqdio_unregister_thinints();
tiqdio_free_memory();
- qdio_remove_perf_stats();
qdio_debug_exit();
qdio_setup_exit();
}
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
deleted file mode 100644
index 968e3c7..0000000
--- a/drivers/s390/cio/qdio_perf.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * drivers/s390/cio/qdio_perf.c
- *
- * Copyright IBM Corp. 2008
- *
- * Author: Jan Glauber (jang@linux.vnet.ibm.com)
- */
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/ccwdev.h>
-
-#include "cio.h"
-#include "css.h"
-#include "device.h"
-#include "ioasm.h"
-#include "chsc.h"
-#include "qdio_debug.h"
-#include "qdio_perf.h"
-
-int qdio_performance_stats;
-struct qdio_perf_stats perf_stats;
-
-#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *qdio_perf_pde;
-#endif
-
-/*
- * procfs functions
- */
-static int qdio_perf_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.qdio_int));
- seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.pci_int));
- seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.thin_int));
- seq_printf(m, "\n");
- seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.tasklet_inbound));
- seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.tasklet_outbound));
- seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.tasklet_thinint),
- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
- seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.thinint_inbound),
- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
- seq_printf(m, "\n");
- seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_in));
- seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_out));
- seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.siga_sync));
- seq_printf(m, "\n");
- seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.inbound_handler));
- seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.outbound_handler));
- seq_printf(m, "\n");
- seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
- (long)atomic_long_read(&perf_stats.fast_requeue));
- seq_printf(m, "Number of outbound target full condition\t: %li\n",
- (long)atomic_long_read(&perf_stats.outbound_target_full));
- seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
- seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
- (long)atomic_long_read(&perf_stats.debug_stop_polling));
- seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
- seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
- seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
- seq_printf(m, "\n");
- return 0;
-}
-static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, qdio_perf_proc_show, NULL);
-}
-
-static const struct file_operations qdio_perf_proc_fops = {
- .owner = THIS_MODULE,
- .open = qdio_perf_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/*
- * sysfs functions
- */
-static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
-{
- return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
-}
-
-static ssize_t qdio_perf_stats_store(struct bus_type *bus,
- const char *buf, size_t count)
-{
- unsigned long i;
-
- if (strict_strtoul(buf, 16, &i) != 0)
- return -EINVAL;
- if ((i != 0) && (i != 1))
- return -EINVAL;
- if (i == qdio_performance_stats)
- return count;
-
- qdio_performance_stats = i;
- /* reset performance statistics */
- if (i == 0)
- memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
- return count;
-}
-
-static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
- qdio_perf_stats_store);
-
-int __init qdio_setup_perf_stats(void)
-{
- int rc;
-
- rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
- if (rc)
- return rc;
-
-#ifdef CONFIG_PROC_FS
- memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
- qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
- NULL, &qdio_perf_proc_fops);
-#endif
- return 0;
-}
-
-void qdio_remove_perf_stats(void)
-{
-#ifdef CONFIG_PROC_FS
- remove_proc_entry("qdio_perf", NULL);
-#endif
- bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
-}
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
deleted file mode 100644
index ff4504c..0000000
--- a/drivers/s390/cio/qdio_perf.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * drivers/s390/cio/qdio_perf.h
- *
- * Copyright IBM Corp. 2008
- *
- * Author: Jan Glauber (jang@linux.vnet.ibm.com)
- */
-#ifndef QDIO_PERF_H
-#define QDIO_PERF_H
-
-#include <linux/types.h>
-#include <asm/atomic.h>
-
-struct qdio_perf_stats {
- /* interrupt handler calls */
- atomic_long_t qdio_int;
- atomic_long_t pci_int;
- atomic_long_t thin_int;
-
- /* tasklet runs */
- atomic_long_t tasklet_inbound;
- atomic_long_t tasklet_outbound;
- atomic_long_t tasklet_thinint;
- atomic_long_t tasklet_thinint_loop;
- atomic_long_t thinint_inbound;
- atomic_long_t thinint_inbound_loop;
- atomic_long_t thinint_inbound_loop2;
-
- /* signal adapter calls */
- atomic_long_t siga_out;
- atomic_long_t siga_in;
- atomic_long_t siga_sync;
-
- /* misc */
- atomic_long_t inbound_handler;
- atomic_long_t outbound_handler;
- atomic_long_t fast_requeue;
- atomic_long_t outbound_target_full;
-
- /* for debugging */
- atomic_long_t debug_tl_out_timer;
- atomic_long_t debug_stop_polling;
- atomic_long_t debug_eqbs_all;
- atomic_long_t debug_eqbs_incomplete;
- atomic_long_t debug_sqbs_all;
- atomic_long_t debug_sqbs_incomplete;
-};
-
-extern struct qdio_perf_stats perf_stats;
-extern int qdio_performance_stats;
-
-static inline void qdio_perf_stat_inc(atomic_long_t *count)
-{
- if (qdio_performance_stats)
- atomic_long_inc(count);
-}
-
-int qdio_setup_perf_stats(void);
-void qdio_remove_perf_stats(void);
-
-#endif
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 18d54fc..8c2dea5 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -48,7 +48,6 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
if (!irq_ptr)
return;
- WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
irq_ptr->qib.pfmt = qib_param_field_format;
if (qib_param_field)
memcpy(irq_ptr->qib.parm, qib_param_field,
@@ -82,14 +81,12 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
- WARN_ON((unsigned long)q & 0xff);
q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
kmem_cache_free(qdio_q_cache, q);
return -ENOMEM;
}
- WARN_ON((unsigned long)q->slib & 0x7ff);
irq_ptr_qs[i] = q;
}
return 0;
@@ -131,7 +128,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sbal */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
q->sbal[j] = *sbals_array++;
- WARN_ON((unsigned long)q->sbal[j] & 0xff);
+ BUG_ON((unsigned long)q->sbal[j] & 0xff);
}
/* fill in slib */
@@ -147,11 +144,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sl->element[j].sbal = (unsigned long)q->sbal[j];
-
- DBF_EVENT("sl-slsb-sbal");
- DBF_HEX(q->sl, sizeof(void *));
- DBF_HEX(&q->slsb, sizeof(void *));
- DBF_HEX(q->sbal, sizeof(void *));
}
static void setup_queues(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 981a77e..091d904 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,9 +1,7 @@
/*
* linux/drivers/s390/cio/thinint_qdio.c
*
- * thin interrupt support for qdio
- *
- * Copyright 2000-2008 IBM Corp.
+ * Copyright 2000,2009 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
@@ -19,7 +17,6 @@
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
-#include "qdio_perf.h"
/*
* Restriction: only 63 iqdio subchannels would have its own indicator,
@@ -132,8 +129,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
struct qdio_q *q;
- qdio_perf_stat_inc(&perf_stats.thin_int);
-
/*
* SVS only when needed: issue SVS to benefit from iqdio interrupt
* avoidance (SVS clears adapter interrupt suppression overwrite)
@@ -154,6 +149,7 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data)
list_for_each_entry_rcu(q, &tiq_list, entry)
/* only process queues from changed sets */
if (*q->irq_ptr->dsci) {
+ qperf_inc(q, adapter_int);
/* only clear it if the indicator is non-shared */
if (!shared_ind(q->irq_ptr))
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 0d4d18b..c68be24 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -393,10 +393,12 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
* u_mult_inv > 128 bytes.
*/
if (copied == 0) {
- int len;
+ unsigned int len;
spin_unlock_bh(&zcrypt_device_lock);
/* len is max 256 / 2 - 120 = 8 */
len = crt->inputdatalength / 2 - 120;
+ if (len > sizeof(z1))
+ return -EFAULT;
z1 = z2 = z3 = 0;
if (copy_from_user(&z1, crt->np_prime, len) ||
copy_from_user(&z2, crt->bp_key, len) ||
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index a23726a..142f72a 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -373,6 +373,8 @@ static int convert_type86(struct zcrypt_device *zdev,
zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
return -EAGAIN;
}
+ if (service_rc == 8 && service_rs == 72)
+ return -EINVAL;
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 79c1205..68f3e62 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -470,6 +470,8 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
}
if (service_rc == 12 && service_rs == 769)
return -EINVAL;
+ if (service_rc == 8 && service_rs == 72)
+ return -EINVAL;
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 3c77bfe..147bb1a 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3398,7 +3398,7 @@ claw_init(void)
goto out_err;
}
CLAW_DBF_TEXT(2, setup, "init_mod");
- claw_root_dev = root_device_register("qeth");
+ claw_root_dev = root_device_register("claw");
ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
if (ret)
goto register_err;
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index f932400..0eb6eef 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/miscdevice.h>
+#include <asm/compat.h>
#include <asm/ccwdev.h>
#include "zfcp_def.h"
#include "zfcp_ext.h"
@@ -163,7 +164,7 @@ static void zfcp_cfdc_req_to_sense(struct zfcp_cfdc_data *data,
}
static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
- unsigned long buffer)
+ unsigned long arg)
{
struct zfcp_cfdc_data *data;
struct zfcp_cfdc_data __user *data_user;
@@ -175,7 +176,11 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
if (command != ZFCP_CFDC_IOC)
return -ENOTTY;
- data_user = (void __user *) buffer;
+ if (is_compat_task())
+ data_user = compat_ptr(arg);
+ else
+ data_user = (void __user *)arg;
+
if (!data_user)
return -EINVAL;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 8445095..7369c89 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -327,7 +327,7 @@ static void zfcp_dbf_hba_view_response(char **p,
break;
zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
- p += sprintf(*p, "\n");
+ *p += sprintf(*p, "\n");
break;
case FSF_QTCB_OPEN_PORT_WITH_DID:
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 03dec83..66bdb34 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -108,6 +108,7 @@ extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
+extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
/* zfcp_fsf.c */
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
@@ -129,9 +130,9 @@ extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
- mempool_t *);
+ mempool_t *, unsigned int);
extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
- struct zfcp_fsf_ct_els *);
+ struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index ac5e3b7..271399f 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -258,7 +258,8 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
- adapter->pool.gid_pn_req);
+ adapter->pool.gid_pn_req,
+ ZFCP_FC_CTELS_TMO);
if (!ret) {
wait_for_completion(&completion);
zfcp_fc_ns_gid_pn_eval(gid_pn);
@@ -421,7 +422,8 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
hton24(adisc->adisc_req.adisc_port_id,
fc_host_port_id(adapter->scsi_host));
- ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els);
+ ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
+ ZFCP_FC_CTELS_TMO);
if (ret)
kmem_cache_free(zfcp_data.adisc_cache, adisc);
@@ -532,7 +534,8 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
ct->req = &gpn_ft->sg_req;
ct->resp = gpn_ft->sg_resp;
- ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL);
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
+ ZFCP_FC_CTELS_TMO);
if (!ret)
wait_for_completion(&completion);
return ret;
@@ -668,15 +671,52 @@ static void zfcp_fc_ct_els_job_handler(void *data)
{
struct fc_bsg_job *job = data;
struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
- int status = zfcp_ct_els->status;
- int reply_status;
+ struct fc_bsg_reply *jr = job->reply;
- reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
- job->reply->reply_data.ctels_reply.status = reply_status;
- job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ jr->reply_payload_rcv_len = job->reply_payload.payload_len;
+ jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ jr->result = zfcp_ct_els->status ? -EIO : 0;
job->job_done(job);
}
+static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
+{
+ u32 preamble_word1;
+ u8 gs_type;
+ struct zfcp_adapter *adapter;
+
+ preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
+ gs_type = (preamble_word1 & 0xff000000) >> 24;
+
+ adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
+
+ switch (gs_type) {
+ case FC_FST_ALIAS:
+ return &adapter->gs->as;
+ case FC_FST_MGMT:
+ return &adapter->gs->ms;
+ case FC_FST_TIME:
+ return &adapter->gs->ts;
+ break;
+ case FC_FST_DIR:
+ return &adapter->gs->ds;
+ break;
+ default:
+ return NULL;
+ }
+}
+
+static void zfcp_fc_ct_job_handler(void *data)
+{
+ struct fc_bsg_job *job = data;
+ struct zfcp_fc_wka_port *wka_port;
+
+ wka_port = zfcp_fc_job_wka_port(job);
+ zfcp_fc_wka_port_put(wka_port);
+
+ zfcp_fc_ct_els_job_handler(data);
+}
+
static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
struct zfcp_adapter *adapter)
{
@@ -695,43 +735,27 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
} else
d_id = ntoh24(job->request->rqst_data.h_els.port_id);
- return zfcp_fsf_send_els(adapter, d_id, els);
+ els->handler = zfcp_fc_ct_els_job_handler;
+ return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}
static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
struct zfcp_adapter *adapter)
{
int ret;
- u8 gs_type;
struct zfcp_fsf_ct_els *ct = job->dd_data;
struct zfcp_fc_wka_port *wka_port;
- u32 preamble_word1;
- preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
- gs_type = (preamble_word1 & 0xff000000) >> 24;
-
- switch (gs_type) {
- case FC_FST_ALIAS:
- wka_port = &adapter->gs->as;
- break;
- case FC_FST_MGMT:
- wka_port = &adapter->gs->ms;
- break;
- case FC_FST_TIME:
- wka_port = &adapter->gs->ts;
- break;
- case FC_FST_DIR:
- wka_port = &adapter->gs->ds;
- break;
- default:
- return -EINVAL; /* no such service */
- }
+ wka_port = zfcp_fc_job_wka_port(job);
+ if (!wka_port)
+ return -EINVAL;
ret = zfcp_fc_wka_port_get(wka_port);
if (ret)
return ret;
- ret = zfcp_fsf_send_ct(wka_port, ct, NULL);
+ ct->handler = zfcp_fc_ct_job_handler;
+ ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
if (ret)
zfcp_fc_wka_port_put(wka_port);
@@ -752,7 +776,6 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
ct_els->req = job->request_payload.sg_list;
ct_els->resp = job->reply_payload.sg_list;
- ct_els->handler = zfcp_fc_ct_els_job_handler;
ct_els->handler_data = job;
switch (job->request->msgcode) {
@@ -767,6 +790,12 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
}
}
+int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
+{
+ /* hardware tracks timeout, reset bsg timeout to not interfere */
+ return -EAGAIN;
+}
+
int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
struct zfcp_fc_wka_ports *wka_ports;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index cb2a366..0747b08 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -27,6 +27,8 @@
#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \
(ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
+
/**
* struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
* @ct_hdr: FC GS common transport header
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 482dcd9..e8fb4d9 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1068,20 +1068,20 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp,
- int max_sbals)
+ int max_sbals, unsigned int timeout)
{
int ret;
- unsigned int fcp_chan_timeout;
ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
if (ret)
return ret;
/* common settings for ct/gs and els requests */
- fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000;
+ if (timeout > 255)
+ timeout = 255; /* max value accepted by hardware */
req->qtcb->bottom.support.service_class = FSF_CLASS_3;
- req->qtcb->bottom.support.timeout = fcp_chan_timeout;
- zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ);
+ req->qtcb->bottom.support.timeout = timeout;
+ zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
return 0;
}
@@ -1092,7 +1092,8 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
* @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
*/
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
- struct zfcp_fsf_ct_els *ct, mempool_t *pool)
+ struct zfcp_fsf_ct_els *ct, mempool_t *pool,
+ unsigned int timeout)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
@@ -1111,7 +1112,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
- FSF_MAX_SBALS_PER_REQ);
+ FSF_MAX_SBALS_PER_REQ, timeout);
if (ret)
goto failed_send;
@@ -1188,7 +1189,7 @@ skip_fsfstatus:
* @els: pointer to struct zfcp_send_els with data for the command
*/
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
- struct zfcp_fsf_ct_els *els)
+ struct zfcp_fsf_ct_els *els, unsigned int timeout)
{
struct zfcp_fsf_req *req;
struct zfcp_qdio *qdio = adapter->qdio;
@@ -1206,7 +1207,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2);
+ ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout);
if (ret)
goto failed_send;
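The zfcp_fsf_setup_ct_els() hunk caps the caller-supplied CT/ELS timeout at 255, the largest value the channel accepts, and arms a software timer slightly longer than the hardware one. A rough, self-contained sketch of that clamp-and-watchdog pattern (only the 255 limit and the 10-second margin come from the hunk; the rest is illustrative):

#include <stdio.h>

#define HW_TIMEOUT_MAX 255   /* max value accepted by hardware, per the hunk */

/* clamp a requested timeout to the hardware limit */
static unsigned int clamp_fsf_timeout(unsigned int requested)
{
        return requested > HW_TIMEOUT_MAX ? HW_TIMEOUT_MAX : requested;
}

int main(void)
{
        unsigned int timeout = clamp_fsf_timeout(2 * 10000 / 1000); /* e.g. 2 * R_A_TOV in s */
        unsigned int watchdog = timeout + 10; /* software timer outlives the channel timeout */

        printf("hardware timeout %u s, software watchdog %u s\n", timeout, watchdog);
        return 0;
}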
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 771cc53..8e6fc68 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -652,6 +652,7 @@ struct fc_function_template zfcp_transport_functions = {
.show_host_port_state = 1,
.show_host_active_fc4s = 1,
.bsg_request = zfcp_fc_exec_bsg_job,
+ .bsg_timeout = zfcp_fc_timeout_bsg_job,
/* no functions registered for following dynamic attributes but
directly set by LLDD */
.show_host_port_type = 1,
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 7c815d3..28d86f9 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -522,6 +522,40 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct of_device *op,
set_fan_speeds(fp);
}
+static void destroy_one_temp(struct bbc_cpu_temperature *tp)
+{
+ bbc_i2c_detach(tp->client);
+ kfree(tp);
+}
+
+static void destroy_all_temps(struct bbc_i2c_bus *bp)
+{
+ struct bbc_cpu_temperature *tp, *tpos;
+
+ list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) {
+ list_del(&tp->bp_list);
+ list_del(&tp->glob_list);
+ destroy_one_temp(tp);
+ }
+}
+
+static void destroy_one_fan(struct bbc_fan_control *fp)
+{
+ bbc_i2c_detach(fp->client);
+ kfree(fp);
+}
+
+static void destroy_all_fans(struct bbc_i2c_bus *bp)
+{
+ struct bbc_fan_control *fp, *fpos;
+
+ list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {
+ list_del(&fp->bp_list);
+ list_del(&fp->glob_list);
+ destroy_one_fan(fp);
+ }
+}
+
int bbc_envctrl_init(struct bbc_i2c_bus *bp)
{
struct of_device *op;
@@ -541,6 +575,8 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
int err = PTR_ERR(kenvctrld_task);
kenvctrld_task = NULL;
+ destroy_all_temps(bp);
+ destroy_all_fans(bp);
return err;
}
}
@@ -548,35 +584,11 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
return 0;
}
-static void destroy_one_temp(struct bbc_cpu_temperature *tp)
-{
- bbc_i2c_detach(tp->client);
- kfree(tp);
-}
-
-static void destroy_one_fan(struct bbc_fan_control *fp)
-{
- bbc_i2c_detach(fp->client);
- kfree(fp);
-}
-
void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp)
{
- struct bbc_cpu_temperature *tp, *tpos;
- struct bbc_fan_control *fp, *fpos;
-
if (kenvctrld_task)
kthread_stop(kenvctrld_task);
- list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) {
- list_del(&tp->bp_list);
- list_del(&tp->glob_list);
- destroy_one_temp(tp);
- }
-
- list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {
- list_del(&fp->bp_list);
- list_del(&fp->glob_list);
- destroy_one_fan(fp);
- }
+ destroy_all_temps(bp);
+ destroy_all_fans(bp);
}
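The bbc_envctrl change factors the temperature/fan teardown into helpers and reuses them on the init error path. The key idiom is iterating a list with a saved next pointer so each node can be unlinked and freed during traversal, which is what list_for_each_entry_safe() provides. A minimal user-space sketch of that idiom (the node type and list layout here are invented for illustration):

#include <stdlib.h>
#include <stdio.h>

struct node {
        int id;
        struct node *next;
};

/* free every node; keep a copy of ->next before destroying the current one,
 * mirroring what list_for_each_entry_safe() does in the hunk above */
static void destroy_all(struct node **head)
{
        struct node *n = *head, *next;

        while (n) {
                next = n->next;
                printf("destroying node %d\n", n->id);
                free(n);
                n = next;
        }
        *head = NULL;
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                if (!n)
                        break;
                n->id = i;
                n->next = head;
                head = n;
        }
        destroy_all(&head);
        return 0;
}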
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3bf7592..84d3bba 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -76,6 +76,7 @@
Fix bug in twa_get_param() on 4GB+.
Use pci_resource_len() for ioremap().
2.26.02.012 - Add power management support.
+ 2.26.02.013 - Fix bug in twa_load_sgl().
*/
#include <linux/module.h>
@@ -100,7 +101,7 @@
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.012"
+#define TW_DRIVER_VERSION "2.26.02.013"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
@@ -1382,10 +1383,12 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
newcommand = &full_command_packet->command.newcommand;
newcommand->request_id__lunl =
cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
- newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
- newcommand->sg_list[0].length = cpu_to_le32(length);
+ if (length) {
+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ newcommand->sg_list[0].length = cpu_to_le32(length);
+ }
newcommand->sgl_entries__lunh =
- cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1));
+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
} else {
oldcommand = &full_command_packet->command.oldcommand;
oldcommand->request_id = request_id;
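The twa_load_sgl() fix only fills the first scatter-gather entry when there is actually a payload, and encodes zero SG entries otherwise. A toy sketch of that pattern (the struct and helper are invented; only the "skip the entry when length is zero" logic follows the hunk):

#include <stdio.h>

struct sg_entry {
        unsigned long address;
        unsigned int length;
};

/* only describe a data buffer when there is one; an ioctl with no payload
 * gets zero SG entries, mirroring the twa_load_sgl() fix */
static unsigned int build_sgl(struct sg_entry *sg, unsigned long dma_handle,
                              unsigned int length)
{
        if (!length)
                return 0;

        sg->address = dma_handle;
        sg->length = length;
        return 1;
}

int main(void)
{
        struct sg_entry sg = { 0, 0 };
        unsigned int entries = build_sgl(&sg, 0x1000, 0);

        printf("%u SG entries for a zero-length ioctl\n", entries);
        return 0;
}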
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 36900c7..9191d1e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -388,6 +388,16 @@ config BLK_DEV_3W_XXXX_RAID
Please read the comments at the top of
<file:drivers/scsi/3w-xxxx.c>.
+config SCSI_HPSA
+ tristate "HP Smart Array SCSI driver"
+ depends on PCI && SCSI
+ help
+ This driver supports HP Smart Array Controllers (circa 2009).
+ It is a SCSI alternative to the cciss driver, which is a block
+ driver. Anyone wishing to use HP Smart Array controllers who
+ would prefer the devices be presented to Linux as SCSI devices,
+ rather than as generic block devices, should say Y here.
+
config SCSI_3W_9XXX
tristate "3ware 9xxx SATA-RAID support"
depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 280d3c657..92a8c50 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
+obj-$(CONFIG_SCSI_HPSA) += hpsa.o
obj-$(CONFIG_SCSI_DTC3280) += dtc.o
obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 2a88985..7e26ebc 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -293,7 +293,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
status = -EINVAL;
}
}
- aac_fib_complete(fibptr);
+ /* Do not set XferState to zero unless we receive a response from F/W */
+ if (status >= 0)
+ aac_fib_complete(fibptr);
+
/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
if (status >= 0) {
if ((aac_commit == 1) || commit_flag) {
@@ -310,13 +313,18 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
FsaNormal,
1, 1,
NULL, NULL);
- aac_fib_complete(fibptr);
+ /* Do not set XferState to zero unless
+ * we receive a response from F/W */
+ if (status >= 0)
+ aac_fib_complete(fibptr);
} else if (aac_commit == 0) {
printk(KERN_WARNING
"aac_get_config_status: Foreign device configurations are being ignored\n");
}
}
- aac_fib_free(fibptr);
+ /* FIB should be freed only after getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibptr);
return status;
}
@@ -355,7 +363,9 @@ int aac_get_containers(struct aac_dev *dev)
maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
aac_fib_complete(fibptr);
}
- aac_fib_free(fibptr);
+ /* FIB should be freed only after getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibptr);
if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
@@ -1245,8 +1255,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
NULL);
if (rcode < 0) {
- aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
+ /* FIB should be freed only after
+ * getting the response from the F/W */
+ if (rcode != -ERESTARTSYS) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ }
return rcode;
}
memcpy(&dev->adapter_info, info, sizeof(*info));
@@ -1270,6 +1284,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
if (rcode >= 0)
memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
+ if (rcode == -ERESTARTSYS) {
+ fibptr = aac_fib_alloc(dev);
+ if (!fibptr)
+ return -ENOMEM;
+ }
+
}
@@ -1470,9 +1490,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
(dev->scsi_host_ptr->sg_tablesize * 8) + 112;
}
}
-
- aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
+ /* FIB should be freed only after getting the response from the F/W */
+ if (rcode != -ERESTARTSYS) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ }
return rcode;
}
@@ -1633,6 +1655,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
* Allocate and initialize a Fib
*/
if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ printk(KERN_WARNING "aac_read: fib allocation failed\n");
return -1;
}
@@ -1712,9 +1735,14 @@ static int aac_write(struct scsi_cmnd * scsicmd)
* Allocate and initialize a Fib then setup a BlockWrite command
*/
if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
- scsicmd->result = DID_ERROR << 16;
- scsicmd->scsi_done(scsicmd);
- return 0;
+ /* FIB temporarily unavailable, not a catastrophic failure */
+
+ /* scsicmd->result = DID_ERROR << 16;
+ * scsicmd->scsi_done(scsicmd);
+ * return 0;
+ */
+ printk(KERN_WARNING "aac_write: fib allocation failed\n");
+ return -1;
}
status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 83986ed..619c02d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2461
+# define AAC_DRIVER_BUILD 24702
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -1036,6 +1036,9 @@ struct aac_dev
u8 printf_enabled;
u8 in_reset;
u8 msi;
+ int management_fib_count;
+ spinlock_t manage_lock;
+
};
#define aac_adapter_interrupt(dev) \
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 0391d75..9c0c911 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -153,7 +153,7 @@ cleanup:
fibptr->hw_fib_pa = hw_fib_pa;
fibptr->hw_fib_va = hw_fib;
}
- if (retval != -EINTR)
+ if (retval != -ERESTARTSYS)
aac_fib_free(fibptr);
return retval;
}
@@ -322,7 +322,7 @@ return_fib:
}
if (f.wait) {
if(down_interruptible(&fibctx->wait_sem) < 0) {
- status = -EINTR;
+ status = -ERESTARTSYS;
} else {
/* Lock again and retry */
spin_lock_irqsave(&dev->fib_lock, flags);
@@ -593,10 +593,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
u64 addr;
void* p;
if (upsg->sg[i].count >
- (dev->adapter_info.options &
+ ((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
- 65536) {
+ 65536)) {
rcode = -EINVAL;
goto cleanup;
}
@@ -645,10 +645,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
u64 addr;
void* p;
if (usg->sg[i].count >
- (dev->adapter_info.options &
+ ((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
- 65536) {
+ 65536)) {
rcode = -EINVAL;
goto cleanup;
}
@@ -695,10 +695,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
uintptr_t addr;
void* p;
if (usg->sg[i].count >
- (dev->adapter_info.options &
+ ((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
- 65536) {
+ 65536)) {
rcode = -EINVAL;
goto cleanup;
}
@@ -734,10 +734,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
dma_addr_t addr;
void* p;
if (upsg->sg[i].count >
- (dev->adapter_info.options &
+ ((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
- 65536) {
+ 65536)) {
rcode = -EINVAL;
goto cleanup;
}
@@ -772,8 +772,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
psg->count = cpu_to_le32(sg_indx+1);
status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
}
- if (status == -EINTR) {
- rcode = -EINTR;
+ if (status == -ERESTARTSYS) {
+ rcode = -ERESTARTSYS;
goto cleanup;
}
@@ -810,7 +810,7 @@ cleanup:
for(i=0; i <= sg_indx; i++){
kfree(sg_list[i]);
}
- if (rcode != -EINTR) {
+ if (rcode != -ERESTARTSYS) {
aac_fib_complete(srbfib);
aac_fib_free(srbfib);
}
@@ -848,7 +848,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
*/
status = aac_dev_ioctl(dev, cmd, arg);
- if(status != -ENOTTY)
+ if (status != -ENOTTY)
return status;
switch (cmd) {
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 666d515..a726148 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -194,7 +194,9 @@ int aac_send_shutdown(struct aac_dev * dev)
if (status >= 0)
aac_fib_complete(fibctx);
- aac_fib_free(fibctx);
+ /* FIB should be freed only after getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibctx);
return status;
}
@@ -304,6 +306,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
/*
* Check the preferred comm settings, defaults from template.
*/
+ dev->management_fib_count = 0;
+ spin_lock_init(&dev->manage_lock);
dev->max_fib_size = sizeof(struct hw_fib);
dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
- sizeof(struct aac_fibhdr)
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 956261f..94d2954 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -189,7 +189,14 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
void aac_fib_free(struct fib *fibptr)
{
- unsigned long flags;
+ unsigned long flags, flagsv;
+
+ spin_lock_irqsave(&fibptr->event_lock, flagsv);
+ if (fibptr->done == 2) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
+ return;
+ }
+ spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
@@ -390,6 +397,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
struct hw_fib * hw_fib = fibptr->hw_fib_va;
unsigned long flags = 0;
unsigned long qflags;
+ unsigned long mflags = 0;
+
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
return -EBUSY;
@@ -471,9 +480,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
if (!dev->queues)
return -EBUSY;
- if(wait)
+ if (wait) {
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
+ printk(KERN_INFO "No management Fibs Available:%d\n",
+ dev->management_fib_count);
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ return -EBUSY;
+ }
+ dev->management_fib_count++;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
spin_lock_irqsave(&fibptr->event_lock, flags);
- aac_adapter_deliver(fibptr);
+ }
+
+ if (aac_adapter_deliver(fibptr) != 0) {
+ printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
+ if (wait) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ }
+ return -EBUSY;
+ }
+
/*
* If the caller wanted us to wait for response wait now.
@@ -516,14 +547,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
udelay(5);
}
} else if (down_interruptible(&fibptr->event_wait)) {
- fibptr->done = 2;
- up(&fibptr->event_wait);
+ /* Do nothing ... satisfy
+ * down_interruptible must_check */
}
+
spin_lock_irqsave(&fibptr->event_lock, flags);
- if ((fibptr->done == 0) || (fibptr->done == 2)) {
+ if (fibptr->done == 0) {
fibptr->done = 2; /* Tell interrupt we aborted */
spin_unlock_irqrestore(&fibptr->event_lock, flags);
- return -EINTR;
+ return -ERESTARTSYS;
}
spin_unlock_irqrestore(&fibptr->event_lock, flags);
BUG_ON(fibptr->done == 0);
@@ -689,6 +721,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
int aac_fib_complete(struct fib *fibptr)
{
+ unsigned long flags;
struct hw_fib * hw_fib = fibptr->hw_fib_va;
/*
@@ -709,6 +742,13 @@ int aac_fib_complete(struct fib *fibptr)
* command is complete that we had sent to the adapter and this
* cdb could be reused.
*/
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ if (fibptr->done == 2) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+
if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
{
@@ -1355,7 +1395,10 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
if (status >= 0)
aac_fib_complete(fibctx);
- aac_fib_free(fibctx);
+ /* FIB should be freed only after getting
+ * the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibctx);
}
}
@@ -1759,6 +1802,7 @@ int aac_command_thread(void *data)
struct fib *fibptr;
if ((fibptr = aac_fib_alloc(dev))) {
+ int status;
__le32 *info;
aac_fib_init(fibptr);
@@ -1769,15 +1813,21 @@ int aac_command_thread(void *data)
*info = cpu_to_le32(now.tv_sec);
- (void)aac_fib_send(SendHostTime,
+ status = aac_fib_send(SendHostTime,
fibptr,
sizeof(*info),
FsaNormal,
1, 1,
NULL,
NULL);
- aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
+ /* Do not set XferState to zero unless
+ * we receive a response from F/W */
+ if (status >= 0)
+ aac_fib_complete(fibptr);
+ /* FIB should be freed only after
+ * getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibptr);
}
difference = (long)(unsigned)update_interval*HZ;
} else {
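The aac_fib_send() changes above add a per-adapter quota for waited ("management") FIBs: a counter guarded by manage_lock is bumped before delivery, dropped again when delivery fails or the response arrives, and new requests are refused with -EBUSY once AAC_NUM_MGT_FIB are outstanding. A rough pthread-based sketch of the same admission-control pattern (the limit value and helper names are invented; only the counter-under-a-lock and -EBUSY behaviour follow the hunks):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MGT_FIB_LIMIT 8    /* stands in for AAC_NUM_MGT_FIB */

static pthread_mutex_t manage_lock = PTHREAD_MUTEX_INITIALIZER;
static int management_fib_count;

/* reserve a slot before issuing a waited request; -EBUSY when the quota is used up */
static int mgt_fib_get(void)
{
        int ret = 0;

        pthread_mutex_lock(&manage_lock);
        if (management_fib_count >= MGT_FIB_LIMIT)
                ret = -EBUSY;
        else
                management_fib_count++;
        pthread_mutex_unlock(&manage_lock);
        return ret;
}

/* release the slot on completion or on a failed delivery */
static void mgt_fib_put(void)
{
        pthread_mutex_lock(&manage_lock);
        management_fib_count--;
        pthread_mutex_unlock(&manage_lock);
}

int main(void)
{
        if (mgt_fib_get() == 0) {
                /* ... deliver the request and wait for the response ... */
                mgt_fib_put();
        } else {
                printf("no management slots available\n");
        }
        return 0;
}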
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index abc9ef5..9c7408fe 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -57,9 +57,9 @@ unsigned int aac_response_normal(struct aac_queue * q)
struct hw_fib * hwfib;
struct fib * fib;
int consumed = 0;
- unsigned long flags;
+ unsigned long flags, mflags;
- spin_lock_irqsave(q->lock, flags);
+ spin_lock_irqsave(q->lock, flags);
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
@@ -125,12 +125,21 @@ unsigned int aac_response_normal(struct aac_queue * q)
} else {
unsigned long flagv;
spin_lock_irqsave(&fib->event_lock, flagv);
- if (!fib->done)
+ if (!fib->done) {
fib->done = 1;
- up(&fib->event_wait);
+ up(&fib->event_wait);
+ }
spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
if (fib->done == 2) {
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ fib->done = 0;
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
aac_fib_complete(fib);
aac_fib_free(fib);
}
@@ -232,6 +241,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
{
+ unsigned long mflags;
dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
if ((index & 0x00000002L)) {
struct hw_fib * hw_fib;
@@ -320,11 +330,25 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
unsigned long flagv;
dprintk((KERN_INFO "event_wait up\n"));
spin_lock_irqsave(&fib->event_lock, flagv);
- if (!fib->done)
+ if (!fib->done) {
fib->done = 1;
- up(&fib->event_wait);
+ up(&fib->event_wait);
+ }
spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+ if (fib->done == 2) {
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ fib->done = 0;
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
+
}
return 0;
}
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 4d419c1..78971db 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -3171,13 +3171,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
tinfo->curr.transport_version = 2;
tinfo->goal.transport_version = 2;
tinfo->goal.ppr_options = 0;
- /*
- * Remove any SCBs in the waiting for selection
- * queue that may also be for this target so
- * that command ordering is preserved.
- */
- ahd_freeze_devq(ahd, scb);
- ahd_qinfifo_requeue_tail(ahd, scb);
+ if (scb != NULL) {
+ /*
+ * Remove any SCBs in the waiting
+ * for selection queue that may
+ * also be for this target so that
+ * command ordering is preserved.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_qinfifo_requeue_tail(ahd, scb);
+ }
printerror = 0;
}
} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
@@ -3194,13 +3197,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
MSG_EXT_WDTR_BUS_8_BIT,
AHD_TRANS_CUR|AHD_TRANS_GOAL,
/*paused*/TRUE);
- /*
- * Remove any SCBs in the waiting for selection
- * queue that may also be for this target so that
- * command ordering is preserved.
- */
- ahd_freeze_devq(ahd, scb);
- ahd_qinfifo_requeue_tail(ahd, scb);
+ if (scb != NULL) {
+ /*
+ * Remove any SCBs in the waiting for
+ * selection queue that may also be for
+ * this target so that command ordering
+ * is preserved.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_qinfifo_requeue_tail(ahd, scb);
+ }
printerror = 0;
} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
&& ppr_busfree == 0) {
@@ -3217,13 +3223,16 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
/*ppr_options*/0,
AHD_TRANS_CUR|AHD_TRANS_GOAL,
/*paused*/TRUE);
- /*
- * Remove any SCBs in the waiting for selection
- * queue that may also be for this target so that
- * command ordering is preserved.
- */
- ahd_freeze_devq(ahd, scb);
- ahd_qinfifo_requeue_tail(ahd, scb);
+ if (scb != NULL) {
+ /*
+ * Remove any SCBs in the waiting for
+ * selection queue that may also be for
+ * this target so that command ordering
+ * is preserved.
+ */
+ ahd_freeze_devq(ahd, scb);
+ ahd_qinfifo_requeue_tail(ahd, scb);
+ }
printerror = 0;
} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
&& ahd_sent_msg(ahd, AHDMSG_1B,
@@ -3251,7 +3260,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
* the message phases. We check it last in case we
* had to send some other message that caused a busfree.
*/
- if (printerror != 0
+ if (scb != NULL && printerror != 0
&& (lastphase == P_MESGIN || lastphase == P_MESGOUT)
&& ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) {
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 4775426..9e71ac6 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
if (info->scsi.phase == PHASE_IDLE)
fas216_kick(info);
- mod_timer(&info->eh_timer, 30 * HZ);
+ mod_timer(&info->eh_timer, jiffies + 30 * HZ);
spin_unlock_irqrestore(&info->host_lock, flags);
/*
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 698a527..f008708 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -135,11 +135,15 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
while ((compl = be_mcc_compl_get(phba))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
- BUG_ON(!is_link_state_evt(compl->flags));
+ if (is_link_state_evt(compl->flags))
+ /* Interpret compl as a async link evt */
+ beiscsi_async_link_state_process(phba,
+ (struct be_async_event_link_state *) compl);
+ else
+ SE_DEBUG(DBG_LVL_1,
+ " Unsupported Async Event, flags"
+ " = 0x%08x \n", compl->flags);
- /* Interpret compl as a async link evt */
- beiscsi_async_link_state_process(phba,
- (struct be_async_event_link_state *) compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
status = be_mcc_compl_process(ctrl, compl);
atomic_dec(&phba->ctrl.mcc_obj.q.used);
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 2b973f3..6cf9dc3 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -684,6 +684,7 @@ extern unsigned int error_mask1, error_mask2;
extern u64 iscsi_error_mask;
extern unsigned int en_tcp_dack;
extern unsigned int event_coal_div;
+extern unsigned int event_coal_min;
extern struct scsi_transport_template *bnx2i_scsi_xport_template;
extern struct iscsi_transport bnx2i_iscsi_transport;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5c8d763..1af578d 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -133,20 +133,38 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
{
struct bnx2i_5771x_cq_db *cq_db;
u16 cq_index;
+ u16 next_index;
+ u32 num_active_cmds;
+
+ /* Coalesce CQ entries only on 10G devices */
if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
return;
+ /* Do not update CQ DB multiple times before firmware writes
+ * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
+ * interrupts and other unwanted results
+ */
+ cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+ if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+ return;
+
if (action == CNIC_ARM_CQE) {
- cq_index = ep->qp.cqe_exp_seq_sn +
- ep->num_active_cmds / event_coal_div;
- cq_index %= (ep->qp.cqe_size * 2 + 1);
- if (!cq_index) {
+ num_active_cmds = ep->num_active_cmds;
+ if (num_active_cmds <= event_coal_min)
+ next_index = 1;
+ else
+ next_index = event_coal_min +
+ (num_active_cmds - event_coal_min) / event_coal_div;
+ if (!next_index)
+ next_index = 1;
+ cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
+ if (cq_index > ep->qp.cqe_size * 2)
+ cq_index -= ep->qp.cqe_size * 2;
+ if (!cq_index)
cq_index = 1;
- cq_db = (struct bnx2i_5771x_cq_db *)
- ep->qp.cq_pgtbl_virt;
- cq_db->sqn[0] = cq_index;
- }
+
+ cq_db->sqn[0] = cq_index;
}
}
@@ -366,6 +384,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_tmf_request *tmfabort_wqe;
u32 dword;
+ u32 scsi_lun[2];
bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -376,27 +395,35 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
tmfabort_wqe->op_attr = 0;
tmfabort_wqe->op_attr =
ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
- tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
- tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
tmfabort_wqe->reserved2 = 0;
tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
- if (!ctask || ctask->sc)
+ if (!ctask || !ctask->sc)
/*
* the iscsi layer must have completed the cmd while this
* was starting up.
+ *
+ * Note: In the case of a SCSI cmd timeout, the task's sc
+ * is still active; hence ctask->sc != 0
+ * In this case, the task must be aborted
*/
return 0;
+
ref_sc = ctask->sc;
+ /* Retrieve LUN directly from the ref_sc */
+ int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun);
+ tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
+ tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
+
if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
else
dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
- tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
+ tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
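The reworked bnx2i_arm_cq_event_coalescing() derives the next completion-queue arm point from the number of active commands: at or below event_coal_min the very next entry is armed, above it the distance grows by (active - min) / div, and the resulting index wraps within the CQ. A small sketch of just that arithmetic (the sample values in main() are illustrative, not hardware-derived):

#include <stdio.h>

/* compute how far ahead of the expected sequence number to arm the CQ,
 * following the coalescing logic in the hunk above */
static unsigned int next_arm_index(unsigned int exp_sn, unsigned int active,
                                   unsigned int coal_min, unsigned int coal_div,
                                   unsigned int cq_size)
{
        unsigned int next, idx;

        if (active <= coal_min)
                next = 1;
        else
                next = coal_min + (active - coal_min) / coal_div;
        if (!next)
                next = 1;

        idx = exp_sn + next - 1;
        if (idx > cq_size * 2)     /* wrap within the doubled CQ index space */
                idx -= cq_size * 2;
        if (!idx)
                idx = 1;
        return idx;
}

int main(void)
{
        printf("arm index: %u\n", next_arm_index(100, 40, 24, 1, 128));
        return 0;
}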
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 0c4210d..6d8172e 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.0.1e"
-#define DRV_MODULE_RELDATE "June 22, 2009"
+#define DRV_MODULE_VERSION "2.1.0"
+#define DRV_MODULE_RELDATE "Dec 06, 2009"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -32,6 +32,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
static DEFINE_MUTEX(bnx2i_dev_lock);
+unsigned int event_coal_min = 24;
+module_param(event_coal_min, int, 0664);
+MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
+
unsigned int event_coal_div = 1;
module_param(event_coal_div, int, 0664);
MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
@@ -83,8 +87,12 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711)
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+ else
+ printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
+ hba->pci_did);
}
@@ -363,7 +371,7 @@ static int __init bnx2i_mod_init(void)
printk(KERN_INFO "%s", version);
- if (!is_power_of_2(sq_size))
+ if (sq_size && !is_power_of_2(sq_size))
sq_size = roundup_pow_of_two(sq_size);
mutex_init(&bnx2i_dev_lock);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 132898c..33b2294 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -485,7 +485,6 @@ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
struct iscsi_task *task = session->cmds[i];
struct bnx2i_cmd *cmd = task->dd_data;
- /* Anil */
task->hdr = &cmd->hdr;
task->hdr_max = sizeof(struct iscsi_hdr);
@@ -765,7 +764,6 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
hba->pci_svid = hba->pcidev->subsystem_vendor;
hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
- bnx2i_identify_device(hba);
bnx2i_identify_device(hba);
bnx2i_setup_host_queue_size(hba, shost);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index c1d5be4..15a00e8 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -291,7 +291,7 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
c3cn_hold(c3cn);
spin_lock_bh(&c3cn->lock);
if (c3cn->state == C3CN_STATE_CONNECTING)
- fail_act_open(c3cn, EHOSTUNREACH);
+ fail_act_open(c3cn, -EHOSTUNREACH);
spin_unlock_bh(&c3cn->lock);
c3cn_put(c3cn);
__kfree_skb(skb);
@@ -792,18 +792,18 @@ static int act_open_rpl_status_to_errno(int status)
{
switch (status) {
case CPL_ERR_CONN_RESET:
- return ECONNREFUSED;
+ return -ECONNREFUSED;
case CPL_ERR_ARP_MISS:
- return EHOSTUNREACH;
+ return -EHOSTUNREACH;
case CPL_ERR_CONN_TIMEDOUT:
- return ETIMEDOUT;
+ return -ETIMEDOUT;
case CPL_ERR_TCAM_FULL:
- return ENOMEM;
+ return -ENOMEM;
case CPL_ERR_CONN_EXIST:
cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
- return EADDRINUSE;
+ return -EADDRINUSE;
default:
- return EIO;
+ return -EIO;
}
}
@@ -817,7 +817,7 @@ static void act_open_retry_timer(unsigned long data)
spin_lock_bh(&c3cn->lock);
skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
if (!skb)
- fail_act_open(c3cn, ENOMEM);
+ fail_act_open(c3cn, -ENOMEM);
else {
skb->sk = (struct sock *)c3cn;
set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -966,14 +966,14 @@ static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
case CPL_ERR_BAD_SYN: /* fall through */
case CPL_ERR_CONN_RESET:
return c3cn->state > C3CN_STATE_ESTABLISHED ?
- EPIPE : ECONNRESET;
+ -EPIPE : -ECONNRESET;
case CPL_ERR_XMIT_TIMEDOUT:
case CPL_ERR_PERSIST_TIMEDOUT:
case CPL_ERR_FINWAIT2_TIMEDOUT:
case CPL_ERR_KEEPALIVE_TIMEDOUT:
- return ETIMEDOUT;
+ return -ETIMEDOUT;
default:
- return EIO;
+ return -EIO;
}
}
@@ -1440,6 +1440,10 @@ void cxgb3i_c3cn_release(struct s3_conn *c3cn)
static int is_cxgb3_dev(struct net_device *dev)
{
struct cxgb3i_sdev_data *cdata;
+ struct net_device *ndev = dev;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN)
+ ndev = vlan_dev_real_dev(dev);
write_lock(&cdata_rwlock);
list_for_each_entry(cdata, &cdata_list, list) {
@@ -1447,7 +1451,7 @@ static int is_cxgb3_dev(struct net_device *dev)
int i;
for (i = 0; i < ports->nports; i++)
- if (dev == ports->lldevs[i]) {
+ if (ndev == ports->lldevs[i]) {
write_unlock(&cdata_rwlock);
return 1;
}
@@ -1563,9 +1567,29 @@ free_tid:
s3_free_atid(cdev, c3cn->tid);
c3cn->tid = 0;
out_err:
- return -1;
+ return -EINVAL;
}
+/**
+ * cxgb3i_find_dev - find the interface associated with the given address
+ * @ipaddr: ip address
+ */
+static struct net_device *
+cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
+{
+ struct flowi fl;
+ int err;
+ struct rtable *rt;
+
+ memset(&fl, 0, sizeof(fl));
+ fl.nl_u.ip4_u.daddr = ipaddr;
+
+ err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
+ if (!err)
+ return (&rt->u.dst)->dev;
+
+ return NULL;
+}
/**
* cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
@@ -1581,6 +1605,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
struct cxgb3i_sdev_data *cdata;
struct t3cdev *cdev;
__be32 sipv4;
+ struct net_device *dstdev;
int err;
c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
@@ -1591,6 +1616,13 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
c3cn->daddr.sin_port = usin->sin_port;
c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
+ dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
+ if (!dstdev || !is_cxgb3_dev(dstdev))
+ return -ENETUNREACH;
+
+ if (dstdev->priv_flags & IFF_802_1Q_VLAN)
+ dev = dstdev;
+
rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
c3cn->daddr.sin_addr.s_addr,
c3cn->saddr.sin_port,
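The cxgb3i hunks above switch the CPL-status translation helpers from positive to negative errno values, matching the kernel convention that in-kernel functions report failure as -Exxx and callers test for ret < 0. A toy user-space sketch of that convention (the status codes in the switch are made up):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* map a (made-up) adapter status to a negative errno, as the fixed helpers do */
static int status_to_errno(int status)
{
        switch (status) {
        case 1:  return -ECONNREFUSED;
        case 2:  return -EHOSTUNREACH;
        case 3:  return -ETIMEDOUT;
        default: return -EIO;
        }
}

int main(void)
{
        int ret = status_to_errno(2);

        if (ret < 0)    /* callers test the sign, never a positive magnitude */
                printf("failed: %s\n", strerror(-ret));
        return 0;
}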
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 7091050..1fe3b0f 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -388,8 +388,8 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
if (err > 0) {
int pdulen = err;
- cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
- task, skb, skb->len, skb->data_len, err);
+ cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
+ task, skb, skb->len, skb->data_len, err);
if (task->conn->hdrdgst_en)
pdulen += ISCSI_DIGEST_SIZE;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 47cfe1c..1a66019 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -748,6 +748,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1724"},
{"IBM", "1726"},
{"IBM", "1742"},
+ {"IBM", "1745"},
+ {"IBM", "1746"},
{"IBM", "1814"},
{"IBM", "1815"},
{"IBM", "1818"},
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index a30ffaa..2f47ae7 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -101,6 +101,8 @@ static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
static int fcoe_create(const char *, struct kernel_param *);
static int fcoe_destroy(const char *, struct kernel_param *);
+static int fcoe_enable(const char *, struct kernel_param *);
+static int fcoe_disable(const char *, struct kernel_param *);
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
@@ -115,10 +117,16 @@ static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in.");
+MODULE_PARM_DESC(create, " Creates fcoe instance on an Ethernet interface");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an Ethernet interface");
+module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface.");
+module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface.");
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
@@ -545,6 +553,23 @@ static void fcoe_queue_timer(ulong lport)
}
/**
+ * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
+ * @netdev: the associated net device
+ * @wwn: the output WWN
+ * @type: the type of WWN (WWPN or WWNN)
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+ const struct net_device_ops *ops = netdev->netdev_ops;
+
+ if (ops->ndo_fcoe_get_wwn)
+ return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
+ return -EINVAL;
+}
+
+/**
* fcoe_netdev_config() - Set up net device for SW FCoE
* @lport: The local port that is associated with the net device
* @netdev: The associated net device
@@ -611,9 +636,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
*/
if (netdev->priv_flags & IFF_802_1Q_VLAN)
vid = vlan_dev_vlan_id(netdev);
- wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+
+ if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
+ wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
fc_set_wwnn(lport, wwnn);
- wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid);
+ if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
+ wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+ 2, vid);
fc_set_wwpn(lport, wwpn);
}
@@ -1231,7 +1260,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
"CPU.\n");
spin_unlock_bh(&fps->fcoe_rx_list.lock);
- cpu = first_cpu(cpu_online_map);
+ cpu = cpumask_first(cpu_online_mask);
fps = &per_cpu(fcoe_percpu, cpu);
spin_lock_bh(&fps->fcoe_rx_list.lock);
if (!fps->thread) {
@@ -1838,6 +1867,104 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
}
/**
+ * fcoe_disable() - Disables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_disable(const char *buffer, struct kernel_param *kp)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ int rc = 0;
+
+ mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+
+ rtnl_lock();
+ fcoe = fcoe_hostlist_lookup_port(netdev);
+ rtnl_unlock();
+
+ if (fcoe)
+ fc_fabric_logoff(fcoe->ctlr.lp);
+ else
+ rc = -ENODEV;
+
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+}
+
+/**
+ * fcoe_enable() - Enables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_enable(const char *buffer, struct kernel_param *kp)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ int rc = 0;
+
+ mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module paramter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+
+ rtnl_lock();
+ fcoe = fcoe_hostlist_lookup_port(netdev);
+ rtnl_unlock();
+
+ if (fcoe)
+ rc = fc_fabric_login(fcoe->ctlr.lp);
+ else
+ rc = -ENODEV;
+
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+}
+
+/**
* fcoe_destroy() - Destroy a FCoE interface
* @buffer: The name of the Ethernet interface to be destroyed
* @kp: The associated kernel parameter
@@ -1882,6 +2009,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
fcoe_interface_cleanup(fcoe);
rtnl_unlock();
fcoe_if_destroy(fcoe->ctlr.lp);
+ module_put(THIS_MODULE);
+
out_putdev:
dev_put(netdev);
out_nodev:
@@ -1932,6 +2061,11 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
}
#endif
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -EINVAL;
+ goto out_nomod;
+ }
+
rtnl_lock();
netdev = fcoe_if_to_netdev(buffer);
if (!netdev) {
@@ -1972,17 +2106,24 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
if (!fcoe_link_ok(lport))
fcoe_ctlr_link_up(&fcoe->ctlr);
- rc = 0;
-out_free:
/*
* Release from init in fcoe_interface_create(), on success lport
* should be holding a reference taken in fcoe_if_create().
*/
fcoe_interface_put(fcoe);
+ dev_put(netdev);
+ rtnl_unlock();
+ mutex_unlock(&fcoe_config_mutex);
+
+ return 0;
+out_free:
+ fcoe_interface_put(fcoe);
out_putdev:
dev_put(netdev);
out_nodev:
rtnl_unlock();
+ module_put(THIS_MODULE);
+out_nomod:
mutex_unlock(&fcoe_config_mutex);
return rc;
}
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 9823291..511cb6b 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -1187,7 +1187,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
next_timer = fip->ctlr_ka_time;
if (time_after_eq(jiffies, fip->port_ka_time)) {
- fip->port_ka_time += jiffies +
+ fip->port_ka_time = jiffies +
msecs_to_jiffies(FIP_VN_KA_PERIOD);
fip->send_port_ka = 1;
}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
new file mode 100644
index 0000000..bb96fdd
--- /dev/null
+++ b/drivers/scsi/hpsa.c
@@ -0,0 +1,3531 @@
+/*
+ * Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/compat.h>
+#include <linux/blktrace_api.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <linux/cciss_ioctl.h>
+#include <linux/string.h>
+#include <linux/bitmap.h>
+#include <asm/atomic.h>
+#include <linux/kthread.h>
+#include "hpsa_cmd.h"
+#include "hpsa.h"
+
+/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
+#define HPSA_DRIVER_VERSION "1.0.0"
+#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
+
+/* How long to wait (in milliseconds) for board to go into simple mode */
+#define MAX_CONFIG_WAIT 30000
+#define MAX_IOCTL_CONFIG_WAIT 1000
+
+/* define how many times we will try a command because of bus resets */
+#define MAX_CMD_RETRIES 3
+
+/* Embedded module documentation macros - see modules.h */
+MODULE_AUTHOR("Hewlett-Packard Company");
+MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
+ HPSA_DRIVER_VERSION);
+MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
+MODULE_VERSION(HPSA_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static int hpsa_allow_any;
+module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_allow_any,
+ "Allow hpsa driver to access unknown HP Smart Array hardware");
+
+/* define the PCI info for the cards we can control */
+static const struct pci_device_id hpsa_pci_device_id[] = {
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
+ {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
+
+/* board_id = Subsystem Device ID & Vendor ID
+ * product = Marketing Name for the board
+ * access = Address of the struct of function pointers
+ */
+static struct board_type products[] = {
+ {0x3223103C, "Smart Array P800", &SA5_access},
+ {0x3234103C, "Smart Array P400", &SA5_access},
+ {0x323d103c, "Smart Array P700M", &SA5_access},
+ {0x3241103C, "Smart Array P212", &SA5_access},
+ {0x3243103C, "Smart Array P410", &SA5_access},
+ {0x3245103C, "Smart Array P410i", &SA5_access},
+ {0x3247103C, "Smart Array P411", &SA5_access},
+ {0x3249103C, "Smart Array P812", &SA5_access},
+ {0x324a103C, "Smart Array P712m", &SA5_access},
+ {0x324b103C, "Smart Array P711m", &SA5_access},
+ {0xFFFF103C, "Unknown Smart Array", &SA5_access},
+};
+
+static int number_of_controllers;
+
+static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
+static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
+static void start_io(struct ctlr_info *h);
+
+#ifdef CONFIG_COMPAT
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
+#endif
+
+static void cmd_free(struct ctlr_info *h, struct CommandList *c);
+static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
+static struct CommandList *cmd_alloc(struct ctlr_info *h);
+static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
+static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
+ void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+ int cmd_type);
+
+static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *));
+
+static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
+static int hpsa_slave_alloc(struct scsi_device *sdev);
+static void hpsa_slave_destroy(struct scsi_device *sdev);
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t lunid_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t unique_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
+static ssize_t host_store_rescan(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static int check_for_unit_attention(struct ctlr_info *h,
+ struct CommandList *c);
+static void check_ioctl_unit_attention(struct ctlr_info *h,
+ struct CommandList *c);
+
+static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+
+static struct device_attribute *hpsa_sdev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_lunid,
+ &dev_attr_unique_id,
+ NULL,
+};
+
+static struct device_attribute *hpsa_shost_attrs[] = {
+ &dev_attr_rescan,
+ NULL,
+};
+
+static struct scsi_host_template hpsa_driver_template = {
+ .module = THIS_MODULE,
+ .name = "hpsa",
+ .proc_name = "hpsa",
+ .queuecommand = hpsa_scsi_queue_command,
+ .can_queue = 512,
+ .this_id = -1,
+ .sg_tablesize = MAXSGENTRIES,
+ .cmd_per_lun = 512,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = hpsa_eh_device_reset_handler,
+ .ioctl = hpsa_ioctl,
+ .slave_alloc = hpsa_slave_alloc,
+ .slave_destroy = hpsa_slave_destroy,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hpsa_compat_ioctl,
+#endif
+ .sdev_attrs = hpsa_sdev_attrs,
+ .shost_attrs = hpsa_shost_attrs,
+};
+
+static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
+{
+ unsigned long *priv = shost_priv(sdev->host);
+ return (struct ctlr_info *) *priv;
+}
+
+static struct task_struct *hpsa_scan_thread;
+static DEFINE_MUTEX(hpsa_scan_mutex);
+static LIST_HEAD(hpsa_scan_q);
+static int hpsa_scan_func(void *data);
+
+/**
+ * add_to_scan_list() - add controller to rescan queue
+ * @h: Pointer to the controller.
+ *
+ * Adds the controller to the rescan queue if not already on the queue.
+ *
+ * returns 1 if added to the queue, 0 if skipped (could be on the
+ * queue already, or the controller could be initializing or shutting
+ * down).
+ **/
+static int add_to_scan_list(struct ctlr_info *h)
+{
+ struct ctlr_info *test_h;
+ int found = 0;
+ int ret = 0;
+
+ if (h->busy_initializing)
+ return 0;
+
+ /*
+ * If we don't get the lock, it means the driver is unloading
+ * and there's no point in scheduling a new scan.
+ */
+ if (!mutex_trylock(&h->busy_shutting_down))
+ return 0;
+
+ mutex_lock(&hpsa_scan_mutex);
+ list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
+ if (test_h == h) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found && !h->busy_scanning) {
+ INIT_COMPLETION(h->scan_wait);
+ list_add_tail(&h->scan_list, &hpsa_scan_q);
+ ret = 1;
+ }
+ mutex_unlock(&hpsa_scan_mutex);
+ mutex_unlock(&h->busy_shutting_down);
+
+ return ret;
+}
+
+/**
+ * remove_from_scan_list() - remove controller from rescan queue
+ * @h: Pointer to the controller.
+ *
+ * Removes the controller from the rescan queue if present. Blocks if
+ * the controller is currently conducting a rescan. The controller
+ * can be in one of three states:
+ * 1. Doesn't need a scan
+ * 2. On the scan list, but not scanning yet (we remove it)
+ * 3. Busy scanning (and not on the list). In this case we want to wait for
+ * the scan to complete to make sure the scanning thread for this
+ * controller is completely idle.
+ **/
+static void remove_from_scan_list(struct ctlr_info *h)
+{
+ struct ctlr_info *test_h, *tmp_h;
+
+ mutex_lock(&hpsa_scan_mutex);
+ list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
+ if (test_h == h) { /* state 2. */
+ list_del(&h->scan_list);
+ complete_all(&h->scan_wait);
+ mutex_unlock(&hpsa_scan_mutex);
+ return;
+ }
+ }
+ if (h->busy_scanning) { /* state 3. */
+ mutex_unlock(&hpsa_scan_mutex);
+ wait_for_completion(&h->scan_wait);
+ } else { /* state 1, nothing to do. */
+ mutex_unlock(&hpsa_scan_mutex);
+ }
+}
+
+/* hpsa_scan_func() - kernel thread used to rescan controllers
+ * @data: Ignored.
+ *
+ * A kernel thread used to scan for drive topology changes on
+ * controllers. The thread processes only one controller at a time
+ * using a queue. Controllers are added to the queue using
+ * add_to_scan_list() and removed from the queue either after
+ * processing is done or via remove_from_scan_list().
+ *
+ * returns 0.
+ **/
+static int hpsa_scan_func(__attribute__((unused)) void *data)
+{
+ struct ctlr_info *h;
+ int host_no;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ if (kthread_should_stop())
+ break;
+
+ while (1) {
+ mutex_lock(&hpsa_scan_mutex);
+ if (list_empty(&hpsa_scan_q)) {
+ mutex_unlock(&hpsa_scan_mutex);
+ break;
+ }
+ h = list_entry(hpsa_scan_q.next, struct ctlr_info,
+ scan_list);
+ list_del(&h->scan_list);
+ h->busy_scanning = 1;
+ mutex_unlock(&hpsa_scan_mutex);
+ host_no = h->scsi_host ? h->scsi_host->host_no : -1;
+ hpsa_update_scsi_devices(h, host_no);
+ complete_all(&h->scan_wait);
+ mutex_lock(&hpsa_scan_mutex);
+ h->busy_scanning = 0;
+ mutex_unlock(&hpsa_scan_mutex);
+ }
+ }
+ return 0;
+}
+
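+/* Check a command's sense data for a unit attention condition; log the
+ * condition and return 1 if one is present, 0 otherwise.
+ */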
+static int check_for_unit_attention(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
+ return 0;
+
+ switch (c->err_info->SenseInfo[12]) {
+ case STATE_CHANGED:
+ dev_warn(&h->pdev->dev, "hpsa%d: a state change "
+ "detected, command retried\n", h->ctlr);
+ break;
+ case LUN_FAILED:
+ dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
+ "detected, action required\n", h->ctlr);
+ break;
+ case REPORT_LUNS_CHANGED:
+ dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
+ "changed\n", h->ctlr);
+ /*
+ * Here, we could call add_to_scan_list and wake up the scan thread,
+ * except that it's quite likely that we will get more than one
+ * REPORT_LUNS_CHANGED condition in quick succession, which means
+ * that those which occur after the first one will likely happen
+ * *during* the hpsa_scan_thread's rescan. And the rescan code is not
+ * robust enough to restart in the middle, undoing what it has already
+ * done, and it's not clear that it's even possible to do this, since
+ * part of what it does is notify the SCSI mid layer, which starts
+ * doing its own I/O to read partition tables and so on, and the
+ * driver doesn't have visibility to know what might need undoing.
+ * In any event, even if it were possible, it would be horribly
+ * complicated to get right, so we just don't do it for now.
+ *
+ * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
+ */
+ break;
+ case POWER_OR_RESET:
+ dev_warn(&h->pdev->dev, "hpsa%d: a power on "
+ "or device reset detected\n", h->ctlr);
+ break;
+ case UNIT_ATTENTION_CLEARED:
+ dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
+ "cleared by another initiator\n", h->ctlr);
+ break;
+ default:
+ dev_warn(&h->pdev->dev, "hpsa%d: unknown "
+ "unit attention detected\n", h->ctlr);
+ break;
+ }
+ return 1;
+}
+
+static ssize_t host_store_rescan(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ unsigned long *priv = shost_priv(shost);
+ h = (struct ctlr_info *) *priv;
+ if (add_to_scan_list(h)) {
+ wake_up_process(hpsa_scan_thread);
+ wait_for_completion_interruptible(&h->scan_wait);
+ }
+ return count;
+}
+
+/* Enqueuing and dequeuing functions for cmdlists. */
+static inline void addQ(struct hlist_head *list, struct CommandList *c)
+{
+ hlist_add_head(&c->list, list);
+}
+
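+/* Queue a command on the controller's request queue and kick off I/O,
+ * all under h->lock.
+ */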
+static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+ start_io(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static inline void removeQ(struct CommandList *c)
+{
+ if (WARN_ON(hlist_unhashed(&c->list)))
+ return;
+ hlist_del_init(&c->list);
+}
+
+static inline int is_hba_lunid(unsigned char scsi3addr[])
+{
+ return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+}
+
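+/* Logical volumes have bits 7:6 of byte 3 of the 8-byte LUN address set to 01. */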
+static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
+{
+ return (scsi3addr[3] & 0xC0) == 0x40;
+}
+
+static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
+ "UNKNOWN"
+};
+#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t l = 0;
+ int rlevel;
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+
+ /* Is this even a logical drive? */
+ if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ l = snprintf(buf, PAGE_SIZE, "N/A\n");
+ return l;
+ }
+
+ rlevel = hdev->raid_level;
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (rlevel < 0 || rlevel > RAID_UNKNOWN)
+ rlevel = RAID_UNKNOWN;
+ l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
+ return l;
+}
+
+static ssize_t lunid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ unsigned char lunid[8];
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ lunid[0], lunid[1], lunid[2], lunid[3],
+ lunid[4], lunid[5], lunid[6], lunid[7]);
+}
+
+static ssize_t unique_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ unsigned char sn[16];
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ memcpy(sn, hdev->device_id, sizeof(sn));
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 16 * 2 + 2,
+ "%02X%02X%02X%02X%02X%02X%02X%02X"
+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ sn[0], sn[1], sn[2], sn[3],
+ sn[4], sn[5], sn[6], sn[7],
+ sn[8], sn[9], sn[10], sn[11],
+ sn[12], sn[13], sn[14], sn[15]);
+}
+
+static int hpsa_find_target_lun(struct ctlr_info *h,
+ unsigned char scsi3addr[], int bus, int *target, int *lun)
+{
+ /* finds an unused bus, target, lun for a new physical device
+ * assumes h->devlock is held
+ */
+ int i, found = 0;
+ DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
+
+ memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
+
+ for (i = 0; i < h->ndevices; i++) {
+ if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
+ set_bit(h->dev[i]->target, lun_taken);
+ }
+
+ for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
+ if (!test_bit(i, lun_taken)) {
+ /* *bus = 1; */
+ *target = i;
+ *lun = 0;
+ found = 1;
+ break;
+ }
+ }
+ return !found;
+}
+
+/* Add an entry into h->dev[] array. */
+static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
+ struct hpsa_scsi_dev_t *device,
+ struct hpsa_scsi_dev_t *added[], int *nadded)
+{
+ /* assumes h->devlock is held */
+ int n = h->ndevices;
+ int i;
+ unsigned char addr1[8], addr2[8];
+ struct hpsa_scsi_dev_t *sd;
+
+ if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
+ dev_err(&h->pdev->dev, "too many devices, some will be "
+ "inaccessible.\n");
+ return -1;
+ }
+
+ /* physical devices do not have lun or target assigned until now. */
+ if (device->lun != -1)
+ /* Logical device, lun is already assigned. */
+ goto lun_assigned;
+
+ /* If this device is a non-zero lun of a multi-lun device,
+ * byte 4 of the 8-byte LUN addr will contain the logical
+ * unit number, zero otherwise.
+ */
+ if (device->scsi3addr[4] == 0) {
+ /* This is not a non-zero lun of a multi-lun device */
+ if (hpsa_find_target_lun(h, device->scsi3addr,
+ device->bus, &device->target, &device->lun) != 0)
+ return -1;
+ goto lun_assigned;
+ }
+
+ /* This is a non-zero lun of a multi-lun device.
+ * Search through our list and find the device which
+ * has the same 8 byte LUN address, excepting byte 4.
+ * Assign the same bus and target for this new LUN.
+ * Use the logical unit number from the firmware.
+ */
+ memcpy(addr1, device->scsi3addr, 8);
+ addr1[4] = 0;
+ for (i = 0; i < n; i++) {
+ sd = h->dev[i];
+ memcpy(addr2, sd->scsi3addr, 8);
+ addr2[4] = 0;
+ /* differ only in byte 4? */
+ if (memcmp(addr1, addr2, 8) == 0) {
+ device->bus = sd->bus;
+ device->target = sd->target;
+ device->lun = device->scsi3addr[4];
+ break;
+ }
+ }
+ if (device->lun == -1) {
+ dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
+ " suspect firmware bug or unsupported hardware "
+ "configuration.\n");
+ return -1;
+ }
+
+lun_assigned:
+
+ h->dev[n] = device;
+ h->ndevices++;
+ added[*nadded] = device;
+ (*nadded)++;
+
+ /* initially, (before registering with scsi layer) we don't
+ * know our hostno and we don't want to print anything first
+ * time anyway (the scsi layer's inquiries will show that info)
+ */
+ /* if (hostno != -1) */
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
+ scsi_device_type(device->devtype), hostno,
+ device->bus, device->target, device->lun);
+ return 0;
+}
+
+/* Remove an entry from h->dev[] array. */
+static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
+ struct hpsa_scsi_dev_t *removed[], int *nremoved)
+{
+ /* assumes h->devlock is held */
+ int i;
+ struct hpsa_scsi_dev_t *sd;
+
+ if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+ BUG();
+
+ sd = h->dev[entry];
+ removed[*nremoved] = h->dev[entry];
+ (*nremoved)++;
+
+ for (i = entry; i < h->ndevices-1; i++)
+ h->dev[i] = h->dev[i+1];
+ h->ndevices--;
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
+ scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
+ sd->lun);
+}
+
+#define SCSI3ADDR_EQ(a, b) ( \
+ (a)[7] == (b)[7] && \
+ (a)[6] == (b)[6] && \
+ (a)[5] == (b)[5] && \
+ (a)[4] == (b)[4] && \
+ (a)[3] == (b)[3] && \
+ (a)[2] == (b)[2] && \
+ (a)[1] == (b)[1] && \
+ (a)[0] == (b)[0])
+
+static void fixup_botched_add(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *added)
+{
+ /* called when scsi_add_device fails in order to re-adjust
+ * h->dev[] to match the mid layer's view.
+ */
+ unsigned long flags;
+ int i, j;
+
+ spin_lock_irqsave(&h->lock, flags);
+ for (i = 0; i < h->ndevices; i++) {
+ if (h->dev[i] == added) {
+ for (j = i; j < h->ndevices-1; j++)
+ h->dev[j] = h->dev[j+1];
+ h->ndevices--;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+ kfree(added);
+}
+
+static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
+ struct hpsa_scsi_dev_t *dev2)
+{
+ if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
+ (dev1->lun != -1 && dev2->lun != -1)) &&
+ dev1->devtype != 0x0C)
+ return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
+
+ /* we compare everything except lun and target as these
+ * are not yet assigned. Compare parts likely
+ * to differ first
+ */
+ if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
+ sizeof(dev1->scsi3addr)) != 0)
+ return 0;
+ if (memcmp(dev1->device_id, dev2->device_id,
+ sizeof(dev1->device_id)) != 0)
+ return 0;
+ if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
+ return 0;
+ if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
+ return 0;
+ if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
+ return 0;
+ if (dev1->devtype != dev2->devtype)
+ return 0;
+ if (dev1->raid_level != dev2->raid_level)
+ return 0;
+ if (dev1->bus != dev2->bus)
+ return 0;
+ return 1;
+}
+
+/* Find needle in haystack. If exact match found, return DEVICE_SAME,
+ * and return needle location in *index. If scsi3addr matches, but not
+ * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
+ * location in *index. If needle not found, return DEVICE_NOT_FOUND.
+ */
+static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
+ struct hpsa_scsi_dev_t *haystack[], int haystack_size,
+ int *index)
+{
+ int i;
+#define DEVICE_NOT_FOUND 0
+#define DEVICE_CHANGED 1
+#define DEVICE_SAME 2
+ for (i = 0; i < haystack_size; i++) {
+ if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
+ *index = i;
+ if (device_is_the_same(needle, haystack[i]))
+ return DEVICE_SAME;
+ else
+ return DEVICE_CHANGED;
+ }
+ }
+ *index = -1;
+ return DEVICE_NOT_FOUND;
+}
+
+static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+ struct hpsa_scsi_dev_t *sd[], int nsds)
+{
+ /* sd contains scsi3 addresses and devtypes, and inquiry
+ * data. This function takes what's in sd to be the current
+ * reality and updates h->dev[] to reflect that reality.
+ */
+ int i, entry, device_change, changes = 0;
+ struct hpsa_scsi_dev_t *csd;
+ unsigned long flags;
+ struct hpsa_scsi_dev_t **added, **removed;
+ int nadded, nremoved;
+ struct Scsi_Host *sh = NULL;
+
+ added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+ GFP_KERNEL);
+ removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+ GFP_KERNEL);
+
+ if (!added || !removed) {
+ dev_warn(&h->pdev->dev, "out of memory in "
+ "adjust_hpsa_scsi_table\n");
+ goto free_and_out;
+ }
+
+ spin_lock_irqsave(&h->devlock, flags);
+
+ /* find any devices in h->dev[] that are not in
+ * sd[] and remove them from h->dev[], and for any
+ * devices which have changed, remove the old device
+ * info and add the new device info.
+ */
+ i = 0;
+ nremoved = 0;
+ nadded = 0;
+ while (i < h->ndevices) {
+ csd = h->dev[i];
+ device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
+ if (device_change == DEVICE_NOT_FOUND) {
+ changes++;
+ hpsa_scsi_remove_entry(h, hostno, i,
+ removed, &nremoved);
+ continue; /* remove ^^^, hence i not incremented */
+ } else if (device_change == DEVICE_CHANGED) {
+ changes++;
+ hpsa_scsi_remove_entry(h, hostno, i,
+ removed, &nremoved);
+ (void) hpsa_scsi_add_entry(h, hostno, sd[entry],
+ added, &nadded);
+ /* add can't fail, we just removed one. */
+ sd[entry] = NULL; /* prevent it from being freed */
+ }
+ i++;
+ }
+
+ /* Now, make sure every device listed in sd[] is also
+ * listed in h->dev[], adding them if they aren't found
+ */
+
+ for (i = 0; i < nsds; i++) {
+ if (!sd[i]) /* if already added above. */
+ continue;
+ device_change = hpsa_scsi_find_entry(sd[i], h->dev,
+ h->ndevices, &entry);
+ if (device_change == DEVICE_NOT_FOUND) {
+ changes++;
+ if (hpsa_scsi_add_entry(h, hostno, sd[i],
+ added, &nadded) != 0)
+ break;
+ sd[i] = NULL; /* prevent from being freed later. */
+ } else if (device_change == DEVICE_CHANGED) {
+ /* should never happen... */
+ changes++;
+ dev_warn(&h->pdev->dev,
+ "device unexpectedly changed.\n");
+ /* but if it does happen, we just ignore that device */
+ }
+ }
+ spin_unlock_irqrestore(&h->devlock, flags);
+
+ /* Don't notify the scsi mid layer of any changes the first time
+ * through (or if there are no changes); on the first pass,
+ * scsi_scan_host will do it later.
+ */
+ if (hostno == -1 || !changes)
+ goto free_and_out;
+
+ sh = h->scsi_host;
+ /* Notify scsi mid layer of any removed devices */
+ for (i = 0; i < nremoved; i++) {
+ struct scsi_device *sdev =
+ scsi_device_lookup(sh, removed[i]->bus,
+ removed[i]->target, removed[i]->lun);
+ if (sdev != NULL) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ } else {
+ /* We don't expect to get here.
+ * Future cmds to this device will get a selection
+ * timeout as if the device were gone.
+ */
+ dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
+ " for removal.", hostno, removed[i]->bus,
+ removed[i]->target, removed[i]->lun);
+ }
+ kfree(removed[i]);
+ removed[i] = NULL;
+ }
+
+ /* Notify scsi mid layer of any added devices */
+ for (i = 0; i < nadded; i++) {
+ if (scsi_add_device(sh, added[i]->bus,
+ added[i]->target, added[i]->lun) == 0)
+ continue;
+ dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
+ "device not added.\n", hostno, added[i]->bus,
+ added[i]->target, added[i]->lun);
+ /* now we have to remove it from h->dev,
+ * since it didn't get added to scsi mid layer
+ */
+ fixup_botched_add(h, added[i]);
+ }
+
+free_and_out:
+ kfree(added);
+ kfree(removed);
+ return 0;
+}
+
+/*
+ * Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *
+ * Assumes h->devlock is held.
+ */
+static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
+ int bus, int target, int lun)
+{
+ int i;
+ struct hpsa_scsi_dev_t *sd;
+
+ for (i = 0; i < h->ndevices; i++) {
+ sd = h->dev[i];
+ if (sd->bus == bus && sd->target == target && sd->lun == lun)
+ return sd;
+ }
+ return NULL;
+}
+
+/* link sdev->hostdata to our per-device structure. */
+static int hpsa_slave_alloc(struct scsi_device *sdev)
+{
+ struct hpsa_scsi_dev_t *sd;
+ unsigned long flags;
+ struct ctlr_info *h;
+
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->devlock, flags);
+ sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
+ sdev_id(sdev), sdev->lun);
+ if (sd != NULL)
+ sdev->hostdata = sd;
+ spin_unlock_irqrestore(&h->devlock, flags);
+ return 0;
+}
+
+static void hpsa_slave_destroy(struct scsi_device *sdev)
+{
+ return; /* nothing to do. */
+}
+
+static void hpsa_scsi_setup(struct ctlr_info *h)
+{
+ h->ndevices = 0;
+ h->scsi_host = NULL;
+ spin_lock_init(&h->devlock);
+ return;
+}
+
+static void complete_scsi_command(struct CommandList *cp,
+ int timeout, __u32 tag)
+{
+ struct scsi_cmnd *cmd;
+ struct ctlr_info *h;
+ struct ErrorInfo *ei;
+
+ unsigned char sense_key;
+ unsigned char asc; /* additional sense code */
+ unsigned char ascq; /* additional sense code qualifier */
+
+ ei = cp->err_info;
+ cmd = (struct scsi_cmnd *) cp->scsi_cmd;
+ h = cp->h;
+
+ scsi_dma_unmap(cmd); /* undo the DMA mappings */
+
+ cmd->result = (DID_OK << 16); /* host byte */
+ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+ cmd->result |= (ei->ScsiStatus << 1);
+
+ /* copy the sense data whether we need to or not. */
+ memcpy(cmd->sense_buffer, ei->SenseInfo,
+ ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
+ SCSI_SENSE_BUFFERSIZE :
+ ei->SenseLen);
+ scsi_set_resid(cmd, ei->ResidualCnt);
+
+ if (ei->CommandStatus == 0) {
+ cmd->scsi_done(cmd);
+ cmd_free(h, cp);
+ return;
+ }
+
+ /* an error has occurred */
+ switch (ei->CommandStatus) {
+
+ case CMD_TARGET_STATUS:
+ if (ei->ScsiStatus) {
+ /* Get sense key */
+ sense_key = 0xf & ei->SenseInfo[2];
+ /* Get additional sense code */
+ asc = ei->SenseInfo[12];
+ /* Get additional sense code qualifier */
+ ascq = ei->SenseInfo[13];
+ }
+
+ if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
+ if (check_for_unit_attention(h, cp)) {
+ cmd->result = DID_SOFT_ERROR << 16;
+ break;
+ }
+ if (sense_key == ILLEGAL_REQUEST) {
+ /*
+ * SCSI REPORT_LUNS is commonly unsupported on
+ * Smart Array. Suppress noisy complaint.
+ */
+ if (cp->Request.CDB[0] == REPORT_LUNS)
+ break;
+
+ /* If ASC/ASCQ indicate Logical Unit
+ * Not Supported condition,
+ */
+ if ((asc == 0x25) && (ascq == 0x0)) {
+ dev_warn(&h->pdev->dev, "cp %p "
+ "has check condition\n", cp);
+ break;
+ }
+ }
+
+ if (sense_key == NOT_READY) {
+ /* If Sense is Not Ready, Logical Unit
+ * Not ready, Manual Intervention
+ * required
+ */
+ if ((asc == 0x04) && (ascq == 0x03)) {
+ cmd->result = DID_NO_CONNECT << 16;
+ dev_warn(&h->pdev->dev, "cp %p "
+ "has check condition: unit "
+ "not ready, manual "
+ "intervention required\n", cp);
+ break;
+ }
+ }
+
+
+ /* Must be some other type of check condition */
+ dev_warn(&h->pdev->dev, "cp %p has check condition: "
+ "unknown type: "
+ "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
+ "Returning result: 0x%x, "
+ "cmd=[%02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x]\n",
+ cp, sense_key, asc, ascq,
+ cmd->result,
+ cmd->cmnd[0], cmd->cmnd[1],
+ cmd->cmnd[2], cmd->cmnd[3],
+ cmd->cmnd[4], cmd->cmnd[5],
+ cmd->cmnd[6], cmd->cmnd[7],
+ cmd->cmnd[8], cmd->cmnd[9]);
+ break;
+ }
+
+
+ /* Problem was not a check condition.
+ * Pass it up to the upper layers...
+ */
+ if (ei->ScsiStatus) {
+ dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
+ "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
+ "Returning result: 0x%x\n",
+ cp, ei->ScsiStatus,
+ sense_key, asc, ascq,
+ cmd->result);
+ } else { /* scsi status is zero??? How??? */
+ dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
+ "Returning no connection.\n", cp),
+
+ /* Ordinarily, this case should never happen,
+ * but there is a bug in some released firmware
+ * revisions that allows it to happen if, for
+ * example, a 4100 backplane loses power and
+ * the tape drive is in it. We assume that
+ * it's a fatal error of some kind because we
+ * can't show that it wasn't. We will make it
+ * look like selection timeout since that is
+ * the most common reason for this to occur,
+ * and it's severe enough.
+ */
+
+ cmd->result = DID_NO_CONNECT << 16;
+ }
+ break;
+
+ case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+ break;
+ case CMD_DATA_OVERRUN:
+ dev_warn(&h->pdev->dev, "cp %p has"
+ " completed with data overrun "
+ "reported\n", cp);
+ break;
+ case CMD_INVALID: {
+ /* print_bytes(cp, sizeof(*cp), 1, 0);
+ print_cmd(cp); */
+ /* We get CMD_INVALID if you address a non-existent device
+ * instead of a selection timeout (no response). You will
+ * see this if you yank out a drive, then try to access it.
+ * This is kind of a shame because it means that any other
+ * CMD_INVALID (e.g. driver bug) will get interpreted as a
+ * missing target. */
+ cmd->result = DID_NO_CONNECT << 16;
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
+ dev_warn(&h->pdev->dev, "cp %p has "
+ "protocol error \n", cp);
+ break;
+ case CMD_HARDWARE_ERR:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
+ break;
+ case CMD_CONNECTION_LOST:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
+ break;
+ case CMD_ABORTED:
+ cmd->result = DID_ABORT << 16;
+ dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
+ cp, ei->ScsiStatus);
+ break;
+ case CMD_ABORT_FAILED:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ cmd->result = DID_ABORT << 16;
+ dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
+ "abort\n", cp);
+ break;
+ case CMD_TIMEOUT:
+ cmd->result = DID_TIME_OUT << 16;
+ dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
+ break;
+ default:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
+ cp, ei->CommandStatus);
+ }
+ cmd->scsi_done(cmd);
+ cmd_free(h, cp);
+}
+
+static int hpsa_scsi_detect(struct ctlr_info *h)
+{
+ struct Scsi_Host *sh;
+ int error;
+
+ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+ if (sh == NULL)
+ goto fail;
+
+ sh->io_port = 0;
+ sh->n_io_port = 0;
+ sh->this_id = -1;
+ sh->max_channel = 3;
+ sh->max_cmd_len = MAX_COMMAND_SIZE;
+ sh->max_lun = HPSA_MAX_LUN;
+ sh->max_id = HPSA_MAX_LUN;
+ h->scsi_host = sh;
+ sh->hostdata[0] = (unsigned long) h;
+ sh->irq = h->intr[SIMPLE_MODE_INT];
+ sh->unique_id = sh->irq;
+ error = scsi_add_host(sh, &h->pdev->dev);
+ if (error)
+ goto fail_host_put;
+ scsi_scan_host(sh);
+ return 0;
+
+ fail_host_put:
+ dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
+ " failed for controller %d\n", h->ctlr);
+ scsi_host_put(sh);
+ return -1;
+ fail:
+ dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
+ " failed for controller %d\n", h->ctlr);
+ return -1;
+}
+
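+/* Undo the PCI DMA mappings of a command's first sg_used scatter-gather entries. */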
+static void hpsa_pci_unmap(struct pci_dev *pdev,
+ struct CommandList *c, int sg_used, int data_direction)
+{
+ int i;
+ union u64bit addr64;
+
+ for (i = 0; i < sg_used; i++) {
+ addr64.val32.lower = c->SG[i].Addr.lower;
+ addr64.val32.upper = c->SG[i].Addr.upper;
+ pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
+ data_direction);
+ }
+}
+
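+/* Map a single contiguous buffer for DMA and describe it in SG[0] of the command. */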
+static void hpsa_map_one(struct pci_dev *pdev,
+ struct CommandList *cp,
+ unsigned char *buf,
+ size_t buflen,
+ int data_direction)
+{
+ __u64 addr64;
+
+ if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+ cp->Header.SGList = 0;
+ cp->Header.SGTotal = 0;
+ return;
+ }
+
+ addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
+ cp->SG[0].Addr.lower =
+ (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[0].Addr.upper =
+ (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[0].Len = buflen;
+ cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+}
+
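+/* Submit a command and wait for it to complete via the interrupt handler. */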
+static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ c->waiting = &wait;
+ enqueue_cmd_and_start_io(h, c);
+ wait_for_completion(&wait);
+}
+
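+/* Issue a command, retrying up to three times if it fails with a unit
+ * attention condition.
+ */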
+static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
+ struct CommandList *c, int data_direction)
+{
+ int retry_count = 0;
+
+ do {
+ memset(c->err_info, 0, sizeof(*c->err_info));
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ retry_count++;
+ } while (check_for_unit_attention(h, c) && retry_count <= 3);
+ hpsa_pci_unmap(h->pdev, c, 1, data_direction);
+}
+
+static void hpsa_scsi_interpret_error(struct CommandList *cp)
+{
+ struct ErrorInfo *ei;
+ struct device *d = &cp->h->pdev->dev;
+
+ ei = cp->err_info;
+ switch (ei->CommandStatus) {
+ case CMD_TARGET_STATUS:
+ dev_warn(d, "cmd %p has completed with errors\n", cp);
+ dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
+ ei->ScsiStatus);
+ if (ei->ScsiStatus == 0)
+ dev_warn(d, "SCSI status is abnormally zero. "
+ "(probably indicates selection timeout "
+ "reported incorrectly due to a known "
+ "firmware bug, circa July, 2001.)\n");
+ break;
+ case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+ dev_info(d, "UNDERRUN\n");
+ break;
+ case CMD_DATA_OVERRUN:
+ dev_warn(d, "cp %p has completed with data overrun\n", cp);
+ break;
+ case CMD_INVALID: {
+ /* controller unfortunately reports SCSI passthru's
+ * to non-existent targets as invalid commands.
+ */
+ dev_warn(d, "cp %p is reported invalid (probably means "
+ "target device no longer present)\n", cp);
+ /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
+ print_cmd(cp); */
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
+ dev_warn(d, "cp %p has protocol error \n", cp);
+ break;
+ case CMD_HARDWARE_ERR:
+ /* cmd->result = DID_ERROR << 16; */
+ dev_warn(d, "cp %p had hardware error\n", cp);
+ break;
+ case CMD_CONNECTION_LOST:
+ dev_warn(d, "cp %p had connection lost\n", cp);
+ break;
+ case CMD_ABORTED:
+ dev_warn(d, "cp %p was aborted\n", cp);
+ break;
+ case CMD_ABORT_FAILED:
+ dev_warn(d, "cp %p reports abort failed\n", cp);
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
+ break;
+ case CMD_TIMEOUT:
+ dev_warn(d, "cp %p timed out\n", cp);
+ break;
+ default:
+ dev_warn(d, "cp %p returned unknown status %x\n", cp,
+ ei->CommandStatus);
+ }
+}
+
+static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
+ unsigned char page, unsigned char *buf,
+ unsigned char bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -1;
+ }
+
+ fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(c);
+ rc = -1;
+ }
+ cmd_special_free(h, c);
+ return rc;
+}
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -1;
+ }
+
+ fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ /* no unmap needed here because no data xfer. */
+
+ ei = c->err_info;
+ if (ei->CommandStatus != 0) {
+ hpsa_scsi_interpret_error(c);
+ rc = -1;
+ }
+ cmd_special_free(h, c);
+ return rc;
+}
+
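+/* Get the RAID level of a logical drive from vendor-specific inquiry page 0xC1
+ * (the level is reported in byte 8 of the response).
+ */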
+static void hpsa_get_raid_level(struct ctlr_info *h,
+ unsigned char *scsi3addr, unsigned char *raid_level)
+{
+ int rc;
+ unsigned char *buf;
+
+ *raid_level = RAID_UNKNOWN;
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
+ if (rc == 0)
+ *raid_level = buf[8];
+ if (*raid_level > RAID_UNKNOWN)
+ *raid_level = RAID_UNKNOWN;
+ kfree(buf);
+ return;
+}
+
+/* Get the device id from inquiry page 0x83 */
+static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
+ unsigned char *device_id, int buflen)
+{
+ int rc;
+ unsigned char *buf;
+
+ if (buflen > 16)
+ buflen = 16;
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -1;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
+ if (rc == 0)
+ memcpy(device_id, &buf[8], buflen);
+ kfree(buf);
+ return rc != 0;
+}
+
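+/* Issue a CISS "report logical LUNs" or "report physical LUNs" command to the
+ * controller; "logical" selects which.
+ */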
+static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
+ struct ReportLUNdata *buf, int bufsize,
+ int extended_response)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ unsigned char scsi3addr[8];
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+ if (c == NULL) { /* trouble... */
+ dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -1;
+ }
+
+ memset(&scsi3addr[0], 0, 8); /* address the controller */
+
+ fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+ buf, bufsize, 0, scsi3addr, TYPE_CMD);
+ if (extended_response)
+ c->Request.CDB[1] = extended_response;
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 &&
+ ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(c);
+ rc = -1;
+ }
+ cmd_special_free(h, c);
+ return rc;
+}
+
+static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
+ struct ReportLUNdata *buf,
+ int bufsize, int extended_response)
+{
+ return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
+}
+
+static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
+ struct ReportLUNdata *buf, int bufsize)
+{
+ return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
+}
+
+static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
+ int bus, int target, int lun)
+{
+ device->bus = bus;
+ device->target = target;
+ device->lun = lun;
+}
+
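+/* Inquire a device for its type, vendor, model, revision, device id and RAID
+ * level, filling in *this_device. Returns 0 on success, 1 on failure.
+ */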
+static int hpsa_update_device_info(struct ctlr_info *h,
+ unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
+{
+#define OBDR_TAPE_INQ_SIZE 49
+ unsigned char *inq_buff = NULL;
+
+ inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+ if (!inq_buff)
+ goto bail_out;
+
+ memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
+ /* Do an inquiry to the device to see what it is. */
+ if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
+ (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
+ /* Inquiry failed (msg printed already) */
+ dev_err(&h->pdev->dev,
+ "hpsa_update_device_info: inquiry failed\n");
+ goto bail_out;
+ }
+
+ /* As a side effect, record the firmware version number
+ * if we happen to be talking to the RAID controller.
+ */
+ if (is_hba_lunid(scsi3addr))
+ memcpy(h->firm_ver, &inq_buff[32], 4);
+
+ this_device->devtype = (inq_buff[0] & 0x1f);
+ memcpy(this_device->scsi3addr, scsi3addr, 8);
+ memcpy(this_device->vendor, &inq_buff[8],
+ sizeof(this_device->vendor));
+ memcpy(this_device->model, &inq_buff[16],
+ sizeof(this_device->model));
+ memcpy(this_device->revision, &inq_buff[32],
+ sizeof(this_device->revision));
+ memset(this_device->device_id, 0,
+ sizeof(this_device->device_id));
+ hpsa_get_device_id(h, scsi3addr, this_device->device_id,
+ sizeof(this_device->device_id));
+
+ if (this_device->devtype == TYPE_DISK &&
+ is_logical_dev_addr_mode(scsi3addr))
+ hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
+ else
+ this_device->raid_level = RAID_UNKNOWN;
+
+ kfree(inq_buff);
+ return 0;
+
+bail_out:
+ kfree(inq_buff);
+ return 1;
+}
+
+static unsigned char *msa2xxx_model[] = {
+ "MSA2012",
+ "MSA2024",
+ "MSA2312",
+ "MSA2324",
+ NULL,
+};
+
+static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
+{
+ int i;
+
+ for (i = 0; msa2xxx_model[i]; i++)
+ if (strncmp(device->model, msa2xxx_model[i],
+ strlen(msa2xxx_model[i])) == 0)
+ return 1;
+ return 0;
+}
+
+/* Helper function to assign bus, target, lun mapping of devices.
+ * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
+ * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
+ * Logical drive target and lun are assigned at this time, but
+ * physical device lun and target assignment are deferred (assigned
+ * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
+ */
+static void figure_bus_target_lun(struct ctlr_info *h,
+ __u8 *lunaddrbytes, int *bus, int *target, int *lun,
+ struct hpsa_scsi_dev_t *device)
+{
+
+ __u32 lunid;
+
+ if (is_logical_dev_addr_mode(lunaddrbytes)) {
+ /* logical device */
+ memcpy(&lunid, lunaddrbytes, sizeof(lunid));
+ lunid = le32_to_cpu(lunid);
+
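+ /* MSA2xxx volumes encode the target in bits 29:16 and the lun in
+ * the low byte of the LUN id; other logical volumes go on bus 0
+ * with the target taken from the low 14 bits.
+ */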
+ if (is_msa2xxx(h, device)) {
+ *bus = 1;
+ *target = (lunid >> 16) & 0x3fff;
+ *lun = lunid & 0x00ff;
+ } else {
+ *bus = 0;
+ *lun = 0;
+ *target = lunid & 0x3fff;
+ }
+ } else {
+ /* physical device */
+ if (is_hba_lunid(lunaddrbytes))
+ *bus = 3;
+ else
+ *bus = 2;
+ *target = -1;
+ *lun = -1; /* we will fill these in later. */
+ }
+}
+
+/*
+ * If there is no lun 0 on a target, Linux won't find any devices.
+ * For the MSA2xxx boxes, we have to manually detect the enclosure
+ * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
+ * it for some reason. *tmpdevice is the target we're adding,
+ * this_device is a pointer into the current element of currentsd[]
+ * that we're building up in update_scsi_devices(), below.
+ * lunzerobits is a bitmap that tracks which targets already have a
+ * lun 0 assigned.
+ * Returns 1 if an enclosure was added, 0 if not.
+ */
+static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *tmpdevice,
+ struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
+ int bus, int target, int lun, unsigned long lunzerobits[],
+ int *nmsa2xxx_enclosures)
+{
+ unsigned char scsi3addr[8];
+
+ if (test_bit(target, lunzerobits))
+ return 0; /* There is already a lun 0 on this target. */
+
+ if (!is_logical_dev_addr_mode(lunaddrbytes))
+ return 0; /* It's the logical targets that may lack lun 0. */
+
+ if (!is_msa2xxx(h, tmpdevice))
+ return 0; /* It's only the MSA2xxx that have this problem. */
+
+ if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
+ return 0;
+
+ if (is_hba_lunid(lunaddrbytes))
+ return 0; /* Don't add the RAID controller here. */
+
+#define MAX_MSA2XXX_ENCLOSURES 32
+ if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
+ dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
+ "enclosures exceeded. Check your hardware "
+ "configuration.");
+ return 0;
+ }
+
+ memset(scsi3addr, 0, 8);
+ scsi3addr[3] = target;
+ if (hpsa_update_device_info(h, scsi3addr, this_device))
+ return 0;
+ (*nmsa2xxx_enclosures)++;
+ hpsa_set_bus_target_lun(this_device, bus, target, 0);
+ set_bit(target, lunzerobits);
+ return 1;
+}
+
+/*
+ * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
+ * logdev. The numbers of luns in physdev and logdev are returned in
+ * *nphysicals and *nlogicals, respectively.
+ * Returns 0 on success, -1 otherwise.
+ */
+static int hpsa_gather_lun_info(struct ctlr_info *h,
+ int reportlunsize,
+ struct ReportLUNdata *physdev, __u32 *nphysicals,
+ struct ReportLUNdata *logdev, __u32 *nlogicals)
+{
+ if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
+ dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
+ return -1;
+ }
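+ /* LUNListLength is a big-endian byte count; each LUN entry is 8 bytes. */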
+ memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
+ *nphysicals = be32_to_cpu(*nphysicals) / 8;
+#ifdef DEBUG
+ dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
+#endif
+ if (*nphysicals > HPSA_MAX_PHYS_LUN) {
+ dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
+ " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
+ *nphysicals - HPSA_MAX_PHYS_LUN);
+ *nphysicals = HPSA_MAX_PHYS_LUN;
+ }
+ if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
+ dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
+ return -1;
+ }
+ memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
+ *nlogicals = be32_to_cpu(*nlogicals) / 8;
+#ifdef DEBUG
+ dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
+#endif
+ /* Reject Logicals in excess of our max capability. */
+ if (*nlogicals > HPSA_MAX_LUN) {
+ dev_warn(&h->pdev->dev,
+ "maximum logical LUNs (%d) exceeded. "
+ "%d LUNs ignored.\n", HPSA_MAX_LUN,
+ *nlogicals - HPSA_MAX_LUN);
+ *nlogicals = HPSA_MAX_LUN;
+ }
+ if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
+ dev_warn(&h->pdev->dev,
+ "maximum logical + physical LUNs (%d) exceeded. "
+ "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
+ *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
+ *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
+ }
+ return 0;
+}
+
+static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
+{
+ /* the idea here is we could get notified
+ * that some devices have changed, so we do a report
+ * physical luns and report logical luns cmd, and adjust
+ * our list of devices accordingly.
+ *
+ * The scsi3addr's of devices won't change so long as the
+ * adapter is not reset. That means we can rescan and
+ * tell which devices we already know about, vs. new
+ * devices, vs. disappearing devices.
+ */
+ struct ReportLUNdata *physdev_list = NULL;
+ struct ReportLUNdata *logdev_list = NULL;
+ unsigned char *inq_buff = NULL;
+ __u32 nphysicals = 0;
+ __u32 nlogicals = 0;
+ __u32 ndev_allocated = 0;
+ struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
+ int ncurrent = 0;
+ int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+ int i, nmsa2xxx_enclosures, ndevs_to_allocate;
+ int bus, target, lun;
+ DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
+
+ currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+ GFP_KERNEL);
+ physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
+ logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
+ inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+ tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
+
+ if (!currentsd || !physdev_list || !logdev_list ||
+ !inq_buff || !tmpdevice) {
+ dev_err(&h->pdev->dev, "out of memory\n");
+ goto out;
+ }
+ memset(lunzerobits, 0, sizeof(lunzerobits));
+
+ if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
+ logdev_list, &nlogicals))
+ goto out;
+
+ /* We might see up to 32 MSA2xxx enclosures, actually 8 of them
+ * but each of them 4 times through different paths. The plus 1
+ * is for the RAID controller.
+ */
+ ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
+
+ /* Allocate the per device structures */
+ for (i = 0; i < ndevs_to_allocate; i++) {
+ currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
+ if (!currentsd[i]) {
+ dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
+ __FILE__, __LINE__);
+ goto out;
+ }
+ ndev_allocated++;
+ }
+
+ /* adjust our table of devices */
+ nmsa2xxx_enclosures = 0;
+ for (i = 0; i < nphysicals + nlogicals + 1; i++) {
+ __u8 *lunaddrbytes;
+
+ /* Figure out where the LUN ID info is coming from */
+ if (i < nphysicals)
+ lunaddrbytes = &physdev_list->LUN[i][0];
+ else
+ if (i < nphysicals + nlogicals)
+ lunaddrbytes =
+ &logdev_list->LUN[i-nphysicals][0];
+ else /* jam in the RAID controller at the end */
+ lunaddrbytes = RAID_CTLR_LUNID;
+
+ /* skip masked physical devices. */
+ if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
+ continue;
+
+ /* Get device type, vendor, model, device id */
+ if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
+ continue; /* skip it if we can't talk to it. */
+ figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
+ tmpdevice);
+ this_device = currentsd[ncurrent];
+
+ /*
+ * For the msa2xxx boxes, we have to insert a LUN 0 which
+ * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
+ * is nonetheless an enclosure device there. We have to
+ * present it, otherwise Linux won't find anything if
+ * there is no lun 0.
+ */
+ if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
+ lunaddrbytes, bus, target, lun, lunzerobits,
+ &nmsa2xxx_enclosures)) {
+ ncurrent++;
+ this_device = currentsd[ncurrent];
+ }
+
+ *this_device = *tmpdevice;
+ hpsa_set_bus_target_lun(this_device, bus, target, lun);
+
+ switch (this_device->devtype) {
+ case TYPE_ROM: {
+ /* We don't *really* support actual CD-ROM devices,
+ * just "One Button Disaster Recovery" tape drive
+ * which temporarily pretends to be a CD-ROM drive.
+ * So we check that the device is really an OBDR tape
+ * device by checking for "$DR-10" in bytes 43-48 of
+ * the inquiry data.
+ */
+ char obdr_sig[7];
+#define OBDR_TAPE_SIG "$DR-10"
+ strncpy(obdr_sig, &inq_buff[43], 6);
+ obdr_sig[6] = '\0';
+ if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
+ /* Not OBDR device, ignore it. */
+ break;
+ }
+ ncurrent++;
+ break;
+ case TYPE_DISK:
+ if (i < nphysicals)
+ break;
+ ncurrent++;
+ break;
+ case TYPE_TAPE:
+ case TYPE_MEDIUM_CHANGER:
+ ncurrent++;
+ break;
+ case TYPE_RAID:
+ /* Only present the Smartarray HBA as a RAID controller.
+ * If it's a RAID controller other than the HBA itself
+ * (an external RAID controller, MSA500 or similar)
+ * don't present it.
+ */
+ if (!is_hba_lunid(lunaddrbytes))
+ break;
+ ncurrent++;
+ break;
+ default:
+ break;
+ }
+ if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+ break;
+ }
+ adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
+out:
+ kfree(tmpdevice);
+ for (i = 0; i < ndev_allocated; i++)
+ kfree(currentsd[i]);
+ kfree(currentsd);
+ kfree(inq_buff);
+ kfree(physdev_list);
+ kfree(logdev_list);
+ return;
+}
+
+/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
+ * dma mapping and fills in the scatter gather entries of the
+ * hpsa command, cp.
+ */
+static int hpsa_scatter_gather(struct pci_dev *pdev,
+ struct CommandList *cp,
+ struct scsi_cmnd *cmd)
+{
+ unsigned int len;
+ struct scatterlist *sg;
+ __u64 addr64;
+ int use_sg, i;
+
+ BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0)
+ return use_sg;
+
+ if (!use_sg)
+ goto sglist_finished;
+
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (__u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ cp->SG[i].Addr.lower =
+ (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Addr.upper =
+ (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Len = len;
+ cp->SG[i].Ext = 0; /* we are not chaining */
+ }
+
+sglist_finished:
+
+ cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
+ return 0;
+}
+
+
+static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *dev;
+ unsigned char scsi3addr[8];
+ struct CommandList *c;
+ unsigned long flags;
+
+ /* Get the ptr to our adapter structure out of cmd->host. */
+ h = sdev_to_hba(cmd->device);
+ dev = cmd->device->hostdata;
+ if (!dev) {
+ cmd->result = DID_NO_CONNECT << 16;
+ done(cmd);
+ return 0;
+ }
+ memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
+
+ /* Need a lock as this is being allocated from the pool */
+ spin_lock_irqsave(&h->lock, flags);
+ c = cmd_alloc(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (c == NULL) { /* trouble... */
+ dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /* Fill in the command list header */
+
+ cmd->scsi_done = done; /* save this for use by completion code */
+
+ /* save c in case we have to abort it */
+ cmd->host_scribble = (unsigned char *) c;
+
+ c->cmd_type = CMD_SCSI;
+ c->scsi_cmd = cmd;
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+ c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */
+
+ /* Fill in the request block... */
+
+ c->Request.Timeout = 0;
+ memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
+ BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
+ c->Request.CDBLen = cmd->cmd_len;
+ memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
+ c->Request.Type.Type = TYPE_CMD;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ c->Request.Type.Direction = XFER_WRITE;
+ break;
+ case DMA_FROM_DEVICE:
+ c->Request.Type.Direction = XFER_READ;
+ break;
+ case DMA_NONE:
+ c->Request.Type.Direction = XFER_NONE;
+ break;
+ case DMA_BIDIRECTIONAL:
+ /* This can happen if a buggy application does a scsi passthru
+ * and sets both inlen and outlen to non-zero. ( see
+ * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
+ */
+
+ c->Request.Type.Direction = XFER_RSVD;
+ /* This is technically wrong, and hpsa controllers should
+ * reject it with CMD_INVALID, which is the most correct
+ * response, but non-fibre backends appear to let it
+ * slide by, and give the same results as if this field
+ * were set correctly. Either way is acceptable for
+ * our purposes here.
+ */
+
+ break;
+
+ default:
+ dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+ cmd->sc_data_direction);
+ BUG();
+ break;
+ }
+
+ if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ enqueue_cmd_and_start_io(h, c);
+ /* the cmd'll come back via intr handler in complete_scsi_command() */
+ return 0;
+}
+
+static void hpsa_unregister_scsi(struct ctlr_info *h)
+{
+ /* we are being forcibly unloaded, and may not refuse. */
+ scsi_remove_host(h->scsi_host);
+ scsi_host_put(h->scsi_host);
+ h->scsi_host = NULL;
+}
+
+static int hpsa_register_scsi(struct ctlr_info *h)
+{
+ int rc;
+
+ hpsa_update_scsi_devices(h, -1);
+ rc = hpsa_scsi_detect(h);
+ if (rc != 0)
+ dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
+ " hpsa_scsi_detect(), rc is %d\n", rc);
+ return rc;
+}
+
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+ unsigned char lunaddr[])
+{
+ int rc = 0;
+ int count = 0;
+ int waittime = 1; /* seconds */
+ struct CommandList *c;
+
+ c = cmd_special_alloc(h);
+ if (!c) {
+ dev_warn(&h->pdev->dev, "out of memory in "
+ "wait_for_device_to_become_ready.\n");
+ return IO_ERROR;
+ }
+
+ /* Send test unit ready until device ready, or give up. */
+ while (count < HPSA_TUR_RETRY_LIMIT) {
+
+ /* Wait for a bit. Do this first, because if we send
+ * the TUR right away, the reset will just abort it.
+ */
+ msleep(1000 * waittime);
+ count++;
+
+ /* Increase wait time with each try, up to a point. */
+ if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
+ waittime = waittime * 2;
+
+ /* Send the Test Unit Ready */
+ fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ /* no unmap needed here because no data xfer. */
+
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ break;
+
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
+ (c->err_info->SenseInfo[2] == NO_SENSE ||
+ c->err_info->SenseInfo[2] == UNIT_ATTENTION))
+ break;
+
+ dev_warn(&h->pdev->dev, "waiting %d secs "
+ "for device to become ready.\n", waittime);
+ rc = 1; /* device not ready. */
+ }
+
+ if (rc)
+ dev_warn(&h->pdev->dev, "giving up on device.\n");
+ else
+ dev_warn(&h->pdev->dev, "device is ready.\n");
+
+ cmd_special_free(h, c);
+ return rc;
+}
+
+/* Need at least one of these error handlers to keep ../scsi/hosts.c from
+ * complaining. Doing a host- or bus-reset can't do anything good here.
+ */
+static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
+{
+ int rc;
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *dev;
+
+ /* find the controller to which the command to be aborted was sent */
+ h = sdev_to_hba(scsicmd->device);
+ if (h == NULL) /* paranoia */
+ return FAILED;
+ dev_warn(&h->pdev->dev, "resetting drive\n");
+
+ dev = scsicmd->device->hostdata;
+ if (!dev) {
+ dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
+ "device lookup failed.\n");
+ return FAILED;
+ }
+ /* send a reset to the SCSI LUN which the command was sent to */
+ rc = hpsa_send_reset(h, dev->scsi3addr);
+ if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
+ return SUCCESS;
+
+ dev_warn(&h->pdev->dev, "resetting device failed.\n");
+ return FAILED;
+}
+
+/*
+ * For operations that cannot sleep, a command block is allocated at init,
+ * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
+ * which ones are free or in use. Lock must be held when calling this.
+ * cmd_free() is the complement.
+ */
+static struct CommandList *cmd_alloc(struct ctlr_info *h)
+{
+ struct CommandList *c;
+ int i;
+ union u64bit temp64;
+ dma_addr_t cmd_dma_handle, err_dma_handle;
+
+ do {
+ i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+ if (i == h->nr_cmds)
+ return NULL;
+ } while (test_and_set_bit
+ (i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+ c = h->cmd_pool + i;
+ memset(c, 0, sizeof(*c));
+ cmd_dma_handle = h->cmd_pool_dhandle
+ + i * sizeof(*c);
+ c->err_info = h->errinfo_pool + i;
+ memset(c->err_info, 0, sizeof(*c->err_info));
+ err_dma_handle = h->errinfo_pool_dhandle
+ + i * sizeof(*c->err_info);
+ h->nr_allocs++;
+
+ c->cmdindex = i;
+
+ INIT_HLIST_NODE(&c->list);
+ c->busaddr = (__u32) cmd_dma_handle;
+ temp64.val = (__u64) err_dma_handle;
+ c->ErrDesc.Addr.lower = temp64.val32.lower;
+ c->ErrDesc.Addr.upper = temp64.val32.upper;
+ c->ErrDesc.Len = sizeof(*c->err_info);
+
+ c->h = h;
+ return c;
+}
+
+/* For operations that can wait for kmalloc to possibly sleep,
+ * this routine can be called. Lock need not be held to call
+ * cmd_special_alloc. cmd_special_free() is the complement.
+ */
+static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
+{
+ struct CommandList *c;
+ union u64bit temp64;
+ dma_addr_t cmd_dma_handle, err_dma_handle;
+
+ c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
+ if (c == NULL)
+ return NULL;
+ memset(c, 0, sizeof(*c));
+
+ c->cmdindex = -1;
+
+ c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
+ &err_dma_handle);
+
+ if (c->err_info == NULL) {
+ pci_free_consistent(h->pdev,
+ sizeof(*c), c, cmd_dma_handle);
+ return NULL;
+ }
+ memset(c->err_info, 0, sizeof(*c->err_info));
+
+ INIT_HLIST_NODE(&c->list);
+ c->busaddr = (__u32) cmd_dma_handle;
+ temp64.val = (__u64) err_dma_handle;
+ c->ErrDesc.Addr.lower = temp64.val32.lower;
+ c->ErrDesc.Addr.upper = temp64.val32.upper;
+ c->ErrDesc.Len = sizeof(*c->err_info);
+
+ c->h = h;
+ return c;
+}
+
+static void cmd_free(struct ctlr_info *h, struct CommandList *c)
+{
+ int i;
+
+ i = c - h->cmd_pool;
+ clear_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG));
+ h->nr_frees++;
+}
+
+static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
+{
+ union u64bit temp64;
+
+ temp64.val32.lower = c->ErrDesc.Addr.lower;
+ temp64.val32.upper = c->ErrDesc.Addr.upper;
+ pci_free_consistent(h->pdev, sizeof(*c->err_info),
+ c->err_info, (dma_addr_t) temp64.val);
+ pci_free_consistent(h->pdev, sizeof(*c),
+ c, (dma_addr_t) c->busaddr);
+}
+
+#ifdef CONFIG_COMPAT
+
+static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = hpsa_ioctl(dev, cmd, arg);
+ unlock_kernel();
+ return ret;
+}
+
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
+static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+ int cmd, void *arg);
+
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+ switch (cmd) {
+ case CCISS_GETPCIINFO:
+ case CCISS_GETINTINFO:
+ case CCISS_SETINTINFO:
+ case CCISS_GETNODENAME:
+ case CCISS_SETNODENAME:
+ case CCISS_GETHEARTBEAT:
+ case CCISS_GETBUSTYPES:
+ case CCISS_GETFIRMVER:
+ case CCISS_GETDRIVVER:
+ case CCISS_REVALIDVOLS:
+ case CCISS_DEREGDISK:
+ case CCISS_REGNEWDISK:
+ case CCISS_REGNEWD:
+ case CCISS_RESCANDISK:
+ case CCISS_GETLUNINFO:
+ return do_ioctl(dev, cmd, arg);
+
+ case CCISS_PASSTHRU32:
+ return hpsa_ioctl32_passthru(dev, cmd, arg);
+ case CCISS_BIG_PASSTHRU32:
+ return hpsa_ioctl32_big_passthru(dev, cmd, arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
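+/* Convert a 32-bit CCISS_PASSTHRU argument to the native layout in compat
+ * user space, then hand it to the regular ioctl handler.
+ */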
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
+{
+ IOCTL32_Command_struct __user *arg32 =
+ (IOCTL32_Command_struct __user *) arg;
+ IOCTL_Command_struct arg64;
+ IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+ int err;
+ u32 cp;
+
+ err = 0;
+ err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+ sizeof(arg64.LUN_info));
+ err |= copy_from_user(&arg64.Request, &arg32->Request,
+ sizeof(arg64.Request));
+ err |= copy_from_user(&arg64.error_info, &arg32->error_info,
+ sizeof(arg64.error_info));
+ err |= get_user(arg64.buf_size, &arg32->buf_size);
+ err |= get_user(cp, &arg32->buf);
+ arg64.buf = compat_ptr(cp);
+ err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+ if (err)
+ return -EFAULT;
+
+ err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
+ if (err)
+ return err;
+ err |= copy_in_user(&arg32->error_info, &p->error_info,
+ sizeof(arg32->error_info));
+ if (err)
+ return -EFAULT;
+ return err;
+}
+
+static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+ int cmd, void *arg)
+{
+ BIG_IOCTL32_Command_struct __user *arg32 =
+ (BIG_IOCTL32_Command_struct __user *) arg;
+ BIG_IOCTL_Command_struct arg64;
+ BIG_IOCTL_Command_struct __user *p =
+ compat_alloc_user_space(sizeof(arg64));
+ int err;
+ u32 cp;
+
+ err = 0;
+ err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+ sizeof(arg64.LUN_info));
+ err |= copy_from_user(&arg64.Request, &arg32->Request,
+ sizeof(arg64.Request));
+ err |= copy_from_user(&arg64.error_info, &arg32->error_info,
+ sizeof(arg64.error_info));
+ err |= get_user(arg64.buf_size, &arg32->buf_size);
+ err |= get_user(arg64.malloc_size, &arg32->malloc_size);
+ err |= get_user(cp, &arg32->buf);
+ arg64.buf = compat_ptr(cp);
+ err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+ if (err)
+ return -EFAULT;
+
+ err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
+ if (err)
+ return err;
+ err |= copy_in_user(&arg32->error_info, &p->error_info,
+ sizeof(arg32->error_info));
+ if (err)
+ return -EFAULT;
+ return err;
+}
+#endif
+
+static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ struct hpsa_pci_info pciinfo;
+
+ if (!argp)
+ return -EINVAL;
+ pciinfo.domain = pci_domain_nr(h->pdev->bus);
+ pciinfo.bus = h->pdev->bus->number;
+ pciinfo.dev_fn = h->pdev->devfn;
+ pciinfo.board_id = h->board_id;
+ if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ DriverVer_type DriverVer;
+ unsigned char vmaj, vmin, vsubmin;
+ int rc;
+
+ rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
+ &vmaj, &vmin, &vsubmin);
+ if (rc != 3) {
+		dev_info(&h->pdev->dev, "driver version string '%s' "
+			"unrecognized.\n", HPSA_DRIVER_VERSION);
+ vmaj = 0;
+ vmin = 0;
+ vsubmin = 0;
+ }
+ DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
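+	/* e.g. a version string of "2.0.1" packs to DriverVer 0x00020001 */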
+ if (!argp)
+ return -EINVAL;
+ if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
+ return -EFAULT;
+ return 0;
+}
+
+static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ IOCTL_Command_struct iocommand;
+ struct CommandList *c;
+ char *buff = NULL;
+ union u64bit temp64;
+
+ if (!argp)
+ return -EINVAL;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
+ return -EFAULT;
+ if ((iocommand.buf_size < 1) &&
+ (iocommand.Request.Type.Direction != XFER_NONE)) {
+ return -EINVAL;
+ }
+ if (iocommand.buf_size > 0) {
+ buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+ if (buff == NULL)
+			return -ENOMEM;
+ }
+ if (iocommand.Request.Type.Direction == XFER_WRITE) {
+ /* Copy the data into the buffer we created */
+ if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
+ kfree(buff);
+ return -EFAULT;
+ }
+ } else
+ memset(buff, 0, iocommand.buf_size);
+ c = cmd_special_alloc(h);
+ if (c == NULL) {
+ kfree(buff);
+ return -ENOMEM;
+ }
+ /* Fill in the command type */
+ c->cmd_type = CMD_IOCTL_PEND;
+ /* Fill in Command Header */
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ if (iocommand.buf_size > 0) { /* buffer to fill */
+ c->Header.SGList = 1;
+ c->Header.SGTotal = 1;
+ } else { /* no buffers to fill */
+ c->Header.SGList = 0;
+ c->Header.SGTotal = 0;
+ }
+ memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
+	/* use the bus address of the cmd block as the tag */
+ c->Header.Tag.lower = c->busaddr;
+
+ /* Fill in Request block */
+ memcpy(&c->Request, &iocommand.Request,
+ sizeof(c->Request));
+
+ /* Fill in the scatter gather information */
+ if (iocommand.buf_size > 0) {
+ temp64.val = pci_map_single(h->pdev, buff,
+ iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ c->SG[0].Addr.lower = temp64.val32.lower;
+ c->SG[0].Addr.upper = temp64.val32.upper;
+ c->SG[0].Len = iocommand.buf_size;
+ c->SG[0].Ext = 0; /* we are not chaining*/
+ }
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+ check_ioctl_unit_attention(h, c);
+
+ /* Copy the error information out */
+ memcpy(&iocommand.error_info, c->err_info,
+ sizeof(iocommand.error_info));
+ if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
+ kfree(buff);
+ cmd_special_free(h, c);
+ return -EFAULT;
+ }
+
+ if (iocommand.Request.Type.Direction == XFER_READ) {
+ /* Copy the data out of the buffer we created */
+ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
+ kfree(buff);
+ cmd_special_free(h, c);
+ return -EFAULT;
+ }
+ }
+ kfree(buff);
+ cmd_special_free(h, c);
+ return 0;
+}
+
+static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ BIG_IOCTL_Command_struct *ioc;
+ struct CommandList *c;
+ unsigned char **buff = NULL;
+ int *buff_size = NULL;
+ union u64bit temp64;
+ BYTE sg_used = 0;
+ int status = 0;
+ int i;
+ __u32 left;
+ __u32 sz;
+ BYTE __user *data_ptr;
+
+ if (!argp)
+ return -EINVAL;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ if (copy_from_user(ioc, argp, sizeof(*ioc))) {
+ status = -EFAULT;
+ goto cleanup1;
+ }
+ if ((ioc->buf_size < 1) &&
+ (ioc->Request.Type.Direction != XFER_NONE)) {
+ status = -EINVAL;
+ goto cleanup1;
+ }
+ /* Check kmalloc limits using all SGs */
+ if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
+ status = -EINVAL;
+ goto cleanup1;
+ }
+ if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
+ status = -EINVAL;
+ goto cleanup1;
+ }
+ buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
+ if (!buff) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
+ if (!buff_size) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ left = ioc->buf_size;
+ data_ptr = ioc->buf;
+ while (left) {
+ sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
+ buff_size[sg_used] = sz;
+ buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+ if (buff[sg_used] == NULL) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ if (ioc->Request.Type.Direction == XFER_WRITE) {
+ if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+				status = -EFAULT;
+ goto cleanup1;
+ }
+ } else
+ memset(buff[sg_used], 0, sz);
+ left -= sz;
+ data_ptr += sz;
+ sg_used++;
+ }
+ c = cmd_special_alloc(h);
+ if (c == NULL) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ c->cmd_type = CMD_IOCTL_PEND;
+ c->Header.ReplyQueue = 0;
+
+ if (ioc->buf_size > 0) {
+ c->Header.SGList = sg_used;
+ c->Header.SGTotal = sg_used;
+ } else {
+ c->Header.SGList = 0;
+ c->Header.SGTotal = 0;
+ }
+ memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
+ c->Header.Tag.lower = c->busaddr;
+ memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
+ if (ioc->buf_size > 0) {
+ int i;
+ for (i = 0; i < sg_used; i++) {
+ temp64.val = pci_map_single(h->pdev, buff[i],
+ buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ c->SG[i].Addr.lower = temp64.val32.lower;
+ c->SG[i].Addr.upper = temp64.val32.upper;
+ c->SG[i].Len = buff_size[i];
+ /* we are not chaining */
+ c->SG[i].Ext = 0;
+ }
+ }
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ check_ioctl_unit_attention(h, c);
+ /* Copy the error information out */
+ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
+ if (copy_to_user(argp, ioc, sizeof(*ioc))) {
+ cmd_special_free(h, c);
+ status = -EFAULT;
+ goto cleanup1;
+ }
+ if (ioc->Request.Type.Direction == XFER_READ) {
+ /* Copy the data out of the buffer we created */
+ BYTE __user *ptr = ioc->buf;
+ for (i = 0; i < sg_used; i++) {
+ if (copy_to_user(ptr, buff[i], buff_size[i])) {
+ cmd_special_free(h, c);
+ status = -EFAULT;
+ goto cleanup1;
+ }
+ ptr += buff_size[i];
+ }
+ }
+ cmd_special_free(h, c);
+ status = 0;
+cleanup1:
+ if (buff) {
+ for (i = 0; i < sg_used; i++)
+ kfree(buff[i]);
+ kfree(buff);
+ }
+ kfree(buff_size);
+ kfree(ioc);
+ return status;
+}
+
+static void check_ioctl_unit_attention(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
+ (void) check_for_unit_attention(h, c);
+}
+/*
+ * ioctl
+ */
+static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+ struct ctlr_info *h;
+ void __user *argp = (void __user *)arg;
+
+ h = sdev_to_hba(dev);
+
+ switch (cmd) {
+ case CCISS_DEREGDISK:
+ case CCISS_REGNEWDISK:
+ case CCISS_REGNEWD:
+ hpsa_update_scsi_devices(h, dev->host->host_no);
+ return 0;
+ case CCISS_GETPCIINFO:
+ return hpsa_getpciinfo_ioctl(h, argp);
+ case CCISS_GETDRIVVER:
+ return hpsa_getdrivver_ioctl(h, argp);
+ case CCISS_PASSTHRU:
+ return hpsa_passthru_ioctl(h, argp);
+ case CCISS_BIG_PASSTHRU:
+ return hpsa_big_passthru_ioctl(h, argp);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
+ void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+ int cmd_type)
+{
+ int pci_dir = XFER_NONE;
+
+ c->cmd_type = CMD_IOCTL_PEND;
+ c->Header.ReplyQueue = 0;
+ if (buff != NULL && size > 0) {
+ c->Header.SGList = 1;
+ c->Header.SGTotal = 1;
+ } else {
+ c->Header.SGList = 0;
+ c->Header.SGTotal = 0;
+ }
+ c->Header.Tag.lower = c->busaddr;
+ memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
+
+ c->Request.Type.Type = cmd_type;
+ if (cmd_type == TYPE_CMD) {
+ switch (cmd) {
+ case HPSA_INQUIRY:
+ /* are we trying to read a vital product page */
+ if (page_code != 0) {
+ c->Request.CDB[1] = 0x01;
+ c->Request.CDB[2] = page_code;
+ }
+ c->Request.CDBLen = 6;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = HPSA_INQUIRY;
+ c->Request.CDB[4] = size & 0xFF;
+ break;
+ case HPSA_REPORT_LOG:
+ case HPSA_REPORT_PHYS:
+		/* Talking to the controller, so it's a physical command:
+		 * mode = 00, target = 0.  Nothing to write.
+		 */
+ c->Request.CDBLen = 12;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = cmd;
+ c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ c->Request.CDB[9] = size & 0xFF;
+ break;
+
+ case HPSA_READ_CAPACITY:
+ c->Request.CDBLen = 10;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = cmd;
+ break;
+ case HPSA_CACHE_FLUSH:
+ c->Request.CDBLen = 12;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_WRITE;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = BMIC_WRITE;
+ c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ break;
+ case TEST_UNIT_READY:
+ c->Request.CDBLen = 6;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_NONE;
+ c->Request.Timeout = 0;
+ break;
+ default:
+			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
+ BUG();
+ return;
+ }
+ } else if (cmd_type == TYPE_MSG) {
+ switch (cmd) {
+
+ case HPSA_DEVICE_RESET_MSG:
+ c->Request.CDBLen = 16;
+ c->Request.Type.Type = 1; /* It is a MSG not a CMD */
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_NONE;
+ c->Request.Timeout = 0; /* Don't time out */
+ c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
+ c->Request.CDB[1] = 0x03; /* Reset target above */
+ /* If bytes 4-7 are zero, it means reset the */
+ /* LunID device */
+ c->Request.CDB[4] = 0x00;
+ c->Request.CDB[5] = 0x00;
+ c->Request.CDB[6] = 0x00;
+ c->Request.CDB[7] = 0x00;
+ break;
+
+ default:
+ dev_warn(&h->pdev->dev, "unknown message type %d\n",
+ cmd);
+ BUG();
+ }
+ } else {
+ dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
+ BUG();
+ }
+
+ switch (c->Request.Type.Direction) {
+ case XFER_READ:
+ pci_dir = PCI_DMA_FROMDEVICE;
+ break;
+ case XFER_WRITE:
+ pci_dir = PCI_DMA_TODEVICE;
+ break;
+ case XFER_NONE:
+ pci_dir = PCI_DMA_NONE;
+ break;
+ default:
+ pci_dir = PCI_DMA_BIDIRECTIONAL;
+ }
+
+ hpsa_map_one(h->pdev, c, buff, size, pci_dir);
+
+ return;
+}
+
+/*
+ * Map (physical) PCI mem into (virtual) kernel space
+ */
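+/* For example, with 4K pages a base of 0xfebf0250 (illustrative only)
+ * gives page_base 0xfebf0000 and page_offs 0x250; ioremap() then covers
+ * page_offs + size bytes and the caller gets back the remapped page
+ * address plus 0x250.
+ */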
+static void __iomem *remap_pci_mem(ulong base, ulong size)
+{
+ ulong page_base = ((ulong) base) & PAGE_MASK;
+ ulong page_offs = ((ulong) base) - page_base;
+ void __iomem *page_remapped = ioremap(page_base, page_offs + size);
+
+ return page_remapped ? (page_remapped + page_offs) : NULL;
+}
+
+/* Takes cmds off the submission queue and sends them to the hardware,
+ * then puts them on the queue of cmds waiting for completion.
+ */
+static void start_io(struct ctlr_info *h)
+{
+ struct CommandList *c;
+
+ while (!hlist_empty(&h->reqQ)) {
+ c = hlist_entry(h->reqQ.first, struct CommandList, list);
+ /* can't do anything if fifo is full */
+ if ((h->access.fifo_full(h))) {
+ dev_warn(&h->pdev->dev, "fifo full\n");
+ break;
+ }
+
+ /* Get the first entry from the Request Q */
+ removeQ(c);
+ h->Qdepth--;
+
+ /* Tell the controller execute command */
+ h->access.submit_command(h, c);
+
+ /* Put job onto the completed Q */
+ addQ(&h->cmpQ, c);
+ }
+}
+
+static inline unsigned long get_next_completion(struct ctlr_info *h)
+{
+ return h->access.command_completed(h);
+}
+
+static inline int interrupt_pending(struct ctlr_info *h)
+{
+ return h->access.intr_pending(h);
+}
+
+static inline long interrupt_not_for_us(struct ctlr_info *h)
+{
+ return ((h->access.intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0));
+}
+
+static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
+ __u32 raw_tag)
+{
+ if (unlikely(tag_index >= h->nr_cmds)) {
+ dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
+ return 1;
+ }
+ return 0;
+}
+
+static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
+{
+ removeQ(c);
+ if (likely(c->cmd_type == CMD_SCSI))
+ complete_scsi_command(c, 0, raw_tag);
+ else if (c->cmd_type == CMD_IOCTL_PEND)
+ complete(c->waiting);
+}
+
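+/* Main interrupt handler: drain the reply FIFO while the controller has
+ * an interrupt pending.  Each completed command is located either by the
+ * index encoded in its tag (when the index bit is set) or, failing that,
+ * by matching the tag against the bus addresses of the commands on the
+ * completion queue.
+ */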
+static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
+{
+ struct ctlr_info *h = dev_id;
+ struct CommandList *c;
+ unsigned long flags;
+ __u32 raw_tag, tag, tag_index;
+ struct hlist_node *tmp;
+
+ if (interrupt_not_for_us(h))
+ return IRQ_NONE;
+ spin_lock_irqsave(&h->lock, flags);
+ while (interrupt_pending(h)) {
+ while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
+ if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
+ tag_index = HPSA_TAG_TO_INDEX(raw_tag);
+ if (bad_tag(h, tag_index, raw_tag))
+ return IRQ_HANDLED;
+ c = h->cmd_pool + tag_index;
+ finish_cmd(c, raw_tag);
+ continue;
+ }
+ tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
+ c = NULL;
+ hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ if (c->busaddr == tag) {
+ finish_cmd(c, raw_tag);
+ break;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/* Send a message CDB to the firmware. */
+static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
+ unsigned char type)
+{
+ struct Command {
+ struct CommandListHeader CommandHeader;
+ struct RequestBlock Request;
+ struct ErrDescriptor ErrorDescriptor;
+ };
+ struct Command *cmd;
+ static const size_t cmd_sz = sizeof(*cmd) +
+ sizeof(cmd->ErrorDescriptor);
+ dma_addr_t paddr64;
+ uint32_t paddr32, tag;
+ void __iomem *vaddr;
+ int i, err;
+
+ vaddr = pci_ioremap_bar(pdev, 0);
+ if (vaddr == NULL)
+ return -ENOMEM;
+
+ /* The Inbound Post Queue only accepts 32-bit physical addresses for the
+ * CCISS commands, so they must be allocated from the lower 4GiB of
+ * memory.
+ */
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ iounmap(vaddr);
+ return -ENOMEM;
+ }
+
+ cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+ if (cmd == NULL) {
+ iounmap(vaddr);
+ return -ENOMEM;
+ }
+
+ /* This must fit, because of the 32-bit consistent DMA mask. Also,
+ * although there's no guarantee, we assume that the address is at
+ * least 4-byte aligned (most likely, it's page-aligned).
+ */
+ paddr32 = paddr64;
+
+ cmd->CommandHeader.ReplyQueue = 0;
+ cmd->CommandHeader.SGList = 0;
+ cmd->CommandHeader.SGTotal = 0;
+ cmd->CommandHeader.Tag.lower = paddr32;
+ cmd->CommandHeader.Tag.upper = 0;
+ memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
+
+ cmd->Request.CDBLen = 16;
+ cmd->Request.Type.Type = TYPE_MSG;
+ cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
+ cmd->Request.Type.Direction = XFER_NONE;
+ cmd->Request.Timeout = 0; /* Don't time out */
+ cmd->Request.CDB[0] = opcode;
+ cmd->Request.CDB[1] = type;
+ memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
+ cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
+ cmd->ErrorDescriptor.Addr.upper = 0;
+ cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
+
+ writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
+
+ for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
+ tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
+ if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
+ break;
+ msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
+ }
+
+ iounmap(vaddr);
+
+ /* we leak the DMA buffer here ... no choice since the controller could
+ * still complete the command.
+ */
+ if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
+ dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
+ opcode, type);
+ return -ETIMEDOUT;
+ }
+
+ pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+
+ if (tag & HPSA_ERROR_BIT) {
+ dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
+ opcode, type);
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
+ opcode, type);
+ return 0;
+}
+
+#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
+#define hpsa_noop(p) hpsa_message(p, 3, 0)
+
+static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
+{
+/* the #defines are stolen from drivers/pci/msi.h. */
+#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
+#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
+
+ int pos;
+ u16 control = 0;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ if (pos) {
+ pci_read_config_word(pdev, msi_control_reg(pos), &control);
+ if (control & PCI_MSI_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI\n");
+ pci_write_config_word(pdev, msi_control_reg(pos),
+ control & ~PCI_MSI_FLAGS_ENABLE);
+ }
+ }
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, msi_control_reg(pos), &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev, msi_control_reg(pos),
+ control & ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+
+ return 0;
+}
+
+/* This does a hard reset of the controller using PCI power management
+ * states.
+ */
+static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
+{
+ u16 pmcsr, saved_config_space[32];
+ int i, pos;
+
+ dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+
+ /* This is very nearly the same thing as
+ *
+ * pci_save_state(pci_dev);
+ * pci_set_power_state(pci_dev, PCI_D3hot);
+ * pci_set_power_state(pci_dev, PCI_D0);
+ * pci_restore_state(pci_dev);
+ *
+ * but we can't use these nice canned kernel routines on
+ * kexec, because they also check the MSI/MSI-X state in PCI
+ * configuration space and do the wrong thing when it is
+ * set/cleared. Also, the pci_save/restore_state functions
+ * violate the ordering requirements for restoring the
+ * configuration space from the CCISS document (see the
+ * comment below). So we roll our own ....
+ */
+
+ for (i = 0; i < 32; i++)
+ pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pos == 0) {
+ dev_err(&pdev->dev,
+ "hpsa_reset_controller: PCI PM not supported\n");
+ return -ENODEV;
+ }
+
+ /* Quoting from the Open CISS Specification: "The Power
+ * Management Control/Status Register (CSR) controls the power
+ * state of the device. The normal operating state is D0,
+ * CSR=00h. The software off state is D3, CSR=03h. To reset
+ * the controller, place the interface device in D3 then to
+ * D0, this causes a secondary PCI reset which will reset the
+ * controller."
+ */
+
+ /* enter the D3hot power management state */
+ pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= PCI_D3hot;
+ pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+ msleep(500);
+
+ /* enter the D0 power management state */
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= PCI_D0;
+ pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+ msleep(500);
+
+ /* Restore the PCI configuration space. The Open CISS
+ * Specification says, "Restore the PCI Configuration
+ * Registers, offsets 00h through 60h. It is important to
+ * restore the command register, 16-bits at offset 04h,
+ * last. Do not restore the configuration status register,
+ * 16-bits at offset 06h." Note that the offset is 2*i.
+ */
+ for (i = 0; i < 32; i++) {
+ if (i == 2 || i == 3)
+ continue;
+ pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+ }
+ wmb();
+ pci_write_config_word(pdev, 4, saved_config_space[2]);
+
+ return 0;
+}
+
+/*
+ * We cannot read the structure directly; for portability we must use
+ * the I/O functions.
+ * This is for debug only.
+ */
+#ifdef HPSA_DEBUG
+static void print_cfg_table(struct device *dev, struct CfgTable *tb)
+{
+ int i;
+ char temp_name[17];
+
+ dev_info(dev, "Controller Configuration information\n");
+ dev_info(dev, "------------------------------------\n");
+ for (i = 0; i < 4; i++)
+ temp_name[i] = readb(&(tb->Signature[i]));
+ temp_name[4] = '\0';
+ dev_info(dev, " Signature = %s\n", temp_name);
+ dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
+ dev_info(dev, " Transport methods supported = 0x%x\n",
+ readl(&(tb->TransportSupport)));
+ dev_info(dev, " Transport methods active = 0x%x\n",
+ readl(&(tb->TransportActive)));
+ dev_info(dev, " Requested transport Method = 0x%x\n",
+ readl(&(tb->HostWrite.TransportRequest)));
+ dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
+ readl(&(tb->HostWrite.CoalIntDelay)));
+ dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
+ readl(&(tb->HostWrite.CoalIntCount)));
+	dev_info(dev, "   Max outstanding commands = %d\n",
+ readl(&(tb->CmdsOutMax)));
+ dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
+ for (i = 0; i < 16; i++)
+ temp_name[i] = readb(&(tb->ServerName[i]));
+ temp_name[16] = '\0';
+ dev_info(dev, " Server Name = %s\n", temp_name);
+ dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
+ readl(&(tb->HeartBeat)));
+}
+#endif /* HPSA_DEBUG */
+
+static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+{
+ int i, offset, mem_type, bar_type;
+
+ if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
+ return 0;
+ offset = 0;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
+ if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
+ offset += 4;
+ else {
+ mem_type = pci_resource_flags(pdev, i) &
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ offset += 4; /* 32 bit */
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ offset += 8;
+ break;
+ default: /* reserved in PCI 2.2 */
+ dev_warn(&pdev->dev,
+ "base address is invalid\n");
+ return -1;
+ break;
+ }
+ }
+ if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
+ return i + 1;
+ }
+ return -1;
+}
+
+/* If MSI/MSI-X is supported by the kernel we will try to enable it on
+ * controllers that are capable. If not, we use IO-APIC mode.
+ */
+
+static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
+ struct pci_dev *pdev, __u32 board_id)
+{
+#ifdef CONFIG_PCI_MSI
+ int err;
+ struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
+ {0, 2}, {0, 3}
+ };
+
+ /* Some boards advertise MSI but don't really support it */
+ if ((board_id == 0x40700E11) ||
+ (board_id == 0x40800E11) ||
+ (board_id == 0x40820E11) || (board_id == 0x40830E11))
+ goto default_int_mode;
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+ dev_info(&pdev->dev, "MSIX\n");
+ err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
+ if (!err) {
+ h->intr[0] = hpsa_msix_entries[0].vector;
+ h->intr[1] = hpsa_msix_entries[1].vector;
+ h->intr[2] = hpsa_msix_entries[2].vector;
+ h->intr[3] = hpsa_msix_entries[3].vector;
+ h->msix_vector = 1;
+ return;
+ }
+ if (err > 0) {
+ dev_warn(&pdev->dev, "only %d MSI-X vectors "
+ "available\n", err);
+ goto default_int_mode;
+ } else {
+ dev_warn(&pdev->dev, "MSI-X init failed %d\n",
+ err);
+ goto default_int_mode;
+ }
+ }
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
+ dev_info(&pdev->dev, "MSI\n");
+ if (!pci_enable_msi(pdev))
+ h->msi_vector = 1;
+ else
+ dev_warn(&pdev->dev, "MSI init failed\n");
+ }
+default_int_mode:
+#endif /* CONFIG_PCI_MSI */
+ /* if we get here we're going to use the default interrupt mode */
+ h->intr[SIMPLE_MODE_INT] = pdev->irq;
+ return;
+}
+
+static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+{
+ ushort subsystem_vendor_id, subsystem_device_id, command;
+ __u32 board_id, scratchpad = 0;
+ __u64 cfg_offset;
+ __u32 cfg_base_addr;
+ __u64 cfg_base_addr_index;
+ int i, prod_index, err;
+
+ subsystem_vendor_id = pdev->subsystem_vendor;
+ subsystem_device_id = pdev->subsystem_device;
+ board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+ subsystem_vendor_id);
+
+ for (i = 0; i < ARRAY_SIZE(products); i++)
+ if (board_id == products[i].board_id)
+ break;
+
+ prod_index = i;
+
+ if (prod_index == ARRAY_SIZE(products)) {
+ prod_index--;
+ if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
+ !hpsa_allow_any) {
+ dev_warn(&pdev->dev, "unrecognized board ID:"
+ " 0x%08lx, ignoring.\n",
+ (unsigned long) board_id);
+ return -ENODEV;
+ }
+ }
+ /* check to see if controller has been disabled
+ * BEFORE trying to enable it
+ */
+ (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
+ if (!(command & 0x02)) {
+ dev_warn(&pdev->dev, "controller appears to be disabled\n");
+ return -ENODEV;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_warn(&pdev->dev, "unable to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, "hpsa");
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
+ return err;
+ }
+
+ /* If the kernel supports MSI/MSI-X we will try to enable that,
+ * else we use the IO-APIC interrupt assigned to us by system ROM.
+ */
+ hpsa_interrupt_mode(h, pdev, board_id);
+
+ /* find the memory BAR */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
+ break;
+ }
+ if (i == DEVICE_COUNT_RESOURCE) {
+ dev_warn(&pdev->dev, "no memory BAR found\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+
+ h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
+ * already removed
+ */
+
+ h->vaddr = remap_pci_mem(h->paddr, 0x250);
+
+ /* Wait for the board to become ready. */
+ for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
+ scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (scratchpad == HPSA_FIRMWARE_READY)
+ break;
+ msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
+ }
+ if (scratchpad != HPSA_FIRMWARE_READY) {
+ dev_warn(&pdev->dev, "board not ready, timed out.\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+
+ /* get the address index number */
+ cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
+ cfg_base_addr &= (__u32) 0x0000ffff;
+ cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
+ if (cfg_base_addr_index == -1) {
+ dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+
+ cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
+ h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
+ cfg_base_addr_index) + cfg_offset,
+ sizeof(h->cfgtable));
+ h->board_id = board_id;
+
+ /* Query controller for max supported commands: */
+ h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+
+ h->product_name = products[prod_index].product_name;
+ h->access = *(products[prod_index].access);
+ /* Allow room for some ioctls */
+ h->nr_cmds = h->max_commands - 4;
+
+ if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
+ (readb(&h->cfgtable->Signature[1]) != 'I') ||
+ (readb(&h->cfgtable->Signature[2]) != 'S') ||
+ (readb(&h->cfgtable->Signature[3]) != 'S')) {
+ dev_warn(&pdev->dev, "not a valid CISS config table\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+#ifdef CONFIG_X86
+ {
+ /* Need to enable prefetch in the SCSI core for 6400 in x86 */
+ __u32 prefetch;
+ prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
+ prefetch |= 0x100;
+ writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
+ }
+#endif
+
+ /* Disabling DMA prefetch for the P600
+ * An ASIC bug may result in a prefetch beyond
+ * physical memory.
+ */
+ if (board_id == 0x3225103C) {
+ __u32 dma_prefetch;
+ dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+ dma_prefetch |= 0x8000;
+ writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+ }
+
+ h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+ /* Update the field, and then ring the doorbell */
+ writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+
+	/* under certain very rare conditions, this can take a while.
+ * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+ * as we enter this code.)
+ */
+ for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+ if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+ break;
+ /* delay and try again */
+ msleep(10);
+ }
+
+#ifdef HPSA_DEBUG
+ print_cfg_table(&pdev->dev, h->cfgtable);
+#endif /* HPSA_DEBUG */
+
+ if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+ dev_warn(&pdev->dev, "unable to get board into simple mode\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+ return 0;
+
+err_out_free_res:
+ /*
+ * Deliberately omit pci_disable_device(): it does something nasty to
+ * Smart Array controllers that pci_enable_device does not undo
+ */
+ pci_release_regions(pdev);
+ return err;
+}
+
+static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int i;
+ int dac;
+ struct ctlr_info *h;
+
+ if (number_of_controllers == 0)
+ printk(KERN_INFO DRIVER_NAME "\n");
+ if (reset_devices) {
+ /* Reset the controller with a PCI power-cycle */
+ if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
+ return -ENODEV;
+
+ /* Some devices (notably the HP Smart Array 5i Controller)
+ need a little pause here */
+ msleep(HPSA_POST_RESET_PAUSE_MSECS);
+
+ /* Now try to get the controller to respond to a no-op */
+ for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
+ if (hpsa_noop(pdev) == 0)
+ break;
+ else
+ dev_warn(&pdev->dev, "no-op failed%s\n",
+ (i < 11 ? "; re-trying" : ""));
+ }
+ }
+
+ BUILD_BUG_ON(sizeof(struct CommandList) % 8);
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -1;
+
+ h->busy_initializing = 1;
+ INIT_HLIST_HEAD(&h->cmpQ);
+ INIT_HLIST_HEAD(&h->reqQ);
+ mutex_init(&h->busy_shutting_down);
+ init_completion(&h->scan_wait);
+ if (hpsa_pci_init(h, pdev) != 0)
+ goto clean1;
+
+ sprintf(h->devname, "hpsa%d", number_of_controllers);
+ h->ctlr = number_of_controllers;
+ number_of_controllers++;
+ h->pdev = pdev;
+
+ /* configure PCI DMA stuff */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ dac = 1;
+ else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ dac = 0;
+ else {
+ dev_err(&pdev->dev, "no suitable DMA available\n");
+ goto clean1;
+ }
+
+ /* make sure the board interrupts are off */
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+ if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
+ IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
+ dev_err(&pdev->dev, "unable to get irq %d for %s\n",
+ h->intr[SIMPLE_MODE_INT], h->devname);
+ goto clean2;
+ }
+
+ dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
+ h->devname, pdev->device, pci_name(pdev),
+ h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
+
+ h->cmd_pool_bits =
+ kmalloc(((h->nr_cmds + BITS_PER_LONG -
+ 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
+ h->cmd_pool = pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->cmd_pool),
+ &(h->cmd_pool_dhandle));
+ h->errinfo_pool = pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->errinfo_pool),
+ &(h->errinfo_pool_dhandle));
+ if ((h->cmd_pool_bits == NULL)
+ || (h->cmd_pool == NULL)
+ || (h->errinfo_pool == NULL)) {
+ dev_err(&pdev->dev, "out of memory");
+ goto clean4;
+ }
+ spin_lock_init(&h->lock);
+
+ pci_set_drvdata(pdev, h);
+ memset(h->cmd_pool_bits, 0,
+ ((h->nr_cmds + BITS_PER_LONG -
+ 1) / BITS_PER_LONG) * sizeof(unsigned long));
+
+ hpsa_scsi_setup(h);
+
+ /* Turn the interrupts on so we can service requests */
+ h->access.set_intr_mask(h, HPSA_INTR_ON);
+
+ hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
+ h->busy_initializing = 0;
+ return 1;
+
+clean4:
+ kfree(h->cmd_pool_bits);
+ if (h->cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct CommandList),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ if (h->errinfo_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct ErrorInfo),
+ h->errinfo_pool,
+ h->errinfo_pool_dhandle);
+ free_irq(h->intr[SIMPLE_MODE_INT], h);
+clean2:
+clean1:
+ h->busy_initializing = 0;
+ kfree(h);
+ return -1;
+}
+
+static void hpsa_flush_cache(struct ctlr_info *h)
+{
+ char *flush_buf;
+ struct CommandList *c;
+
+ flush_buf = kzalloc(4, GFP_KERNEL);
+ if (!flush_buf)
+ return;
+
+ c = cmd_special_alloc(h);
+ if (!c) {
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ goto out_of_memory;
+ }
+ fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+ RAID_CTLR_LUNID, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
+ if (c->err_info->CommandStatus != 0)
+ dev_warn(&h->pdev->dev,
+ "error flushing cache on controller\n");
+ cmd_special_free(h, c);
+out_of_memory:
+ kfree(flush_buf);
+}
+
+static void hpsa_shutdown(struct pci_dev *pdev)
+{
+ struct ctlr_info *h;
+
+ h = pci_get_drvdata(pdev);
+	/* Send the flush-cache command so that all data in the
+	 * battery-backed cache is written out to disk, then turn the
+	 * board interrupts off.
+	 */
+ hpsa_flush_cache(h);
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+ free_irq(h->intr[2], h);
+#ifdef CONFIG_PCI_MSI
+ if (h->msix_vector)
+ pci_disable_msix(h->pdev);
+ else if (h->msi_vector)
+ pci_disable_msi(h->pdev);
+#endif /* CONFIG_PCI_MSI */
+}
+
+static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+{
+ struct ctlr_info *h;
+
+ if (pci_get_drvdata(pdev) == NULL) {
+		dev_err(&pdev->dev, "unable to remove device\n");
+ return;
+ }
+ h = pci_get_drvdata(pdev);
+ mutex_lock(&h->busy_shutting_down);
+ remove_from_scan_list(h);
+ hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
+ hpsa_shutdown(pdev);
+ iounmap(h->vaddr);
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct CommandList),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct ErrorInfo),
+ h->errinfo_pool, h->errinfo_pool_dhandle);
+ kfree(h->cmd_pool_bits);
+ /*
+ * Deliberately omit pci_disable_device(): it does something nasty to
+ * Smart Array controllers that pci_enable_device does not undo
+ */
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ mutex_unlock(&h->busy_shutting_down);
+ kfree(h);
+}
+
+static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
+ __attribute__((unused)) pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
+{
+ return -ENOSYS;
+}
+
+static struct pci_driver hpsa_pci_driver = {
+ .name = "hpsa",
+ .probe = hpsa_init_one,
+ .remove = __devexit_p(hpsa_remove_one),
+ .id_table = hpsa_pci_device_id, /* id_table */
+ .shutdown = hpsa_shutdown,
+ .suspend = hpsa_suspend,
+ .resume = hpsa_resume,
+};
+
+/*
+ * This is it.  Register the PCI driver information for the cards we
+ * control; the OS will call our registered routines when it finds one
+ * of our cards.
+ */
+static int __init hpsa_init(void)
+{
+ int err;
+ /* Start the scan thread */
+ hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
+ if (IS_ERR(hpsa_scan_thread)) {
+ err = PTR_ERR(hpsa_scan_thread);
+ return -ENODEV;
+ }
+ err = pci_register_driver(&hpsa_pci_driver);
+ if (err)
+ kthread_stop(hpsa_scan_thread);
+ return err;
+}
+
+static void __exit hpsa_cleanup(void)
+{
+ pci_unregister_driver(&hpsa_pci_driver);
+ kthread_stop(hpsa_scan_thread);
+}
+
+module_init(hpsa_init);
+module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
new file mode 100644
index 0000000..6bd1949
--- /dev/null
+++ b/drivers/scsi/hpsa.h
@@ -0,0 +1,273 @@
+/*
+ * Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_H
+#define HPSA_H
+
+#include <scsi/scsicam.h>
+
+#define IO_OK 0
+#define IO_ERROR 1
+
+struct ctlr_info;
+
+struct access_method {
+ void (*submit_command)(struct ctlr_info *h,
+ struct CommandList *c);
+ void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
+ unsigned long (*fifo_full)(struct ctlr_info *h);
+ unsigned long (*intr_pending)(struct ctlr_info *h);
+ unsigned long (*command_completed)(struct ctlr_info *h);
+};
+
+struct hpsa_scsi_dev_t {
+ int devtype;
+ int bus, target, lun; /* as presented to the OS */
+ unsigned char scsi3addr[8]; /* as presented to the HW */
+#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+ unsigned char device_id[16]; /* from inquiry pg. 0x83 */
+ unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
+ unsigned char model[16]; /* bytes 16-31 of inquiry data */
+ unsigned char revision[4]; /* bytes 32-35 of inquiry data */
+ unsigned char raid_level; /* from inquiry page 0xC1 */
+};
+
+struct ctlr_info {
+ int ctlr;
+ char devname[8];
+ char *product_name;
+ char firm_ver[4]; /* Firmware version */
+ struct pci_dev *pdev;
+ __u32 board_id;
+ void __iomem *vaddr;
+ unsigned long paddr;
+ int nr_cmds; /* Number of commands allowed on this controller */
+ struct CfgTable __iomem *cfgtable;
+ int interrupts_enabled;
+ int major;
+ int max_commands;
+ int commands_outstanding;
+ int max_outstanding; /* Debug */
+	int			usage_count;  /* number of opens on all minor devices */
+# define DOORBELL_INT 0
+# define PERF_MODE_INT 1
+# define SIMPLE_MODE_INT 2
+# define MEMQ_MODE_INT 3
+ unsigned int intr[4];
+ unsigned int msix_vector;
+ unsigned int msi_vector;
+ struct access_method access;
+
+ /* queue and queue Info */
+ struct hlist_head reqQ;
+ struct hlist_head cmpQ;
+ unsigned int Qdepth;
+ unsigned int maxQsinceinit;
+ unsigned int maxSG;
+ spinlock_t lock;
+
+ /* pointers to command and error info pool */
+ struct CommandList *cmd_pool;
+ dma_addr_t cmd_pool_dhandle;
+ struct ErrorInfo *errinfo_pool;
+ dma_addr_t errinfo_pool_dhandle;
+ unsigned long *cmd_pool_bits;
+ int nr_allocs;
+ int nr_frees;
+ int busy_initializing;
+ int busy_scanning;
+ struct mutex busy_shutting_down;
+ struct list_head scan_list;
+ struct completion scan_wait;
+
+ struct Scsi_Host *scsi_host;
+ spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
+ int ndevices; /* number of used elements in .dev[] array. */
+#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
+ struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+};
+#define HPSA_ABORT_MSG 0
+#define HPSA_DEVICE_RESET_MSG 1
+#define HPSA_BUS_RESET_MSG 2
+#define HPSA_HOST_RESET_MSG 3
+#define HPSA_MSG_SEND_RETRY_LIMIT 10
+#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
+
+/* Maximum time in seconds driver will wait for command completions
+ * when polling before giving up.
+ */
+#define HPSA_MAX_POLL_TIME_SECS (20)
+
+/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
+ * how many times to retry TEST UNIT READY on a device
+ * while waiting for it to become ready before giving up.
+ * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
+ * between sending TURs while waiting for a device
+ * to become ready.
+ */
+#define HPSA_TUR_RETRY_LIMIT (20)
+#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
+
+/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
+ * to become ready, in seconds, before giving up on it.
+ * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
+ * between polling the board to see if it is ready, in
+ * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and
+ * HPSA_BOARD_READY_ITERATIONS are derived from those.
+ */
+#define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
+#define HPSA_BOARD_READY_POLL_INTERVAL \
+ ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
+#define HPSA_BOARD_READY_ITERATIONS \
+ ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
+ HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
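+/* With the defaults above this works out to 120000 / 100 = 1200 polls,
+ * 100 milliseconds apart.
+ */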
+#define HPSA_POST_RESET_PAUSE_MSECS (3000)
+#define HPSA_POST_RESET_NOOP_RETRIES (12)
+
+/* Define the different access_methods */
+/*
+ * Memory mapped FIFO interface (SMART 53xx cards)
+ */
+#define SA5_DOORBELL 0x20
+#define SA5_REQUEST_PORT_OFFSET 0x40
+#define SA5_REPLY_INTR_MASK_OFFSET 0x34
+#define SA5_REPLY_PORT_OFFSET 0x44
+#define SA5_INTR_STATUS 0x30
+#define SA5_SCRATCHPAD_OFFSET 0xB0
+
+#define SA5_CTCFG_OFFSET 0xB4
+#define SA5_CTMEM_OFFSET 0xB8
+
+#define SA5_INTR_OFF 0x08
+#define SA5B_INTR_OFF 0x04
+#define SA5_INTR_PENDING 0x08
+#define SA5B_INTR_PENDING 0x04
+#define FIFO_EMPTY 0xffffffff
+#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
+
+#define HPSA_ERROR_BIT 0x02
+#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
+#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
+#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
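+/* Example: a completed tag of 0x2c has the index bit (0x04) set, so it
+ * carries a direct command-pool index of 0x2c >> 3 = 5; the low two
+ * error bits are masked off when matching a tag by bus address.
+ */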
+
+#define HPSA_INTR_ON 1
+#define HPSA_INTR_OFF 0
+/*
+ * Send the command to the hardware
+ */
+static void SA5_submit_command(struct ctlr_info *h,
+ struct CommandList *c)
+{
+#ifdef HPSA_DEBUG
+ printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
+ c->busaddr);
+#endif /* HPSA_DEBUG */
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ h->commands_outstanding++;
+ if (h->commands_outstanding > h->max_outstanding)
+ h->max_outstanding = h->commands_outstanding;
+}
+
+/*
+ * This card is the opposite of the other cards.
+ * 0 turns interrupts on...
+ * 0x08 turns them off...
+ */
+static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+ if (val) { /* Turn interrupts on */
+ h->interrupts_enabled = 1;
+ writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ } else { /* Turn them off */
+ h->interrupts_enabled = 0;
+ writel(SA5_INTR_OFF,
+ h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ }
+}
+/*
+ * Returns true if fifo is full.
+ *
+ */
+static unsigned long SA5_fifo_full(struct ctlr_info *h)
+{
+ if (h->commands_outstanding >= h->max_commands)
+ return 1;
+ else
+ return 0;
+
+}
+/*
+ * returns value read from hardware.
+ * returns FIFO_EMPTY if there is nothing to read
+ */
+static unsigned long SA5_completed(struct ctlr_info *h)
+{
+ unsigned long register_value
+ = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+
+ if (register_value != FIFO_EMPTY)
+ h->commands_outstanding--;
+
+#ifdef HPSA_DEBUG
+ if (register_value != FIFO_EMPTY)
+ printk(KERN_INFO "hpsa: Read %lx back from board\n",
+ register_value);
+ else
+ printk(KERN_INFO "hpsa: FIFO Empty read\n");
+#endif
+
+ return register_value;
+}
+/*
+ * Returns true if an interrupt is pending.
+ */
+static unsigned long SA5_intr_pending(struct ctlr_info *h)
+{
+ unsigned long register_value =
+ readl(h->vaddr + SA5_INTR_STATUS);
+#ifdef HPSA_DEBUG
+ printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
+#endif /* HPSA_DEBUG */
+ if (register_value & SA5_INTR_PENDING)
+ return 1;
+ return 0 ;
+}
+
+
+static struct access_method SA5_access = {
+ SA5_submit_command,
+ SA5_intr_mask,
+ SA5_fifo_full,
+ SA5_intr_pending,
+ SA5_completed,
+};
+
+struct board_type {
+ __u32 board_id;
+ char *product_name;
+ struct access_method *access;
+};
+
+
+/* end of old hpsa_scsi.h file */
+
+#endif /* HPSA_H */
+
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
new file mode 100644
index 0000000..12d7138
--- /dev/null
+++ b/drivers/scsi/hpsa_cmd.h
@@ -0,0 +1,326 @@
+/*
+ * Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_CMD_H
+#define HPSA_CMD_H
+
+/* general boundary definitions */
+#define SENSEINFOBYTES 32 /* may vary between hbas */
+#define MAXSGENTRIES 31
+#define MAXREPLYQS 256
+
+/* Command Status value */
+#define CMD_SUCCESS 0x0000
+#define CMD_TARGET_STATUS 0x0001
+#define CMD_DATA_UNDERRUN 0x0002
+#define CMD_DATA_OVERRUN 0x0003
+#define CMD_INVALID 0x0004
+#define CMD_PROTOCOL_ERR 0x0005
+#define CMD_HARDWARE_ERR 0x0006
+#define CMD_CONNECTION_LOST 0x0007
+#define CMD_ABORTED 0x0008
+#define CMD_ABORT_FAILED 0x0009
+#define CMD_UNSOLICITED_ABORT 0x000A
+#define CMD_TIMEOUT 0x000B
+#define CMD_UNABORTABLE 0x000C
+
+/* Unit Attentions ASC's as defined for the MSA2012sa */
+#define POWER_OR_RESET 0x29
+#define STATE_CHANGED 0x2a
+#define UNIT_ATTENTION_CLEARED 0x2f
+#define LUN_FAILED 0x3e
+#define REPORT_LUNS_CHANGED 0x3f
+
+/* Unit Attentions ASCQ's as defined for the MSA2012sa */
+
+ /* These ASCQ's defined for ASC = POWER_OR_RESET */
+#define POWER_ON_RESET 0x00
+#define POWER_ON_REBOOT 0x01
+#define SCSI_BUS_RESET 0x02
+#define MSA_TARGET_RESET 0x03
+#define CONTROLLER_FAILOVER 0x04
+#define TRANSCEIVER_SE 0x05
+#define TRANSCEIVER_LVD 0x06
+
+ /* These ASCQ's defined for ASC = STATE_CHANGED */
+#define RESERVATION_PREEMPTED 0x03
+#define ASYM_ACCESS_CHANGED 0x06
+#define LUN_CAPACITY_CHANGED 0x09
+
+/* transfer direction */
+#define XFER_NONE 0x00
+#define XFER_WRITE 0x01
+#define XFER_READ 0x02
+#define XFER_RSVD 0x03
+
+/* task attribute */
+#define ATTR_UNTAGGED 0x00
+#define ATTR_SIMPLE 0x04
+#define ATTR_HEADOFQUEUE 0x05
+#define ATTR_ORDERED 0x06
+#define ATTR_ACA 0x07
+
+/* cdb type */
+#define TYPE_CMD 0x00
+#define TYPE_MSG 0x01
+
+/* config space register offsets */
+#define CFG_VENDORID 0x00
+#define CFG_DEVICEID 0x02
+#define CFG_I2OBAR 0x10
+#define CFG_MEM1BAR 0x14
+
+/* i2o space register offsets */
+#define I2O_IBDB_SET 0x20
+#define I2O_IBDB_CLEAR 0x70
+#define I2O_INT_STATUS 0x30
+#define I2O_INT_MASK 0x34
+#define I2O_IBPOST_Q 0x40
+#define I2O_OBPOST_Q 0x44
+#define I2O_DMA1_CFG 0x214
+
+/* Configuration Table */
+#define CFGTBL_ChangeReq 0x00000001l
+#define CFGTBL_AccCmds 0x00000001l
+
+#define CFGTBL_Trans_Simple 0x00000002l
+
+#define CFGTBL_BusType_Ultra2 0x00000001l
+#define CFGTBL_BusType_Ultra3 0x00000002l
+#define CFGTBL_BusType_Fibre1G 0x00000100l
+#define CFGTBL_BusType_Fibre2G 0x00000200l
+struct vals32 {
+ __u32 lower;
+ __u32 upper;
+};
+
+union u64bit {
+ struct vals32 val32;
+ __u64 val;
+};
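+/* Used to split a 64-bit DMA address into the two 32-bit halves the
+ * controller expects, e.g. (from cmd_special_alloc() in hpsa.c):
+ *
+ *	temp64.val = (__u64) err_dma_handle;
+ *	c->ErrDesc.Addr.lower = temp64.val32.lower;
+ *	c->ErrDesc.Addr.upper = temp64.val32.upper;
+ *
+ * This assumes the usual little-endian layout of val32 within the union.
+ */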
+
+/* FIXME this is a per controller value (barf!) */
+#define HPSA_MAX_TARGETS_PER_CTLR 16
+#define HPSA_MAX_LUN 256
+#define HPSA_MAX_PHYS_LUN 1024
+
+/* SCSI-3 Commands */
+#pragma pack(1)
+
+#define HPSA_INQUIRY 0x12
+struct InquiryData {
+ __u8 data_byte[36];
+};
+
+#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
+#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
+struct ReportLUNdata {
+ __u8 LUNListLength[4];
+ __u32 reserved;
+ __u8 LUN[HPSA_MAX_LUN][8];
+};
+
+struct ReportExtendedLUNdata {
+ __u8 LUNListLength[4];
+ __u8 extended_response_flag;
+ __u8 reserved[3];
+ __u8 LUN[HPSA_MAX_LUN][24];
+};
+
+struct SenseSubsystem_info {
+ __u8 reserved[36];
+ __u8 portname[8];
+ __u8 reserved1[1108];
+};
+
+#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
+struct ReadCapdata {
+ __u8 total_size[4]; /* Total size in blocks */
+ __u8 block_size[4]; /* Size of blocks in bytes */
+};
+
+#if 0
+/* 12 byte commands not implemented in firmware yet. */
+#define HPSA_READ 0xa8
+#define HPSA_WRITE 0xaa
+#endif
+
+#define HPSA_READ 0x28 /* Read(10) */
+#define HPSA_WRITE 0x2a /* Write(10) */
+
+/* BMIC commands */
+#define BMIC_READ 0x26
+#define BMIC_WRITE 0x27
+#define BMIC_CACHE_FLUSH 0xc2
+#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
+
+/* Command List Structure */
+union SCSI3Addr {
+ struct {
+ __u8 Dev;
+ __u8 Bus:6;
+ __u8 Mode:2; /* b00 */
+ } PeripDev;
+ struct {
+ __u8 DevLSB;
+ __u8 DevMSB:6;
+ __u8 Mode:2; /* b01 */
+ } LogDev;
+ struct {
+ __u8 Dev:5;
+ __u8 Bus:3;
+ __u8 Targ:6;
+ __u8 Mode:2; /* b10 */
+ } LogUnit;
+};
+
+struct PhysDevAddr {
+ __u32 TargetId:24;
+ __u32 Bus:6;
+ __u32 Mode:2;
+ /* 2 level target device addr */
+ union SCSI3Addr Target[2];
+};
+
+struct LogDevAddr {
+ __u32 VolId:30;
+ __u32 Mode:2;
+ __u8 reserved[4];
+};
+
+union LUNAddr {
+ __u8 LunAddrBytes[8];
+ union SCSI3Addr SCSI3Lun[4];
+ struct PhysDevAddr PhysDev;
+ struct LogDevAddr LogDev;
+};
+
+struct CommandListHeader {
+ __u8 ReplyQueue;
+ __u8 SGList;
+ __u16 SGTotal;
+ struct vals32 Tag;
+ union LUNAddr LUN;
+};
+
+struct RequestBlock {
+ __u8 CDBLen;
+ struct {
+ __u8 Type:3;
+ __u8 Attribute:3;
+ __u8 Direction:2;
+ } Type;
+ __u16 Timeout;
+ __u8 CDB[16];
+};
+
+struct ErrDescriptor {
+ struct vals32 Addr;
+ __u32 Len;
+};
+
+struct SGDescriptor {
+ struct vals32 Addr;
+ __u32 Len;
+ __u32 Ext;
+};
+
+union MoreErrInfo {
+ struct {
+ __u8 Reserved[3];
+ __u8 Type;
+ __u32 ErrorInfo;
+ } Common_Info;
+ struct {
+ __u8 Reserved[2];
+ __u8 offense_size; /* size of offending entry */
+ __u8 offense_num; /* byte # of offense 0-base */
+ __u32 offense_value;
+ } Invalid_Cmd;
+};
+struct ErrorInfo {
+ __u8 ScsiStatus;
+ __u8 SenseLen;
+ __u16 CommandStatus;
+ __u32 ResidualCnt;
+ union MoreErrInfo MoreErrInfo;
+ __u8 SenseInfo[SENSEINFOBYTES];
+};
+/* Command types */
+#define CMD_IOCTL_PEND 0x01
+#define CMD_SCSI 0x03
+
+struct ctlr_info; /* defined in hpsa.h */
+/* The size of this structure needs to be divisible by 8
+ * on all architectures, because the controller uses the 2
+ * lower bits of the address, and the driver uses 1 lower
+ * bit (3 bits total).
+ */
+struct CommandList {
+ struct CommandListHeader Header;
+ struct RequestBlock Request;
+ struct ErrDescriptor ErrDesc;
+ struct SGDescriptor SG[MAXSGENTRIES];
+ /* information associated with the command */
+ __u32 busaddr; /* physical addr of this record */
+ struct ErrorInfo *err_info; /* pointer to the allocated mem */
+ struct ctlr_info *h;
+ int cmd_type;
+ long cmdindex;
+ struct hlist_node list;
+ struct CommandList *prev;
+ struct CommandList *next;
+ struct request *rq;
+ struct completion *waiting;
+ int retry_count;
+ void *scsi_cmd;
+};
+
+/* Configuration Table Structure */
+struct HostWrite {
+ __u32 TransportRequest;
+ __u32 Reserved;
+ __u32 CoalIntDelay;
+ __u32 CoalIntCount;
+};
+
+struct CfgTable {
+ __u8 Signature[4];
+ __u32 SpecValence;
+ __u32 TransportSupport;
+ __u32 TransportActive;
+ struct HostWrite HostWrite;
+ __u32 CmdsOutMax;
+ __u32 BusTypes;
+ __u32 Reserved;
+ __u8 ServerName[16];
+ __u32 HeartBeat;
+ __u32 SCSI_Prefetch;
+};
+
+struct hpsa_pci_info {
+ unsigned char bus;
+ unsigned char dev_fn;
+ unsigned short domain;
+ __u32 board_id;
+};
+
+#pragma pack()
+#endif /* HPSA_CMD_H */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 8643f50..9e52d16 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6521,6 +6521,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
int rc;
ENTER;
+ ioa_cfg->pdev->state_saved = true;
rc = pci_restore_state(ioa_cfg->pdev);
if (rc != PCIBIOS_SUCCESSFUL) {
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 19d711c..7f43647 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1890,7 +1890,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
fc_exch_setup_hdr(ep, fp, ep->f_ctl);
sp->cnt++;
- if (ep->xid <= lport->lro_xid)
+ if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
if (unlikely(lport->tt.frame_send(lport, fp)))
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c4b58d0..6fde2fa 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -68,18 +68,20 @@ struct kmem_cache *scsi_pkt_cachep;
/**
* struct fc_fcp_internal - FCP layer internal data
- * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_queue_lock: Protects the scsi_pkt_queue
* @scsi_pkt_queue: Current FCP packets
* @last_can_queue_ramp_down_time: ramp down time
* @last_can_queue_ramp_up_time: ramp up time
* @max_can_queue: max can_queue size
*/
struct fc_fcp_internal {
- mempool_t *scsi_pkt_pool;
- struct list_head scsi_pkt_queue;
- unsigned long last_can_queue_ramp_down_time;
- unsigned long last_can_queue_ramp_up_time;
- int max_can_queue;
+ mempool_t *scsi_pkt_pool;
+ spinlock_t scsi_queue_lock;
+ struct list_head scsi_pkt_queue;
+ unsigned long last_can_queue_ramp_down_time;
+ unsigned long last_can_queue_ramp_up_time;
+ int max_can_queue;
};
#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -296,9 +298,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
{
struct fc_lport *lport;
- if (!fsp)
- return;
-
lport = fsp->lp;
if ((fsp->req_flags & FC_SRB_READ) &&
(lport->lro_enabled) && (lport->tt.ddp_setup)) {
@@ -410,12 +409,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
unsigned long flags;
fp = fc_frame_alloc(lport, len);
- if (!fp) {
- spin_lock_irqsave(lport->host->host_lock, flags);
- fc_fcp_can_queue_ramp_down(lport);
- spin_unlock_irqrestore(lport->host->host_lock, flags);
- }
- return fp;
+ if (likely(fp))
+ return fp;
+
+ /* error case */
+ spin_lock_irqsave(lport->host->host_lock, flags);
+ fc_fcp_can_queue_ramp_down(lport);
+ spin_unlock_irqrestore(lport->host->host_lock, flags);
+ return NULL;
}
/**
@@ -990,7 +991,7 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
struct scsi_cmnd *sc_cmd;
unsigned long flags;
- spin_lock_irqsave(lport->host->host_lock, flags);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
restart:
list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
sc_cmd = fsp->cmd;
@@ -1001,7 +1002,7 @@ restart:
continue;
fc_fcp_pkt_hold(fsp);
- spin_unlock_irqrestore(lport->host->host_lock, flags);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
if (!fc_fcp_lock_pkt(fsp)) {
fc_fcp_cleanup_cmd(fsp, error);
@@ -1010,14 +1011,14 @@ restart:
}
fc_fcp_pkt_release(fsp);
- spin_lock_irqsave(lport->host->host_lock, flags);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
/*
* while we dropped the lock multiple pkts could
* have been released, so we have to start over.
*/
goto restart;
}
- spin_unlock_irqrestore(lport->host->host_lock, flags);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
/**
@@ -1035,11 +1036,12 @@ static void fc_fcp_abort_io(struct fc_lport *lport)
* @fsp: The FCP packet to send
*
* Return: Zero for success and -1 for failure
- * Locks: Called with the host lock and irqs disabled.
+ * Locks: Called without locks held
*/
static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
{
struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ unsigned long flags;
int rc;
fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1049,13 +1051,16 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
int_to_scsilun(fsp->cmd->device->lun,
(struct scsi_lun *)fsp->cdb_cmd.fc_lun);
memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
- list_add_tail(&fsp->list, &si->scsi_pkt_queue);
- spin_unlock_irq(lport->host->host_lock);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
- spin_lock_irq(lport->host->host_lock);
- if (rc)
+ if (unlikely(rc)) {
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ }
return rc;
}
@@ -1752,6 +1757,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
struct fcoe_dev_stats *stats;
lport = shost_priv(sc_cmd->device->host);
+ spin_unlock_irq(lport->host->host_lock);
rval = fc_remote_port_chkready(rport);
if (rval) {
@@ -1834,6 +1840,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
rc = SCSI_MLQUEUE_HOST_BUSY;
}
out:
+ spin_lock_irq(lport->host->host_lock);
return rc;
}
EXPORT_SYMBOL(fc_queuecommand);
@@ -1864,11 +1871,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
lport = fsp->lp;
si = fc_get_scsi_internal(lport);
- spin_lock_irqsave(lport->host->host_lock, flags);
- if (!fsp->cmd) {
- spin_unlock_irqrestore(lport->host->host_lock, flags);
+ if (!fsp->cmd)
return;
- }
/*
* if can_queue ramp down is done then try can_queue ramp up
@@ -1880,10 +1884,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd = fsp->cmd;
fsp->cmd = NULL;
- if (!sc_cmd->SCp.ptr) {
- spin_unlock_irqrestore(lport->host->host_lock, flags);
+ if (!sc_cmd->SCp.ptr)
return;
- }
CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
switch (fsp->status_code) {
@@ -1945,10 +1947,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
break;
}
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
- spin_unlock_irqrestore(lport->host->host_lock, flags);
/* release ref from initial allocation in queue command */
fc_fcp_pkt_release(fsp);
@@ -2216,6 +2219,7 @@ int fc_fcp_init(struct fc_lport *lport)
lport->scsi_priv = si;
si->max_can_queue = lport->host->can_queue;
INIT_LIST_HEAD(&si->scsi_pkt_queue);
+ spin_lock_init(&si->scsi_queue_lock);
si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
if (!si->scsi_pkt_pool) {
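[Editor's note] The fc_fcp.c hunks above replace use of the Scsi_Host host_lock with a dedicated si->scsi_queue_lock that protects only scsi_pkt_queue, so fc_fcp_pkt_send() and fc_io_compl() no longer need the host lock at all. A minimal sketch of the new pattern, assuming the struct and field names from the diff; the two wrapper functions are illustrative only:

#include <linux/spinlock.h>
#include <linux/list.h>

/* Mirrors the new struct fc_fcp_internal layout shown above. */
struct fc_fcp_internal_sketch {
	spinlock_t		scsi_queue_lock;	/* protects scsi_pkt_queue */
	struct list_head	scsi_pkt_queue;		/* in-flight FCP packets */
};

struct fc_fcp_pkt_sketch {
	struct list_head	list;
};

/* Hypothetical wrappers showing the pattern used by fc_fcp_pkt_send()
 * and fc_io_compl() after this change: the queue is touched only under
 * scsi_queue_lock, never under the SCSI host lock. */
static void fcp_queue_add(struct fc_fcp_internal_sketch *si,
			  struct fc_fcp_pkt_sketch *fsp)
{
	unsigned long flags;

	spin_lock_irqsave(&si->scsi_queue_lock, flags);
	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}

static void fcp_queue_del(struct fc_fcp_internal_sketch *si,
			  struct fc_fcp_pkt_sketch *fsp)
{
	unsigned long flags;

	spin_lock_irqsave(&si->scsi_queue_lock, flags);
	list_del(&fsp->list);
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}

fc_queuecommand() itself is still entered with the host lock held by the midlayer at this point, which is why the hunk above drops it on entry and re-takes it at the out: label.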
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 74338c8..7ec8ce7 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -537,7 +537,9 @@ int fc_fabric_login(struct fc_lport *lport)
int rc = -1;
mutex_lock(&lport->lp_mutex);
- if (lport->state == LPORT_ST_DISABLED) {
+ if (lport->state == LPORT_ST_DISABLED ||
+ lport->state == LPORT_ST_LOGO) {
+ fc_lport_state_enter(lport, LPORT_ST_RESET);
fc_lport_enter_reset(lport);
rc = 0;
}
@@ -967,6 +969,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
fc_lport_state(lport));
+ if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
+ return;
+
if (lport->vport) {
if (lport->link_up)
fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
@@ -1795,7 +1800,8 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
u32 did;
job->reply->reply_payload_rcv_len = 0;
- rsp->resid_len = job->reply_payload.payload_len;
+ if (rsp)
+ rsp->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 35ca0e7..97923bb 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -310,6 +310,7 @@ static void fc_rport_work(struct work_struct *work)
restart = 1;
else
list_del(&rdata->peers);
+ rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
mutex_unlock(&lport->disc.disc_mutex);
}
@@ -622,7 +623,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
tov = ntohl(plp->fl_csp.sp_e_d_tov);
if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
- tov /= 1000;
+ tov /= 1000000;
if (tov > rdata->e_d_tov)
rdata->e_d_tov = tov;
csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
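[Editor's note] The PLOGI-response hunk above changes the E_D_TOV divisor from 1000 to 1000000. The likely reading is that when the FC_SP_FT_EDTR service-parameter bit is set the advertised value carries nanosecond resolution, while rdata->e_d_tov is kept in milliseconds. A hedged sketch of that conversion under this assumption; the helper is illustrative and not part of libfc:

#include <linux/types.h>
#include <scsi/fc/fc_els.h>	/* FC_SP_FT_EDTR */

/* Illustrative only: convert the PLOGI common-service-parameter E_D_TOV
 * into milliseconds.  With FC_SP_FT_EDTR set the value is assumed to be
 * in nanoseconds, hence the 10^6 divisor introduced above. */
static inline unsigned int csp_e_d_tov_to_ms(u32 tov, u16 features)
{
	if (features & FC_SP_FT_EDTR)
		return tov / 1000000;	/* ns -> ms */
	return tov;			/* already ms */
}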
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b7689f3..c28a712 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -517,7 +517,7 @@ static void iscsi_free_task(struct iscsi_task *task)
if (conn->login_task == task)
return;
- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+ kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
if (sc) {
task->sc = NULL;
@@ -737,7 +737,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
- if (!__kfifo_get(session->cmdpool.queue,
+ if (!kfifo_out(&session->cmdpool.queue,
(void*)&task, sizeof(void*)))
return NULL;
}
@@ -1567,7 +1567,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
{
struct iscsi_task *task;
- if (!__kfifo_get(conn->session->cmdpool.queue,
+ if (!kfifo_out(&conn->session->cmdpool.queue,
(void *) &task, sizeof(void *)))
return NULL;
@@ -2461,12 +2461,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
if (q->pool == NULL)
return -ENOMEM;
- q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
- GFP_KERNEL, NULL);
- if (IS_ERR(q->queue)) {
- q->queue = NULL;
- goto enomem;
- }
+ kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
for (i = 0; i < max; i++) {
q->pool[i] = kzalloc(item_size, GFP_KERNEL);
@@ -2474,7 +2469,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
q->max = i;
goto enomem;
}
- __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
+ kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
}
if (items) {
@@ -2497,7 +2492,6 @@ void iscsi_pool_free(struct iscsi_pool *q)
for (i = 0; i < q->max; i++)
kfree(q->pool[i]);
kfree(q->pool);
- kfree(q->queue);
}
EXPORT_SYMBOL_GPL(iscsi_pool_free);
@@ -2825,7 +2819,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
/* allocate login_task used for the login/text sequences */
spin_lock_bh(&session->lock);
- if (!__kfifo_get(session->cmdpool.queue,
+ if (!kfifo_out(&session->cmdpool.queue,
(void*)&conn->login_task,
sizeof(void*))) {
spin_unlock_bh(&session->lock);
@@ -2845,7 +2839,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
return cls_conn;
login_task_data_alloc_fail:
- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
+ kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
login_task_alloc_fail:
iscsi_destroy_conn(cls_conn);
@@ -2908,7 +2902,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
+ kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
if (session->leadconn == conn)
session->leadconn = NULL;
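[Editor's note] The libiscsi.c hunks above are part of the kfifo API migration this merge depends on: the fifo is now embedded in its owner (struct kfifo rather than struct kfifo *), kfifo_init() takes the caller's buffer and no longer returns an object to free, and __kfifo_get()/__kfifo_put() become kfifo_out()/kfifo_in(), which return the number of bytes copied. A minimal sketch of the pointer-pool pattern used by iscsi_pool_init() and iscsi_alloc_task(); the pool structure and item size here are illustrative:

#include <linux/kfifo.h>
#include <linux/slab.h>

/* Hypothetical pool of items tracked by pointer, mirroring the code
 * above: the kfifo stores void * entries, so every put/get moves
 * exactly sizeof(void *) bytes. */
struct item_pool {
	struct kfifo	queue;
	void		**pool;
	int		max;
};

static int item_pool_init(struct item_pool *q, int max)
{
	int i;

	q->max = max;
	q->pool = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!q->pool)
		return -ENOMEM;

	/* kfifo_init() now initializes a caller-embedded fifo over the
	 * supplied buffer instead of allocating one. */
	kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));

	for (i = 0; i < max; i++) {
		void *item = kzalloc(64, GFP_KERNEL);

		if (!item)
			return -ENOMEM;	/* real code would unwind the pool */
		kfifo_in(&q->queue, (void *)&item, sizeof(void *));
	}
	return 0;
}

static void *item_pool_get(struct item_pool *q)
{
	void *item;

	/* kfifo_out() returns the number of bytes copied out. */
	if (kfifo_out(&q->queue, (void *)&item, sizeof(void *)) !=
	    sizeof(void *))
		return NULL;
	return item;
}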
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index ca25ee5..4ad87fd 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -445,15 +445,15 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
return;
/* flush task's r2t queues */
- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
}
r2t = tcp_task->r2t;
if (r2t != NULL) {
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
tcp_task->r2t = NULL;
}
@@ -541,7 +541,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
return 0;
}
- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
if (!rc) {
iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
"Target has sent more R2Ts than it "
@@ -554,7 +554,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
if (r2t->data_length == 0) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with zero data len\n");
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
}
@@ -570,7 +570,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
"invalid R2T with data len %u at offset %u "
"and total length %d\n", r2t->data_length,
r2t->data_offset, scsi_out(task->sc)->length);
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
}
@@ -580,7 +580,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
r2t->sent = 0;
tcp_task->exp_datasn = r2tsn + 1;
- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+ kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
conn->r2t_pdus_cnt++;
iscsi_requeue_task(task);
@@ -951,7 +951,7 @@ int iscsi_tcp_task_init(struct iscsi_task *task)
return conn->session->tt->init_pdu(task, 0, task->data_count);
}
- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+ BUG_ON(kfifo_len(&tcp_task->r2tqueue));
tcp_task->exp_datasn = 0;
/* Prepare PDU, optionally w/ immediate data */
@@ -982,7 +982,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
if (r2t->data_length <= r2t->sent) {
ISCSI_DBG_TCP(task->conn,
" done with r2t %p\n", r2t);
- __kfifo_put(tcp_task->r2tpool.queue,
+ kfifo_in(&tcp_task->r2tpool.queue,
(void *)&tcp_task->r2t,
sizeof(void *));
tcp_task->r2t = r2t = NULL;
@@ -990,9 +990,12 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
}
if (r2t == NULL) {
- __kfifo_get(tcp_task->r2tqueue,
- (void *)&tcp_task->r2t, sizeof(void *));
- r2t = tcp_task->r2t;
+ if (kfifo_out(&tcp_task->r2tqueue,
+ (void *)&tcp_task->r2t, sizeof(void *)) !=
+ sizeof(void *))
+ r2t = NULL;
+ else
+ r2t = tcp_task->r2t;
}
spin_unlock_bh(&session->lock);
}
@@ -1127,9 +1130,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
}
/* R2T xmit queue */
- tcp_task->r2tqueue = kfifo_alloc(
- session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+ if (kfifo_alloc(&tcp_task->r2tqueue,
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) {
iscsi_pool_free(&tcp_task->r2tpool);
goto r2t_alloc_fail;
}
@@ -1142,7 +1144,7 @@ r2t_alloc_fail:
struct iscsi_task *task = session->cmds[i];
struct iscsi_tcp_task *tcp_task = task->dd_data;
- kfifo_free(tcp_task->r2tqueue);
+ kfifo_free(&tcp_task->r2tqueue);
iscsi_pool_free(&tcp_task->r2tpool);
}
return -ENOMEM;
@@ -1157,7 +1159,7 @@ void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
struct iscsi_task *task = session->cmds[i];
struct iscsi_tcp_task *tcp_task = task->dd_data;
- kfifo_free(tcp_task->r2tqueue);
+ kfifo_free(&tcp_task->r2tqueue);
iscsi_pool_free(&tcp_task->r2tpool);
}
}
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 9ad38e8..ab19b3b 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -58,19 +58,15 @@ static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
goto free_pool;
spin_lock_init(&q->lock);
- q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
- GFP_KERNEL, &q->lock);
- if (IS_ERR(q->queue))
- goto free_item;
+ kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *));
for (i = 0, iue = q->items; i < max; i++) {
- __kfifo_put(q->queue, (void *) &iue, sizeof(void *));
+ kfifo_in(&q->queue, (void *) &iue, sizeof(void *));
iue->sbuf = ring[i];
iue++;
}
return 0;
-free_item:
kfree(q->items);
free_pool:
kfree(q->pool);
@@ -167,7 +163,11 @@ struct iu_entry *srp_iu_get(struct srp_target *target)
{
struct iu_entry *iue = NULL;
- kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
+ if (kfifo_out_locked(&target->iu_queue.queue, (void *) &iue,
+ sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) {
+ WARN_ONCE(1, "unexpected fifo state");
+ return NULL;
+ }
if (!iue)
return iue;
iue->target = target;
@@ -179,7 +179,8 @@ EXPORT_SYMBOL_GPL(srp_iu_get);
void srp_iu_put(struct iu_entry *iue)
{
- kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
+ kfifo_in_locked(&iue->target->iu_queue.queue, (void *) &iue,
+ sizeof(void *), &iue->target->iu_queue.lock);
}
EXPORT_SYMBOL_GPL(srp_iu_put);
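[Editor's note] libsrp keeps its own spinlock in struct srp_queue, so the conversion above uses the _locked kfifo variants, which take the lock as an argument on every call instead of recording it at kfifo_init() time as the old API did. A small sketch of the put/get pair under an external lock; the queue structure is illustrative and assumes the lock and fifo were initialized elsewhere with spin_lock_init() and kfifo_init():

#include <linux/kfifo.h>
#include <linux/spinlock.h>

struct ptr_queue {			/* stands in for struct srp_queue */
	spinlock_t	lock;
	struct kfifo	queue;
};

/* kfifo_out_locked()/kfifo_in_locked() wrap the copy in the given
 * spinlock, replacing the old kfifo_get()/kfifo_put() which used the
 * lock registered at allocation time. */
static void *ptr_queue_get(struct ptr_queue *q)
{
	void *entry = NULL;

	if (kfifo_out_locked(&q->queue, (void *)&entry, sizeof(void *),
			     &q->lock) != sizeof(void *))
		return NULL;
	return entry;
}

static void ptr_queue_put(struct ptr_queue *q, void *entry)
{
	kfifo_in_locked(&q->queue, (void *)&entry, sizeof(void *), &q->lock);
}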
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index ce52270..2cc3968 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4142,8 +4142,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
if (vport->fc_rscn_flush) {
/* Another thread is walking fc_rscn_id_list on this vport */
- spin_unlock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return 0;
@@ -5948,8 +5948,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_initial_fdisc(vport);
break;
}
-
} else {
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
if (vport == phba->pport)
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3b94244..2445e39 100755..100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -747,6 +747,10 @@ lpfc_linkdown(struct lpfc_hba *phba)
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
+
+ /* Block all SCSI stack I/Os */
+ lpfc_scsi_dev_block(phba);
+
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
if (phba->link_state > LPFC_LINK_DOWN) {
@@ -1555,10 +1559,16 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* to book keeping the FCFIs can be used.
*/
if (shdr_status || shdr_add_status) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2521 READ_FCF_RECORD mailbox failed "
- "with status x%x add_status x%x, mbx\n",
- shdr_status, shdr_add_status);
+ if (shdr_status == STATUS_FCF_TABLE_EMPTY) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2726 READ_FCF_RECORD Indicates empty "
+ "FCF table.\n");
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2521 READ_FCF_RECORD mailbox failed "
+ "with status x%x add_status x%x, mbx\n",
+ shdr_status, shdr_add_status);
+ }
goto out;
}
/* Interpreting the returned information of FCF records */
@@ -1698,7 +1708,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
+ spin_lock_irq(&phba->hbalock);
vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(&phba->hbalock);
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(vport);
@@ -2259,7 +2271,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->mbxStatus);
break;
}
+ spin_lock_irq(&phba->hbalock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(&phba->hbalock);
vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
/*
@@ -4475,8 +4490,10 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
lpfc_mbx_unreg_vpi(vports[i]);
+ spin_lock_irq(&phba->hbalock);
vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+ spin_unlock_irq(&phba->hbalock);
}
lpfc_destroy_vport_work_array(phba, vports);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1585148..8a2a1c5 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1013,7 +1013,7 @@ struct lpfc_mbx_wq_destroy {
};
#define LPFC_HDR_BUF_SIZE 128
-#define LPFC_DATA_BUF_SIZE 4096
+#define LPFC_DATA_BUF_SIZE 2048
struct rq_context {
uint32_t word0;
#define lpfc_rq_context_rq_size_SHIFT 16
@@ -1371,6 +1371,7 @@ struct lpfc_mbx_query_fw_cfg {
#define STATUS_ERROR_ACITMAIN 0x2a
#define STATUS_REBOOT_REQUIRED 0x2c
#define STATUS_FCF_IN_USE 0x3a
+#define STATUS_FCF_TABLE_EMPTY 0x43
struct lpfc_mbx_sli4_config {
struct mbox_header header;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 226920d..b8eb1b6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3006,6 +3006,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
+ uint32_t link_state;
phba->fc_eventTag = acqe_fcoe->event_tag;
phba->fcoe_eventtag = acqe_fcoe->event_tag;
@@ -3052,9 +3053,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
break;
/*
* Currently, driver support only one FCF - so treat this as
- * a link down.
+ * a link down, but save the link state because we don't want
+ * it to be changed to Link Down unless it is already down.
*/
+ link_state = phba->link_state;
lpfc_linkdown(phba);
+ phba->link_state = link_state;
/* Unregister FCF if no devices connected to it */
lpfc_unregister_unused_fcf(phba);
break;
@@ -4506,9 +4510,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
pdev = phba->pcidev;
/* Set the device DMA mask size */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
return error;
+ }
+ }
/* Get the bus address of Bar0 and Bar2 and the number of bytes
* required by each mapping.
@@ -6021,9 +6029,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
pdev = phba->pcidev;
/* Set the device DMA mask size */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
return error;
+ }
+ }
/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
* number of bytes required by each mapping. They are actually
@@ -7218,8 +7230,6 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2711 PCI channel permanent disable for failure\n");
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
}
@@ -7248,6 +7258,9 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
switch (state) {
case pci_channel_io_normal:
/* Non-fatal error, prepare for recovery */
@@ -7499,6 +7512,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENODEV;
goto out_free_sysfs_attr;
}
+ /* Default to single FCP EQ for non-MSI-X */
+ if (phba->intr_type != MSIX)
+ phba->cfg_fcp_eq_count = 1;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
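[Editor's note] Both lpfc_sli_pci_mem_setup() and lpfc_sli4_pci_mem_setup() above now set the coherent (consistent) DMA mask together with the streaming mask and fall back from 64-bit to 32-bit as a pair. A minimal sketch of that fallback; it returns a generic error code where the driver returns its previously-set error value:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative helper: try 64-bit streaming + coherent DMA masks, fall
 * back to 32-bit, and fail only if neither pair can be set. */
static int set_dma_masks(struct pci_dev *pdev)
{
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
		return 0;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
		return 0;

	return -EIO;
}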
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7935667..589549b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1383,7 +1383,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
- .entry_count = 200,
+ .entry_count = 256,
.mask_count = 0,
.profile = 0,
.ring_mask = (1 << LPFC_ELS_RING),
@@ -1482,8 +1482,11 @@ err:
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
- return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
- lpfc_hbq_defs[qno]->add_count));
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return 0;
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->add_count);
}
/**
@@ -1498,8 +1501,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
- return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
- lpfc_hbq_defs[qno]->init_count));
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->entry_count);
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->init_count);
}
/**
@@ -4110,6 +4117,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (rc) {
dma_free_coherent(&phba->pcidev->dev, dma_size,
dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
return -EIO;
}
@@ -5848,7 +5856,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.un.ulpWord[3]);
wqe->generic.word3 = 0;
bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
- bf_set(wqe_xc, &wqe->generic, 1);
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -10944,7 +10951,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
return dmabuf;
}
temp_hdr = seq_dmabuf->hbuf.virt;
- if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) <
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
list_del_init(&seq_dmabuf->hbuf.list);
list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
@@ -10955,6 +10963,11 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
seq_dmabuf->time_stamp = jiffies;
lpfc_update_rcv_time_stamp(vport);
+ if (list_empty(&seq_dmabuf->dbuf.list)) {
+ temp_hdr = dmabuf->hbuf.virt;
+ list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+ return seq_dmabuf;
+ }
/* find the correct place in the sequence to insert this frame */
list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10963,7 +10976,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
* If the frame's sequence count is greater than the frame on
* the list then insert the frame right after this frame
*/
- if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) >
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
return seq_dmabuf;
}
@@ -11210,7 +11224,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* If there is a hole in the sequence count then fail. */
- if (++seq_count != hdr->fh_seq_cnt)
+ if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
return 0;
fctl = (hdr->fh_f_ctl[0] << 16 |
hdr->fh_f_ctl[1] << 8 |
@@ -11242,6 +11256,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
struct lpfc_iocbq *first_iocbq, *iocbq;
struct fc_frame_header *fc_hdr;
uint32_t sid;
+ struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* remove from receive buffer list */
@@ -11283,8 +11298,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
if (!iocbq->context3) {
iocbq->context3 = d_buf;
iocbq->iocb.ulpBdeCount++;
- iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ pbde = (struct ulp_bde64 *)
+ &iocbq->iocb.unsli3.sli3Words[4];
+ pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
bf_get(lpfc_rcqe_length,
&seq_dmabuf->cq_event.cqe.rcqe_cmpl);
@@ -11401,15 +11417,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
return;
}
/* If not last frame in sequence continue processing frames. */
- if (!lpfc_seq_complete(seq_dmabuf)) {
- /*
- * When saving off frames post a new one and mark this
- * frame to be freed when it is finished.
- **/
- lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
- dmabuf->tag = -1;
+ if (!lpfc_seq_complete(seq_dmabuf))
return;
- }
+
/* Send the complete sequence to the upper layer protocol */
lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
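[Editor's note] The fh_seq_cnt hunks above fix an endianness bug: the FC frame header sequence count is a big-endian on-wire field (__be16), so ordering comparisons and the "+1 means no hole" check only work on CPU-order values. A one-function sketch of the corrected comparison; the helper name is illustrative:

#include <linux/kernel.h>
#include <linux/types.h>
#include <scsi/fc/fc_fs.h>	/* struct fc_frame_header, __be16 fh_seq_cnt */

/* Illustrative: convert fh_seq_cnt to CPU order before comparing, as
 * lpfc_fc_frame_add() and lpfc_seq_complete() now do above. */
static inline bool frame_seq_before(const struct fc_frame_header *a,
				    const struct fc_frame_header *b)
{
	return be16_to_cpu(a->fh_seq_cnt) < be16_to_cpu(b->fh_seq_cnt);
}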
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 25d66d0..44e5f57 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -28,7 +28,7 @@
/* Multi-queue arrangement for fast-path FCP work queues */
#define LPFC_FN_EQN_MAX 8
#define LPFC_SP_EQN_DEF 1
-#define LPFC_FP_EQN_DEF 1
+#define LPFC_FP_EQN_DEF 4
#define LPFC_FP_EQN_MIN 1
#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c7f3aed..792f722 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.6"
+#define LPFC_DRIVER_VERSION "8.3.7"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 7d6dd83..e3c7fa6 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -512,8 +512,10 @@ enable_vport(struct fc_vport *fc_vport)
return VPORT_OK;
}
+ spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(&phba->hbalock);
/* Use the Physical nodes Fabric NDLP to determine if the link is
* up and ready to FDISC.
@@ -700,7 +702,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
spin_unlock_irq(&phba->ndlp_lock);
}
- if (vport->vpi_state != LPFC_VPI_REGISTERED)
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
goto skip_logo;
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 134c63e..d9b8ca5 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -2501,7 +2501,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
instance->base_addr = pci_resource_start(instance->pdev, 0);
}
- if (pci_request_regions(instance->pdev, "megasas: LSI")) {
+ if (pci_request_selected_regions(instance->pdev,
+ pci_select_bars(instance->pdev, IORESOURCE_MEM),
+ "megasas: LSI")) {
printk(KERN_DEBUG "megasas: IO memory region busy!\n");
return -EBUSY;
}
@@ -2642,7 +2644,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
iounmap(instance->reg_set);
fail_ioremap:
- pci_release_regions(instance->pdev);
+ pci_release_selected_regions(instance->pdev,
+ pci_select_bars(instance->pdev, IORESOURCE_MEM));
return -EINVAL;
}
@@ -2662,7 +2665,8 @@ static void megasas_release_mfi(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_regions(instance->pdev);
+ pci_release_selected_regions(instance->pdev,
+ pci_select_bars(instance->pdev, IORESOURCE_MEM));
}
/**
@@ -2971,7 +2975,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/*
* PCI prepping: enable device set bus mastering and dma mask
*/
- rval = pci_enable_device(pdev);
+ rval = pci_enable_device_mem(pdev);
if (rval) {
return rval;
@@ -3276,7 +3280,7 @@ megasas_resume(struct pci_dev *pdev)
/*
* PCI prepping: enable device set bus mastering and dma mask
*/
- rval = pci_enable_device(pdev);
+ rval = pci_enable_device_mem(pdev);
if (rval) {
printk(KERN_ERR "megasas: Enable device failed\n");
@@ -3777,6 +3781,7 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
compat_alloc_user_space(sizeof(struct megasas_iocpacket));
int i;
int error = 0;
+ compat_uptr_t ptr;
if (clear_user(ioc, sizeof(*ioc)))
return -EFAULT;
@@ -3789,9 +3794,22 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
return -EFAULT;
- for (i = 0; i < MAX_IOCTL_SGE; i++) {
- compat_uptr_t ptr;
+ /*
+ * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
+ * sense_len is not null, so prepare the 64bit value under
+ * the same condition.
+ */
+ if (ioc->sense_len) {
+ void __user **sense_ioc_ptr =
+ (void __user **)(ioc->frame.raw + ioc->sense_off);
+ compat_uptr_t *sense_cioc_ptr =
+ (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ if (get_user(ptr, sense_cioc_ptr) ||
+ put_user(compat_ptr(ptr), sense_ioc_ptr))
+ return -EFAULT;
+ }
+ for (i = 0; i < MAX_IOCTL_SGE; i++) {
if (get_user(ptr, &cioc->sgl[i].iov_base) ||
put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
copy_in_user(&ioc->sgl[i].iov_len,
@@ -4042,7 +4060,7 @@ megasas_aen_polling(struct work_struct *work)
}
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO,
+static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
megasas_sysfs_show_poll_mode_io,
megasas_sysfs_set_poll_mode_io);
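[Editor's note] megaraid_sas above moves to memory-only PCI resource handling: pci_enable_device_mem() enables just the memory BARs, and pci_select_bars(pdev, IORESOURCE_MEM) builds the BAR mask handed to pci_request_selected_regions()/pci_release_selected_regions(), so the driver no longer claims I/O-port regions it does not use. A minimal sketch of the enable/request and release pairing; the helper names and region name string are illustrative:

#include <linux/pci.h>
#include <linux/ioport.h>

static int claim_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
	int rc;

	rc = pci_enable_device_mem(pdev);	/* enable memory BARs only */
	if (rc)
		return rc;

	rc = pci_request_selected_regions(pdev, bars, "example: mem BARs");
	if (rc)
		pci_disable_device(pdev);
	return rc;
}

static void release_mem_bars(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
	pci_disable_device(pdev);
}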
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6422e25..89d0240 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3583,6 +3583,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
mutex_init(&ioc->transport_cmds.mutex);
+ /* scsih internal command bits */
+ ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_init(&ioc->scsih_cmds.mutex);
+
/* task management internal command bits */
ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index c790d45..cae6b2c 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
+ { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
{ } /* terminate list */
};
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 950202a..2422347 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -432,30 +432,23 @@ static void _osd_free_seg(struct osd_request *or __unused,
seg->alloc_size = 0;
}
-static void _put_request(struct request *rq , bool is_async)
+static void _put_request(struct request *rq)
{
- if (is_async) {
- WARN_ON(rq->bio);
- __blk_put_request(rq->q, rq);
- } else {
- /*
- * If osd_finalize_request() was called but the request was not
- * executed through the block layer, then we must release BIOs.
- * TODO: Keep error code in or->async_error. Need to audit all
- * code paths.
- */
- if (unlikely(rq->bio))
- blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
- else
- blk_put_request(rq);
- }
+ /*
+ * If osd_finalize_request() was called but the request was not
+ * executed through the block layer, then we must release BIOs.
+ * TODO: Keep error code in or->async_error. Need to audit all
+ * code paths.
+ */
+ if (unlikely(rq->bio))
+ blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
+ else
+ blk_put_request(rq);
}
void osd_end_request(struct osd_request *or)
{
struct request *rq = or->request;
- /* IMPORTANT: make sure this agrees with osd_execute_request_async */
- bool is_async = (or->request->end_io_data == or);
_osd_free_seg(or, &or->set_attr);
_osd_free_seg(or, &or->enc_get_attr);
@@ -463,20 +456,34 @@ void osd_end_request(struct osd_request *or)
if (rq) {
if (rq->next_rq) {
- _put_request(rq->next_rq, is_async);
+ _put_request(rq->next_rq);
rq->next_rq = NULL;
}
- _put_request(rq, is_async);
+ _put_request(rq);
}
_osd_request_free(or);
}
EXPORT_SYMBOL(osd_end_request);
+static void _set_error_resid(struct osd_request *or, struct request *req,
+ int error)
+{
+ or->async_error = error;
+ or->req_errors = req->errors ? : error;
+ or->sense_len = req->sense_len;
+ if (or->out.req)
+ or->out.residual = or->out.req->resid_len;
+ if (or->in.req)
+ or->in.residual = or->in.req->resid_len;
+}
+
int osd_execute_request(struct osd_request *or)
{
- return or->async_error =
- blk_execute_rq(or->request->q, NULL, or->request, 0);
+ int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
+
+ _set_error_resid(or, or->request, error);
+ return error;
}
EXPORT_SYMBOL(osd_execute_request);
@@ -484,15 +491,17 @@ static void osd_request_async_done(struct request *req, int error)
{
struct osd_request *or = req->end_io_data;
- or->async_error = error;
-
- if (unlikely(error)) {
- OSD_DEBUG("osd_request_async_done error recieved %d "
- "errors 0x%x\n", error, req->errors);
- if (!req->errors) /* don't miss out on this one */
- req->errors = error;
+ _set_error_resid(or, req, error);
+ if (req->next_rq) {
+ __blk_put_request(req->q, req->next_rq);
+ req->next_rq = NULL;
}
+ __blk_put_request(req->q, req);
+ or->request = NULL;
+ or->in.req = NULL;
+ or->out.req = NULL;
+
if (or->async_done)
or->async_done(or, or->async_private);
else
@@ -1489,21 +1498,18 @@ int osd_req_decode_sense_full(struct osd_request *or,
#endif
int ret;
- if (likely(!or->request->errors)) {
- osi->out_resid = 0;
- osi->in_resid = 0;
+ if (likely(!or->req_errors))
return 0;
- }
osi = osi ? : &local_osi;
memset(osi, 0, sizeof(*osi));
- ssdb = or->request->sense;
- sense_len = or->request->sense_len;
+ ssdb = (typeof(ssdb))or->sense;
+ sense_len = or->sense_len;
if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
OSD_ERR("Block-layer returned error(0x%x) but "
"sense_len(%u) || key(%d) is empty\n",
- or->request->errors, sense_len, ssdb->sense_key);
+ or->req_errors, sense_len, ssdb->sense_key);
goto analyze;
}
@@ -1525,7 +1531,7 @@ int osd_req_decode_sense_full(struct osd_request *or,
"additional_code=0x%x async_error=%d errors=0x%x\n",
osi->key, original_sense_len, sense_len,
osi->additional_code, or->async_error,
- or->request->errors);
+ or->req_errors);
if (original_sense_len < sense_len)
sense_len = original_sense_len;
@@ -1695,10 +1701,10 @@ analyze:
ret = -EIO;
}
- if (or->out.req)
- osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes;
- if (or->in.req)
- osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes;
+ if (!or->out.residual)
+ or->out.residual = or->out.total_bytes;
+ if (!or->in.residual)
+ or->in.residual = or->in.total_bytes;
return ret;
}
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index 22644de..63ad4aa 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -45,16 +45,6 @@
#define HEADER_LEN 28
#define SIZE_OFFSET 16
-struct pm8001_ioctl_payload {
- u32 signature;
- u16 major_function;
- u16 minor_function;
- u16 length;
- u16 status;
- u16 offset;
- u16 id;
- u8 func_specific[1];
-};
#define FLASH_OK 0x000000
#define FAIL_OPEN_BIOS_FILE 0x000100
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index a3de306..9b44c6f 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -373,10 +373,7 @@ static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
static void __devinit
mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
{
- u32 offset;
- u32 value;
- u32 i, j;
- u32 bit_cnt;
+ u32 value, offset, i;
#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -392,55 +389,35 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
*/
if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
return;
- /* set SSC bit of PHY 0 - 3 */
+
for (i = 0; i < 4; i++) {
offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
- value = pm8001_cr32(pm8001_ha, 2, offset);
- if (SSCbit) {
- value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
- value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
- } else {
- value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
- value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
- }
- bit_cnt = 0;
- for (j = 0; j < 31; j++)
- if ((value >> j) & 0x00000001)
- bit_cnt++;
- if (bit_cnt % 2)
- value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
- else
- value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
-
- pm8001_cw32(pm8001_ha, 2, offset, value);
+ pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
}
-
/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
return;
-
- /* set SSC bit of PHY 4 - 7 */
for (i = 4; i < 8; i++) {
offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
- value = pm8001_cr32(pm8001_ha, 2, offset);
- if (SSCbit) {
- value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
- value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
- } else {
- value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
- value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
- }
- bit_cnt = 0;
- for (j = 0; j < 31; j++)
- if ((value >> j) & 0x00000001)
- bit_cnt++;
- if (bit_cnt % 2)
- value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
- else
- value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
-
- pm8001_cw32(pm8001_ha, 2, offset, value);
+ pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
}
+ /*************************************************************
+ Change the SSC upspreading value to 0x0 so that upspreading is disabled.
+ Device MABC SMOD0 Controls
+ Address: (via MEMBASE-III):
+ Using shifted destination address 0x0_0000: with Offset 0xD8
+
+ 31:28 R/W Reserved Do not change
+ 27:24 R/W SAS_SMOD_SPRDUP 0000
+ 23:20 R/W SAS_SMOD_SPRDDN 0000
+ 19:0 R/W Reserved Do not change
+ Upon power-up this register will read as 0x8990c016,
+ and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
+ so that the written value will be 0x8090c016.
+ This will ensure only down-spreading SSC is enabled on the SPC.
+ *************************************************************/
+ value = pm8001_cr32(pm8001_ha, 2, 0xd8);
+ pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
/*set the shifted destination address to 0x0 to avoid error operation */
bar4_shift(pm8001_ha, 0x0);
@@ -1901,7 +1878,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
- unsigned long flags;
+ unsigned long flags = 0;
u32 param;
u32 status;
u32 tag;
@@ -2040,7 +2017,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*in order to force CPU ordering*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2058,7 +2037,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2084,7 +2065,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2149,7 +2132,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2171,7 +2156,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2200,11 +2187,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
+ } else if (t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ } else if (!t->uldd_task) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
}
}
@@ -2212,7 +2208,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
{
struct sas_task *t;
- unsigned long flags;
+ unsigned long flags = 0;
struct task_status_struct *ts;
struct pm8001_ccb_info *ccb;
struct pm8001_device *pm8001_dev;
@@ -2292,7 +2288,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2401,11 +2399,20 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
+ } else if (t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
+ mb();/* ditto */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ } else if (!t->uldd_task) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
}
}
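[Editor's note] Several mpi_sata_completion()/mpi_sata_event() paths above now drop pm8001_ha->lock around the t->task_done() callback, presumably because the completion handler can call back into the LLDD and try to take the same HBA lock. A reduced sketch of that pattern, assuming the caller entered with the lock held; only the lock's role corresponds to the driver, all names are illustrative:

#include <linux/spinlock.h>

struct hba_sketch {
	spinlock_t lock;		/* stands in for pm8001_ha->lock */
};

struct task_sketch {
	void (*task_done)(struct task_sketch *t);
};

/* Illustrative completion path: entered with hba->lock held and *flags
 * holding the saved IRQ state.  The callback runs with the lock dropped,
 * and the lock is re-taken before returning, mirroring the hunks above. */
static void complete_task_locked(struct hba_sketch *hba,
				 struct task_sketch *t,
				 unsigned long *flags)
{
	spin_unlock_irqrestore(&hba->lock, *flags);
	t->task_done(t);
	spin_lock_irqsave(&hba->lock, *flags);
}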
@@ -2876,15 +2883,20 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
u8 link_rate =
(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+ u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
u8 phy_id =
(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+ u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
-
+ port->port_state = portstate;
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SAS_PHY_UP \n"));
+ pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
+ port_id, phy_id));
switch (deviceType) {
case SAS_PHY_UNUSED:
@@ -2895,16 +2907,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
PHY_NOTIFY_ENABLE_SPINUP);
+ port->port_attached = 1;
get_lrate_mode(phy, link_rate);
break;
case SAS_EDGE_EXPANDER_DEVICE:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("expander device.\n"));
+ port->port_attached = 1;
get_lrate_mode(phy, link_rate);
break;
case SAS_FANOUT_EXPANDER_DEVICE:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("fanout expander device.\n"));
+ port->port_attached = 1;
get_lrate_mode(phy, link_rate);
break;
default:
@@ -2946,11 +2961,20 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
u8 link_rate =
(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+ u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
u8 phy_id =
(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+ u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
+ " phy id = %d\n", port_id, phy_id));
+ port->port_state = portstate;
+ port->port_attached = 1;
get_lrate_mode(phy, link_rate);
phy->phy_type |= PORT_TYPE_SATA;
phy->phy_attached = 1;
@@ -2984,7 +3008,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
u8 portstate = (u8)(npip_portstate & 0x0000000F);
-
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ port->port_state = portstate;
+ phy->phy_type = 0;
+ phy->identify.device_type = 0;
+ phy->phy_attached = 0;
+ memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
switch (portstate) {
case PORT_VALID:
break;
@@ -2993,26 +3023,30 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk(" PortInvalid portID %d \n", port_id));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
+ port->port_attached = 0;
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
break;
case PORT_IN_RESET:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" PortInReset portID %d \n", port_id));
+ pm8001_printk(" Port In Reset portID %d \n", port_id));
break;
case PORT_NOT_ESTABLISHED:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+ port->port_attached = 0;
break;
case PORT_LOSTCOMM:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
+ port->port_attached = 0;
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
break;
default:
+ port->port_attached = 0;
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" phy Down and(default) = %x\n",
portstate));
@@ -3770,7 +3804,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
- ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for
+ ssp_cmd.dir_m_tlr =
+ cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
SAS 1.1 compatible TLR*/
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
@@ -3841,7 +3876,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
- ncg_tag = cpu_to_le32(hdr_tag);
+ ncg_tag = hdr_tag;
dir = data_dir_flags[task->data_dir] << 8;
sata_cmd.tag = cpu_to_le32(tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -3986,7 +4021,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
((stp_sspsmp_sata & 0x03) * 0x10000000));
payload.firstburstsize_ITNexustimeout =
cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
- memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr,
+ memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
return rc;
@@ -4027,7 +4062,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ;
int ret;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
- memset((u8 *)&payload, 0, sizeof(payload));
+ memset(&payload, 0, sizeof(payload));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = 1;
payload.phyop_phyid =
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 96e4daa..833a520 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -242,8 +242,7 @@ struct reg_dev_req {
__le32 phyid_portid;
__le32 dtype_dlr_retry;
__le32 firstburstsize_ITNexustimeout;
- u32 sas_addr_hi;
- u32 sas_addr_low;
+ u8 sas_addr[SAS_ADDR_SIZE];
__le32 upper_device_id;
u32 reserved[8];
} __attribute__((packed, aligned(4)));
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 42ebe72..c2f1032 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -200,8 +200,13 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
{
int i;
spin_lock_init(&pm8001_ha->lock);
- for (i = 0; i < pm8001_ha->chip->n_phy; i++)
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
pm8001_phy_init(pm8001_ha, i);
+ pm8001_ha->port[i].wide_port_phymap = 0;
+ pm8001_ha->port[i].port_attached = 0;
+ pm8001_ha->port[i].port_state = 0;
+ INIT_LIST_HEAD(&pm8001_ha->port[i].list);
+ }
pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
if (!pm8001_ha->tags)
@@ -511,19 +516,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
u8 i;
#ifdef PM8001_READ_VPD
DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_ioctl_payload payload;
pm8001_ha->nvmd_completion = &completion;
- PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0);
+ payload.minor_function = 0;
+ payload.length = 128;
+ payload.func_specific = kzalloc(128, GFP_KERNEL);
+ PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
wait_for_completion(&completion);
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
SAS_ADDR_SIZE);
PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("phy %d sas_addr = %x \n", i,
- (u64)pm8001_ha->phy[i].dev_sas_addr));
+ pm8001_printk("phy %d sas_addr = %016llx \n", i,
+ pm8001_ha->phy[i].dev_sas_addr));
}
#else
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
- pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL;
+ pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
pm8001_ha->phy[i].dev_sas_addr =
cpu_to_be64((u64)
(*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 1f767a0..7f9c83a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -329,6 +329,23 @@ int pm8001_slave_configure(struct scsi_device *sdev)
}
return 0;
}
+ /* Find the local port id that's attached to this device */
+static int sas_find_local_port_id(struct domain_device *dev)
+{
+ struct domain_device *pdev = dev->parent;
+
+ /* Directly attached device */
+ if (!pdev)
+ return dev->port->id;
+ while (pdev) {
+ struct domain_device *pdev_p = pdev->parent;
+ if (!pdev_p)
+ return pdev->port->id;
+ pdev = pdev->parent;
+ }
+ return 0;
+}
+
/**
* pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
* @task: the task to be execute.
@@ -346,11 +363,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
struct domain_device *dev = task->dev;
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev;
+ struct pm8001_port *port = NULL;
struct sas_task *t = task;
struct pm8001_ccb_info *ccb;
u32 tag = 0xdeadbeef, rc, n_elem = 0;
u32 n = num;
- unsigned long flags = 0;
+ unsigned long flags = 0, flags_libsas = 0;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
@@ -379,6 +397,35 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
rc = SAS_PHY_DOWN;
goto out_done;
}
+ port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+ if (!port->port_attached) {
+ if (sas_protocol_ata(t->task_proto)) {
+ struct task_status_struct *ts = &t->task_status;
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock,
+ flags_libsas);
+ t->task_done(t);
+ spin_lock_irqsave(dev->sata_dev.ap->lock,
+ flags_libsas);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (n > 1)
+ t = list_entry(t->list.next,
+ struct sas_task, list);
+ continue;
+ } else {
+ struct task_status_struct *ts = &t->task_status;
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ t->task_done(t);
+ if (n > 1)
+ t = list_entry(t->list.next,
+ struct sas_task, list);
+ continue;
+ }
+ }
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
goto err_out;
@@ -569,11 +616,11 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_device = pm8001_alloc_dev(pm8001_ha);
- pm8001_device->sas_device = dev;
if (!pm8001_device) {
res = -1;
goto found_out;
}
+ pm8001_device->sas_device = dev;
dev->lldd_dev = pm8001_device;
pm8001_device->dev_type = dev->dev_type;
pm8001_device->dcompletion = &completion;
@@ -609,7 +656,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
wait_for_completion(&completion);
if (dev->dev_type == SAS_END_DEV)
msleep(50);
- pm8001_ha->flags = PM8001F_RUN_TIME ;
+ pm8001_ha->flags |= PM8001F_RUN_TIME ;
return 0;
found_out:
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -772,7 +819,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
task->task_done = pm8001_task_done;
task->timer.data = (unsigned long)task;
task->timer.function = pm8001_tmf_timedout;
- task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
+ task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
add_timer(&task->timer);
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
@@ -897,6 +944,8 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
if (dev_is_sata(dev)) {
DECLARE_COMPLETION_ONSTACK(completion_setstate);
+ if (scsi_is_sas_phy_local(phy))
+ return 0;
rc = sas_phy_reset(phy, 1);
msleep(2000);
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 30f2ede..8e38ca8 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -59,11 +59,11 @@
#define DRV_NAME "pm8001"
#define DRV_VERSION "0.1.36"
-#define PM8001_FAIL_LOGGING 0x01 /* libsas EH function logging */
+#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
#define PM8001_IO_LOGGING 0x08 /* I/O path logging */
-#define PM8001_EH_LOGGING 0x10 /* Error message logging */
+#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\
@@ -100,6 +100,7 @@ do { \
#define PM8001_USE_TASKLET
#define PM8001_USE_MSIX
+#define PM8001_READ_VPD
#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV))
@@ -111,7 +112,22 @@ extern const struct pm8001_dispatch pm8001_8001_dispatch;
struct pm8001_hba_info;
struct pm8001_ccb_info;
struct pm8001_device;
-struct pm8001_tmf_task;
+/* define task management IU */
+struct pm8001_tmf_task {
+ u8 tmf;
+ u32 tag_of_task_to_be_managed;
+};
+struct pm8001_ioctl_payload {
+ u32 signature;
+ u16 major_function;
+ u16 minor_function;
+ u16 length;
+ u16 status;
+ u16 offset;
+ u16 id;
+ u8 *func_specific;
+};
+
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -164,6 +180,10 @@ struct pm8001_chip_info {
struct pm8001_port {
struct asd_sas_port sas_port;
+ u8 port_attached;
+ u8 wide_port_phymap;
+ u8 port_state;
+ struct list_head list;
};
struct pm8001_phy {
@@ -386,11 +406,7 @@ struct pm8001_fw_image_header {
__be32 startup_entry;
} __attribute__((packed, aligned(4)));
-/* define task management IU */
-struct pm8001_tmf_task {
- u8 tmf;
- u32 tag_of_task_to_be_managed;
-};
+
/**
* FW Flash Update status values
*/
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 34c6b89..b6f1ef9 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1,7 +1,8 @@
/*
* pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
*
- * Written By: PMC Sierra Corporation
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ * PMC-Sierra Inc
*
* Copyright (C) 2008, 2009 PMC Sierra Inc
*
@@ -79,7 +80,7 @@ DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
/*
* Module parameters
*/
-MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com");
+MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(PMCRAID_DRIVER_VERSION);
@@ -162,10 +163,10 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry(temp, &pinstance->used_res_q, queue) {
- /* do not expose VSETs with order-ids >= 240 */
+ /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
if (RES_IS_VSET(temp->cfg_entry)) {
target = temp->cfg_entry.unique_flags1;
- if (target >= PMCRAID_MAX_VSET_TARGETS)
+ if (target > PMCRAID_MAX_VSET_TARGETS)
continue;
bus = PMCRAID_VSET_BUS_ID;
lun = 0;
@@ -1210,7 +1211,7 @@ static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
int retval = 0;
if (cfgte->resource_type == RES_TYPE_VSET)
- retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE);
+ retval = ((cfgte->unique_flags1 & 0x80) == 0);
else if (cfgte->resource_type == RES_TYPE_GSCSI)
retval = (RES_BUS(cfgte->resource_address) !=
PMCRAID_VIRTUAL_ENCL_BUS_ID);
@@ -1361,6 +1362,7 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
* Return value:
* none
*/
+
static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
{
struct pmcraid_config_table_entry *cfg_entry;
@@ -1368,9 +1370,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
struct pmcraid_cmd *cmd;
struct pmcraid_cmd *cfgcmd;
struct pmcraid_resource_entry *res = NULL;
- u32 new_entry = 1;
unsigned long lock_flags;
unsigned long host_lock_flags;
+ u32 new_entry = 1;
+ u32 hidden_entry = 0;
int rc;
ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
@@ -1406,9 +1409,15 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
}
/* If this resource is not going to be added to mid-layer, just notify
- * applications and return
+ * applications and return. If this notification is about hiding a VSET
+ * resource, check if it was exposed already.
*/
- if (!pmcraid_expose_resource(cfg_entry))
+ if (pinstance->ccn.hcam->notification_type ==
+ NOTIFICATION_TYPE_ENTRY_CHANGED &&
+ cfg_entry->resource_type == RES_TYPE_VSET &&
+ cfg_entry->unique_flags1 & 0x80) {
+ hidden_entry = 1;
+ } else if (!pmcraid_expose_resource(cfg_entry))
goto out_notify_apps;
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
@@ -1424,6 +1433,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
if (new_entry) {
+ if (hidden_entry) {
+ spin_unlock_irqrestore(&pinstance->resource_lock,
+ lock_flags);
+ goto out_notify_apps;
+ }
+
/* If there are more number of resources than what driver can
* manage, do not notify the applications about the CCN. Just
* ignore this notifications and re-register the same HCAM
@@ -1454,8 +1469,9 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
sizeof(struct pmcraid_config_table_entry));
if (pinstance->ccn.hcam->notification_type ==
- NOTIFICATION_TYPE_ENTRY_DELETED) {
+ NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
if (res->scsi_dev) {
+ res->cfg_entry.unique_flags1 &= 0x7F;
res->change_detected = RES_CHANGE_DEL;
res->cfg_entry.resource_handle =
PMCRAID_INVALID_RES_HANDLE;
@@ -2467,14 +2483,12 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
sense_copied = 1;
}
- if (RES_IS_GSCSI(res->cfg_entry)) {
+ if (RES_IS_GSCSI(res->cfg_entry))
pmcraid_cancel_all(cmd, sense_copied);
- } else if (sense_copied) {
+ else if (sense_copied)
pmcraid_erp_done(cmd);
- return 0;
- } else {
+ else
pmcraid_request_sense(cmd);
- }
return 1;
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 2752b56..92f89d5 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1,6 +1,9 @@
/*
* pmcraid.h -- PMC Sierra MaxRAID controller driver header file
*
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ * PMC-Sierra Inc
+ *
* Copyright (C) 2008, 2009 PMC Sierra Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -106,7 +109,7 @@
#define PMCRAID_VSET_LUN_ID 0x0
#define PMCRAID_PHYS_BUS_ID 0x0
#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
-#define PMCRAID_MAX_VSET_TARGETS 240
+#define PMCRAID_MAX_VSET_TARGETS 0x7F
#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8
#define PMCRAID_IOA_MAX_SECTORS 32767
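The pmcraid hunks above repurpose bit 7 of unique_flags1 as a "hidden VSET" marker, which is why PMCRAID_MAX_VSET_TARGETS drops to 0x7F and the delete path masks the flags with 0x7F. A minimal stand-alone sketch of that convention (the names PMCRAID_VSET_HIDDEN and vset_target_id are illustrative only, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define PMCRAID_MAX_VSET_TARGETS 0x7F
#define PMCRAID_VSET_HIDDEN      0x80   /* hypothetical name for bit 7 */

/* Target id is carried in the low 7 bits of unique_flags1. */
static uint8_t vset_target_id(uint8_t unique_flags1)
{
        return unique_flags1 & PMCRAID_MAX_VSET_TARGETS;
}

/* A VSET is exposed to the SCSI mid-layer only while bit 7 is clear. */
static int vset_is_exposed(uint8_t unique_flags1)
{
        return (unique_flags1 & PMCRAID_VSET_HIDDEN) == 0;
}

int main(void)
{
        uint8_t flags = 0x85;   /* hidden VSET with target id 5 */

        printf("target %u exposed=%d\n", vset_target_id(flags),
               vset_is_exposed(flags));
        return 0;
}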
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 21e2bc4..3a9f5b2 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -232,6 +232,9 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
if (off)
return 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
return -EINVAL;
if (start > ha->optrom_size)
@@ -379,6 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN))
return 0;
@@ -398,6 +404,9 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
uint8_t *tmp_data;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -1238,10 +1247,11 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
uint16_t state[5];
- rval = qla2x00_get_firmware_state(vha, state);
+ if (!vha->hw->flags.eeh_busy)
+ rval = qla2x00_get_firmware_state(vha, state);
if (rval != QLA_SUCCESS)
memset(state, -1, sizeof(state));
@@ -1452,10 +1462,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!fcport)
return;
- if (unlikely(pci_channel_offline(fcport->vha->hw->pdev)))
+ if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+ return;
+
+ if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
- else
- qla2x00_abort_fcport_cmds(fcport);
+ return;
+ }
/*
* Transport has effectively 'deleted' the rport, clear
@@ -1475,6 +1488,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
if (!fcport)
return;
+ if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+ return;
+
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
return;
@@ -1515,6 +1531,12 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat = &ha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ goto done;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto done;
+
stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
if (stats == NULL) {
DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f660dd7..d6d9c86 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -26,7 +26,7 @@
/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
-/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */
+/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
/*
* Macros use for debugging the driver.
@@ -132,6 +132,13 @@
#else
#define DEBUG16(x) do {} while (0)
#endif
+
+#if defined(QL_DEBUG_LEVEL_17)
+#define DEBUG17(x) do {x;} while (0)
+#else
+#define DEBUG17(x) do {} while (0)
+#endif
+
/*
* Firmware Dump structure definition
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6b9bf23..1263d97 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1570,9 +1570,6 @@ typedef struct fc_port {
struct fc_rport *rport, *drport;
u32 supported_classes;
- unsigned long last_queue_full;
- unsigned long last_ramp_up;
-
uint16_t vp_idx;
} fc_port_t;
@@ -1589,8 +1586,7 @@ typedef struct fc_port {
*/
#define FCF_FABRIC_DEVICE BIT_0
#define FCF_LOGIN_NEEDED BIT_1
-#define FCF_TAPE_PRESENT BIT_2
-#define FCF_FCP2_DEVICE BIT_3
+#define FCF_FCP2_DEVICE BIT_2
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2259,12 +2255,15 @@ struct qla_hw_data {
uint32_t disable_serdes :1;
uint32_t gpsc_supported :1;
uint32_t npiv_supported :1;
+ uint32_t pci_channel_io_perm_failure :1;
uint32_t fce_enabled :1;
uint32_t fac_supported :1;
uint32_t chip_reset_done :1;
uint32_t port0 :1;
uint32_t running_gold_fw :1;
+ uint32_t eeh_busy :1;
uint32_t cpu_affinity_enabled :1;
+ uint32_t disable_msix_handshake :1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2387,6 +2386,7 @@ struct qla_hw_data {
#define IS_QLA81XX(ha) (IS_QLA8001(ha))
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
IS_QLA25XX(ha) || IS_QLA81XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
(ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e218513..8bc6f53 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,8 +72,6 @@ extern int ql2xloginretrycount;
extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
-extern int ql2xqfullrampup;
-extern int ql2xqfulltracking;
extern int ql2xiidmaenable;
extern int ql2xmaxqueues;
extern int ql2xmultique_tag;
@@ -326,6 +324,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
extern int
qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -454,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b74924b..3f8e849 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -205,7 +205,7 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
switch (data[0]) {
case MBS_COMMAND_COMPLETE:
- if (fcport->flags & FCF_TAPE_PRESENT)
+ if (fcport->flags & FCF_FCP2_DEVICE)
opts |= BIT_1;
rval = qla2x00_get_port_database(vha, fcport, opts);
if (rval != QLA_SUCCESS)
@@ -269,6 +269,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
vha->flags.reset_active = 0;
+ ha->flags.pci_channel_io_perm_failure = 0;
+ ha->flags.eeh_busy = 0;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -581,6 +583,9 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
uint32_t cnt;
uint16_t cmd;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return;
+
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -786,6 +791,12 @@ void
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+
+ if (pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure) {
+ return;
+ }
+
ha->isp_ops->disable_intrs(ha);
/* Perform RISC reset. */
@@ -1442,7 +1453,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->firmware_options_2 |=
__constant_cpu_to_le32(BIT_18);
- icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22);
+ /* Use Disable MSIX Handshake mode for capable adapters */
+ if (IS_MSIX_NACK_CAPABLE(ha)) {
+ icb->firmware_options_2 &=
+ __constant_cpu_to_le32(~BIT_22);
+ ha->flags.disable_msix_handshake = 1;
+ qla_printk(KERN_INFO, ha,
+ "MSIX Handshake Disable Mode turned on\n");
+ } else {
+ icb->firmware_options_2 |=
+ __constant_cpu_to_le32(BIT_22);
+ }
icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
@@ -2256,6 +2277,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
clear_bit(RSCN_UPDATE, &vha->dpc_flags);
+ qla2x00_get_data_rate(vha);
+
/* Determine what we need to do */
if (ha->current_topology == ISP_CFG_FL &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
@@ -2703,7 +2726,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
/*
* Logout all previous fabric devices marked lost, except
- * tape devices.
+ * FCP2 devices.
*/
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
@@ -2716,7 +2739,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
ha->isp_ops->fabric_logout(vha,
@@ -2995,7 +3018,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
fcport->d_id.b24 = new_fcport->d_id.b24;
fcport->flags |= FCF_LOGIN_NEEDED;
if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3249,9 +3272,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
rval = qla2x00_fabric_login(vha, fcport, next_loopid);
if (rval == QLA_SUCCESS) {
- /* Send an ADISC to tape devices.*/
+ /* Send an ADISC to FCP2 devices.*/
opts = 0;
- if (fcport->flags & FCF_TAPE_PRESENT)
+ if (fcport->flags & FCF_FCP2_DEVICE)
opts |= BIT_1;
rval = qla2x00_get_port_database(vha, fcport, opts);
if (rval != QLA_SUCCESS) {
@@ -3550,6 +3573,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
/* Requeue all commands in outstanding command list. */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ if (unlikely(pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure)) {
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ status = 0;
+ return status;
+ }
+
ha->isp_ops->get_flash_version(vha, req->ring);
ha->isp_ops->nvram_config(vha);
@@ -4448,6 +4478,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
int ret, retries;
struct qla_hw_data *ha = vha->hw;
+ if (ha->flags.pci_channel_io_perm_failure)
+ return;
if (!IS_FWI2_CAPABLE(ha))
return;
if (!ha->fw_major_version)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 8049873..6fc63b9 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -152,7 +152,7 @@ qla2300_intr_handler(int irq, void *dev_id)
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED) {
- if (pci_channel_offline(ha->pdev))
+ if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_WORD(&reg->hccr);
@@ -811,78 +811,6 @@ skip_rio:
qla2x00_alert_all_vps(rsp, mb);
}
-static void
-qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
-{
- fc_port_t *fcport = data;
- struct scsi_qla_host *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = NULL;
-
- if (!ql2xqfulltracking)
- return;
-
- req = vha->req;
- if (!req)
- return;
- if (req->max_q_depth <= sdev->queue_depth)
- return;
-
- if (sdev->ordered_tags)
- scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
- sdev->queue_depth + 1);
- else
- scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
- sdev->queue_depth + 1);
-
- fcport->last_ramp_up = jiffies;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
- fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
- sdev->queue_depth));
-}
-
-static void
-qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
-{
- fc_port_t *fcport = data;
-
- if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
- return;
-
- DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
- "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
- fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
- sdev->queue_depth));
-}
-
-static inline void
-qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
- srb_t *sp)
-{
- fc_port_t *fcport;
- struct scsi_device *sdev;
-
- if (!ql2xqfulltracking)
- return;
-
- sdev = sp->cmd->device;
- if (sdev->queue_depth >= req->max_q_depth)
- return;
-
- fcport = sp->fcport;
- if (time_before(jiffies,
- fcport->last_ramp_up + ql2xqfullrampup * HZ))
- return;
- if (time_before(jiffies,
- fcport->last_queue_full + ql2xqfullrampup * HZ))
- return;
-
- starget_for_each_device(sdev->sdev_target, fcport,
- qla2x00_adjust_sdev_qdepth_up);
-}
-
/**
* qla2x00_process_completed_request() - Process a Fast Post response.
* @ha: SCSI driver HA context
@@ -913,8 +841,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
/* Save ISP completion status */
sp->cmd->result = DID_OK << 16;
-
- qla2x00_ramp_up_queue_depth(vha, req, sp);
qla2x00_sp_compl(ha, sp);
} else {
DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
@@ -1435,13 +1361,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"scsi(%ld): QUEUE FULL status detected "
"0x%x-0x%x.\n", vha->host_no, comp_status,
scsi_status));
-
- /* Adjust queue depth for all luns on the port. */
- if (!ql2xqfulltracking)
- break;
- fcport->last_queue_full = jiffies;
- starget_for_each_device(cp->device->sdev_target,
- fcport, qla2x00_adjust_sdev_qdepth_down);
break;
}
if (lscsi_status != SS_CHECK_CONDITION)
@@ -1516,17 +1435,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"scsi(%ld): QUEUE FULL status detected "
"0x%x-0x%x.\n", vha->host_no, comp_status,
scsi_status));
-
- /*
- * Adjust queue depth for all luns on the
- * port.
- */
- if (!ql2xqfulltracking)
- break;
- fcport->last_queue_full = jiffies;
- starget_for_each_device(
- cp->device->sdev_target, fcport,
- qla2x00_adjust_sdev_qdepth_down);
break;
}
if (lscsi_status != SS_CHECK_CONDITION)
@@ -1938,12 +1846,15 @@ qla24xx_intr_handler(int irq, void *dev_id)
reg = &ha->iobase->isp24;
status = 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
- if (pci_channel_offline(ha->pdev))
+ if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_DWORD(&reg->hccr);
@@ -2006,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
struct rsp_que *rsp;
struct device_reg_24xx __iomem *reg;
struct scsi_qla_host *vha;
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2016,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
ha = rsp->hw;
reg = &ha->iobase->isp24;
- spin_lock_irq(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
- vha = qla25xx_get_host(rsp);
+ vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
- if (!ha->mqenable) {
+ if (!ha->flags.disable_msix_handshake) {
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
}
- spin_unlock_irq(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
}
@@ -2034,6 +1946,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
{
struct qla_hw_data *ha;
struct rsp_que *rsp;
+ struct device_reg_24xx __iomem *reg;
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2043,6 +1957,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
}
ha = rsp->hw;
+ /* Clear the interrupt, if enabled, for this response queue */
+ if (rsp->options & ~BIT_6) {
+ reg = &ha->iobase->isp24;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD_RELAXED(&reg->hccr);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
return IRQ_HANDLED;
@@ -2059,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id)
uint32_t stat;
uint32_t hccr;
uint16_t mb[4];
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2070,12 +1993,12 @@ qla24xx_msix_default(int irq, void *dev_id)
reg = &ha->iobase->isp24;
status = 0;
- spin_lock_irq(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
- if (pci_channel_offline(ha->pdev))
+ if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_DWORD(&reg->hccr);
@@ -2119,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id)
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
} while (0);
- spin_unlock_irq(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -2357,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
msix->rsp = rsp;
return ret;
}
-
-struct scsi_qla_host *
-qla25xx_get_host(struct rsp_que *rsp)
-{
- srb_t *sp;
- struct qla_hw_data *ha = rsp->hw;
- struct scsi_qla_host *vha = NULL;
- struct sts_entry_24xx *pkt;
- struct req_que *req;
- uint16_t que;
- uint32_t handle;
-
- pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
- que = MSW(pkt->handle);
- handle = (uint32_t) LSW(pkt->handle);
- req = ha->req_q_map[que];
- if (handle < MAX_OUTSTANDING_COMMANDS) {
- sp = req->outstanding_cmds[handle];
- if (sp)
- return sp->fcport->vha;
- else
- goto base_que;
- }
-base_que:
- vha = pci_get_drvdata(ha->pdev);
- return vha;
-}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 05d595d..056e4d4 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -56,6 +56,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
+ if (ha->flags.pci_channel_io_perm_failure) {
+ DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
+ "Exiting.\n", __func__, vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -154,10 +160,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Check for pending interrupts. */
qla2x00_poll(ha->rsp_q_map[0]);
- if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
- !ha->flags.mbox_int)
+ if (!ha->flags.mbox_int &&
+ !(IS_QLA2200(ha) &&
+ command == MBC_LOAD_RISC_RAM_EXTENDED))
msleep(10);
} /* while */
+ DEBUG17(qla_printk(KERN_WARNING, ha,
+ "Waited %d sec\n",
+ (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
}
/* Check whether we timed out */
@@ -227,7 +237,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (rval == QLA_FUNCTION_TIMEOUT &&
mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
- if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
+ if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+ ha->flags.eeh_busy) {
/* not in dpc. schedule it for dpc to take over. */
DEBUG(printk("%s(%ld): timeout schedule "
"isp_abort_needed.\n", __func__,
@@ -237,7 +248,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
base_vha->host_no));
qla_printk(KERN_WARNING, ha,
"Mailbox command timeout occurred. Scheduling ISP "
- "abort.\n");
+ "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy);
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else if (!abort_active) {
@@ -2530,6 +2541,9 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2565,6 +2579,9 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2595,6 +2612,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2639,6 +2659,9 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -3643,3 +3666,36 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
return rval;
}
+
+int
+qla2x00_get_data_rate(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_DATA_RATE;
+ mcp->mb[1] = 0;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
+ __func__, vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(printk(KERN_INFO
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ if (mcp->mb[1] != 0x7)
+ ha->link_data_rate = mcp->mb[1];
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a47d343..ff17dee 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -636,11 +636,15 @@ failed:
static void qla_do_work(struct work_struct *work)
{
+ unsigned long flags;
struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = rsp->hw;
- vha = qla25xx_get_host(rsp);
+ spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
+ spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}
/* create response queue */
@@ -696,6 +700,10 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
/* Use alternate PCI devfn */
if (LSB(rsp->rid))
options |= BIT_5;
+ /* Enable MSIX handshake mode for non-capable adapters */
+ if (!IS_MSIX_NACK_CAPABLE(ha))
+ options |= BIT_6;
+
rsp->options = options;
rsp->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 4166935..8529eb1 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -78,21 +78,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to report for target devices.");
-int ql2xqfulltracking = 1;
-module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfulltracking,
- "Controls whether the driver tracks queue full status "
- "returns and dynamically adjusts a scsi device's queue "
- "depth. Default is 1, perform tracking. Set to 0 to "
- "disable dynamic tracking and adjustment of queue depth.");
-
-int ql2xqfullrampup = 120;
-module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfullrampup,
- "Number of seconds to wait to begin to ramp-up the queue "
- "depth for a device after a queue-full condition has been "
- "detected. Default is 120 seconds.");
-
int ql2xiidmaenable=1;
module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xiidmaenable,
@@ -490,11 +475,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
srb_t *sp;
int rval;
- if (unlikely(pci_channel_offline(ha->pdev))) {
- if (ha->pdev->error_state == pci_channel_io_frozen)
- cmd->result = DID_REQUEUE << 16;
- else
+ if (ha->flags.eeh_busy) {
+ if (ha->flags.pci_channel_io_perm_failure)
cmd->result = DID_NO_CONNECT << 16;
+ else
+ cmd->result = DID_REQUEUE << 16;
goto qc24_fail_command;
}
@@ -567,8 +552,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
#define ABORT_POLLING_PERIOD 1000
#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
unsigned long wait_iter = ABORT_WAIT_ITER;
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct qla_hw_data *ha = vha->hw;
int ret = QLA_SUCCESS;
+ if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
+ DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
+ return ret;
+ }
+
while (CMD_SP(cmd) && wait_iter--) {
msleep(ABORT_POLLING_PERIOD);
}
@@ -1196,7 +1188,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct qla_hw_data *ha = vha->hw;
struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
- fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
struct req_que *req = vha->req;
if (sdev->tagged_supported)
@@ -1205,8 +1196,6 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
scsi_deactivate_tcq(sdev, req->max_q_depth);
rport->dev_loss_tmo = ha->port_down_retry_count;
- if (sdev->type == TYPE_TAPE)
- fcport->flags |= FCF_TAPE_PRESENT;
return 0;
}
@@ -1217,13 +1206,61 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
sdev->hostdata = NULL;
}
+static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
+{
+ fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
+
+ if (!scsi_track_queue_full(sdev, qdepth))
+ return;
+
+ DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
+ "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
+ fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+ sdev->queue_depth));
+}
+
+static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
+{
+ fc_port_t *fcport = sdev->hostdata;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
+
+ req = vha->req;
+ if (!req)
+ return;
+
+ if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
+ return;
+
+ if (sdev->ordered_tags)
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
+ else
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
+ fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+ sdev->queue_depth));
+}
+
static int
qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
- if (reason != SCSI_QDEPTH_DEFAULT)
- return -EOPNOTSUPP;
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ qla2x00_handle_queue_full(sdev, qdepth);
+ break;
+ case SCSI_QDEPTH_RAMP_UP:
+ qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
return sdev->queue_depth;
}
@@ -1777,6 +1814,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
+
+ /* Set EEH reset type to fundamental if required by the HBA */
+ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
+ pdev->needs_freset = 1;
+ pci_save_state(pdev);
+ }
+
/* Configure PCI I/O space */
ret = qla2x00_iospace_config(ha);
if (ret)
@@ -2003,13 +2047,13 @@ skip_dpc:
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
base_vha->host_no, ha));
- base_vha->flags.init_done = 1;
- base_vha->flags.online = 1;
-
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
+ base_vha->flags.init_done = 1;
+ base_vha->flags.online = 1;
+
ha->isp_ops->enable_intrs(ha);
scsi_scan_host(host);
@@ -2141,6 +2185,24 @@ qla2x00_free_device(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+
+ /* Disable timer */
+ if (vha->timer_active)
+ qla2x00_stop_timer(vha);
+
+ /* Kill the kernel thread for this host */
+ if (ha->dpc_thread) {
+ struct task_struct *t = ha->dpc_thread;
+
+ /*
+ * qla2xxx_wake_dpc checks for ->dpc_thread
+ * so we need to zero it out.
+ */
+ ha->dpc_thread = NULL;
+ kthread_stop(t);
+ }
+
qla25xx_delete_queues(vha);
if (ha->flags.fce_enabled)
@@ -2152,6 +2214,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
/* Stop currently executing firmware. */
qla2x00_try_to_stop_firmware(vha);
+ vha->flags.online = 0;
+
/* turn-off interrupts on the card */
if (ha->interrupts_on)
ha->isp_ops->disable_intrs(ha);
@@ -2738,7 +2802,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
fcport->login_retry--;
if (fcport->flags & FCF_FABRIC_DEVICE) {
- if (fcport->flags & FCF_TAPE_PRESENT)
+ if (fcport->flags & FCF_FCP2_DEVICE)
ha->isp_ops->fabric_logout(vha,
fcport->loop_id,
fcport->d_id.b.domain,
@@ -2826,6 +2890,13 @@ qla2x00_do_dpc(void *data)
if (!base_vha->flags.init_done)
continue;
+ if (ha->flags.eeh_busy) {
+ DEBUG17(qla_printk(KERN_WARNING, ha,
+ "qla2x00_do_dpc: dpc_flags: %lx\n",
+ base_vha->dpc_flags));
+ continue;
+ }
+
DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
ha->dpc_active = 1;
@@ -3016,8 +3087,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
int index;
srb_t *sp;
int t;
+ uint16_t w;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+
+ /* Hardware read to raise pending EEH errors during mailbox waits. */
+ if (!pci_channel_offline(ha->pdev))
+ pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
/*
* Ports - Port down timer.
*
@@ -3062,7 +3138,10 @@ qla2x00_timer(scsi_qla_host_t *vha)
if (!IS_QLA2100(ha) && vha->link_down_timeout)
atomic_set(&vha->loop_state, LOOP_DEAD);
- /* Schedule an ISP abort to return any tape commands. */
+ /*
+ * Schedule an ISP abort to return any FCP2-device
+ * commands.
+ */
/* NPIV - scan physical port only */
if (!vha->vp_idx) {
spin_lock_irqsave(&ha->hardware_lock,
@@ -3079,7 +3158,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
if (sp->ctx)
continue;
sfcp = sp->fcport;
- if (!(sfcp->flags & FCF_TAPE_PRESENT))
+ if (!(sfcp->flags & FCF_FCP2_DEVICE))
continue;
set_bit(ISP_ABORT_NEEDED,
@@ -3219,16 +3298,23 @@ qla2x00_release_firmware(void)
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
- scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ scsi_qla_host_t *vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = vha->hw;
+
+ DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
+ state));
switch (state) {
case pci_channel_io_normal:
+ ha->flags.eeh_busy = 0;
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
+ ha->flags.eeh_busy = 1;
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
- qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+ ha->flags.pci_channel_io_perm_failure = 1;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -3279,6 +3365,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int rc;
+ DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+
if (ha->mem_only)
rc = pci_enable_device_mem(pdev);
else
@@ -3287,19 +3375,33 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
if (rc) {
qla_printk(KERN_WARNING, ha,
"Can't re-enable PCI device after reset.\n");
-
return ret;
}
- pci_set_master(pdev);
if (ha->isp_ops->pci_config(base_vha))
return ret;
+#ifdef QL_DEBUG_LEVEL_17
+ {
+ uint8_t b;
+ uint32_t i;
+
+ printk("slot_reset_1: ");
+ for (i = 0; i < 256; i++) {
+ pci_read_config_byte(ha->pdev, i, &b);
+ printk("%s%02x", (i%16) ? " " : "\n", b);
+ }
+ printk("\n");
+ }
+#endif
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ DEBUG17(qla_printk(KERN_WARNING, ha,
+ "slot_reset-return:ret=%x\n", ret));
+
return ret;
}
@@ -3310,12 +3412,17 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int ret;
+ DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
+
ret = qla2x00_wait_for_hba_online(base_vha);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
"the device failed to resume I/O "
"from slot/link_reset");
}
+
+ ha->flags.eeh_busy = 0;
+
pci_cleanup_aer_uncorrect_error_status(pdev);
}
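Taken together, the qla2xxx changes above hook the driver into PCI EEH recovery: error_detected() latches eeh_busy (or pci_channel_io_perm_failure), the queuecommand, DPC and mailbox paths back off while those flags are set, slot_reset() re-initializes the ISP, and resume() clears eeh_busy. A compact stand-alone model of the error_detected() decision (the enums stand in for pci_channel_state_t and the PCI_ERS_RESULT_* codes; this is a sketch of the flow, not driver code):

#include <stdio.h>

enum chan_state { IO_NORMAL, IO_FROZEN, IO_PERM_FAILURE };
enum ers_result { CAN_RECOVER, NEED_RESET, DISCONNECT };

struct hba_flags { int eeh_busy; int perm_failure; };

/* Mirrors the switch in qla2xxx_pci_error_detected() above. */
static enum ers_result error_detected(struct hba_flags *f, enum chan_state s)
{
        switch (s) {
        case IO_NORMAL:
                f->eeh_busy = 0;
                return CAN_RECOVER;
        case IO_FROZEN:
                f->eeh_busy = 1;        /* I/O paths requeue until resume() */
                return NEED_RESET;      /* slot_reset() will follow */
        case IO_PERM_FAILURE:
                f->perm_failure = 1;    /* new I/O fails with DID_NO_CONNECT */
                return DISCONNECT;
        }
        return NEED_RESET;
}

int main(void)
{
        struct hba_flags f = { 0, 0 };

        printf("frozen -> %d, eeh_busy=%d\n",
               (int)error_detected(&f, IO_FROZEN), f.eeh_busy);
        return 0;
}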
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 010e69b..371dc89 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2292,11 +2292,14 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t faddr, left, burst;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ goto try_fast;
if (offset & 0xfff)
goto slow_read;
if (length < OPTROM_BURST_SIZE)
goto slow_read;
+try_fast:
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 807e0db..ed36279 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.01-k7"
+#define QLA2XXX_VERSION "8.03.01-k10"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e495d38..c664242 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
req->next_rq->resid_len = scsi_in(cmd)->resid;
+ scsi_release_buffers(cmd);
blk_end_request_all(req, 0);
- scsi_release_buffers(cmd);
scsi_next_command(cmd);
return;
}
@@ -859,6 +859,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
case 0x07: /* operation in progress */
case 0x08: /* Long write in progress */
case 0x09: /* self test in progress */
+ case 0x14: /* space allocation in progress */
action = ACTION_DELAYED_RETRY;
break;
default:
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6531c91..653f22a 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -649,11 +649,22 @@ static __init int fc_transport_init(void)
return error;
error = transport_class_register(&fc_vport_class);
if (error)
- return error;
+ goto unreg_host_class;
error = transport_class_register(&fc_rport_class);
if (error)
- return error;
- return transport_class_register(&fc_transport_class);
+ goto unreg_vport_class;
+ error = transport_class_register(&fc_transport_class);
+ if (error)
+ goto unreg_rport_class;
+ return 0;
+
+unreg_rport_class:
+ transport_class_unregister(&fc_rport_class);
+unreg_vport_class:
+ transport_class_unregister(&fc_vport_class);
+unreg_host_class:
+ transport_class_unregister(&fc_host_class);
+ return error;
}
static void __exit fc_transport_exit(void)
@@ -3516,7 +3527,10 @@ fc_bsg_job_timeout(struct request *req)
if (!done && i->f->bsg_timeout) {
/* call LLDD to abort the i/o as it has timed out */
err = i->f->bsg_timeout(job);
- if (err)
+ if (err == -EAGAIN) {
+ job->ref_cnt--;
+ return BLK_EH_RESET_TIMER;
+ } else if (err)
printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
"abort failed with status %d\n", err);
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9093c72..255da53 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%u\n", sdkp->ATO);
}
+static ssize_t
+sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+}
+
static struct device_attribute sd_disk_attrs[] = {
__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
sd_store_manage_start_stop),
__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
+ __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
__ATTR_NULL,
};
@@ -399,6 +409,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
}
/**
+ * sd_prepare_discard - unmap blocks on thinly provisioned device
+ * @rq: Request to prepare
+ *
+ * Will issue either UNMAP or WRITE SAME(16) depending on the preference
+ * indicated by the target device.
+ **/
+static int sd_prepare_discard(struct request *rq)
+{
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct bio *bio = rq->bio;
+ sector_t sector = bio->bi_sector;
+ unsigned int num = bio_sectors(bio);
+
+ if (sdkp->device->sector_size == 4096) {
+ sector >>= 3;
+ num >>= 3;
+ }
+
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->timeout = SD_TIMEOUT;
+
+ memset(rq->cmd, 0, rq->cmd_len);
+
+ if (sdkp->unmap) {
+ char *buf = kmap_atomic(bio_page(bio), KM_USER0);
+
+ rq->cmd[0] = UNMAP;
+ rq->cmd[8] = 24;
+ rq->cmd_len = 10;
+
+ /* Ensure that data length matches payload */
+ rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
+
+ put_unaligned_be16(6 + 16, &buf[0]);
+ put_unaligned_be16(16, &buf[2]);
+ put_unaligned_be64(sector, &buf[8]);
+ put_unaligned_be32(num, &buf[16]);
+
+ kunmap_atomic(buf, KM_USER0);
+ } else {
+ rq->cmd[0] = WRITE_SAME_16;
+ rq->cmd[1] = 0x8; /* UNMAP */
+ put_unaligned_be64(sector, &rq->cmd[2]);
+ put_unaligned_be32(num, &rq->cmd[10]);
+ rq->cmd_len = 16;
+ }
+
+ return BLKPREP_OK;
+}
+
+/**
* sd_init_command - build a scsi (read or write) command from
* information in the request structure.
* @SCpnt: pointer to mid-level's per scsi command structure that
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
int ret, host_dif;
unsigned char protect;
+ /*
+ * Discard requests come in as REQ_TYPE_FS but we turn them into
+ * block PC requests to make life easier.
+ */
+ if (blk_discard_rq(rq))
+ ret = sd_prepare_discard(rq);
+
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
goto out;
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
sd_printk(KERN_NOTICE, sdkp,
"physical block alignment offset: %u\n", alignment);
+ if (buffer[14] & 0x80) { /* TPE */
+ struct request_queue *q = sdp->request_queue;
+
+ sdkp->thin_provisioning = 1;
+ q->limits.discard_granularity = sdkp->hw_sector_size;
+ q->limits.max_discard_sectors = 0xffffffff;
+
+ if (buffer[14] & 0x40) /* TPRZ */
+ q->limits.discard_zeroes_data = 1;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ }
+
sdkp->capacity = lba + 1;
return sector_size;
}
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
+ struct request_queue *q = sdkp->disk->queue;
unsigned int sector_sz = sdkp->device->sector_size;
char *buffer;
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
blk_queue_io_opt(sdkp->disk->queue,
get_unaligned_be32(&buffer[12]) * sector_sz);
+ /* Thin provisioning enabled and page length indicates TP support */
+ if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
+ unsigned int lba_count, desc_count, granularity;
+
+ lba_count = get_unaligned_be32(&buffer[20]);
+ desc_count = get_unaligned_be32(&buffer[24]);
+
+ if (lba_count) {
+ q->limits.max_discard_sectors =
+ lba_count * sector_sz >> 9;
+
+ if (desc_count)
+ sdkp->unmap = 1;
+ }
+
+ granularity = get_unaligned_be32(&buffer[28]);
+
+ if (granularity)
+ q->limits.discard_granularity = granularity * sector_sz;
+
+ if (buffer[32] & 0x80)
+ q->limits.discard_alignment =
+ get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+ }
+
kfree(buffer);
}
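The UNMAP branch of sd_prepare_discard() above packs a single-descriptor UNMAP parameter list into the bio's page. A stand-alone sketch of that 24-byte layout, using userspace equivalents of the put_unaligned_be* helpers (the example LBA and block count are arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace stand-ins for put_unaligned_be16/32/64(). */
static void be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = (uint8_t)v; }
static void be32(uint32_t v, uint8_t *p) { be16(v >> 16, p); be16((uint16_t)v, p + 2); }
static void be64(uint64_t v, uint8_t *p) { be32(v >> 32, p); be32((uint32_t)v, p + 4); }

int main(void)
{
        uint8_t buf[24];
        uint64_t sector = 0x12345678;   /* first LBA of the extent */
        uint32_t num = 2048;            /* number of blocks to unmap */
        int i;

        memset(buf, 0, sizeof(buf));
        be16(6 + 16, &buf[0]);          /* UNMAP data length: bytes after this field */
        be16(16, &buf[2]);              /* block descriptor data length */
        be64(sector, &buf[8]);          /* descriptor: starting LBA */
        be32(num, &buf[16]);            /* descriptor: number of logical blocks */

        for (i = 0; i < 24; i++)
                printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
        return 0;
}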
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index e374804..43d3caf 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -60,6 +60,8 @@ struct scsi_disk {
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
unsigned first_scan : 1;
+ unsigned thin_provisioning : 1;
+ unsigned unmap : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ad59abb..d04ea9a 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
SRpnt->waiting = waiting;
if (STp->buffer->do_dio) {
+ mdata->page_order = 0;
mdata->nr_entries = STp->buffer->sg_segs;
mdata->pages = STp->buffer->mapped_pages;
} else {
+ mdata->page_order = STp->buffer->reserved_page_order;
mdata->nr_entries =
DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
- STp->buffer->map_data.pages = STp->buffer->reserved_pages;
- STp->buffer->map_data.offset = 0;
+ mdata->pages = STp->buffer->reserved_pages;
+ mdata->offset = 0;
}
memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
@@ -3719,7 +3721,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
priority |= __GFP_ZERO;
if (STbuffer->frp_segs) {
- order = STbuffer->map_data.page_order;
+ order = STbuffer->reserved_page_order;
b_size = PAGE_SIZE << order;
} else {
for (b_size = PAGE_SIZE, order = 0;
@@ -3752,7 +3754,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
segs++;
}
STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
- STbuffer->map_data.page_order = order;
+ STbuffer->reserved_page_order = order;
return 1;
}
@@ -3765,7 +3767,7 @@ static void clear_buffer(struct st_buffer * st_bp)
for (i=0; i < st_bp->frp_segs; i++)
memset(page_address(st_bp->reserved_pages[i]), 0,
- PAGE_SIZE << st_bp->map_data.page_order);
+ PAGE_SIZE << st_bp->reserved_page_order);
st_bp->cleared = 1;
}
@@ -3773,7 +3775,7 @@ static void clear_buffer(struct st_buffer * st_bp)
/* Release the extra buffer */
static void normalize_buffer(struct st_buffer * STbuffer)
{
- int i, order = STbuffer->map_data.page_order;
+ int i, order = STbuffer->reserved_page_order;
for (i = 0; i < STbuffer->frp_segs; i++) {
__free_pages(STbuffer->reserved_pages[i], order);
@@ -3781,7 +3783,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
}
STbuffer->frp_segs = 0;
STbuffer->sg_segs = 0;
- STbuffer->map_data.page_order = 0;
+ STbuffer->reserved_page_order = 0;
STbuffer->map_data.offset = 0;
}
@@ -3791,7 +3793,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
{
int i, cnt, res, offset;
- int length = PAGE_SIZE << st_bp->map_data.page_order;
+ int length = PAGE_SIZE << st_bp->reserved_page_order;
for (i = 0, offset = st_bp->buffer_bytes;
i < st_bp->frp_segs && offset >= length; i++)
@@ -3823,7 +3825,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
{
int i, cnt, res, offset;
- int length = PAGE_SIZE << st_bp->map_data.page_order;
+ int length = PAGE_SIZE << st_bp->reserved_page_order;
for (i = 0, offset = st_bp->read_pointer;
i < st_bp->frp_segs && offset >= length; i++)
@@ -3856,7 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
{
int src_seg, dst_seg, src_offset = 0, dst_offset;
int count, total;
- int length = PAGE_SIZE << st_bp->map_data.page_order;
+ int length = PAGE_SIZE << st_bp->reserved_page_order;
if (offset == 0)
return;
@@ -4578,7 +4580,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
}
mdata->offset = uaddr & ~PAGE_MASK;
- mdata->page_order = 0;
STbp->mapped_pages = pages;
return nr_pages;
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 544dc6b..f91a67c 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -46,6 +46,7 @@ struct st_buffer {
struct st_request *last_SRpnt;
struct st_cmdstatus cmdstat;
struct page **reserved_pages;
+ int reserved_page_order;
struct page **mapped_pages;
struct rq_map_data map_data;
unsigned char *b_data;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3058bb1..fd7b15b 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -623,6 +623,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
}
break;
case INQUIRY:
+ if (lun >= host->max_lun) {
+ cmd->result = DID_NO_CONNECT << 16;
+ done(cmd);
+ return 0;
+ }
if (id != host->max_id - 1)
break;
if (!lun && !cmd->device->channel &&
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index 1e3d193..8681f13 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -58,7 +58,7 @@ static const char serial21285_name[] = "Footbridge UART";
static void serial21285_stop_tx(struct uart_port *port)
{
if (tx_enabled(port)) {
- disable_irq(IRQ_CONTX);
+ disable_irq_nosync(IRQ_CONTX);
tx_enabled(port) = 0;
}
}
@@ -74,7 +74,7 @@ static void serial21285_start_tx(struct uart_port *port)
static void serial21285_stop_rx(struct uart_port *port)
{
if (rx_enabled(port)) {
- disable_irq(IRQ_CONRX);
+ disable_irq_nosync(IRQ_CONRX);
rx_enabled(port) = 0;
}
}
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index c3e37c8e..e9b15c3 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
#define PASS_LIMIT 256
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+
/*
* We default to IRQ0 for the "no irq" hack. Some
* machine types want others as well - they're free
@@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
spin_unlock_irqrestore(&up->port.lock, flags);
- return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
}
static unsigned int serial8250_get_mctrl(struct uart_port *port)
@@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&up->port.lock, flags);
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 36ede02..24485cc 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -328,15 +328,7 @@ static const struct pnp_device_id pnp_dev_table[] = {
/* U.S. Robotics 56K Voice INT PnP*/
{ "USR9190", 0 },
/* Wacom tablets */
- { "WACF004", 0 },
- { "WACF005", 0 },
- { "WACF006", 0 },
- { "WACF007", 0 },
- { "WACF008", 0 },
- { "WACF009", 0 },
- { "WACF00A", 0 },
- { "WACF00B", 0 },
- { "WACF00C", 0 },
+ { "WACFXXX", 0 },
/* Compaq touchscreen */
{ "FPI2002", 0 },
/* Fujitsu Stylistic touchscreens */
@@ -354,6 +346,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
{ "FUJ02E5", 0 },
/* Fujitsu P-series tablet PC device */
{ "FUJ02E6", 0 },
+ /* Fujitsu Wacom 2FGT Tablet PC device */
+ { "FUJ02E7", 0 },
/*
* LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
* disguise)
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 18130f1..60d665a 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -1088,7 +1088,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
int *parity, int *bits)
{
- if ( readl(sport->port.membase + UCR1) | UCR1_UARTEN ) {
+ if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
/* ok, the port was enabled */
unsigned int ucr2, ubir,ubmr, uartclk;
unsigned int baud_raw;
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 0700cd1..683e66f 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -411,6 +411,17 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap)
goto ack_tx_int;
}
+ /* Under some circumstances, we see interrupts reported for
+ * a closed channel. The interrupt mask in R1 is clear, but
+ * R3 still signals the interrupts and we see them when taking
+ * an interrupt for the other channel (this could be a qemu
+ * bug but since the ESCC doc doesn't specify precisely whether
+ * R3 interrupt status bits are masked by R1 interrupt enable
+ * bits, better safe than sorry). --BenH.
+ */
+ if (!ZS_IS_OPEN(uap))
+ goto ack_tx_int;
+
if (uap->port.x_char) {
uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
write_zsdata(uap, uap->port.x_char);
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 047530b..7f28307 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -385,13 +385,20 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
}
/*
- * As a last resort, if the quotient is zero,
- * default to 9600 bps
+ * As a last resort, if the range cannot be met then clip to
+ * the nearest chip supported rate.
*/
- if (!hung_up)
- tty_termios_encode_baud_rate(termios, 9600, 9600);
+ if (!hung_up) {
+ if (baud <= min)
+ tty_termios_encode_baud_rate(termios,
+ min + 1, min + 1);
+ else
+ tty_termios_encode_baud_rate(termios,
+ max - 1, max - 1);
+ }
}
-
+ /* Should never happen */
+ WARN_ON(1);
return 0;
}
@@ -2006,12 +2013,6 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
mutex_lock(&port->mutex);
- if (!console_suspend_enabled && uart_console(uport)) {
- /* we're going to avoid suspending serial console */
- mutex_unlock(&port->mutex);
- return 0;
- }
-
tty_dev = device_find_child(uport->dev, &match, serial_match_port);
if (device_may_wakeup(tty_dev)) {
enable_irq_wake(uport->irq);
@@ -2019,20 +2020,23 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
mutex_unlock(&port->mutex);
return 0;
}
- uport->suspended = 1;
+ if (console_suspend_enabled || !uart_console(uport))
+ uport->suspended = 1;
if (port->flags & ASYNC_INITIALIZED) {
const struct uart_ops *ops = uport->ops;
int tries;
- set_bit(ASYNCB_SUSPENDED, &port->flags);
- clear_bit(ASYNCB_INITIALIZED, &port->flags);
+ if (console_suspend_enabled || !uart_console(uport)) {
+ set_bit(ASYNCB_SUSPENDED, &port->flags);
+ clear_bit(ASYNCB_INITIALIZED, &port->flags);
- spin_lock_irq(&uport->lock);
- ops->stop_tx(uport);
- ops->set_mctrl(uport, 0);
- ops->stop_rx(uport);
- spin_unlock_irq(&uport->lock);
+ spin_lock_irq(&uport->lock);
+ ops->stop_tx(uport);
+ ops->set_mctrl(uport, 0);
+ ops->stop_rx(uport);
+ spin_unlock_irq(&uport->lock);
+ }
/*
* Wait for the transmitter to empty.
@@ -2047,16 +2051,18 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
drv->dev_name,
drv->tty_driver->name_base + uport->line);
- ops->shutdown(uport);
+ if (console_suspend_enabled || !uart_console(uport))
+ ops->shutdown(uport);
}
/*
* Disable the console device before suspending.
*/
- if (uart_console(uport))
+ if (console_suspend_enabled && uart_console(uport))
console_stop(uport->cons);
- uart_change_pm(state, 3);
+ if (console_suspend_enabled || !uart_console(uport))
+ uart_change_pm(state, 3);
mutex_unlock(&port->mutex);
@@ -2073,29 +2079,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
mutex_lock(&port->mutex);
- if (!console_suspend_enabled && uart_console(uport)) {
- /* no need to resume serial console, it wasn't suspended */
- /*
- * First try to use the console cflag setting.
- */
- memset(&termios, 0, sizeof(struct ktermios));
- termios.c_cflag = uport->cons->cflag;
- /*
- * If that's unset, use the tty termios setting.
- */
- if (termios.c_cflag == 0)
- termios = *state->port.tty->termios;
- else {
- termios.c_ispeed = termios.c_ospeed =
- tty_termios_input_baud_rate(&termios);
- termios.c_ispeed = termios.c_ospeed =
- tty_termios_baud_rate(&termios);
- }
- uport->ops->set_termios(uport, &termios, NULL);
- mutex_unlock(&port->mutex);
- return 0;
- }
-
tty_dev = device_find_child(uport->dev, &match, serial_match_port);
if (!uport->suspended && device_may_wakeup(tty_dev)) {
disable_irq_wake(uport->irq);
@@ -2121,21 +2104,23 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
spin_lock_irq(&uport->lock);
ops->set_mctrl(uport, 0);
spin_unlock_irq(&uport->lock);
- ret = ops->startup(uport);
- if (ret == 0) {
- uart_change_speed(state, NULL);
- spin_lock_irq(&uport->lock);
- ops->set_mctrl(uport, uport->mctrl);
- ops->start_tx(uport);
- spin_unlock_irq(&uport->lock);
- set_bit(ASYNCB_INITIALIZED, &port->flags);
- } else {
- /*
- * Failed to resume - maybe hardware went away?
- * Clear the "initialized" flag so we won't try
- * to call the low level drivers shutdown method.
- */
- uart_shutdown(state);
+ if (console_suspend_enabled || !uart_console(uport)) {
+ ret = ops->startup(uport);
+ if (ret == 0) {
+ uart_change_speed(state, NULL);
+ spin_lock_irq(&uport->lock);
+ ops->set_mctrl(uport, uport->mctrl);
+ ops->start_tx(uport);
+ spin_unlock_irq(&uport->lock);
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ } else {
+ /*
+ * Failed to resume - maybe hardware went away?
+ * Clear the "initialized" flag so we won't try
+ * to call the low level drivers shutdown method.
+ */
+ uart_shutdown(state);
+ }
}
clear_bit(ASYNCB_SUSPENDED, &port->flags);
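The recurring guard in the hunks above keeps a serial console alive across
suspend when console suspend is disabled ("no_console_suspend"). A minimal
sketch of the predicate, assuming only the existing console_suspend_enabled
flag and the uart_console() helper from serial_core.h (the function name is
illustrative):

	#include <linux/console.h>
	#include <linux/serial_core.h>

	/* Power the port down on suspend unless it is an active console
	 * and console suspend has been disabled on the command line. */
	static bool uart_may_power_down(struct uart_port *uport)
	{
		return console_suspend_enabled || !uart_console(uport);
	}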
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index fc413f0..95421fa 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -146,7 +146,8 @@ static void quirk_wakeup_oxsemi(struct pcmcia_device *link)
{
struct serial_info *info = link->priv;
- outb(12, info->c950ctrl + 1);
+ if (info->c950ctrl)
+ outb(12, info->c950ctrl + 1);
}
/* request_region? oxsemi branch does no request_region too... */
@@ -757,6 +758,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
@@ -819,6 +821,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */
+ PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC860", 0xd85f6206, 0x698f93db, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC860 3G Network Adapter R1 */
PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
@@ -827,7 +830,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
- PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"),
+ PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b),
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490),
@@ -861,6 +864,18 @@ static struct pcmcia_device_id serial_ids[] = {
};
MODULE_DEVICE_TABLE(pcmcia, serial_ids);
+MODULE_FIRMWARE("cis/PCMLM28.cis");
+MODULE_FIRMWARE("cis/DP83903.cis");
+MODULE_FIRMWARE("cis/3CCFEM556.cis");
+MODULE_FIRMWARE("cis/3CXEM556.cis");
+MODULE_FIRMWARE("cis/SW_8xx_SER.cis");
+MODULE_FIRMWARE("cis/SW_7xx_SER.cis");
+MODULE_FIRMWARE("cis/SW_555_SER.cis");
+MODULE_FIRMWARE("cis/MT5634ZLX.cis");
+MODULE_FIRMWARE("cis/COMpad2.cis");
+MODULE_FIRMWARE("cis/COMpad4.cis");
+MODULE_FIRMWARE("cis/RS-COM-2P.cis");
+
static struct pcmcia_driver serial_cs_driver = {
.owner = THIS_MODULE,
.drv = {
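The MODULE_FIRMWARE() annotations above only record the CIS file names in the
module's modinfo so packaging tools can ship them; at runtime the CIS
overrides are still fetched through the firmware loader. A minimal sketch of
that path (the function name and device pointer are illustrative, only
request_firmware()/release_firmware() are real kernel API):

	#include <linux/device.h>
	#include <linux/firmware.h>

	/* Fetch one of the CIS overrides named above for a PCMCIA function. */
	static int load_cis_override(struct device *dev)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, "cis/SW_8xx_SER.cis", dev);
		if (ret)
			return ret;
		/* ... hand fw->data / fw->size to the PCMCIA CIS parser ... */
		release_firmware(fw);
		return 0;
	}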
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 7e3f4ff..42f3333 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -222,9 +222,9 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
Set SCP6MD1,0 = {01} (output) */
__raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
- data = ctrl_inb(SCPDR);
+ data = __raw_readb(SCPDR);
/* Set /RTS2 (bit6) = 0 */
- ctrl_outb(data & 0xbf, SCPDR);
+ __raw_writeb(data & 0xbf, SCPDR);
}
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
@@ -897,11 +897,21 @@ static void sci_shutdown(struct uart_port *port)
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- unsigned int status, baud, smr_val;
+ unsigned int status, baud, smr_val, max_baud;
int t = -1;
- baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
- if (likely(baud))
+ /*
+	 * earlyprintk comes here early on with port->uartclk set to zero.
+	 * The clock framework is not up and running at this point, so here
+	 * we assume that 115200 is the maximum baud rate. Please note that
+	 * the baud rate is not programmed during earlyprintk - it is assumed
+	 * that the previous boot loader has enabled the required clocks and
+	 * set up the baud rate generator hardware for us already.
+ */
+ max_baud = port->uartclk ? port->uartclk / 16 : 115200;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
+ if (likely(baud && port->uartclk))
t = SCBRR_VALUE(baud, port->uartclk);
do {
@@ -1042,11 +1052,26 @@ static void __devinit sci_init_single(struct platform_device *dev,
sci_port->port.ops = &sci_uart_ops;
sci_port->port.iotype = UPIO_MEM;
sci_port->port.line = index;
- sci_port->port.fifosize = 1;
- sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
- sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
- sci_port->enable = sci_clk_enable;
- sci_port->disable = sci_clk_disable;
+
+ switch (p->type) {
+ case PORT_SCIFA:
+ sci_port->port.fifosize = 64;
+ break;
+ case PORT_SCIF:
+ sci_port->port.fifosize = 16;
+ break;
+ default:
+ sci_port->port.fifosize = 1;
+ break;
+ }
+
+ if (dev) {
+ sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
+ sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+ sci_port->enable = sci_clk_enable;
+ sci_port->disable = sci_clk_disable;
+ sci_port->port.dev = &dev->dev;
+ }
sci_port->break_timer.data = (unsigned long)sci_port;
sci_port->break_timer.function = sci_break_timer;
@@ -1057,7 +1082,6 @@ static void __devinit sci_init_single(struct platform_device *dev,
sci_port->port.irq = p->irqs[SCIx_TXI_IRQ];
sci_port->port.flags = p->flags;
- sci_port->port.dev = &dev->dev;
sci_port->type = sci_port->port.type = p->type;
memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
@@ -1101,7 +1125,7 @@ static void serial_console_write(struct console *co, const char *s,
sci_port->disable(port);
}
-static int __init serial_console_setup(struct console *co, char *options)
+static int __devinit serial_console_setup(struct console *co, char *options)
{
struct sci_port *sci_port;
struct uart_port *port;
@@ -1119,9 +1143,14 @@ static int __init serial_console_setup(struct console *co, char *options)
if (co->index >= SCI_NPORTS)
co->index = 0;
- sci_port = &sci_ports[co->index];
- port = &sci_port->port;
- co->data = port;
+ if (co->data) {
+ port = co->data;
+ sci_port = to_sci_port(port);
+ } else {
+ sci_port = &sci_ports[co->index];
+ port = &sci_port->port;
+ co->data = port;
+ }
/*
* Also need to check port->type, we don't actually have any
@@ -1165,6 +1194,15 @@ static int __init sci_console_init(void)
return 0;
}
console_initcall(sci_console_init);
+
+static struct sci_port early_serial_port;
+static struct console early_serial_console = {
+ .name = "early_ttySC",
+ .write = serial_console_write,
+ .flags = CON_PRINTBUFFER,
+};
+static char early_serial_buf[32];
+
#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
@@ -1250,6 +1288,21 @@ static int __devinit sci_probe(struct platform_device *dev)
struct sh_sci_priv *priv;
int i, ret = -EINVAL;
+#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+ if (is_early_platform_device(dev)) {
+ if (dev->id == -1)
+ return -ENOTSUPP;
+ early_serial_console.index = dev->id;
+ early_serial_console.data = &early_serial_port.port;
+ sci_init_single(NULL, &early_serial_port, dev->id, p);
+ serial_console_setup(&early_serial_console, early_serial_buf);
+ if (!strstr(early_serial_buf, "keep"))
+ early_serial_console.flags |= CON_BOOT;
+ register_console(&early_serial_console);
+ return 0;
+ }
+#endif
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -1349,6 +1402,10 @@ static void __exit sci_exit(void)
uart_unregister_driver(&sci_uart_driver);
}
+#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+early_platform_init_buffer("earlyprintk", &sci_driver,
+ early_serial_buf, ARRAY_SIZE(early_serial_buf));
+#endif
module_init(sci_init);
module_exit(sci_exit);
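For the early serial console registered in sci_probe() above to run, board
code has to hand the SCI(F) platform devices to the early platform framework
before the regular driver model is initialised. A minimal sketch, assuming a
board-specific device list (the device itself is a placeholder;
early_platform_add_devices() and platform_add_devices() are real API):

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/platform_device.h>

	static struct platform_device scif0_device;	/* resources omitted */

	static struct platform_device *sci_devices[] __initdata = {
		&scif0_device,
	};

	/* Register the same devices both early (for earlyprintk) and late. */
	static int __init board_setup_devices(void)
	{
		early_platform_add_devices(sci_devices,
					   ARRAY_SIZE(sci_devices));
		return platform_add_devices(sci_devices,
					    ARRAY_SIZE(sci_devices));
	}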
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index a32094ee..0efcded 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -517,20 +517,20 @@ static const struct __attribute__((packed)) {
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xfffffe80)
- return ctrl_inb(SCPDR)&0x01 ? 1 : 0; /* SCI */
+ return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
if (port->mapbase == 0xa4000150)
- return ctrl_inb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
+ return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xa4000140)
- return ctrl_inb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
+ return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == SCIF0)
- return ctrl_inb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
+ return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
if (port->mapbase == SCIF2)
- return ctrl_inb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
+ return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
@@ -557,68 +557,68 @@ static inline int sci_rxd_in(struct uart_port *port)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
- return ctrl_inb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
+ return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
if (port->mapbase == 0xffe80000)
- return ctrl_inw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe80000)
- return ctrl_inw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xfe4b0000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0;
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0;
if (port->mapbase == 0xfe4c0000)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0;
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0;
if (port->mapbase == 0xfe4d0000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0;
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xfe600000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfe610000)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfe620000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe10000)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe20000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe30000)
- return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
- return ctrl_inb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */
+ return __raw_readb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
- return ctrl_inb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */
+ return __raw_readb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */
if (port->mapbase == 0xffe10000)
- return ctrl_inb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */
+ return __raw_readb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */
if (port->mapbase == 0xffe20000)
- return ctrl_inb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */
+ return __raw_readb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */
return 1;
}
@@ -626,17 +626,17 @@ static inline int sci_rxd_in(struct uart_port *port)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
- return ctrl_inb(SCSPTR0) & 0x0008 ? 1 : 0; /* SCIF0 */
+ return __raw_readb(SCSPTR0) & 0x0008 ? 1 : 0; /* SCIF0 */
if (port->mapbase == 0xffe10000)
- return ctrl_inb(SCSPTR1) & 0x0020 ? 1 : 0; /* SCIF1 */
+ return __raw_readb(SCSPTR1) & 0x0020 ? 1 : 0; /* SCIF1 */
if (port->mapbase == 0xffe20000)
- return ctrl_inb(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF2 */
+ return __raw_readb(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF2 */
if (port->mapbase == 0xa4e30000)
- return ctrl_inb(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF3 */
+ return __raw_readb(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF3 */
if (port->mapbase == 0xa4e40000)
- return ctrl_inb(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF4 */
+ return __raw_readb(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF4 */
if (port->mapbase == 0xa4e50000)
- return ctrl_inb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
+ return __raw_readb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
@@ -645,9 +645,9 @@ static inline int sci_rxd_in(struct uart_port *port)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->type == PORT_SCIF)
- return ctrl_inw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
+ return __raw_readw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
if (port->type == PORT_SCIFA)
- return ctrl_inw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
+ return __raw_readw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
@@ -665,11 +665,11 @@ static inline int sci_rxd_in(struct uart_port *port)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe08000)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe10000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF/IRDA */
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF/IRDA */
return 1;
}
@@ -677,20 +677,20 @@ static inline int sci_rxd_in(struct uart_port *port)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xff923000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xff924000)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xff925000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe10000)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
@@ -698,17 +698,17 @@ static inline int sci_rxd_in(struct uart_port *port)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffea0000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffeb0000)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffec0000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffed0000)
- return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffee0000)
- return ctrl_inw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffef0000)
- return ctrl_inw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
@@ -718,22 +718,22 @@ static inline int sci_rxd_in(struct uart_port *port)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xfffe8000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffe8800)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffe9000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffe9800)
- return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
#if defined(CONFIG_CPU_SUBTYPE_SH7201)
if (port->mapbase == 0xfffeA000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffeA800)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffeB000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffeB800)
- return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
#endif
return 1;
}
@@ -741,24 +741,24 @@ static inline int sci_rxd_in(struct uart_port *port)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xf8400000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xf8410000)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xf8420000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffc30000)
- return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffc40000)
- return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffc50000)
- return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffc60000)
- return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
+ return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#endif
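The mechanical ctrl_inb/ctrl_inw to __raw_readb/__raw_readw conversion in
this header swaps the sh-specific accessors for the generic raw MMIO helpers;
the registers and bit tests are unchanged. A minimal sketch of the resulting
idiom, assuming SCSPTR0 is defined as in sh-sci.h:

	#include <linux/io.h>

	/* Sample the RX data line via the serial port register (sketch). */
	static inline int scif0_rxd_in(void)
	{
		return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0;
	}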
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index 377f271..ab2ab3c 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -394,7 +394,7 @@ static void ulite_console_write(struct console *co, const char *s,
spin_unlock_irqrestore(&port->lock, flags);
}
-static int __init ulite_console_setup(struct console *co, char *options)
+static int __devinit ulite_console_setup(struct console *co, char *options)
{
struct uart_port *port;
int baud = 9600;
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index a7e5c2e..d5d7f23 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -806,6 +806,8 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
if (d->state.event != PM_EVENT_FREEZE)
break;
for_each_irq_desc(irq, desc) {
+ if (desc->handle_irq == intc_redirect_irq)
+ continue;
if (desc->chip != &d->chip)
continue;
if (desc->status & IRQ_DISABLED)
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
index 841ed50..082604e 100644
--- a/drivers/sh/pfc.c
+++ b/drivers/sh/pfc.c
@@ -71,7 +71,7 @@ static void gpio_write_bit(struct pinmux_data_reg *dr,
pos = dr->reg_width - (in_pos + 1);
- pr_debug("write_bit addr = %lx, value = %ld, pos = %ld, "
+ pr_debug("write_bit addr = %lx, value = %d, pos = %ld, "
"r_width = %ld\n",
dr->reg, !!value, pos, dr->reg_width);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 2d9d703..f55eb01 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -216,6 +216,17 @@ config SPI_S3C24XX
help
SPI driver for Samsung S3C24XX series ARM SoCs
+config SPI_S3C24XX_FIQ
+ bool "S3C24XX driver with FIQ pseudo-DMA"
+ depends on SPI_S3C24XX
+ select FIQ
+ help
+	  Enable FIQ support for the S3C24XX SPI driver to provide pseudo
+	  DMA by using the fast-interrupt request framework. This allows
+	  the driver to get DMA-like performance when there are either
+	  no free DMA channels, or when doing transfers that require both
+	  TX and RX data paths.
+
config SPI_S3C24XX_GPIO
tristate "Samsung S3C24XX series SPI by GPIO"
depends on ARCH_S3C2410 && EXPERIMENTAL
@@ -226,6 +237,13 @@ config SPI_S3C24XX_GPIO
the inbuilt hardware cannot provide the transfer mode, or
where the board is using non hardware connected pins.
+config SPI_S3C64XX
+ tristate "Samsung S3C64XX series type SPI"
+ depends on ARCH_S3C64XX && EXPERIMENTAL
+ select S3C64XX_DMA
+ help
+ SPI driver for Samsung S3C64XX and newer SoCs.
+
config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
depends on SUPERH && HAVE_CLK
@@ -289,6 +307,16 @@ config SPI_NUC900
# Add new SPI master controllers in alphabetical order above this line
#
+config SPI_DESIGNWARE
+ bool "DesignWare SPI controller core support"
+ depends on SPI_MASTER
+ help
+	  General driver for the SPI controller core from DesignWare.
+
+config SPI_DW_PCI
+ tristate "PCI interface driver for DW SPI core"
+ depends on SPI_DESIGNWARE && PCI
+
#
# There are lots of SPI device types, with sensors and memory
# being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index ed8c167..f3d2810 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,8 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
+obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
+obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o
obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
obj-$(CONFIG_SPI_IMX) += spi_imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
@@ -30,7 +32,8 @@ obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
-obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
+obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
+obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
@@ -39,6 +42,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
+
+# special build for s3c24xx spi driver with fiq support
+spi_s3c24xx_hw-y := spi_s3c24xx.o
+spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
+
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index f5b3fdb..d21c24e 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -189,14 +189,14 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
/* use scratch buffer only when rx or tx data is unspecified */
if (xfer->rx_buf)
- *rx_dma = xfer->rx_dma + xfer->len - len;
+ *rx_dma = xfer->rx_dma + xfer->len - *plen;
else {
*rx_dma = as->buffer_dma;
if (len > BUFFER_SIZE)
len = BUFFER_SIZE;
}
if (xfer->tx_buf)
- *tx_dma = xfer->tx_dma + xfer->len - len;
+ *tx_dma = xfer->tx_dma + xfer->len - *plen;
else {
*tx_dma = as->buffer_dma;
if (len > BUFFER_SIZE)
@@ -788,7 +788,7 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
spin_lock_init(&as->lock);
INIT_LIST_HEAD(&as->queue);
as->pdev = pdev;
- as->regs = ioremap(regs->start, (regs->end - regs->start) + 1);
+ as->regs = ioremap(regs->start, resource_size(regs));
if (!as->regs)
goto out_free_buffer;
as->irq = irq;
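The ioremap() length change above is purely cosmetic: resource ranges have
inclusive end addresses, and resource_size() encapsulates the same
end - start + 1 arithmetic the open-coded expression used. Its definition
(from <linux/ioport.h>) is just:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}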
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
new file mode 100644
index 0000000..31620fa
--- /dev/null
+++ b/drivers/spi/dw_spi.c
@@ -0,0 +1,944 @@
+/*
+ * dw_spi.c - DesignWare SPI core controller driver (refer to pxa2xx_spi.c)
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+
+#include <linux/spi/dw_spi.h>
+#include <linux/spi/spi.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+
+#define START_STATE ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE ((void *)2)
+#define ERROR_STATE ((void *)-1)
+
+#define QUEUE_RUNNING 0
+#define QUEUE_STOPPED 1
+
+#define MRST_SPI_DEASSERT 0
+#define MRST_SPI_ASSERT 1
+
+/* Slave spi_dev related */
+struct chip_data {
+ u16 cr0;
+ u8 cs; /* chip select pin */
+ u8 n_bytes; /* current is a 1/2/4 byte op */
+ u8 tmode; /* TR/TO/RO/EEPROM */
+ u8 type; /* SPI/SSP/MicroWire */
+
+ u8 poll_mode; /* 1 means use poll mode */
+
+ u32 dma_width;
+ u32 rx_threshold;
+ u32 tx_threshold;
+ u8 enable_dma;
+ u8 bits_per_word;
+ u16 clk_div; /* baud rate divider */
+ u32 speed_hz; /* baud rate */
+ int (*write)(struct dw_spi *dws);
+ int (*read)(struct dw_spi *dws);
+ void (*cs_control)(u32 command);
+};
+
+#ifdef CONFIG_DEBUG_FS
+static int spi_show_regs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define SPI_REGS_BUFSIZE 1024
+static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_spi *dws;
+ char *buf;
+ u32 len = 0;
+ ssize_t ret;
+
+ dws = file->private_data;
+
+ buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "MRST SPI0 registers:\n");
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "=================================\n");
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "SER: \t\t0x%08x\n", dw_readl(dws, ser));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "SR: \t\t0x%08x\n", dw_readl(dws, sr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "IMR: \t\t0x%08x\n", dw_readl(dws, imr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "ISR: \t\t0x%08x\n", dw_readl(dws, isr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "=================================\n");
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations mrst_spi_regs_ops = {
+ .owner = THIS_MODULE,
+ .open = spi_show_regs_open,
+ .read = spi_show_regs,
+};
+
+static int mrst_spi_debugfs_init(struct dw_spi *dws)
+{
+ dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
+ if (!dws->debugfs)
+ return -ENOMEM;
+
+ debugfs_create_file("registers", S_IFREG | S_IRUGO,
+ dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
+ return 0;
+}
+
+static void mrst_spi_debugfs_remove(struct dw_spi *dws)
+{
+ if (dws->debugfs)
+ debugfs_remove_recursive(dws->debugfs);
+}
+
+#else
+static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
+{
+	return 0;
+}
+
+static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void wait_till_not_busy(struct dw_spi *dws)
+{
+ unsigned long end = jiffies + usecs_to_jiffies(1000);
+
+ while (time_before(jiffies, end)) {
+ if (!(dw_readw(dws, sr) & SR_BUSY))
+ return;
+ }
+ dev_err(&dws->master->dev,
+		"DW SPI: Status keeps busy for 1000us after a read/write!\n");
+}
+
+static void flush(struct dw_spi *dws)
+{
+ while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
+ dw_readw(dws, dr);
+
+ wait_till_not_busy(dws);
+}
+
+static void null_cs_control(u32 command)
+{
+}
+
+static int null_writer(struct dw_spi *dws)
+{
+ u8 n_bytes = dws->n_bytes;
+
+ if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
+ || (dws->tx == dws->tx_end))
+ return 0;
+ dw_writew(dws, dr, 0);
+ dws->tx += n_bytes;
+
+ wait_till_not_busy(dws);
+ return 1;
+}
+
+static int null_reader(struct dw_spi *dws)
+{
+ u8 n_bytes = dws->n_bytes;
+
+ while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
+ && (dws->rx < dws->rx_end)) {
+ dw_readw(dws, dr);
+ dws->rx += n_bytes;
+ }
+ wait_till_not_busy(dws);
+ return dws->rx == dws->rx_end;
+}
+
+static int u8_writer(struct dw_spi *dws)
+{
+ if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
+ || (dws->tx == dws->tx_end))
+ return 0;
+
+ dw_writew(dws, dr, *(u8 *)(dws->tx));
+ ++dws->tx;
+
+ wait_till_not_busy(dws);
+ return 1;
+}
+
+static int u8_reader(struct dw_spi *dws)
+{
+ while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
+ && (dws->rx < dws->rx_end)) {
+ *(u8 *)(dws->rx) = dw_readw(dws, dr);
+ ++dws->rx;
+ }
+
+ wait_till_not_busy(dws);
+ return dws->rx == dws->rx_end;
+}
+
+static int u16_writer(struct dw_spi *dws)
+{
+ if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
+ || (dws->tx == dws->tx_end))
+ return 0;
+
+ dw_writew(dws, dr, *(u16 *)(dws->tx));
+ dws->tx += 2;
+
+ wait_till_not_busy(dws);
+ return 1;
+}
+
+static int u16_reader(struct dw_spi *dws)
+{
+ u16 temp;
+
+ while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
+ && (dws->rx < dws->rx_end)) {
+ temp = dw_readw(dws, dr);
+ *(u16 *)(dws->rx) = temp;
+ dws->rx += 2;
+ }
+
+ wait_till_not_busy(dws);
+ return dws->rx == dws->rx_end;
+}
+
+static void *next_transfer(struct dw_spi *dws)
+{
+ struct spi_message *msg = dws->cur_msg;
+ struct spi_transfer *trans = dws->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ dws->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer,
+ transfer_list);
+ return RUNNING_STATE;
+ } else
+ return DONE_STATE;
+}
+
+/*
+ * Note: the protocol driver is expected to prepare DMA-capable memory
+ * first; this function only needs to translate the virtual address to
+ * a physical one.
+ */
+static int map_dma_buffers(struct dw_spi *dws)
+{
+ if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
+ || !dws->cur_chip->enable_dma)
+ return 0;
+
+ if (dws->cur_transfer->tx_dma)
+ dws->tx_dma = dws->cur_transfer->tx_dma;
+
+ if (dws->cur_transfer->rx_dma)
+ dws->rx_dma = dws->cur_transfer->rx_dma;
+
+ return 1;
+}
+
+/* Caller already set message->status; dma and pio irqs are blocked */
+static void giveback(struct dw_spi *dws)
+{
+ struct spi_transfer *last_transfer;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ spin_lock_irqsave(&dws->lock, flags);
+ msg = dws->cur_msg;
+ dws->cur_msg = NULL;
+ dws->cur_transfer = NULL;
+ dws->prev_chip = dws->cur_chip;
+ dws->cur_chip = NULL;
+ dws->dma_mapped = 0;
+ queue_work(dws->workqueue, &dws->pump_messages);
+ spin_unlock_irqrestore(&dws->lock, flags);
+
+ last_transfer = list_entry(msg->transfers.prev,
+ struct spi_transfer,
+ transfer_list);
+
+ if (!last_transfer->cs_change)
+ dws->cs_control(MRST_SPI_DEASSERT);
+
+ msg->state = NULL;
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static void int_error_stop(struct dw_spi *dws, const char *msg)
+{
+ /* Stop and reset hw */
+ flush(dws);
+ spi_enable_chip(dws, 0);
+
+ dev_err(&dws->master->dev, "%s\n", msg);
+ dws->cur_msg->state = ERROR_STATE;
+ tasklet_schedule(&dws->pump_transfers);
+}
+
+static void transfer_complete(struct dw_spi *dws)
+{
+	/* Update the total bytes transferred; the count reflects the actual bytes read */
+ dws->cur_msg->actual_length += dws->len;
+
+ /* Move to next transfer */
+ dws->cur_msg->state = next_transfer(dws);
+
+ /* Handle end of message */
+ if (dws->cur_msg->state == DONE_STATE) {
+ dws->cur_msg->status = 0;
+ giveback(dws);
+ } else
+ tasklet_schedule(&dws->pump_transfers);
+}
+
+static irqreturn_t interrupt_transfer(struct dw_spi *dws)
+{
+ u16 irq_status, irq_mask = 0x3f;
+
+ irq_status = dw_readw(dws, isr) & irq_mask;
+ /* Error handling */
+ if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
+ dw_readw(dws, txoicr);
+ dw_readw(dws, rxoicr);
+ dw_readw(dws, rxuicr);
+ int_error_stop(dws, "interrupt_transfer: fifo overrun");
+ return IRQ_HANDLED;
+ }
+
+ /* INT comes from tx */
+ if (dws->tx && (irq_status & SPI_INT_TXEI)) {
+ while (dws->tx < dws->tx_end)
+ dws->write(dws);
+
+ if (dws->tx == dws->tx_end) {
+ spi_mask_intr(dws, SPI_INT_TXEI);
+ transfer_complete(dws);
+ }
+ }
+
+ /* INT comes from rx */
+ if (dws->rx && (irq_status & SPI_INT_RXFI)) {
+ if (dws->read(dws))
+ transfer_complete(dws);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dw_spi_irq(int irq, void *dev_id)
+{
+ struct dw_spi *dws = dev_id;
+
+ if (!dws->cur_msg) {
+ spi_mask_intr(dws, SPI_INT_TXEI);
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ return dws->transfer_handler(dws);
+}
+
+/* Must be called inside pump_transfers() */
+static void poll_transfer(struct dw_spi *dws)
+{
+ if (dws->tx) {
+ while (dws->write(dws))
+ dws->read(dws);
+ }
+
+ dws->read(dws);
+ transfer_complete(dws);
+}
+
+static void dma_transfer(struct dw_spi *dws, int cs_change)
+{
+}
+
+static void pump_transfers(unsigned long data)
+{
+ struct dw_spi *dws = (struct dw_spi *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct spi_device *spi = NULL;
+ struct chip_data *chip = NULL;
+ u8 bits = 0;
+ u8 imask = 0;
+ u8 cs_change = 0;
+ u16 clk_div = 0;
+ u32 speed = 0;
+ u32 cr0 = 0;
+
+ /* Get current state information */
+ message = dws->cur_msg;
+ transfer = dws->cur_transfer;
+ chip = dws->cur_chip;
+ spi = message->spi;
+
+ if (message->state == ERROR_STATE) {
+ message->status = -EIO;
+ goto early_exit;
+ }
+
+ /* Handle end of message */
+ if (message->state == DONE_STATE) {
+ message->status = 0;
+ goto early_exit;
+ }
+
+	/* Delay if requested at end of transfer */
+ if (message->state == RUNNING_STATE) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer,
+ transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ }
+
+ dws->n_bytes = chip->n_bytes;
+ dws->dma_width = chip->dma_width;
+ dws->cs_control = chip->cs_control;
+
+ dws->rx_dma = transfer->rx_dma;
+ dws->tx_dma = transfer->tx_dma;
+ dws->tx = (void *)transfer->tx_buf;
+ dws->tx_end = dws->tx + transfer->len;
+ dws->rx = transfer->rx_buf;
+ dws->rx_end = dws->rx + transfer->len;
+ dws->write = dws->tx ? chip->write : null_writer;
+ dws->read = dws->rx ? chip->read : null_reader;
+ dws->cs_change = transfer->cs_change;
+ dws->len = dws->cur_transfer->len;
+ if (chip != dws->prev_chip)
+ cs_change = 1;
+
+ cr0 = chip->cr0;
+
+ /* Handle per transfer options for bpw and speed */
+ if (transfer->speed_hz) {
+ speed = chip->speed_hz;
+
+ if (transfer->speed_hz != speed) {
+ speed = transfer->speed_hz;
+ if (speed > dws->max_freq) {
+				printk(KERN_ERR "MRST SPI0: unsupported "
+					"freq: %dHz\n", speed);
+ message->status = -EIO;
+ goto early_exit;
+ }
+
+ /* clk_div doesn't support odd number */
+ clk_div = dws->max_freq / speed;
+ clk_div = (clk_div >> 1) << 1;
+
+ chip->speed_hz = speed;
+ chip->clk_div = clk_div;
+ }
+ }
+ if (transfer->bits_per_word) {
+ bits = transfer->bits_per_word;
+
+ switch (bits) {
+ case 8:
+ dws->n_bytes = 1;
+ dws->dma_width = 1;
+ dws->read = (dws->read != null_reader) ?
+ u8_reader : null_reader;
+ dws->write = (dws->write != null_writer) ?
+ u8_writer : null_writer;
+ break;
+ case 16:
+ dws->n_bytes = 2;
+ dws->dma_width = 2;
+ dws->read = (dws->read != null_reader) ?
+ u16_reader : null_reader;
+ dws->write = (dws->write != null_writer) ?
+ u16_writer : null_writer;
+ break;
+ default:
+			printk(KERN_ERR "MRST SPI0: unsupported bits: "
+				"%db\n", bits);
+ message->status = -EIO;
+ goto early_exit;
+ }
+
+ cr0 = (bits - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | (spi->mode << SPI_MODE_OFFSET)
+ | (chip->tmode << SPI_TMOD_OFFSET);
+ }
+ message->state = RUNNING_STATE;
+
+ /* Check if current transfer is a DMA transaction */
+ dws->dma_mapped = map_dma_buffers(dws);
+
+ if (!dws->dma_mapped && !chip->poll_mode) {
+ if (dws->rx)
+ imask |= SPI_INT_RXFI;
+ if (dws->tx)
+ imask |= SPI_INT_TXEI;
+ dws->transfer_handler = interrupt_transfer;
+ }
+
+ /*
+ * Reprogram registers only if
+ * 1. chip select changes
+ * 2. clk_div is changed
+ * 3. control value changes
+ */
+ if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) {
+ spi_enable_chip(dws, 0);
+
+ if (dw_readw(dws, ctrl0) != cr0)
+ dw_writew(dws, ctrl0, cr0);
+
+		/* Set the interrupt mask; for poll mode just disable all interrupts */
+ spi_mask_intr(dws, 0xff);
+ if (!chip->poll_mode)
+ spi_umask_intr(dws, imask);
+
+ spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
+ spi_chip_sel(dws, spi->chip_select);
+ spi_enable_chip(dws, 1);
+
+ if (cs_change)
+ dws->prev_chip = chip;
+ }
+
+ if (dws->dma_mapped)
+ dma_transfer(dws, cs_change);
+
+ if (chip->poll_mode)
+ poll_transfer(dws);
+
+ return;
+
+early_exit:
+ giveback(dws);
+ return;
+}
+
+static void pump_messages(struct work_struct *work)
+{
+ struct dw_spi *dws =
+ container_of(work, struct dw_spi, pump_messages);
+ unsigned long flags;
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&dws->lock, flags);
+ if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
+ dws->busy = 0;
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (dws->cur_msg) {
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
+ list_del_init(&dws->cur_msg->queue);
+
+ /* Initial message state*/
+ dws->cur_msg->state = START_STATE;
+ dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
+ struct spi_transfer,
+ transfer_list);
+ dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&dws->pump_transfers);
+
+ dws->busy = 1;
+ spin_unlock_irqrestore(&dws->lock, flags);
+}
+
+/* spi_device use this to queue in their spi_msg */
+static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dws->lock, flags);
+
+ if (dws->run == QUEUE_STOPPED) {
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+ list_add_tail(&msg->queue, &dws->queue);
+
+ if (dws->run == QUEUE_RUNNING && !dws->busy) {
+
+ if (dws->cur_transfer || dws->cur_msg)
+ queue_work(dws->workqueue,
+ &dws->pump_messages);
+ else {
+ /* If no other data transaction in air, just go */
+ spin_unlock_irqrestore(&dws->lock, flags);
+ pump_messages(&dws->pump_messages);
+ return 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return 0;
+}
+
+/* This may be called twice for each spi dev */
+static int dw_spi_setup(struct spi_device *spi)
+{
+ struct dw_spi_chip *chip_info = NULL;
+ struct chip_data *chip;
+
+ if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
+ return -EINVAL;
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->cs_control = null_cs_control;
+ chip->enable_dma = 0;
+ }
+
+ /*
+ * Protocol drivers may change the chip settings, so...
+ * if chip_info exists, use it
+ */
+ chip_info = spi->controller_data;
+
+ /* chip_info doesn't always exist */
+ if (chip_info) {
+ if (chip_info->cs_control)
+ chip->cs_control = chip_info->cs_control;
+
+ chip->poll_mode = chip_info->poll_mode;
+ chip->type = chip_info->type;
+
+ chip->rx_threshold = 0;
+ chip->tx_threshold = 0;
+
+ chip->enable_dma = chip_info->enable_dma;
+ }
+
+ if (spi->bits_per_word <= 8) {
+ chip->n_bytes = 1;
+ chip->dma_width = 1;
+ chip->read = u8_reader;
+ chip->write = u8_writer;
+ } else if (spi->bits_per_word <= 16) {
+ chip->n_bytes = 2;
+ chip->dma_width = 2;
+ chip->read = u16_reader;
+ chip->write = u16_writer;
+ } else {
+		/* Never take the >16-bit case for the MRST SPI controller */
+ dev_err(&spi->dev, "invalid wordsize\n");
+ return -EINVAL;
+ }
+ chip->bits_per_word = spi->bits_per_word;
+
+ chip->speed_hz = spi->max_speed_hz;
+ if (chip->speed_hz)
+ chip->clk_div = 25000000 / chip->speed_hz;
+ else
+ chip->clk_div = 8; /* default value */
+
+ chip->tmode = 0; /* Tx & Rx */
+ /* Default SPI mode is SCPOL = 0, SCPH = 0 */
+ chip->cr0 = (chip->bits_per_word - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | (spi->mode << SPI_MODE_OFFSET)
+ | (chip->tmode << SPI_TMOD_OFFSET);
+
+ spi_set_ctldata(spi, chip);
+ return 0;
+}
+
+static void dw_spi_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+ kfree(chip);
+}
+
+static int __init init_queue(struct dw_spi *dws)
+{
+ INIT_LIST_HEAD(&dws->queue);
+ spin_lock_init(&dws->lock);
+
+ dws->run = QUEUE_STOPPED;
+ dws->busy = 0;
+
+ tasklet_init(&dws->pump_transfers,
+ pump_transfers, (unsigned long)dws);
+
+ INIT_WORK(&dws->pump_messages, pump_messages);
+ dws->workqueue = create_singlethread_workqueue(
+ dev_name(dws->master->dev.parent));
+ if (dws->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int start_queue(struct dw_spi *dws)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dws->lock, flags);
+
+ if (dws->run == QUEUE_RUNNING || dws->busy) {
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return -EBUSY;
+ }
+
+ dws->run = QUEUE_RUNNING;
+ dws->cur_msg = NULL;
+ dws->cur_transfer = NULL;
+ dws->cur_chip = NULL;
+ dws->prev_chip = NULL;
+ spin_unlock_irqrestore(&dws->lock, flags);
+
+ queue_work(dws->workqueue, &dws->pump_messages);
+
+ return 0;
+}
+
+static int stop_queue(struct dw_spi *dws)
+{
+ unsigned long flags;
+ unsigned limit = 50;
+ int status = 0;
+
+ spin_lock_irqsave(&dws->lock, flags);
+ dws->run = QUEUE_STOPPED;
+ while (!list_empty(&dws->queue) && dws->busy && limit--) {
+ spin_unlock_irqrestore(&dws->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&dws->lock, flags);
+ }
+
+ if (!list_empty(&dws->queue) || dws->busy)
+ status = -EBUSY;
+ spin_unlock_irqrestore(&dws->lock, flags);
+
+ return status;
+}
+
+static int destroy_queue(struct dw_spi *dws)
+{
+ int status;
+
+ status = stop_queue(dws);
+ if (status != 0)
+ return status;
+ destroy_workqueue(dws->workqueue);
+ return 0;
+}
+
+/* Restart the controller, disable all interrupts, clean rx fifo */
+static void spi_hw_init(struct dw_spi *dws)
+{
+ spi_enable_chip(dws, 0);
+ spi_mask_intr(dws, 0xff);
+ spi_enable_chip(dws, 1);
+ flush(dws);
+}
+
+int __devinit dw_spi_add_host(struct dw_spi *dws)
+{
+ struct spi_master *master;
+ int ret;
+
+ BUG_ON(dws == NULL);
+
+ master = spi_alloc_master(dws->parent_dev, 0);
+ if (!master) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ dws->master = master;
+ dws->type = SSI_MOTO_SPI;
+ dws->prev_chip = NULL;
+ dws->dma_inited = 0;
+ dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+
+ ret = request_irq(dws->irq, dw_spi_irq, 0,
+ "dw_spi", dws);
+ if (ret < 0) {
+ dev_err(&master->dev, "can not get IRQ\n");
+ goto err_free_master;
+ }
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->bus_num = dws->bus_num;
+ master->num_chipselect = dws->num_cs;
+ master->cleanup = dw_spi_cleanup;
+ master->setup = dw_spi_setup;
+ master->transfer = dw_spi_transfer;
+
+ dws->dma_inited = 0;
+
+ /* Basic HW init */
+ spi_hw_init(dws);
+
+ /* Initial and start queue */
+ ret = init_queue(dws);
+ if (ret) {
+ dev_err(&master->dev, "problem initializing queue\n");
+		goto err_disable_hw;
+ }
+ ret = start_queue(dws);
+ if (ret) {
+ dev_err(&master->dev, "problem starting queue\n");
+		goto err_disable_hw;
+ }
+
+ spi_master_set_devdata(master, dws);
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&master->dev, "problem registering spi master\n");
+ goto err_queue_alloc;
+ }
+
+ mrst_spi_debugfs_init(dws);
+ return 0;
+
+err_queue_alloc:
+ destroy_queue(dws);
+err_disable_hw:
+ spi_enable_chip(dws, 0);
+ free_irq(dws->irq, dws);
+err_free_master:
+ spi_master_put(master);
+exit:
+ return ret;
+}
+EXPORT_SYMBOL(dw_spi_add_host);
+
+void __devexit dw_spi_remove_host(struct dw_spi *dws)
+{
+ int status = 0;
+
+ if (!dws)
+ return;
+ mrst_spi_debugfs_remove(dws);
+
+ /* Remove the queue */
+ status = destroy_queue(dws);
+ if (status != 0)
+ dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
+ "complete, message memory not freed\n");
+
+ spi_enable_chip(dws, 0);
+ /* Disable clk */
+ spi_set_clk(dws, 0);
+ free_irq(dws->irq, dws);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(dws->master);
+}
+
+int dw_spi_suspend_host(struct dw_spi *dws)
+{
+ int ret = 0;
+
+ ret = stop_queue(dws);
+ if (ret)
+ return ret;
+ spi_enable_chip(dws, 0);
+ spi_set_clk(dws, 0);
+ return ret;
+}
+EXPORT_SYMBOL(dw_spi_suspend_host);
+
+int dw_spi_resume_host(struct dw_spi *dws)
+{
+ int ret;
+
+ spi_hw_init(dws);
+ ret = start_queue(dws);
+ if (ret)
+ dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL(dw_spi_resume_host);
+
+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
+MODULE_LICENSE("GPL v2");
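dw_spi.c is deliberately bus-agnostic: glue code fills in a struct dw_spi with
the mapped registers, IRQ and limits, then calls dw_spi_add_host(), which is
exactly what the PCI glue below does. A minimal sketch of a hypothetical
platform-bus glue (error handling trimmed; the probe function and resource
layout are illustrative, dw_spi_add_host() is the real entry point):

	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>
	#include <linux/spi/dw_spi.h>

	static int __devinit dw_spi_plat_probe(struct platform_device *pdev)
	{
		struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		struct dw_spi *dws;

		dws = kzalloc(sizeof(*dws), GFP_KERNEL);
		if (!mem || !dws)
			return -ENOMEM;

		dws->regs = ioremap(mem->start, resource_size(mem));
		dws->paddr = mem->start;
		dws->irq = platform_get_irq(pdev, 0);
		dws->parent_dev = &pdev->dev;
		dws->bus_num = pdev->id;
		dws->num_cs = 4;
		dws->max_freq = 25000000;

		return dw_spi_add_host(dws);
	}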
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
new file mode 100644
index 0000000..34ba691
--- /dev/null
+++ b/drivers/spi/dw_spi_pci.c
@@ -0,0 +1,169 @@
+/*
+ * dw_spi_pci.c - PCI interface driver for DW SPI Core
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/spi/dw_spi.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "dw_spi_pci"
+
+struct dw_spi_pci {
+ struct pci_dev *pdev;
+ struct dw_spi dws;
+};
+
+static int __devinit spi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct dw_spi_pci *dwpci;
+ struct dw_spi *dws;
+ int pci_bar = 0;
+ int ret;
+
+	printk(KERN_INFO "DW: found PCI SPI controller (ID: %04x:%04x)\n",
+ pdev->vendor, pdev->device);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
+ if (!dwpci) {
+ ret = -ENOMEM;
+ goto err_disable;
+ }
+
+ dwpci->pdev = pdev;
+ dws = &dwpci->dws;
+
+ /* Get basic io resource and map it */
+ dws->paddr = pci_resource_start(pdev, pci_bar);
+ dws->iolen = pci_resource_len(pdev, pci_bar);
+
+ ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+ if (ret)
+ goto err_kfree;
+
+ dws->regs = ioremap_nocache((unsigned long)dws->paddr,
+ pci_resource_len(pdev, pci_bar));
+ if (!dws->regs) {
+ ret = -ENOMEM;
+ goto err_release_reg;
+ }
+
+ dws->parent_dev = &pdev->dev;
+ dws->bus_num = 0;
+ dws->num_cs = 4;
+	dws->max_freq = 25000000;	/* for Moorestown */
+ dws->irq = pdev->irq;
+
+ ret = dw_spi_add_host(dws);
+ if (ret)
+ goto err_unmap;
+
+ /* PCI hook and SPI hook use the same drv data */
+ pci_set_drvdata(pdev, dwpci);
+ return 0;
+
+err_unmap:
+ iounmap(dws->regs);
+err_release_reg:
+ pci_release_region(pdev, pci_bar);
+err_kfree:
+ kfree(dwpci);
+err_disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void __devexit spi_pci_remove(struct pci_dev *pdev)
+{
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+ iounmap(dwpci->dws.regs);
+ pci_release_region(pdev, 0);
+ kfree(dwpci);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = dw_spi_suspend_host(&dwpci->dws);
+ if (ret)
+ return ret;
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return ret;
+}
+
+static int spi_resume(struct pci_dev *pdev)
+{
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ return dw_spi_resume_host(&dwpci->dws);
+}
+#else
+#define spi_suspend NULL
+#define spi_resume NULL
+#endif
+
+static const struct pci_device_id pci_ids[] __devinitdata = {
+ /* Intel Moorestown platform SPI controller 0 */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
+ {},
+};
+
+static struct pci_driver dw_spi_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pci_ids,
+ .probe = spi_pci_probe,
+ .remove = __devexit_p(spi_pci_remove),
+ .suspend = spi_suspend,
+ .resume = spi_resume,
+};
+
+static int __init mrst_spi_init(void)
+{
+ return pci_register_driver(&dw_spi_driver);
+}
+
+static void __exit mrst_spi_exit(void)
+{
+ pci_unregister_driver(&dw_spi_driver);
+}
+
+module_init(mrst_spi_init);
+module_exit(mrst_spi_exit);
+
+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
+MODULE_LICENSE("GPL v2");
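Once the PCI glue has registered the master, client devices reach it through
the normal SPI core API; nothing in the controller code above is visible to
protocol drivers. A minimal sketch of a client transfer (the function, device
and payload are hypothetical, spi_write() is real API):

	#include <linux/spi/spi.h>

	/* Write a two-byte command to a device hanging off the DW master. */
	static int demo_spi_send_cmd(struct spi_device *spi)
	{
		u8 cmd[2] = { 0x01, 0x80 };

		return spi_write(spi, cmd, sizeof(cmd));
	}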
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 73e24ef..1d41058 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1294,7 +1294,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
goto out_error_get_res;
}
- drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1));
+ drv_data->regs_base = ioremap(res->start, resource_size(res));
if (drv_data->regs_base == NULL) {
dev_err(dev, "Cannot map IO\n");
status = -ENXIO;
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index e9390d7..1fb2a6e 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -1013,7 +1013,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
init_completion(&mpc8xxx_spi->done);
- mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1);
+ mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
if (mpc8xxx_spi->base == NULL) {
ret = -ENOMEM;
goto err_ioremap;
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 2765915..c010733 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -1,7 +1,7 @@
/* linux/drivers/spi/spi_s3c24xx.c
*
* Copyright (c) 2006 Ben Dooks
- * Copyright (c) 2006 Simtec Electronics
+ * Copyright 2006-2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
@@ -28,6 +28,11 @@
#include <plat/regs-spi.h>
#include <mach/spi.h>
+#include <plat/fiq.h>
+#include <asm/fiq.h>
+
+#include "spi_s3c24xx_fiq.h"
+
/**
* s3c24xx_spi_devstate - per device data
* @hz: Last frequency calculated for @sppre field.
@@ -42,6 +47,13 @@ struct s3c24xx_spi_devstate {
u8 sppre;
};
+enum spi_fiq_mode {
+ FIQ_MODE_NONE = 0,
+ FIQ_MODE_TX = 1,
+ FIQ_MODE_RX = 2,
+ FIQ_MODE_TXRX = 3,
+};
+
struct s3c24xx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
@@ -52,6 +64,11 @@ struct s3c24xx_spi {
int len;
int count;
+ struct fiq_handler fiq_handler;
+ enum spi_fiq_mode fiq_mode;
+ unsigned char fiq_inuse;
+ unsigned char fiq_claimed;
+
void (*set_cs)(struct s3c2410_spi_info *spi,
int cs, int pol);
@@ -67,6 +84,7 @@ struct s3c24xx_spi {
struct s3c2410_spi_info *pdata;
};
+
#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
@@ -127,7 +145,7 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
}
if (spi->mode != cs->mode) {
- u8 spcon = SPCON_DEFAULT;
+ u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
if (spi->mode & SPI_CPHA)
spcon |= S3C2410_SPCON_CPHA_FMTB;
@@ -214,13 +232,196 @@ static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
return hw->tx ? hw->tx[count] : 0;
}
+#ifdef CONFIG_SPI_S3C24XX_FIQ
+/* Support for FIQ based pseudo-DMA to improve the transfer speed.
+ *
+ * This code uses the assembly helper in spi_s3c24xx_spi.S which is
+ * used by the FIQ core to move data between main memory and the peripheral
+ * block. Since this is code running on the processor, there is no problem
+ * with cache coherency of the buffers, so we can use any buffer we like.
+ */
+
+/**
+ * struct spi_fiq_code - FIQ code and header
+ * @length: The length of the code fragment, excluding this header.
+ * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at.
+ * @data: The code itself to install as a FIQ handler.
+ */
+struct spi_fiq_code {
+ u32 length;
+ u32 ack_offset;
+ u8 data[0];
+};
+
+extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
+extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
+extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
+
+/**
+ * ack_bit - turn IRQ into IRQ acknowledgement bit
+ * @irq: The interrupt number
+ *
+ * Returns the bit to write to the interrupt acknowledge register.
+ */
+static inline u32 ack_bit(unsigned int irq)
+{
+ return 1 << (irq - IRQ_EINT0);
+}
+
+/**
+ * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
+ * @hw: The hardware state.
+ *
+ * Claim the FIQ handler (only one can be active at any one time) and
+ * then setup the correct transfer code for this transfer.
+ *
+ * This call updates all the necessary state information if successful,
+ * so the caller does not need to do anything more than start the transfer
+ * as normal, since the IRQ will have been re-routed to the FIQ handler.
+*/
+void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
+{
+ struct pt_regs regs;
+ enum spi_fiq_mode mode;
+ struct spi_fiq_code *code;
+ int ret;
+
+ if (!hw->fiq_claimed) {
+ /* try and claim fiq if we haven't got it, and if not
+ * then return and simply use another transfer method */
+
+ ret = claim_fiq(&hw->fiq_handler);
+ if (ret)
+ return;
+ }
+
+ if (hw->tx && !hw->rx)
+ mode = FIQ_MODE_TX;
+ else if (hw->rx && !hw->tx)
+ mode = FIQ_MODE_RX;
+ else
+ mode = FIQ_MODE_TXRX;
+
+ regs.uregs[fiq_rspi] = (long)hw->regs;
+ regs.uregs[fiq_rrx] = (long)hw->rx;
+ regs.uregs[fiq_rtx] = (long)hw->tx + 1;
+ regs.uregs[fiq_rcount] = hw->len - 1;
+ regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ;
+
+ set_fiq_regs(&regs);
+
+ if (hw->fiq_mode != mode) {
+ u32 *ack_ptr;
+
+ hw->fiq_mode = mode;
+
+ switch (mode) {
+ case FIQ_MODE_TX:
+ code = &s3c24xx_spi_fiq_tx;
+ break;
+ case FIQ_MODE_RX:
+ code = &s3c24xx_spi_fiq_rx;
+ break;
+ case FIQ_MODE_TXRX:
+ code = &s3c24xx_spi_fiq_txrx;
+ break;
+ default:
+ code = NULL;
+ }
+
+ BUG_ON(!code);
+
+ ack_ptr = (u32 *)&code->data[code->ack_offset];
+ *ack_ptr = ack_bit(hw->irq);
+
+ set_fiq_handler(&code->data, code->length);
+ }
+
+ s3c24xx_set_fiq(hw->irq, true);
+
+ hw->fiq_mode = mode;
+ hw->fiq_inuse = 1;
+}
+
+/**
+ * s3c24xx_spi_fiqop - FIQ core code callback
+ * @pw: Data registered with the handler
+ * @release: Whether this is a release or a return.
+ *
+ * Called by the FIQ code when another module wants to use the FIQ, so
+ * return whether we are currently using this or not and then update our
+ * internal state.
+ */
+static int s3c24xx_spi_fiqop(void *pw, int release)
+{
+ struct s3c24xx_spi *hw = pw;
+ int ret = 0;
+
+ if (release) {
+ if (hw->fiq_inuse)
+ ret = -EBUSY;
+
+ /* note, we do not need to unroute the FIQ, as the FIQ
+ * vector code de-routes it to signal the end of transfer */
+
+ hw->fiq_mode = FIQ_MODE_NONE;
+ hw->fiq_claimed = 0;
+ } else {
+ hw->fiq_claimed = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * s3c24xx_spi_initfiq - setup the information for the FIQ core
+ * @hw: The hardware state.
+ *
+ * Setup the fiq_handler block to pass to the FIQ core.
+ */
+static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw)
+{
+ hw->fiq_handler.dev_id = hw;
+ hw->fiq_handler.name = dev_name(hw->dev);
+ hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop;
+}
+
+/**
+ * s3c24xx_spi_usefiq - return if we should be using FIQ.
+ * @hw: The hardware state.
+ *
+ * Return true if the platform data specifies that this channel is
+ * allowed to use the FIQ.
+ */
+static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw)
+{
+ return hw->pdata->use_fiq;
+}
+
+/**
+ * s3c24xx_spi_usingfiq - return if channel is using FIQ
+ * @spi: The hardware state.
+ *
+ * Return whether the channel is currently using the FIQ (separate from
+ * whether the FIQ is claimed).
+ */
+static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi)
+{
+ return spi->fiq_inuse;
+}
+#else
+
+static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { }
+static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { }
+static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; }
+static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; }
+
+#endif /* CONFIG_SPI_S3C24XX_FIQ */
+
static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
{
struct s3c24xx_spi *hw = to_hw(spi);
- dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
- t->tx_buf, t->rx_buf, t->len);
-
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->len = t->len;
@@ -228,11 +429,14 @@ static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
init_completion(&hw->done);
+ hw->fiq_inuse = 0;
+ if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
+ s3c24xx_spi_tryfiq(hw);
+
/* send the first byte */
writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
wait_for_completion(&hw->done);
-
return hw->count;
}
@@ -254,17 +458,27 @@ static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
goto irq_done;
}
- hw->count++;
+ if (!s3c24xx_spi_usingfiq(hw)) {
+ hw->count++;
- if (hw->rx)
- hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
+ if (hw->rx)
+ hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
- count++;
+ count++;
+
+ if (count < hw->len)
+ writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
+ else
+ complete(&hw->done);
+ } else {
+ hw->count = hw->len;
+ hw->fiq_inuse = 0;
+
+ if (hw->rx)
+ hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT);
- if (count < hw->len)
- writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
- else
complete(&hw->done);
+ }
irq_done:
return IRQ_HANDLED;
@@ -322,6 +536,10 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
init_completion(&hw->done);
+ /* initialise fiq handler */
+
+ s3c24xx_spi_initfiq(hw);
+
/* setup the master state. */
/* the spi->mode bits understood by this driver: */
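
Boards opt in to the new FIQ pseudo-DMA path purely through their SPI platform data; the driver handles claiming the FIQ, installing the right handler and falling back to the plain IRQ path for transfers shorter than three bytes. A minimal sketch, assuming the usual s3c2410_spi_info layout and hypothetical board-specific names:

    static struct s3c2410_spi_info my_board_spi0_pdata = {
            .num_cs  = 1,
            .bus_num = 0,
            .use_fiq = 1,                   /* allow FIQ pseudo-DMA on this channel */
            .set_cs  = my_board_spi_set_cs, /* hypothetical chip-select hook */
    };
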
diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi_s3c24xx_fiq.S
new file mode 100644
index 0000000..3793cae
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.S
@@ -0,0 +1,116 @@
+/* linux/drivers/spi/spi_s3c24xx_fiq.S
+ *
+ * Copyright 2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX SPI - FIQ pseudo-DMA transfer code
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#include <mach/map.h>
+#include <mach/regs-irq.h>
+#include <plat/regs-spi.h>
+
+#include "spi_s3c24xx_fiq.h"
+
+ .text
+
+ @ entry to these routines is as follows, with the register names
+ @ defined in fiq.h so that they can be shared with the C files which
+ @ setup the calling registers.
+ @
+ @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
+ @ fiq_rtmp Temporary register to hold tx/rx data
+ @ fiq_rspi The base of the SPI register block
+ @ fiq_rtx The tx buffer pointer
+ @ fiq_rrx The rx buffer pointer
+ @ fiq_rcount The number of bytes to move
+
+ @ each entry starts with a word entry of how long it is
+ @ and an offset to the irq acknowledgment word
+
+ENTRY(s3c24xx_spi_fiq_rx)
+s3c24xx_spi_fix_rx:
+ .word fiq_rx_end - fiq_rx_start
+ .word fiq_rx_irq_ack - fiq_rx_start
+fiq_rx_start:
+ ldr fiq_rtmp, fiq_rx_irq_ack
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
+
+ ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
+ strb fiq_rtmp, [ fiq_rrx ], #1
+
+ mov fiq_rtmp, #0xff
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+ subnes pc, lr, #4 @@ return, still have work to do
+
+ @@ set IRQ controller so that next op will trigger IRQ
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+ subs pc, lr, #4
+
+fiq_rx_irq_ack:
+ .word 0
+fiq_rx_end:
+
+ENTRY(s3c24xx_spi_fiq_txrx)
+s3c24xx_spi_fix_txrx:
+ .word fiq_txrx_end - fiq_txrx_start
+ .word fiq_txrx_irq_ack - fiq_txrx_start
+fiq_txrx_start:
+
+ ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
+ strb fiq_rtmp, [ fiq_rrx ], #1
+
+ ldr fiq_rtmp, fiq_txrx_irq_ack
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
+
+ ldrb fiq_rtmp, [ fiq_rtx ], #1
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+ subnes pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+ subs pc, lr, #4
+
+fiq_txrx_irq_ack:
+ .word 0
+
+fiq_txrx_end:
+
+ENTRY(s3c24xx_spi_fiq_tx)
+s3c24xx_spi_fix_tx:
+ .word fiq_tx_end - fiq_tx_start
+ .word fiq_tx_irq_ack - fiq_tx_start
+fiq_tx_start:
+ ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
+
+ ldr fiq_rtmp, fiq_tx_irq_ack
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
+
+ ldrb fiq_rtmp, [ fiq_rtx ], #1
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+ subnes pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+ subs pc, lr, #4
+
+fiq_tx_irq_ack:
+ .word 0
+
+fiq_tx_end:
+
+ .end
diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi_s3c24xx_fiq.h
new file mode 100644
index 0000000..a5950bb
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.h
@@ -0,0 +1,26 @@
+/* linux/drivers/spi/spi_s3c24xx_fiq.h
+ *
+ * Copyright 2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX SPI - FIQ pseudo-DMA transfer support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* We have R8 through R13 to play with */
+
+#ifdef __ASSEMBLY__
+#define __REG_NR(x) r##x
+#else
+#define __REG_NR(x) (x)
+#endif
+
+#define fiq_rspi __REG_NR(8)
+#define fiq_rtmp __REG_NR(9)
+#define fiq_rrx __REG_NR(10)
+#define fiq_rtx __REG_NR(11)
+#define fiq_rcount __REG_NR(12)
+#define fiq_rirq __REG_NR(13)
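
The __REG_NR() macro is what lets the assembly and C sides agree on which banked FIQ registers carry which values: in the .S file fiq_rspi expands to r8, while in C it expands to the bare index 8, which is exactly what pt_regs.uregs[] wants. An illustrative fragment mirroring what s3c24xx_spi_tryfiq() does:

    #include <asm/fiq.h>
    #include "spi_s3c24xx_fiq.h"

    static void example_load_fiq_regs(void __iomem *spi_regs)
    {
            struct pt_regs regs = { };

            regs.uregs[fiq_rspi] = (long)spi_regs; /* becomes r8 in the FIQ handler */
            set_fiq_regs(&regs);
    }
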
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
new file mode 100644
index 0000000..88a456d
--- /dev/null
+++ b/drivers/spi/spi_s3c64xx.c
@@ -0,0 +1,1196 @@
+/* linux/drivers/spi/spi_s3c64xx.c
+ *
+ * Copyright (C) 2009 Samsung Electronics Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#include <mach/dma.h>
+#include <plat/spi.h>
+
+/* Registers and bit-fields */
+
+#define S3C64XX_SPI_CH_CFG 0x00
+#define S3C64XX_SPI_CLK_CFG 0x04
+#define S3C64XX_SPI_MODE_CFG 0x08
+#define S3C64XX_SPI_SLAVE_SEL 0x0C
+#define S3C64XX_SPI_INT_EN 0x10
+#define S3C64XX_SPI_STATUS 0x14
+#define S3C64XX_SPI_TX_DATA 0x18
+#define S3C64XX_SPI_RX_DATA 0x1C
+#define S3C64XX_SPI_PACKET_CNT 0x20
+#define S3C64XX_SPI_PENDING_CLR 0x24
+#define S3C64XX_SPI_SWAP_CFG 0x28
+#define S3C64XX_SPI_FB_CLK 0x2C
+
+#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
+#define S3C64XX_SPI_CH_SW_RST (1<<5)
+#define S3C64XX_SPI_CH_SLAVE (1<<4)
+#define S3C64XX_SPI_CPOL_L (1<<3)
+#define S3C64XX_SPI_CPHA_B (1<<2)
+#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
+#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
+
+#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
+#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
+#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
+#define S3C64XX_SPI_PSR_MASK 0xff
+
+#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
+#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
+#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
+#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
+#define S3C64XX_SPI_MODE_4BURST (1<<0)
+
+#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
+#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
+
+#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
+
+#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
+ (c)->regs + S3C64XX_SPI_SLAVE_SEL)
+
+#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
+#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
+#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
+#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
+#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
+#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
+#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
+
+#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
+#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
+#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
+#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
+#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
+#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
+
+#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
+
+#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
+#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
+#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
+#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
+#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
+
+#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
+#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
+#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
+#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
+#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
+#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
+#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
+#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
+
+#define S3C64XX_SPI_FBCLK_MSK (3<<0)
+
+#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
+ (((i)->fifo_lvl_mask + 1))) \
+ ? 1 : 0)
+
+#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
+ (((i)->fifo_lvl_mask + 1) << 1)) \
+ ? 1 : 0)
+#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
+#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
+
+#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
+#define S3C64XX_SPI_TRAILCNT_OFF 19
+
+#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+#define SUSPND (1<<0)
+#define SPIBUSY (1<<1)
+#define RXBUSY (1<<2)
+#define TXBUSY (1<<3)
+
+/**
+ * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
+ * @clk: Pointer to the spi clock.
+ * @master: Pointer to the SPI Protocol master.
+ * @workqueue: Work queue for the SPI xfer requests.
+ * @cntrlr_info: Platform specific data for the controller this driver manages.
+ * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
+ * @work: Work
+ * @queue: To log SPI xfer requests.
+ * @lock: Controller specific lock.
+ * @state: Set of FLAGS to indicate status.
+ * @rx_dmach: Controller's DMA channel for Rx.
+ * @tx_dmach: Controller's DMA channel for Tx.
+ * @sfr_start: BUS address of SPI controller regs.
+ * @regs: Pointer to ioremap'ed controller registers.
+ * @xfer_completion: To indicate completion of xfer task.
+ * @cur_mode: Stores the active configuration of the controller.
+ * @cur_bpw: Stores the active bits per word settings.
+ * @cur_speed: Stores the active xfer clock speed.
+ */
+struct s3c64xx_spi_driver_data {
+ void __iomem *regs;
+ struct clk *clk;
+ struct platform_device *pdev;
+ struct spi_master *master;
+ struct workqueue_struct *workqueue;
+ struct s3c64xx_spi_cntrlr_info *cntrlr_info;
+ struct spi_device *tgl_spi;
+ struct work_struct work;
+ struct list_head queue;
+ spinlock_t lock;
+ enum dma_ch rx_dmach;
+ enum dma_ch tx_dmach;
+ unsigned long sfr_start;
+ struct completion xfer_completion;
+ unsigned state;
+ unsigned cur_mode, cur_bpw;
+ unsigned cur_speed;
+};
+
+static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
+ .name = "samsung-spi-dma",
+};
+
+static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ unsigned long loops;
+ u32 val;
+
+ writel(0, regs + S3C64XX_SPI_PACKET_CNT);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val |= S3C64XX_SPI_CH_SW_RST;
+ val &= ~S3C64XX_SPI_CH_HS_EN;
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ /* Flush TxFIFO*/
+ loops = msecs_to_loops(1);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ } while (TX_FIFO_LVL(val, sci) && loops--);
+
+ /* Flush RxFIFO*/
+ loops = msecs_to_loops(1);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ if (RX_FIFO_LVL(val, sci))
+ readl(regs + S3C64XX_SPI_RX_DATA);
+ else
+ break;
+ } while (loops--);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~S3C64XX_SPI_CH_SW_RST;
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+}
+
+static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_device *spi,
+ struct spi_transfer *xfer, int dma_mode)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ u32 modecfg, chcfg;
+
+ modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
+ modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+
+ chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
+ chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
+
+ if (dma_mode) {
+ chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
+ } else {
+ /* Always shift data into the FIFO, even if the xfer is Tx only;
+ * this helps in setting the PCKT_CNT value so that exactly the
+ * required number of clocks is generated.
+ */
+ chcfg |= S3C64XX_SPI_CH_RXCH_ON;
+ writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ | S3C64XX_SPI_PACKET_CNT_EN,
+ regs + S3C64XX_SPI_PACKET_CNT);
+ }
+
+ if (xfer->tx_buf != NULL) {
+ sdd->state |= TXBUSY;
+ chcfg |= S3C64XX_SPI_CH_TXCH_ON;
+ if (dma_mode) {
+ modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+ s3c2410_dma_config(sdd->tx_dmach, 1);
+ s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
+ xfer->tx_dma, xfer->len);
+ s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
+ } else {
+ unsigned char *buf = (unsigned char *) xfer->tx_buf;
+ int i = 0;
+ while (i < xfer->len)
+ writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
+ }
+ }
+
+ if (xfer->rx_buf != NULL) {
+ sdd->state |= RXBUSY;
+
+ if (sci->high_speed && sdd->cur_speed >= 30000000UL
+ && !(sdd->cur_mode & SPI_CPHA))
+ chcfg |= S3C64XX_SPI_CH_HS_EN;
+
+ if (dma_mode) {
+ modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
+ chcfg |= S3C64XX_SPI_CH_RXCH_ON;
+ writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ | S3C64XX_SPI_PACKET_CNT_EN,
+ regs + S3C64XX_SPI_PACKET_CNT);
+ s3c2410_dma_config(sdd->rx_dmach, 1);
+ s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
+ xfer->rx_dma, xfer->len);
+ s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
+ }
+ }
+
+ writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
+ writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
+}
+
+static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs;
+
+ if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
+ if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
+ /* Deselect the last toggled device */
+ cs = sdd->tgl_spi->controller_data;
+ cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
+ }
+ sdd->tgl_spi = NULL;
+ }
+
+ cs = spi->controller_data;
+ cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0);
+}
+
+static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer, int dma_mode)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ int ms;
+
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 5; /* some tolerance */
+
+ if (dma_mode) {
+ val = msecs_to_jiffies(ms) + 10;
+ val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+ } else {
+ val = msecs_to_loops(ms);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
+ }
+
+ if (!val)
+ return -EIO;
+
+ if (dma_mode) {
+ u32 status;
+
+ /*
+ * DmaTx returns after simply writing data into the FIFO,
+ * without waiting for the actual transmission on the bus to finish.
+ * DmaRx returns only after the DMA has read the data out of the FIFO,
+ * which requires the bus transmission to finish, so no extra wait is
+ * needed when the Xfer involves Rx (with or without Tx).
+ */
+ if (xfer->rx_buf == NULL) {
+ val = msecs_to_loops(10);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ while ((TX_FIFO_LVL(status, sci)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sci))
+ && --val) {
+ cpu_relax();
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ }
+
+ if (!val)
+ return -EIO;
+ }
+ } else {
+ unsigned char *buf;
+ int i;
+
+ /* If it was only Tx */
+ if (xfer->rx_buf == NULL) {
+ sdd->state &= ~TXBUSY;
+ return 0;
+ }
+
+ i = 0;
+ buf = xfer->rx_buf;
+ while (i < xfer->len)
+ buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);
+
+ sdd->state &= ~RXBUSY;
+ }
+
+ return 0;
+}
+
+static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+
+ if (sdd->tgl_spi == spi)
+ sdd->tgl_spi = NULL;
+
+ cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
+}
+
+static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ u32 val;
+
+ /* Disable Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val &= ~S3C64XX_SPI_ENCLK_ENABLE;
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+
+ /* Set Polarity and Phase */
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_SLAVE |
+ S3C64XX_SPI_CPOL_L |
+ S3C64XX_SPI_CPHA_B);
+
+ if (sdd->cur_mode & SPI_CPOL)
+ val |= S3C64XX_SPI_CPOL_L;
+
+ if (sdd->cur_mode & SPI_CPHA)
+ val |= S3C64XX_SPI_CPHA_B;
+
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ /* Set Channel & DMA Mode */
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
+ | S3C64XX_SPI_MODE_CH_TSZ_MASK);
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
+ break;
+ case 16:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
+ break;
+ default:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
+ break;
+ }
+ val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
+
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ /* Configure Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val &= ~S3C64XX_SPI_PSR_MASK;
+ val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1)
+ & S3C64XX_SPI_PSR_MASK);
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+
+ /* Enable Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val |= S3C64XX_SPI_ENCLK_ENABLE;
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+}
+
+void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
+{
+ struct s3c64xx_spi_driver_data *sdd = buf_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ if (res == S3C2410_RES_OK)
+ sdd->state &= ~RXBUSY;
+ else
+ dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
+
+ /* If the other direction is done */
+ if (!(sdd->state & TXBUSY))
+ complete(&sdd->xfer_completion);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
+{
+ struct s3c64xx_spi_driver_data *sdd = buf_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ if (res == S3C2410_RES_OK)
+ sdd->state &= ~TXBUSY;
+ else
+ dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
+
+ /* If the other direction is done */
+ if (!(sdd->state & RXBUSY))
+ complete(&sdd->xfer_completion);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
+
+static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_message *msg)
+{
+ struct device *dev = &sdd->pdev->dev;
+ struct spi_transfer *xfer;
+
+ if (msg->is_dma_mapped)
+ return 0;
+
+ /* First mark all xfer unmapped */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->rx_dma = XFER_DMAADDR_INVALID;
+ xfer->tx_dma = XFER_DMAADDR_INVALID;
+ }
+
+ /* Map until end or first fail */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+
+ if (xfer->tx_buf != NULL) {
+ xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, xfer->tx_dma)) {
+ dev_err(dev, "dma_map_single Tx failed\n");
+ xfer->tx_dma = XFER_DMAADDR_INVALID;
+ return -ENOMEM;
+ }
+ }
+
+ if (xfer->rx_buf != NULL) {
+ xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
+ xfer->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, xfer->rx_dma)) {
+ dev_err(dev, "dma_map_single Rx failed\n");
+ dma_unmap_single(dev, xfer->tx_dma,
+ xfer->len, DMA_TO_DEVICE);
+ xfer->tx_dma = XFER_DMAADDR_INVALID;
+ xfer->rx_dma = XFER_DMAADDR_INVALID;
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_message *msg)
+{
+ struct device *dev = &sdd->pdev->dev;
+ struct spi_transfer *xfer;
+
+ if (msg->is_dma_mapped)
+ return;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+
+ if (xfer->rx_buf != NULL
+ && xfer->rx_dma != XFER_DMAADDR_INVALID)
+ dma_unmap_single(dev, xfer->rx_dma,
+ xfer->len, DMA_FROM_DEVICE);
+
+ if (xfer->tx_buf != NULL
+ && xfer->tx_dma != XFER_DMAADDR_INVALID)
+ dma_unmap_single(dev, xfer->tx_dma,
+ xfer->len, DMA_TO_DEVICE);
+ }
+}
+
+static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_message *msg)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct spi_device *spi = msg->spi;
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+ struct spi_transfer *xfer;
+ int status = 0, cs_toggle = 0;
+ u32 speed;
+ u8 bpw;
+
+ /* If the Master's (controller) state differs from that needed by the Slave */
+ if (sdd->cur_speed != spi->max_speed_hz
+ || sdd->cur_mode != spi->mode
+ || sdd->cur_bpw != spi->bits_per_word) {
+ sdd->cur_bpw = spi->bits_per_word;
+ sdd->cur_speed = spi->max_speed_hz;
+ sdd->cur_mode = spi->mode;
+ s3c64xx_spi_config(sdd);
+ }
+
+ /* Map all the transfers if needed */
+ if (s3c64xx_spi_map_mssg(sdd, msg)) {
+ dev_err(&spi->dev,
+ "Xfer: Unable to map message buffers!\n");
+ status = -ENOMEM;
+ goto out;
+ }
+
+ /* Configure feedback delay */
+ writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+
+ unsigned long flags;
+ int use_dma;
+
+ INIT_COMPLETION(sdd->xfer_completion);
+
+ /* Only BPW and Speed may change across transfers */
+ bpw = xfer->bits_per_word ? : spi->bits_per_word;
+ speed = xfer->speed_hz ? : spi->max_speed_hz;
+
+ if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
+ sdd->cur_bpw = bpw;
+ sdd->cur_speed = speed;
+ s3c64xx_spi_config(sdd);
+ }
+
+ /* Polling method for xfers not bigger than FIFO capacity */
+ if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
+ use_dma = 0;
+ else
+ use_dma = 1;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ /* Pending only which is to be done */
+ sdd->state &= ~RXBUSY;
+ sdd->state &= ~TXBUSY;
+
+ enable_datapath(sdd, spi, xfer, use_dma);
+
+ /* Slave Select */
+ enable_cs(sdd, spi);
+
+ /* Start the signals */
+ S3C64XX_SPI_ACT(sdd);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ status = wait_for_xfer(sdd, xfer, use_dma);
+
+ /* Quiesce the signals */
+ S3C64XX_SPI_DEACT(sdd);
+
+ if (status) {
+ dev_err(&spi->dev, "I/O Error: \
+ rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+ xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
+ (sdd->state & RXBUSY) ? 'f' : 'p',
+ (sdd->state & TXBUSY) ? 'f' : 'p',
+ xfer->len);
+
+ if (use_dma) {
+ if (xfer->tx_buf != NULL
+ && (sdd->state & TXBUSY))
+ s3c2410_dma_ctrl(sdd->tx_dmach,
+ S3C2410_DMAOP_FLUSH);
+ if (xfer->rx_buf != NULL
+ && (sdd->state & RXBUSY))
+ s3c2410_dma_ctrl(sdd->rx_dmach,
+ S3C2410_DMAOP_FLUSH);
+ }
+
+ goto out;
+ }
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ /* Hint that the next mssg is gonna be
+ for the same device */
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers))
+ cs_toggle = 1;
+ else
+ disable_cs(sdd, spi);
+ }
+
+ msg->actual_length += xfer->len;
+
+ flush_fifo(sdd);
+ }
+
+out:
+ if (!cs_toggle || status)
+ disable_cs(sdd, spi);
+ else
+ sdd->tgl_spi = spi;
+
+ s3c64xx_spi_unmap_mssg(sdd, msg);
+
+ msg->status = status;
+
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
+{
+ if (s3c2410_dma_request(sdd->rx_dmach,
+ &s3c64xx_spi_dma_client, NULL) < 0) {
+ dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
+ return 0;
+ }
+ s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
+ s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
+ sdd->sfr_start + S3C64XX_SPI_RX_DATA);
+
+ if (s3c2410_dma_request(sdd->tx_dmach,
+ &s3c64xx_spi_dma_client, NULL) < 0) {
+ dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
+ s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+ return 0;
+ }
+ s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
+ s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
+ sdd->sfr_start + S3C64XX_SPI_TX_DATA);
+
+ return 1;
+}
+
+static void s3c64xx_spi_work(struct work_struct *work)
+{
+ struct s3c64xx_spi_driver_data *sdd = container_of(work,
+ struct s3c64xx_spi_driver_data, work);
+ unsigned long flags;
+
+ /* Acquire DMA channels */
+ while (!acquire_dma(sdd))
+ msleep(10);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ while (!list_empty(&sdd->queue)
+ && !(sdd->state & SUSPND)) {
+
+ struct spi_message *msg;
+
+ msg = container_of(sdd->queue.next, struct spi_message, queue);
+
+ list_del_init(&msg->queue);
+
+ /* Set Xfer busy flag */
+ sdd->state |= SPIBUSY;
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ handle_msg(sdd, msg);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ sdd->state &= ~SPIBUSY;
+ }
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ /* Free DMA channels */
+ s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
+ s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+}
+
+static int s3c64xx_spi_transfer(struct spi_device *spi,
+ struct spi_message *msg)
+{
+ struct s3c64xx_spi_driver_data *sdd;
+ unsigned long flags;
+
+ sdd = spi_master_get_devdata(spi->master);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ if (sdd->state & SUSPND) {
+ spin_unlock_irqrestore(&sdd->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->status = -EINPROGRESS;
+ msg->actual_length = 0;
+
+ list_add_tail(&msg->queue, &sdd->queue);
+
+ queue_work(sdd->workqueue, &sdd->work);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Here we only check the validity of requested configuration
+ * and save the configuration in a local data-structure.
+ * The controller is actually configured only just before we
+ * get a message to transfer.
+ */
+static int s3c64xx_spi_setup(struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_cntrlr_info *sci;
+ struct spi_message *msg;
+ u32 psr, speed;
+ unsigned long flags;
+ int err = 0;
+
+ if (cs == NULL || cs->set_level == NULL) {
+ dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
+ return -ENODEV;
+ }
+
+ sdd = spi_master_get_devdata(spi->master);
+ sci = sdd->cntrlr_info;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ list_for_each_entry(msg, &sdd->queue, queue) {
+ /* Is some mssg already queued for this device? */
+ if (msg->spi == spi) {
+ dev_err(&spi->dev,
+ "setup: attempt while mssg in queue!\n");
+ spin_unlock_irqrestore(&sdd->lock, flags);
+ return -EBUSY;
+ }
+ }
+
+ if (sdd->state & SUSPND) {
+ spin_unlock_irqrestore(&sdd->lock, flags);
+ dev_err(&spi->dev,
+ "setup: SPI-%d not active!\n", spi->master->bus_num);
+ return -ESHUTDOWN;
+ }
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ if (spi->bits_per_word != 8
+ && spi->bits_per_word != 16
+ && spi->bits_per_word != 32) {
+ dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
+ spi->bits_per_word);
+ err = -EINVAL;
+ goto setup_exit;
+ }
+
+ /* Check if we can provide the requested rate */
+ speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */
+
+ if (spi->max_speed_hz > speed)
+ spi->max_speed_hz = speed;
+
+ psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1;
+ psr &= S3C64XX_SPI_PSR_MASK;
+ if (psr == S3C64XX_SPI_PSR_MASK)
+ psr--;
+
+ speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
+ if (spi->max_speed_hz < speed) {
+ if (psr+1 < S3C64XX_SPI_PSR_MASK) {
+ psr++;
+ } else {
+ err = -EINVAL;
+ goto setup_exit;
+ }
+ }
+
+ speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
+ if (spi->max_speed_hz >= speed)
+ spi->max_speed_hz = speed;
+ else
+ err = -EINVAL;
+
+setup_exit:
+
+ /* setup() returns with device de-selected */
+ disable_cs(sdd, spi);
+
+ return err;
+}
+
+static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ unsigned int val;
+
+ sdd->cur_speed = 0;
+
+ S3C64XX_SPI_DEACT(sdd);
+
+ /* Disable Interrupts - we use Polling if not DMA mode */
+ writel(0, regs + S3C64XX_SPI_INT_EN);
+
+ writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
+ regs + S3C64XX_SPI_CLK_CFG);
+ writel(0, regs + S3C64XX_SPI_MODE_CFG);
+ writel(0, regs + S3C64XX_SPI_PACKET_CNT);
+
+ /* Clear any irq pending bits */
+ writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
+ regs + S3C64XX_SPI_PENDING_CLR);
+
+ writel(0, regs + S3C64XX_SPI_SWAP_CFG);
+
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~S3C64XX_SPI_MODE_4BURST;
+ val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
+ val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ flush_fifo(sdd);
+}
+
+static int __init s3c64xx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *mem_res, *dmatx_res, *dmarx_res;
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_cntrlr_info *sci;
+ struct spi_master *master;
+ int ret;
+
+ if (pdev->id < 0) {
+ dev_err(&pdev->dev,
+ "Invalid platform device id-%d\n", pdev->id);
+ return -ENODEV;
+ }
+
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "platform_data missing!\n");
+ return -ENODEV;
+ }
+
+ /* Check for availability of necessary resource */
+
+ dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (dmatx_res == NULL) {
+ dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
+ return -ENXIO;
+ }
+
+ dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (dmarx_res == NULL) {
+ dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
+ return -ENXIO;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem_res == NULL) {
+ dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
+ return -ENXIO;
+ }
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct s3c64xx_spi_driver_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ sci = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, master);
+
+ sdd = spi_master_get_devdata(master);
+ sdd->master = master;
+ sdd->cntrlr_info = sci;
+ sdd->pdev = pdev;
+ sdd->sfr_start = mem_res->start;
+ sdd->tx_dmach = dmatx_res->start;
+ sdd->rx_dmach = dmarx_res->start;
+
+ sdd->cur_bpw = 8;
+
+ master->bus_num = pdev->id;
+ master->setup = s3c64xx_spi_setup;
+ master->transfer = s3c64xx_spi_transfer;
+ master->num_chipselect = sci->num_cs;
+ master->dma_alignment = 8;
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ if (request_mem_region(mem_res->start,
+ resource_size(mem_res), pdev->name) == NULL) {
+ dev_err(&pdev->dev, "Req mem region failed\n");
+ ret = -ENXIO;
+ goto err0;
+ }
+
+ sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
+ if (sdd->regs == NULL) {
+ dev_err(&pdev->dev, "Unable to remap IO\n");
+ ret = -ENXIO;
+ goto err1;
+ }
+
+ if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
+ dev_err(&pdev->dev, "Unable to config gpio\n");
+ ret = -EBUSY;
+ goto err2;
+ }
+
+ /* Setup clocks */
+ sdd->clk = clk_get(&pdev->dev, "spi");
+ if (IS_ERR(sdd->clk)) {
+ dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
+ ret = PTR_ERR(sdd->clk);
+ goto err3;
+ }
+
+ if (clk_enable(sdd->clk)) {
+ dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
+ ret = -EBUSY;
+ goto err4;
+ }
+
+ if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK)
+ sci->src_clk = sdd->clk;
+ else
+ sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
+ if (IS_ERR(sci->src_clk)) {
+ dev_err(&pdev->dev,
+ "Unable to acquire clock '%s'\n", sci->src_clk_name);
+ ret = PTR_ERR(sci->src_clk);
+ goto err5;
+ }
+
+ if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) {
+ dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
+ sci->src_clk_name);
+ ret = -EBUSY;
+ goto err6;
+ }
+
+ sdd->workqueue = create_singlethread_workqueue(
+ dev_name(master->dev.parent));
+ if (sdd->workqueue == NULL) {
+ dev_err(&pdev->dev, "Unable to create workqueue\n");
+ ret = -ENOMEM;
+ goto err7;
+ }
+
+ /* Setup Default Mode */
+ s3c64xx_spi_hwinit(sdd, pdev->id);
+
+ spin_lock_init(&sdd->lock);
+ init_completion(&sdd->xfer_completion);
+ INIT_WORK(&sdd->work, s3c64xx_spi_work);
+ INIT_LIST_HEAD(&sdd->queue);
+
+ if (spi_register_master(master)) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ ret = -EBUSY;
+ goto err8;
+ }
+
+ dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \
+ with %d Slaves attached\n",
+ pdev->id, master->num_chipselect);
+ dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\
+ \tDMA=[Rx-%d, Tx-%d]\n",
+ mem_res->start, mem_res->end,
+ sdd->rx_dmach, sdd->tx_dmach);
+
+ return 0;
+
+err8:
+ destroy_workqueue(sdd->workqueue);
+err7:
+ if (sci->src_clk != sdd->clk)
+ clk_disable(sci->src_clk);
+err6:
+ if (sci->src_clk != sdd->clk)
+ clk_put(sci->src_clk);
+err5:
+ clk_disable(sdd->clk);
+err4:
+ clk_put(sdd->clk);
+err3:
+err2:
+ iounmap((void *) sdd->regs);
+err1:
+ release_mem_region(mem_res->start, resource_size(mem_res));
+err0:
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int s3c64xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct resource *mem_res;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+ sdd->state |= SUSPND;
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ while (sdd->state & SPIBUSY)
+ msleep(10);
+
+ spi_unregister_master(master);
+
+ destroy_workqueue(sdd->workqueue);
+
+ if (sci->src_clk != sdd->clk)
+ clk_disable(sci->src_clk);
+
+ if (sci->src_clk != sdd->clk)
+ clk_put(sci->src_clk);
+
+ clk_disable(sdd->clk);
+ clk_put(sdd->clk);
+
+ iounmap((void *) sdd->regs);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem_res->start, resource_size(mem_res));
+
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct s3c64xx_spi_csinfo *cs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+ sdd->state |= SUSPND;
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ while (sdd->state & SPIBUSY)
+ msleep(10);
+
+ /* Disable the clock */
+ if (sci->src_clk != sdd->clk)
+ clk_disable(sci->src_clk);
+
+ clk_disable(sdd->clk);
+
+ sdd->cur_speed = 0; /* Output Clock is stopped */
+
+ return 0;
+}
+
+static int s3c64xx_spi_resume(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ unsigned long flags;
+
+ sci->cfg_gpio(pdev);
+
+ /* Enable the clock */
+ if (sci->src_clk != sdd->clk)
+ clk_enable(sci->src_clk);
+
+ clk_enable(sdd->clk);
+
+ s3c64xx_spi_hwinit(sdd, pdev->id);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+ sdd->state &= ~SUSPND;
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ return 0;
+}
+#else
+#define s3c64xx_spi_suspend NULL
+#define s3c64xx_spi_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver s3c64xx_spi_driver = {
+ .driver = {
+ .name = "s3c64xx-spi",
+ .owner = THIS_MODULE,
+ },
+ .remove = s3c64xx_spi_remove,
+ .suspend = s3c64xx_spi_suspend,
+ .resume = s3c64xx_spi_resume,
+};
+MODULE_ALIAS("platform:s3c64xx-spi");
+
+static int __init s3c64xx_spi_init(void)
+{
+ return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
+}
+module_init(s3c64xx_spi_init);
+
+static void __exit s3c64xx_spi_exit(void)
+{
+ platform_driver_unregister(&s3c64xx_spi_driver);
+}
+module_exit(s3c64xx_spi_exit);
+
+MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
+MODULE_LICENSE("GPL");
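
The clock arithmetic in s3c64xx_spi_setup() and s3c64xx_spi_config() follows the relation "SPI clock = source clock / 2 / (PSR + 1)"; a worked example with illustrative numbers shows why the setup path may bump PSR once after the first estimate:

    /* assume src_clk = 66 MHz and the slave requests max_speed_hz = 10 MHz */
    unsigned int src = 66000000, want = 10000000;
    unsigned int psr = src / 2 / want - 1;   /* 33 / 10 = 3, so psr = 2          */
    unsigned int got = src / 2 / (psr + 1);  /* 33 / 3  = 11 MHz, still too fast */
    /* 11 MHz > 10 MHz, so psr is bumped to 3: 33 / 4 = 8.25 MHz <= 10 MHz */
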
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index 51e5e1d..30973ec 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -173,15 +173,12 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
int edge;
/*
- * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG(!)
- * 0 0 10 10 1 0
- * 0 1 10 10 0 1
- * 1 0 11 11 0 1
- * 1 1 11 11 1 0
- *
- * (!) Note: REDG is inverted recommended data sheet setting
+ * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG
+ * 0 0 10 10 1 1
+ * 0 1 10 10 0 0
+ * 1 0 11 11 0 0
+ * 1 1 11 11 1 1
*/
-
sh_msiof_write(p, FCTR, 0);
sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24));
sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24));
@@ -193,7 +190,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
edge = cpol ? cpha : !cpha;
tmp |= edge << 27; /* TEDG */
- tmp |= !edge << 26; /* REDG */
+ tmp |= edge << 26; /* REDG */
tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */
sh_msiof_write(p, CTR, tmp);
}
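
The fix makes REDG track TEDG directly instead of its inverse; working the driver's edge expression through the corrected table confirms the values row by row:

    edge = cpol ? cpha : !cpha;
    /* CPOL=0 CPHA=0: edge = 1 -> TEDG = 1, REDG = 1 */
    /* CPOL=0 CPHA=1: edge = 0 -> TEDG = 0, REDG = 0 */
    /* CPOL=1 CPHA=0: edge = 0 -> TEDG = 0, REDG = 0 */
    /* CPOL=1 CPHA=1: edge = 1 -> TEDG = 1, REDG = 1 */
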
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
index 7d36720..a65c12f 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi_sh_sci.c
@@ -148,7 +148,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
ret = -ENOENT;
goto err1;
}
- sp->membase = ioremap(r->start, r->end - r->start + 1);
+ sp->membase = ioremap(r->start, resource_size(r));
if (!sp->membase) {
ret = -ENXIO;
goto err1;
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 19f7562..dfa024b 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -375,12 +375,10 @@ static int __init txx9spi_probe(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
goto exit_busy;
- if (!devm_request_mem_region(&dev->dev,
- res->start, res->end - res->start + 1,
+ if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
"spi_txx9"))
goto exit_busy;
- c->membase = devm_ioremap(&dev->dev,
- res->start, res->end - res->start + 1);
+ c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
if (!c->membase)
goto exit_busy;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 9c446e6..ea1bec3 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -53,7 +53,7 @@
#define SPIDEV_MAJOR 153 /* assigned */
#define N_SPI_MINORS 32 /* ... up to 256 */
-static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
+static DECLARE_BITMAP(minors, N_SPI_MINORS);
/* Bit masks for spi_device.mode management. Note that incorrect
@@ -558,7 +558,7 @@ static struct class *spidev_class;
/*-------------------------------------------------------------------------*/
-static int spidev_probe(struct spi_device *spi)
+static int __devinit spidev_probe(struct spi_device *spi)
{
struct spidev_data *spidev;
int status;
@@ -607,7 +607,7 @@ static int spidev_probe(struct spi_device *spi)
return status;
}
-static int spidev_remove(struct spi_device *spi)
+static int __devexit spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
@@ -629,7 +629,7 @@ static int spidev_remove(struct spi_device *spi)
return 0;
}
-static struct spi_driver spidev_spi = {
+static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.owner = THIS_MODULE,
@@ -661,14 +661,14 @@ static int __init spidev_init(void)
spidev_class = class_create(THIS_MODULE, "spidev");
if (IS_ERR(spidev_class)) {
- unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
return PTR_ERR(spidev_class);
}
- status = spi_register_driver(&spidev_spi);
+ status = spi_register_driver(&spidev_spi_driver);
if (status < 0) {
class_destroy(spidev_class);
- unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
return status;
}
@@ -676,9 +676,9 @@ module_init(spidev_init);
static void __exit spidev_exit(void)
{
- spi_unregister_driver(&spidev_spi);
+ spi_unregister_driver(&spidev_spi_driver);
class_destroy(spidev_class);
- unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);
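
DECLARE_BITMAP(minors, N_SPI_MINORS) is the type-safe spelling of the unsigned long array it replaces; from <linux/types.h>:

    #define DECLARE_BITMAP(name, bits) \
            unsigned long name[BITS_TO_LONGS(bits)]

The storage is identical, but the size is now derived from the number of bits rather than hand-divided by BITS_PER_LONG, which keeps the bitmap helpers (set_bit(), find_first_zero_bit(), ...) correct if N_SPI_MINORS ever changes.
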
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 5681ebe..03dfd27 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
#endif
break;
case SSB_BUSTYPE_SDIO:
-#ifdef CONFIG_SSB_SDIO
- sdev->irq = bus->host_sdio->dev.irq;
+#ifdef CONFIG_SSB_SDIOHOST
dev->parent = &bus->host_sdio->dev;
#endif
break;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 54e174d2..fc2e963 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -87,8 +87,6 @@ source "drivers/staging/frontier/Kconfig"
source "drivers/staging/dream/Kconfig"
-source "drivers/staging/dst/Kconfig"
-
source "drivers/staging/pohmelfs/Kconfig"
source "drivers/staging/b3dfg/Kconfig"
@@ -99,7 +97,7 @@ source "drivers/staging/p9auth/Kconfig"
source "drivers/staging/line6/Kconfig"
-source "drivers/gpu/drm/radeon/Kconfig"
+source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
@@ -143,5 +141,7 @@ source "drivers/staging/wavelan/Kconfig"
source "drivers/staging/netwave/Kconfig"
+source "drivers/staging/sm7xx/Kconfig"
+
endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 069864f..b5e67b8 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_INPUT_MIMIO) += mimio/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_DREAM) += dream/
-obj-$(CONFIG_DST) += dst/
obj-$(CONFIG_POHMELFS) += pohmelfs/
obj-$(CONFIG_B3DFG) += b3dfg/
obj-$(CONFIG_IDE_PHISON) += phison/
@@ -53,3 +52,4 @@ obj-$(CONFIG_ARLAN) += arlan/
obj-$(CONFIG_WAVELAN) += wavelan/
obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan/
obj-$(CONFIG_PCMCIA_NETWAVE) += netwave/
+obj-$(CONFIG_FB_SM7XX) += sm7xx/
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index f4c2657..43c57b7 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -194,9 +194,11 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
{
struct usb_interface *intf = to_usb_interface(dev);
struct asus_oled_dev *odev = usb_get_intfdata(intf);
- int temp = strict_strtoul(buf, 10, NULL);
+ unsigned long value;
+ if (strict_strtoul(buf, 10, &value))
+ return -EINVAL;
- enable_oled(odev, temp);
+ enable_oled(odev, value);
return count;
}
@@ -207,10 +209,12 @@ static ssize_t class_set_enabled(struct device *device,
{
struct asus_oled_dev *odev =
(struct asus_oled_dev *) dev_get_drvdata(device);
+ unsigned long value;
- int temp = strict_strtoul(buf, 10, NULL);
+ if (strict_strtoul(buf, 10, &value))
+ return -EINVAL;
- enable_oled(odev, temp);
+ enable_oled(odev, value);
return count;
}
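
strict_strtoul() returns 0 on success or a negative error code and delivers the parsed number through its third argument, so the old code was storing the status, not the value. The corrected pattern, as used in both hunks above:

    unsigned long value;

    if (strict_strtoul(buf, 10, &value))  /* non-zero means the string did not parse */
            return -EINVAL;
    enable_oled(odev, value);
    return count;
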
diff --git a/drivers/staging/batman-adv/Kconfig b/drivers/staging/batman-adv/Kconfig
index 7632f57..1d74dab 100644
--- a/drivers/staging/batman-adv/Kconfig
+++ b/drivers/staging/batman-adv/Kconfig
@@ -4,6 +4,7 @@
config BATMAN_ADV
tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
+ depends on PROC_FS && PACKET
default n
---help---
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index d724798..eb61750 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -363,8 +363,10 @@ void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len)
return;
forw_packet->packet_buff = kmalloc(packet_len, GFP_ATOMIC);
- if (!forw_packet->packet_buff)
+ if (!forw_packet->packet_buff) {
+ kfree(forw_packet);
return;
+ }
forw_packet->packet_len = packet_len;
memcpy(forw_packet->packet_buff, packet_buff, forw_packet->packet_len);
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index ccc5cdc..b559a9c 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -451,7 +451,7 @@
#define COMEDI_CB_EOS 1 /* end of scan */
#define COMEDI_CB_EOA 2 /* end of acquisition */
-#define COMEDI_CB_BLOCK 4 /* DEPRECATED: convenient block size */
+#define COMEDI_CB_BLOCK 4 /* data has arrived: wakes up read() / write() */
#define COMEDI_CB_EOBUF 8 /* DEPRECATED: end of buffer */
#define COMEDI_CB_ERROR 16 /* card error during acquisition */
#define COMEDI_CB_OVERFLOW 32 /* buffer overflow/underflow */
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 0d2c2eb23..bd39784 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -849,8 +849,11 @@ static int jr3_pci_attach(struct comedi_device *dev,
}
devpriv->pci_enabled = 1;
- devpriv->iobase =
- ioremap(pci_resource_start(card, 0), sizeof(struct jr3_t));
+ devpriv->iobase = ioremap(pci_resource_start(card, 0),
+ offsetof(struct jr3_t, channel[devpriv->n_channels]));
+ if (!devpriv->iobase)
+ return -ENOMEM;
+
result = alloc_subdevices(dev, devpriv->n_channels);
if (result < 0)
goto out;
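
Sizing the mapping with offsetof(struct jr3_t, channel[n_channels]) maps only the channel blocks that are actually present rather than the maximal layout, and the added check catches ioremap() failure. The idiom generalizes to any register block that ends in a per-channel array; a sketch with hypothetical types and names:

    struct example_regs {
            u32 global_ctrl;
            struct { u32 raw[64]; } channel[8];  /* hardware may populate fewer */
    };

    void __iomem *base;
    size_t len = offsetof(struct example_regs, channel[n_channels]);

    base = ioremap(phys_start, len);             /* phys_start, n_channels assumed */
    if (!base)
            return -ENOMEM;
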
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 06c0204..9a1b559 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v2.3"
+#define DRIVER_VERSION "v2.4"
#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
#define DRIVER_DESC "Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"
/*
@@ -81,6 +81,8 @@ sampling rate. If you sample two channels you get 4kHz and so on.
* 2.1: changed PWM API
* 2.2: added firmware kernel request to fix an udev problem
* 2.3: corrected a bug in bulk timeouts which were far too short
+ * 2.4: fixed a bug which caused the driver to hang when it ran out of data.
+ * Thanks to Jan-Matthias Braun and Ian for spotting the bug and fixing it.
*
*/
@@ -532,6 +534,7 @@ static void usbduxsub_ai_IsocIrq(struct urb *urb)
}
}
/* tell comedi that data is there */
+ s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
comedi_event(this_usbduxsub->comedidev, s);
}
diff --git a/drivers/staging/cx25821/cx25821-medusa-video.c b/drivers/staging/cx25821/cx25821-medusa-video.c
index e4df813..1eb079b 100644
--- a/drivers/staging/cx25821/cx25821-medusa-video.c
+++ b/drivers/staging/cx25821/cx25821-medusa-video.c
@@ -860,10 +860,8 @@ int medusa_video_init(struct cx25821_dev *dev)
ret_val = medusa_set_videostandard(dev);
- if (ret_val < 0) {
- mutex_unlock(&dev->lock);
+ if (ret_val < 0)
return -EINVAL;
- }
return 1;
}
diff --git a/drivers/staging/dst/Kconfig b/drivers/staging/dst/Kconfig
deleted file mode 100644
index 448d342..0000000
--- a/drivers/staging/dst/Kconfig
+++ /dev/null
@@ -1,67 +0,0 @@
-config DST
- tristate "Distributed storage"
- depends on NET && CRYPTO && SYSFS && BLK_DEV
- select CONNECTOR
- ---help---
- DST is a network block device storage, which can be used to organize
- exported storage on the remote nodes into the local block device.
-
- DST works on top of any network media and protocol; it is just a matter
- of configuration utility to understand the correct addresses. The most
- common example is TCP over IP, which allows to pass through firewalls and
- create remote backup storage in a different datacenter. DST requires
- single port to be enabled on the exporting node and outgoing connections
- on the local node.
-
- DST works with in-kernel client and server, which improves performance by
- eliminating unneded data copies and by not depending on the version
- of the external IO components. It requires userspace configuration utility
- though.
-
- DST uses transaction model, when each store has to be explicitly acked
- from the remote node to be considered as successfully written. There
- may be lots of in-flight transactions. When remote host does not ack
- the transaction it will be resent predefined number of times with specified
- timeouts between them. All those parameters are configurable. Transactions
- are marked as failed after all resends complete unsuccessfully; having
- long enough resend timeout and/or large number of resends allows not to
- return error to the higher (FS usually) layer in case of short network
- problems or remote node outages. In case of network RAID setup this means
- that storage will not degrade until transactions are marked as failed, and
- thus will not force checksum recalculation and data rebuild. In case of
- connection failure DST will try to reconnect to the remote node automatically.
- DST sends ping commands at idle time to detect if remote node is alive.
-
- Because of transactional model it is possible to use zero-copy sending
- without worry of data corruption (which in turn could be detected by the
- strong checksums though).
-
- DST may fully encrypt the data channel in case of untrusted channel and implement
- strong checksum of the transferred data. It is possible to configure algorithms
- and crypto keys; they should match on both sides of the network channel.
- Crypto processing does not introduce noticeble performance overhead, since DST
- uses configurable pool of threads to perform crypto processing.
-
- DST utilizes memory pool model of all its transaction allocations (it is the
- only additional allocation on the client) and server allocations (bio pools,
- while pages are allocated from the slab).
-
- At startup DST performs a simple negotiation with the export node to determine
- access permissions and size of the exported storage. It can be extended if
- new parameters should be autonegotiated.
-
- DST carries block IO flags in the protocol, which allows to transparently implement
- barriers and sync/flush operations. Those flags are used in the export node where
- IO against the local storage is performed, which means that sync write will be sync
- on the remote node too, which in turn improves data integrity and improved resistance
- to errors and data corruption during power outages or storage damages.
-
- Homepage: http://www.ioremap.net/projects/dst
- Userspace configuration utility and the latest releases: http://www.ioremap.net/archive/dst/
-
-config DST_DEBUG
- bool "DST debug"
- depends on DST
- ---help---
- This option will enable HEAVY debugging of the DST.
- Turn it on ONLY if you have to debug some really obscure problem.
diff --git a/drivers/staging/dst/Makefile b/drivers/staging/dst/Makefile
deleted file mode 100644
index 3a8b0cf..0000000
--- a/drivers/staging/dst/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_DST) += nst.o
-
-nst-y := dcore.o state.o export.o thread_pool.o crypto.o trans.o
diff --git a/drivers/staging/dst/crypto.c b/drivers/staging/dst/crypto.c
deleted file mode 100644
index 351295c..0000000
--- a/drivers/staging/dst/crypto.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/bio.h>
-#include <linux/crypto.h>
-#include <linux/dst.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-/*
- * Tricky bastard, but IV can be more complex with time...
- */
-static inline u64 dst_gen_iv(struct dst_trans *t)
-{
- return t->gen;
-}
-
-/*
- * Crypto machinery: hash/cipher support for the given crypto controls.
- */
-static struct crypto_hash *dst_init_hash(struct dst_crypto_ctl *ctl, u8 *key)
-{
- int err;
- struct crypto_hash *hash;
-
- hash = crypto_alloc_hash(ctl->hash_algo, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(hash)) {
- err = PTR_ERR(hash);
- dprintk("%s: failed to allocate hash '%s', err: %d.\n",
- __func__, ctl->hash_algo, err);
- goto err_out_exit;
- }
-
- ctl->crypto_attached_size = crypto_hash_digestsize(hash);
-
- if (!ctl->hash_keysize)
- return hash;
-
- err = crypto_hash_setkey(hash, key, ctl->hash_keysize);
- if (err) {
- dprintk("%s: failed to set key for hash '%s', err: %d.\n",
- __func__, ctl->hash_algo, err);
- goto err_out_free;
- }
-
- return hash;
-
-err_out_free:
- crypto_free_hash(hash);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-static struct crypto_ablkcipher *dst_init_cipher(struct dst_crypto_ctl *ctl,
- u8 *key)
-{
- int err = -EINVAL;
- struct crypto_ablkcipher *cipher;
-
- if (!ctl->cipher_keysize)
- goto err_out_exit;
-
- cipher = crypto_alloc_ablkcipher(ctl->cipher_algo, 0, 0);
- if (IS_ERR(cipher)) {
- err = PTR_ERR(cipher);
- dprintk("%s: failed to allocate cipher '%s', err: %d.\n",
- __func__, ctl->cipher_algo, err);
- goto err_out_exit;
- }
-
- crypto_ablkcipher_clear_flags(cipher, ~0);
-
- err = crypto_ablkcipher_setkey(cipher, key, ctl->cipher_keysize);
- if (err) {
- dprintk("%s: failed to set key for cipher '%s', err: %d.\n",
- __func__, ctl->cipher_algo, err);
- goto err_out_free;
- }
-
- return cipher;
-
-err_out_free:
- crypto_free_ablkcipher(cipher);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-/*
- * Crypto engine has a pool of pages to encrypt data into before sending
- * it over the network. This pool is freed/allocated here.
- */
-static void dst_crypto_pages_free(struct dst_crypto_engine *e)
-{
- unsigned int i;
-
- for (i = 0; i < e->page_num; ++i)
- __free_page(e->pages[i]);
- kfree(e->pages);
-}
-
-static int dst_crypto_pages_alloc(struct dst_crypto_engine *e, int num)
-{
- int i;
-
- e->pages = kmalloc(num * sizeof(struct page **), GFP_KERNEL);
- if (!e->pages)
- return -ENOMEM;
-
- for (i = 0; i < num; ++i) {
- e->pages[i] = alloc_page(GFP_KERNEL);
- if (!e->pages[i])
- goto err_out_free_pages;
- }
-
- e->page_num = num;
- return 0;
-
-err_out_free_pages:
- while (--i >= 0)
- __free_page(e->pages[i]);
-
- kfree(e->pages);
- return -ENOMEM;
-}
-
-/*
- * Initialize crypto engine for given node.
- * Setup cipher/hash, keys, pool of threads and private data.
- */
-static int dst_crypto_engine_init(struct dst_crypto_engine *e,
- struct dst_node *n)
-{
- int err;
- struct dst_crypto_ctl *ctl = &n->crypto;
-
- err = dst_crypto_pages_alloc(e, n->max_pages);
- if (err)
- goto err_out_exit;
-
- e->size = PAGE_SIZE;
- e->data = kmalloc(e->size, GFP_KERNEL);
- if (!e->data) {
- err = -ENOMEM;
- goto err_out_free_pages;
- }
-
- if (ctl->hash_algo[0]) {
- e->hash = dst_init_hash(ctl, n->hash_key);
- if (IS_ERR(e->hash)) {
- err = PTR_ERR(e->hash);
- e->hash = NULL;
- goto err_out_free;
- }
- }
-
- if (ctl->cipher_algo[0]) {
- e->cipher = dst_init_cipher(ctl, n->cipher_key);
- if (IS_ERR(e->cipher)) {
- err = PTR_ERR(e->cipher);
- e->cipher = NULL;
- goto err_out_free_hash;
- }
- }
-
- return 0;
-
-err_out_free_hash:
- crypto_free_hash(e->hash);
-err_out_free:
- kfree(e->data);
-err_out_free_pages:
- dst_crypto_pages_free(e);
-err_out_exit:
- return err;
-}
-
-static void dst_crypto_engine_exit(struct dst_crypto_engine *e)
-{
- if (e->hash)
- crypto_free_hash(e->hash);
- if (e->cipher)
- crypto_free_ablkcipher(e->cipher);
- dst_crypto_pages_free(e);
- kfree(e->data);
-}
-
-/*
- * Waiting for cipher processing to be completed.
- */
-struct dst_crypto_completion {
- struct completion complete;
- int error;
-};
-
-static void dst_crypto_complete(struct crypto_async_request *req, int err)
-{
- struct dst_crypto_completion *c = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- dprintk("%s: req: %p, err: %d.\n", __func__, req, err);
- c->error = err;
- complete(&c->complete);
-}
-
-static int dst_crypto_process(struct ablkcipher_request *req,
- struct scatterlist *sg_dst, struct scatterlist *sg_src,
- void *iv, int enc, unsigned long timeout)
-{
- struct dst_crypto_completion c;
- int err;
-
- init_completion(&c.complete);
- c.error = -EINPROGRESS;
-
- ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- dst_crypto_complete, &c);
-
- ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv);
-
- if (enc)
- err = crypto_ablkcipher_encrypt(req);
- else
- err = crypto_ablkcipher_decrypt(req);
-
- switch (err) {
- case -EINPROGRESS:
- case -EBUSY:
- err = wait_for_completion_interruptible_timeout(&c.complete,
- timeout);
- if (!err)
- err = -ETIMEDOUT;
- else
- err = c.error;
- break;
- default:
- break;
- }
-
- return err;
-}
-
-/*
- * DST uses a generic iteration approach for data crypto processing.
- * A single block IO request is turned into an array of scatterlists,
- * which are submitted to the crypto processing iterator.
- *
- * Input and output iterator initialization differ, since in the output
- * case we cannot encrypt data in place and need temporary storage,
- * which is then sent to the remote peer.
- */
-static int dst_trans_iter_out(struct bio *bio, struct dst_crypto_engine *e,
- int (*iterator) (struct dst_crypto_engine *e,
- struct scatterlist *dst,
- struct scatterlist *src))
-{
- struct bio_vec *bv;
- int err, i;
-
- sg_init_table(e->src, bio->bi_vcnt);
- sg_init_table(e->dst, bio->bi_vcnt);
-
- bio_for_each_segment(bv, bio, i) {
- sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
- sg_set_page(&e->dst[i], e->pages[i], bv->bv_len, bv->bv_offset);
-
- err = iterator(e, &e->dst[i], &e->src[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int dst_trans_iter_in(struct bio *bio, struct dst_crypto_engine *e,
- int (*iterator) (struct dst_crypto_engine *e,
- struct scatterlist *dst,
- struct scatterlist *src))
-{
- struct bio_vec *bv;
- int err, i;
-
- sg_init_table(e->src, bio->bi_vcnt);
- sg_init_table(e->dst, bio->bi_vcnt);
-
- bio_for_each_segment(bv, bio, i) {
- sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
- sg_set_page(&e->dst[i], bv->bv_page, bv->bv_len, bv->bv_offset);
-
- err = iterator(e, &e->dst[i], &e->src[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int dst_crypt_iterator(struct dst_crypto_engine *e,
- struct scatterlist *sg_dst, struct scatterlist *sg_src)
-{
- struct ablkcipher_request *req = e->data;
- u8 iv[32];
-
- memset(iv, 0, sizeof(iv));
-
- memcpy(iv, &e->iv, sizeof(e->iv));
-
- return dst_crypto_process(req, sg_dst, sg_src, iv, e->enc, e->timeout);
-}
-
-static int dst_crypt(struct dst_crypto_engine *e, struct bio *bio)
-{
- struct ablkcipher_request *req = e->data;
-
- memset(req, 0, sizeof(struct ablkcipher_request));
- ablkcipher_request_set_tfm(req, e->cipher);
-
- if (e->enc)
- return dst_trans_iter_out(bio, e, dst_crypt_iterator);
- else
- return dst_trans_iter_in(bio, e, dst_crypt_iterator);
-}
-
-static int dst_hash_iterator(struct dst_crypto_engine *e,
- struct scatterlist *sg_dst, struct scatterlist *sg_src)
-{
- return crypto_hash_update(e->data, sg_src, sg_src->length);
-}
-
-static int dst_hash(struct dst_crypto_engine *e, struct bio *bio, void *dst)
-{
- struct hash_desc *desc = e->data;
- int err;
-
- desc->tfm = e->hash;
- desc->flags = 0;
-
- err = crypto_hash_init(desc);
- if (err)
- return err;
-
- err = dst_trans_iter_in(bio, e, dst_hash_iterator);
- if (err)
- return err;
-
- err = crypto_hash_final(desc, dst);
- if (err)
- return err;
-
- return 0;
-}
-
-/*
- * Initialize/cleanup a crypto thread. The only things it should
- * do are to allocate a pool of pages as temporary storage and to
- * set up the cipher and/or hash.
- */
-static void *dst_crypto_thread_init(void *data)
-{
- struct dst_node *n = data;
- struct dst_crypto_engine *e;
- int err = -ENOMEM;
-
- e = kzalloc(sizeof(struct dst_crypto_engine), GFP_KERNEL);
- if (!e)
- goto err_out_exit;
- e->src = kcalloc(2 * n->max_pages, sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!e->src)
- goto err_out_free;
-
- e->dst = e->src + n->max_pages;
-
- err = dst_crypto_engine_init(e, n);
- if (err)
- goto err_out_free_all;
-
- return e;
-
-err_out_free_all:
- kfree(e->src);
-err_out_free:
- kfree(e);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-static void dst_crypto_thread_cleanup(void *private)
-{
- struct dst_crypto_engine *e = private;
-
- dst_crypto_engine_exit(e);
- kfree(e->src);
- kfree(e);
-}
-
-/*
- * Initialize crypto engine for given node: store keys, create pool
- * of threads, initialize each one.
- *
- * Each thread has a unique ID, but 0 and 1 are reserved for the receiving and
- * accepting threads (on an export node), so IDs could start from 2; starting
- * them from 10 makes it easy to see what a given thread is for.
- */
-int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl)
-{
- void *key = (ctl + 1);
- int err = -ENOMEM, i;
- char name[32];
-
- if (ctl->hash_keysize) {
- n->hash_key = kmalloc(ctl->hash_keysize, GFP_KERNEL);
- if (!n->hash_key)
- goto err_out_exit;
- memcpy(n->hash_key, key, ctl->hash_keysize);
- }
-
- if (ctl->cipher_keysize) {
- n->cipher_key = kmalloc(ctl->cipher_keysize, GFP_KERNEL);
- if (!n->cipher_key)
- goto err_out_free_hash;
- memcpy(n->cipher_key, key, ctl->cipher_keysize);
- }
- memcpy(&n->crypto, ctl, sizeof(struct dst_crypto_ctl));
-
- for (i = 0; i < ctl->thread_num; ++i) {
- snprintf(name, sizeof(name), "%s-crypto-%d", n->name, i);
- /* Unique ids... */
- err = thread_pool_add_worker(n->pool, name, i + 10,
- dst_crypto_thread_init, dst_crypto_thread_cleanup, n);
- if (err)
- goto err_out_free_threads;
- }
-
- return 0;
-
-err_out_free_threads:
- while (--i >= 0)
- thread_pool_del_worker_id(n->pool, i+10);
-
- if (ctl->cipher_keysize)
- kfree(n->cipher_key);
- ctl->cipher_keysize = 0;
-err_out_free_hash:
- if (ctl->hash_keysize)
- kfree(n->hash_key);
- ctl->hash_keysize = 0;
-err_out_exit:
- return err;
-}
-
-void dst_node_crypto_exit(struct dst_node *n)
-{
- struct dst_crypto_ctl *ctl = &n->crypto;
-
- if (ctl->cipher_algo[0] || ctl->hash_algo[0]) {
- kfree(n->hash_key);
- kfree(n->cipher_key);
- }
-}
-
-/*
- * Thread pool setup callback. Just stores a transaction in private data.
- */
-static int dst_trans_crypto_setup(void *crypto_engine, void *trans)
-{
- struct dst_crypto_engine *e = crypto_engine;
-
- e->private = trans;
- return 0;
-}
-
-#if 0
-static void dst_dump_bio(struct bio *bio)
-{
- u8 *p;
- struct bio_vec *bv;
- int i;
-
- bio_for_each_segment(bv, bio, i) {
- dprintk("%s: %llu/%u: size: %u, offset: %u, data: ",
- __func__, bio->bi_sector, bio->bi_size,
- bv->bv_len, bv->bv_offset);
-
- p = kmap(bv->bv_page) + bv->bv_offset;
- for (i = 0; i < bv->bv_len; ++i)
- printk(KERN_DEBUG "%02x ", p[i]);
- kunmap(bv->bv_page);
- printk("\n");
- }
-}
-#endif
-
-/*
- * Encrypt/hash data and send it to the network.
- */
-static int dst_crypto_process_sending(struct dst_crypto_engine *e,
- struct bio *bio, u8 *hash)
-{
- int err;
-
- if (e->cipher) {
- err = dst_crypt(e, bio);
- if (err)
- goto err_out_exit;
- }
-
- if (e->hash) {
- err = dst_hash(e, bio, hash);
- if (err)
- goto err_out_exit;
-
-#ifdef CONFIG_DST_DEBUG
- {
- unsigned int i;
-
- /* dst_dump_bio(bio); */
-
- printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash: ",
- __func__, (u64)bio->bi_sector,
- bio->bi_size, bio_data_dir(bio));
- for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
- printk("%02x ", hash[i]);
- printk("\n");
- }
-#endif
- }
-
- return 0;
-
-err_out_exit:
- return err;
-}
-
-/*
- * Check if received data is valid. Decipher if it is.
- */
-static int dst_crypto_process_receiving(struct dst_crypto_engine *e,
- struct bio *bio, u8 *hash, u8 *recv_hash)
-{
- int err;
-
- if (e->hash) {
- int mismatch;
-
- err = dst_hash(e, bio, hash);
- if (err)
- goto err_out_exit;
-
- mismatch = !!memcmp(recv_hash, hash,
- crypto_hash_digestsize(e->hash));
-#ifdef CONFIG_DST_DEBUG
- /* dst_dump_bio(bio); */
-
- printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash mismatch: %d",
- __func__, (u64)bio->bi_sector, bio->bi_size,
- bio_data_dir(bio), mismatch);
- if (mismatch) {
- unsigned int i;
-
- printk(", recv/calc: ");
- for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
- printk("%02x/%02x ", recv_hash[i], hash[i]);
-
- }
- printk("\n");
-#endif
- err = -1;
- if (mismatch)
- goto err_out_exit;
- }
-
- if (e->cipher) {
- err = dst_crypt(e, bio);
- if (err)
- goto err_out_exit;
- }
-
- return 0;
-
-err_out_exit:
- return err;
-}
-
-/*
- * Thread pool callback to encrypt data and send it to the network.
- */
-static int dst_trans_crypto_action(void *crypto_engine, void *schedule_data)
-{
- struct dst_crypto_engine *e = crypto_engine;
- struct dst_trans *t = schedule_data;
- struct bio *bio = t->bio;
- int err;
-
- dprintk("%s: t: %p, gen: %llu, cipher: %p, hash: %p.\n",
- __func__, t, t->gen, e->cipher, e->hash);
-
- e->enc = t->enc;
- e->iv = dst_gen_iv(t);
-
- if (bio_data_dir(bio) == WRITE) {
- err = dst_crypto_process_sending(e, bio, t->cmd.hash);
- if (err)
- goto err_out_exit;
-
- if (e->hash) {
- t->cmd.csize = crypto_hash_digestsize(e->hash);
- t->cmd.size += t->cmd.csize;
- }
-
- return dst_trans_send(t);
- } else {
- u8 *hash = e->data + e->size/2;
-
- err = dst_crypto_process_receiving(e, bio, hash, t->cmd.hash);
- if (err)
- goto err_out_exit;
-
- dst_trans_remove(t);
- dst_trans_put(t);
- }
-
- return 0;
-
-err_out_exit:
- t->error = err;
- dst_trans_put(t);
- return err;
-}
-
-/*
- * Schedule crypto processing for given transaction.
- */
-int dst_trans_crypto(struct dst_trans *t)
-{
- struct dst_node *n = t->n;
- int err;
-
- err = thread_pool_schedule(n->pool,
- dst_trans_crypto_setup, dst_trans_crypto_action,
- t, MAX_SCHEDULE_TIMEOUT);
- if (err)
- goto err_out_exit;
-
- return 0;
-
-err_out_exit:
- dst_trans_put(t);
- return err;
-}
-
-/*
- * Crypto machinery for the export node.
- */
-static int dst_export_crypto_setup(void *crypto_engine, void *bio)
-{
- struct dst_crypto_engine *e = crypto_engine;
-
- e->private = bio;
- return 0;
-}
-
-static int dst_export_crypto_action(void *crypto_engine, void *schedule_data)
-{
- struct dst_crypto_engine *e = crypto_engine;
- struct bio *bio = schedule_data;
- struct dst_export_priv *p = bio->bi_private;
- int err;
-
- dprintk("%s: e: %p, data: %p, bio: %llu/%u, dir: %lu.\n",
- __func__, e, e->data, (u64)bio->bi_sector,
- bio->bi_size, bio_data_dir(bio));
-
- e->enc = (bio_data_dir(bio) == READ);
- e->iv = p->cmd.id;
-
- if (bio_data_dir(bio) == WRITE) {
- u8 *hash = e->data + e->size/2;
-
- err = dst_crypto_process_receiving(e, bio, hash, p->cmd.hash);
- if (err)
- goto err_out_exit;
-
- generic_make_request(bio);
- } else {
- err = dst_crypto_process_sending(e, bio, p->cmd.hash);
- if (err)
- goto err_out_exit;
-
- if (e->hash) {
- p->cmd.csize = crypto_hash_digestsize(e->hash);
- p->cmd.size += p->cmd.csize;
- }
-
- err = dst_export_send_bio(bio);
- }
- return 0;
-
-err_out_exit:
- bio_put(bio);
- return err;
-}
-
-int dst_export_crypto(struct dst_node *n, struct bio *bio)
-{
- int err;
-
- err = thread_pool_schedule(n->pool,
- dst_export_crypto_setup, dst_export_crypto_action,
- bio, MAX_SCHEDULE_TIMEOUT);
- if (err)
- goto err_out_exit;
-
- return 0;
-
-err_out_exit:
- bio_put(bio);
- return err;
-}
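
Judging from the calls above, the DST thread pool hands every worker a private object created by its init callback (here the crypto engine) and runs each queued item through a setup/action callback pair; thread_pool_schedule() waits (here with MAX_SCHEDULE_TIMEOUT) for a free worker. Below is a minimal sketch of a hypothetical user of that inferred contract; demo_setup/demo_action and struct demo_work are not part of the driver.

/* Hypothetical work item for the thread pool contract used above. */
struct demo_work {
	int input;
	int result;
};

static int demo_setup(void *worker_private, void *data)
{
	/*
	 * worker_private is whatever the worker's init callback returned;
	 * data is the item being scheduled. Nothing to prepare here.
	 */
	(void)worker_private;
	(void)data;
	return 0;
}

static int demo_action(void *worker_private, void *data)
{
	struct demo_work *w = data;

	(void)worker_private;
	w->result = w->input * 2;	/* stand-in for real crypto work */
	return 0;
}

/*
 * Scheduling then mirrors the dst_trans_crypto() call above:
 *
 *	err = thread_pool_schedule(n->pool, demo_setup, demo_action,
 *				   &work, MAX_SCHEDULE_TIMEOUT);
 */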
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
deleted file mode 100644
index fd5bd0e..0000000
--- a/drivers/staging/dst/dcore.c
+++ /dev/null
@@ -1,1004 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-#include <linux/buffer_head.h>
-#include <linux/connector.h>
-#include <linux/dst.h>
-#include <linux/device.h>
-#include <linux/jhash.h>
-#include <linux/idr.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/slab.h>
-#include <linux/socket.h>
-
-#include <linux/in.h>
-#include <linux/in6.h>
-
-#include <net/sock.h>
-
-static int dst_major;
-
-static DEFINE_MUTEX(dst_hash_lock);
-static struct list_head *dst_hashtable;
-static unsigned int dst_hashtable_size = 128;
-module_param(dst_hashtable_size, uint, 0644);
-
-static char dst_name[] = "Dementianting goldfish";
-
-static DEFINE_IDR(dst_index_idr);
-static struct cb_id cn_dst_id = { CN_DST_IDX, CN_DST_VAL };
-
-/*
- * DST sysfs tree for device called 'storage':
- *
- * /sys/bus/dst/devices/storage/
- * /sys/bus/dst/devices/storage/type : 192.168.4.80:1025
- * /sys/bus/dst/devices/storage/size : 800
- * /sys/bus/dst/devices/storage/name : storage
- */
-
-static int dst_dev_match(struct device *dev, struct device_driver *drv)
-{
- return 1;
-}
-
-static struct bus_type dst_dev_bus_type = {
- .name = "dst",
- .match = &dst_dev_match,
-};
-
-static void dst_node_release(struct device *dev)
-{
- struct dst_info *info = container_of(dev, struct dst_info, device);
-
- kfree(info);
-}
-
-static struct device dst_node_dev = {
- .bus = &dst_dev_bus_type,
- .release = &dst_node_release
-};
-
-/*
- * Setting size of the node after it was changed.
- */
-static void dst_node_set_size(struct dst_node *n)
-{
- struct block_device *bdev;
-
- set_capacity(n->disk, n->size >> 9);
-
- bdev = bdget_disk(n->disk, 0);
- if (bdev) {
- mutex_lock(&bdev->bd_inode->i_mutex);
- i_size_write(bdev->bd_inode, n->size);
- mutex_unlock(&bdev->bd_inode->i_mutex);
- bdput(bdev);
- }
-}
-
-/*
- * Distributed storage request processing function.
- */
-static int dst_request(struct request_queue *q, struct bio *bio)
-{
- struct dst_node *n = q->queuedata;
- int err = -EIO;
-
- if (bio_empty_barrier(bio) && !blk_queue_discard(q)) {
- /*
- * This is a dirty^Wnice hack, but if we complete this
- * operation with -EOPNOTSUPP as intended, XFS
- * will get stuck and freeze the machine. This may
- * not be a particularly XFS-specific problem though,
- * but it is the only FS I have worked with that
- * sends an empty barrier at umount time.
- *
- * Empty barriers are not allowed anyway, see 51fd77bd9f512
- * for example, although later it was changed to
- * bio_rw_flagged(bio, BIO_RW_DISCARD) only, which does not
- * work in this case.
- */
- /* err = -EOPNOTSUPP; */
- err = 0;
- goto end_io;
- }
-
- bio_get(bio);
-
- return dst_process_bio(n, bio);
-
-end_io:
- bio_endio(bio, err);
- return err;
-}
-
-/*
- * Open/close callbacks for appropriate block device.
- */
-static int dst_bdev_open(struct block_device *bdev, fmode_t mode)
-{
- struct dst_node *n = bdev->bd_disk->private_data;
-
- dst_node_get(n);
- return 0;
-}
-
-static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
-{
- struct dst_node *n = disk->private_data;
-
- dst_node_put(n);
- return 0;
-}
-
-static struct block_device_operations dst_blk_ops = {
- .open = dst_bdev_open,
- .release = dst_bdev_release,
- .owner = THIS_MODULE,
-};
-
-/*
- * Block layer binding - disk is created when array is fully configured
- * by userspace request.
- */
-static int dst_node_create_disk(struct dst_node *n)
-{
- int err = -ENOMEM;
- u32 index = 0;
-
- n->queue = blk_init_queue(NULL, NULL);
- if (!n->queue)
- goto err_out_exit;
-
- n->queue->queuedata = n;
- blk_queue_make_request(n->queue, dst_request);
- blk_queue_max_phys_segments(n->queue, n->max_pages);
- blk_queue_max_hw_segments(n->queue, n->max_pages);
-
- err = -ENOMEM;
- n->disk = alloc_disk(1);
- if (!n->disk)
- goto err_out_free_queue;
-
- if (!(n->state->permissions & DST_PERM_WRITE)) {
- printk(KERN_INFO "DST node %s attached read-only.\n", n->name);
- set_disk_ro(n->disk, 1);
- }
-
- if (!idr_pre_get(&dst_index_idr, GFP_KERNEL))
- goto err_out_put;
-
- mutex_lock(&dst_hash_lock);
- err = idr_get_new(&dst_index_idr, NULL, &index);
- mutex_unlock(&dst_hash_lock);
- if (err)
- goto err_out_put;
-
- n->disk->major = dst_major;
- n->disk->first_minor = index;
- n->disk->fops = &dst_blk_ops;
- n->disk->queue = n->queue;
- n->disk->private_data = n;
- snprintf(n->disk->disk_name, sizeof(n->disk->disk_name),
- "dst-%s", n->name);
-
- return 0;
-
-err_out_put:
- put_disk(n->disk);
-err_out_free_queue:
- blk_cleanup_queue(n->queue);
-err_out_exit:
- return err;
-}
-
-/*
- * Sysfs machinery: show device's size.
- */
-static ssize_t dst_show_size(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dst_info *info = container_of(dev, struct dst_info, device);
-
- return sprintf(buf, "%llu\n", info->size);
-}
-
-/*
- * Show local exported device.
- */
-static ssize_t dst_show_local(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dst_info *info = container_of(dev, struct dst_info, device);
-
- return sprintf(buf, "%s\n", info->local);
-}
-
-/*
- * Shows type of the remote node - device major/minor number
- * for local nodes and address (af_inet ipv4/ipv6 only) for remote nodes.
- */
-static ssize_t dst_show_type(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dst_info *info = container_of(dev, struct dst_info, device);
- int family = info->net.addr.sa_family;
-
- if (family == AF_INET) {
- struct sockaddr_in *sin = (struct sockaddr_in *)&info->net.addr;
- return sprintf(buf, "%u.%u.%u.%u:%d\n",
- NIPQUAD(sin->sin_addr.s_addr), ntohs(sin->sin_port));
- } else if (family == AF_INET6) {
- struct sockaddr_in6 *sin = (struct sockaddr_in6 *)
- &info->net.addr;
- return sprintf(buf,
- "%pi6:%d\n",
- &sin->sin6_addr, ntohs(sin->sin6_port));
- } else {
- int i, sz = PAGE_SIZE - 2; /* 0 symbol and '\n' below */
- int size, addrlen = info->net.addr.sa_data_len;
- unsigned char *a = (unsigned char *)&info->net.addr.sa_data;
- char *buf_orig = buf;
-
- size = snprintf(buf, sz, "family: %d, addrlen: %u, addr: ",
- family, addrlen);
- sz -= size;
- buf += size;
-
- for (i = 0; i < addrlen; ++i) {
- if (sz < 3)
- break;
-
- size = snprintf(buf, sz, "%02x ", a[i]);
- sz -= size;
- buf += size;
- }
- buf += sprintf(buf, "\n");
-
- return buf - buf_orig;
- }
- return 0;
-}
-
-static struct device_attribute dst_node_attrs[] = {
- __ATTR(size, 0444, dst_show_size, NULL),
- __ATTR(type, 0444, dst_show_type, NULL),
- __ATTR(local, 0444, dst_show_local, NULL),
-};
-
-static int dst_create_node_attributes(struct dst_node *n)
-{
- int err, i;
-
- for (i = 0; i < ARRAY_SIZE(dst_node_attrs); ++i) {
- err = device_create_file(&n->info->device,
- &dst_node_attrs[i]);
- if (err)
- goto err_out_remove_all;
- }
- return 0;
-
-err_out_remove_all:
- while (--i >= 0)
- device_remove_file(&n->info->device,
- &dst_node_attrs[i]);
-
- return err;
-}
-
-static void dst_remove_node_attributes(struct dst_node *n)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dst_node_attrs); ++i)
- device_remove_file(&n->info->device,
- &dst_node_attrs[i]);
-}
-
-/*
- * Sysfs cleanup and initialization.
- * A number of useful parameters are exported through sysfs.
- */
-static void dst_node_sysfs_exit(struct dst_node *n)
-{
- if (n->info) {
- dst_remove_node_attributes(n);
- device_unregister(&n->info->device);
- n->info = NULL;
- }
-}
-
-static int dst_node_sysfs_init(struct dst_node *n)
-{
- int err;
-
- n->info = kzalloc(sizeof(struct dst_info), GFP_KERNEL);
- if (!n->info)
- return -ENOMEM;
-
- memcpy(&n->info->device, &dst_node_dev, sizeof(struct device));
- n->info->size = n->size;
-
- dev_set_name(&n->info->device, "dst-%s", n->name);
- err = device_register(&n->info->device);
- if (err) {
- dprintk(KERN_ERR "Failed to register node '%s', err: %d.\n",
- n->name, err);
- goto err_out_exit;
- }
-
- dst_create_node_attributes(n);
-
- return 0;
-
-err_out_exit:
- kfree(n->info);
- n->info = NULL;
- return err;
-}
-
-/*
- * DST node hash tables machinery.
- */
-static inline unsigned int dst_hash(char *str, unsigned int size)
-{
- return jhash(str, size, 0) % dst_hashtable_size;
-}
-
-static void dst_node_remove(struct dst_node *n)
-{
- mutex_lock(&dst_hash_lock);
- list_del_init(&n->node_entry);
- mutex_unlock(&dst_hash_lock);
-}
-
-static void dst_node_add(struct dst_node *n)
-{
- unsigned hash = dst_hash(n->name, sizeof(n->name));
-
- mutex_lock(&dst_hash_lock);
- list_add_tail(&n->node_entry, &dst_hashtable[hash]);
- mutex_unlock(&dst_hash_lock);
-}
-
-/*
- * Clean up the node when it is about to be freed.
- * There are still users of the socket though,
- * so connection cleanup should be protected.
- */
-static void dst_node_cleanup(struct dst_node *n)
-{
- struct dst_state *st = n->state;
-
- if (!st)
- return;
-
- if (n->queue) {
- blk_cleanup_queue(n->queue);
-
- mutex_lock(&dst_hash_lock);
- idr_remove(&dst_index_idr, n->disk->first_minor);
- mutex_unlock(&dst_hash_lock);
-
- put_disk(n->disk);
- }
-
- if (n->bdev) {
- sync_blockdev(n->bdev);
- blkdev_put(n->bdev, FMODE_READ|FMODE_WRITE);
- }
-
- dst_state_lock(st);
- st->need_exit = 1;
- dst_state_exit_connected(st);
- dst_state_unlock(st);
-
- wake_up(&st->thread_wait);
-
- dst_state_put(st);
- n->state = NULL;
-}
-
-/*
- * Free security attributes attached to given node.
- */
-static void dst_security_exit(struct dst_node *n)
-{
- struct dst_secure *s, *tmp;
-
- list_for_each_entry_safe(s, tmp, &n->security_list, sec_entry) {
- list_del(&s->sec_entry);
- kfree(s);
- }
-}
-
-/*
- * Free node when there are no more users.
- * Actually the node has to be freed on behalf of a userspace process,
- * since there are a number of threads which are embedded in the
- * node, so they cannot exit and free the node from there; that is
- * why there is a wakeup if the reference counter is not equal to zero.
- */
-void dst_node_put(struct dst_node *n)
-{
- if (unlikely(!n))
- return;
-
- dprintk("%s: n: %p, refcnt: %d.\n",
- __func__, n, atomic_read(&n->refcnt));
-
- if (atomic_dec_and_test(&n->refcnt)) {
- dst_node_remove(n);
- n->trans_scan_timeout = 0;
- dst_node_cleanup(n);
- thread_pool_destroy(n->pool);
- dst_node_sysfs_exit(n);
- dst_node_crypto_exit(n);
- dst_security_exit(n);
- dst_node_trans_exit(n);
-
- kfree(n);
-
- dprintk("%s: freed n: %p.\n", __func__, n);
- } else {
- wake_up(&n->wait);
- }
-}
-
-/*
- * This function finds the device's major/minor numbers for a given pathname.
- */
-static int dst_lookup_device(const char *path, dev_t *dev)
-{
- int err;
- struct nameidata nd;
- struct inode *inode;
-
- err = path_lookup(path, LOOKUP_FOLLOW, &nd);
- if (err)
- return err;
-
- inode = nd.path.dentry->d_inode;
- if (!inode) {
- err = -ENOENT;
- goto out;
- }
-
- if (!S_ISBLK(inode->i_mode)) {
- err = -ENOTBLK;
- goto out;
- }
-
- *dev = inode->i_rdev;
-
-out:
- path_put(&nd.path);
- return err;
-}
-
-/*
- * Setting up the export device: look it up by name, get its size
- * and set up the listening socket, which will accept clients that
- * will submit IO for the given storage.
- */
-static int dst_setup_export(struct dst_node *n, struct dst_ctl *ctl,
- struct dst_export_ctl *le)
-{
- int err;
- dev_t dev = 0; /* gcc likes to scream here */
-
- snprintf(n->info->local, sizeof(n->info->local), "%s", le->device);
-
- err = dst_lookup_device(le->device, &dev);
- if (err)
- return err;
-
- n->bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
- if (!n->bdev)
- return -ENODEV;
-
- if (n->size != 0)
- n->size = min_t(loff_t, n->bdev->bd_inode->i_size, n->size);
- else
- n->size = n->bdev->bd_inode->i_size;
-
- n->info->size = n->size;
- err = dst_node_init_listened(n, le);
- if (err)
- goto err_out_cleanup;
-
- return 0;
-
-err_out_cleanup:
- blkdev_put(n->bdev, FMODE_READ|FMODE_WRITE);
- n->bdev = NULL;
-
- return err;
-}
-
-/* Empty thread pool callbacks for the network processing threads. */
-static inline void *dst_thread_network_init(void *data)
-{
- dprintk("%s: data: %p.\n", __func__, data);
- return data;
-}
-
-static inline void dst_thread_network_cleanup(void *data)
-{
- dprintk("%s: data: %p.\n", __func__, data);
-}
-
-/*
- * Allocate DST node and initialize some of its parameters.
- */
-static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
- int (*start)(struct dst_node *),
- int num)
-{
- struct dst_node *n;
- int err;
-
- n = kzalloc(sizeof(struct dst_node), GFP_KERNEL);
- if (!n)
- return NULL;
-
- INIT_LIST_HEAD(&n->node_entry);
-
- INIT_LIST_HEAD(&n->security_list);
- mutex_init(&n->security_lock);
-
- init_waitqueue_head(&n->wait);
-
- n->trans_scan_timeout = msecs_to_jiffies(ctl->trans_scan_timeout);
- if (!n->trans_scan_timeout)
- n->trans_scan_timeout = HZ;
-
- n->trans_max_retries = ctl->trans_max_retries;
- if (!n->trans_max_retries)
- n->trans_max_retries = 10;
-
- /*
- * Pretty much arbitrary default numbers.
- * 32 matches the maximum number of pages in a bio originating from ext3 (31).
- */
- n->max_pages = ctl->max_pages;
- if (!n->max_pages)
- n->max_pages = 32;
-
- if (n->max_pages > 1024)
- n->max_pages = 1024;
-
- n->start = start;
- n->size = ctl->size;
-
- atomic_set(&n->refcnt, 1);
- atomic_long_set(&n->gen, 0);
- snprintf(n->name, sizeof(n->name), "%s", ctl->name);
-
- err = dst_node_sysfs_init(n);
- if (err)
- goto err_out_free;
-
- n->pool = thread_pool_create(num, n->name, dst_thread_network_init,
- dst_thread_network_cleanup, n);
- if (IS_ERR(n->pool)) {
- err = PTR_ERR(n->pool);
- goto err_out_sysfs_exit;
- }
-
- dprintk("%s: n: %p, name: %s.\n", __func__, n, n->name);
-
- return n;
-
-err_out_sysfs_exit:
- dst_node_sysfs_exit(n);
-err_out_free:
- kfree(n);
- return NULL;
-}
-
-/*
- * Starting a node, connected to the remote server:
- * register block device and initialize transaction mechanism.
- * In reverse order though.
- *
- * It will autonegotiate some parameters with the remote node
- * and update local if needed.
- *
- * Transaction initialization should be the last thing before
- * starting the node, since transaction should include not only
- * block IO, but also crypto related data (if any), which are
- * initialized separately.
- */
-static int dst_start_remote(struct dst_node *n)
-{
- int err;
-
- err = dst_node_trans_init(n, sizeof(struct dst_trans));
- if (err)
- return err;
-
- err = dst_node_create_disk(n);
- if (err)
- return err;
-
- dst_node_set_size(n);
- add_disk(n->disk);
-
- dprintk("DST: started remote node '%s', minor: %d.\n",
- n->name, n->disk->first_minor);
-
- return 0;
-}
-
-/*
- * Add a remote node and initialize the connection.
- */
-static int dst_add_remote(struct dst_node *n, struct dst_ctl *ctl,
- void *data, unsigned int size)
-{
- int err;
- struct dst_network_ctl *rctl = data;
-
- if (n)
- return -EEXIST;
-
- if (size != sizeof(struct dst_network_ctl))
- return -EINVAL;
-
- n = dst_alloc_node(ctl, dst_start_remote, 1);
- if (!n)
- return -ENOMEM;
-
- memcpy(&n->info->net, rctl, sizeof(struct dst_network_ctl));
- err = dst_node_init_connected(n, rctl);
- if (err)
- goto err_out_free;
-
- dst_node_add(n);
-
- return 0;
-
-err_out_free:
- dst_node_put(n);
- return err;
-}
-
-/*
- * Adding export node: initializing block device and listening socket.
- */
-static int dst_add_export(struct dst_node *n, struct dst_ctl *ctl,
- void *data, unsigned int size)
-{
- int err;
- struct dst_export_ctl *le = data;
-
- if (n)
- return -EEXIST;
-
- if (size != sizeof(struct dst_export_ctl))
- return -EINVAL;
-
- n = dst_alloc_node(ctl, dst_start_export, 2);
- if (!n)
- return -EINVAL;
-
- err = dst_setup_export(n, ctl, le);
- if (err)
- goto err_out_free;
-
- dst_node_add(n);
-
- return 0;
-
-err_out_free:
- dst_node_put(n);
- return err;
-}
-
-static int dst_node_remove_unload(struct dst_node *n)
-{
- printk(KERN_INFO "STOPPED name: '%s', size: %llu.\n",
- n->name, n->size);
-
- if (n->disk)
- del_gendisk(n->disk);
-
- dst_node_remove(n);
- dst_node_sysfs_exit(n);
-
- /*
- * This is not a hack. Really.
- * Node's reference counter allows to implement fine grained
- * node freeing, but since all transactions (which hold node's
- * reference counter) are processed in the dedicated thread,
- * it is possible that reference will hit zero in that thread,
- * so we will not be able to exit thread and cleanup the node.
- *
- * So, we remove disk, so no new activity is possible, and
- * wait until all pending transactions are completed (either
- * in the receiving thread or by timeout in the workqueue); in this
- * case the reference counter will be less than or equal to 2 (once set in
- * dst_alloc_node() and then in connector message parser;
- * or when we force module unloading, and connector message
- * parser does not hold a reference, in this case reference
- * counter will be equal to 1),
- * and subsequent dst_node_put() calls will free the node.
- */
- dprintk("%s: going to sleep with %d refcnt.\n",
- __func__, atomic_read(&n->refcnt));
- wait_event(n->wait, atomic_read(&n->refcnt) <= 2);
-
- dst_node_put(n);
- return 0;
-}
-
-/*
- * Remove node from the hash table.
- */
-static int dst_del_node(struct dst_node *n, struct dst_ctl *ctl,
- void *data, unsigned int size)
-{
- if (!n)
- return -ENODEV;
-
- return dst_node_remove_unload(n);
-}
-
-/*
- * Initialize crypto processing for given node.
- */
-static int dst_crypto_init(struct dst_node *n, struct dst_ctl *ctl,
- void *data, unsigned int size)
-{
- struct dst_crypto_ctl *crypto = data;
-
- if (!n)
- return -ENODEV;
-
- if (size != sizeof(struct dst_crypto_ctl) + crypto->hash_keysize +
- crypto->cipher_keysize)
- return -EINVAL;
-
- if (n->trans_cache)
- return -EEXIST;
-
- return dst_node_crypto_init(n, crypto);
-}
-
-/*
- * Security attributes for given node.
- */
-static int dst_security_init(struct dst_node *n, struct dst_ctl *ctl,
- void *data, unsigned int size)
-{
- struct dst_secure *s;
-
- if (!n)
- return -ENODEV;
-
- if (size != sizeof(struct dst_secure_user))
- return -EINVAL;
-
- s = kmalloc(sizeof(struct dst_secure), GFP_KERNEL);
- if (!s)
- return -ENOMEM;
-
- memcpy(&s->sec, data, size);
-
- mutex_lock(&n->security_lock);
- list_add_tail(&s->sec_entry, &n->security_list);
- mutex_unlock(&n->security_lock);
-
- return 0;
-}
-
-/*
- * Kill'em all!
- */
-static int dst_start_node(struct dst_node *n, struct dst_ctl *ctl,
- void *data, unsigned int size)
-{
- int err;
-
- if (!n)
- return -ENODEV;
-
- if (n->trans_cache)
- return 0;
-
- err = n->start(n);
- if (err)
- return err;
-
- printk(KERN_INFO "STARTED name: '%s', size: %llu.\n", n->name, n->size);
- return 0;
-}
-
-typedef int (*dst_command_func)(struct dst_node *n, struct dst_ctl *ctl,
- void *data, unsigned int size);
-
-/*
- * List of userspace commands.
- */
-static dst_command_func dst_commands[] = {
- [DST_ADD_REMOTE] = &dst_add_remote,
- [DST_ADD_EXPORT] = &dst_add_export,
- [DST_DEL_NODE] = &dst_del_node,
- [DST_CRYPTO] = &dst_crypto_init,
- [DST_SECURITY] = &dst_security_init,
- [DST_START] = &dst_start_node,
-};
-
-/*
- * Configuration parser.
- */
-static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
-{
- struct dst_ctl *ctl;
- int err;
- struct dst_ctl_ack ack;
- struct dst_node *n = NULL, *tmp;
- unsigned int hash;
-
- if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
- err = -EPERM;
- goto out;
- }
-
- if (msg->len < sizeof(struct dst_ctl)) {
- err = -EBADMSG;
- goto out;
- }
-
- ctl = (struct dst_ctl *)msg->data;
-
- if (ctl->cmd >= DST_CMD_MAX) {
- err = -EINVAL;
- goto out;
- }
- hash = dst_hash(ctl->name, sizeof(ctl->name));
-
- mutex_lock(&dst_hash_lock);
- list_for_each_entry(tmp, &dst_hashtable[hash], node_entry) {
- if (!memcmp(tmp->name, ctl->name, sizeof(tmp->name))) {
- n = tmp;
- dst_node_get(n);
- break;
- }
- }
- mutex_unlock(&dst_hash_lock);
-
- err = dst_commands[ctl->cmd](n, ctl, msg->data + sizeof(struct dst_ctl),
- msg->len - sizeof(struct dst_ctl));
-
- dst_node_put(n);
-out:
- memcpy(&ack.msg, msg, sizeof(struct cn_msg));
-
- ack.msg.ack = msg->ack + 1;
- ack.msg.len = sizeof(struct dst_ctl_ack) - sizeof(struct cn_msg);
-
- ack.error = err;
-
- cn_netlink_send(&ack.msg, 0, GFP_KERNEL);
-}
-
-/*
- * Global initialization: sysfs, hash table, block device registration,
- * connector and various caches.
- */
-static int __init dst_sysfs_init(void)
-{
- return bus_register(&dst_dev_bus_type);
-}
-
-static void dst_sysfs_exit(void)
-{
- bus_unregister(&dst_dev_bus_type);
-}
-
-static int __init dst_hashtable_init(void)
-{
- unsigned int i;
-
- dst_hashtable = kcalloc(dst_hashtable_size, sizeof(struct list_head),
- GFP_KERNEL);
- if (!dst_hashtable)
- return -ENOMEM;
-
- for (i = 0; i < dst_hashtable_size; ++i)
- INIT_LIST_HEAD(&dst_hashtable[i]);
-
- return 0;
-}
-
-static void dst_hashtable_exit(void)
-{
- unsigned int i;
- struct dst_node *n, *tmp;
-
- for (i = 0; i < dst_hashtable_size; ++i) {
- list_for_each_entry_safe(n, tmp, &dst_hashtable[i], node_entry) {
- dst_node_remove_unload(n);
- }
- }
-
- kfree(dst_hashtable);
-}
-
-static int __init dst_sys_init(void)
-{
- int err = -ENOMEM;
-
- err = dst_hashtable_init();
- if (err)
- goto err_out_exit;
-
- err = dst_export_init();
- if (err)
- goto err_out_hashtable_exit;
-
- err = register_blkdev(dst_major, DST_NAME);
- if (err < 0)
- goto err_out_export_exit;
- if (err)
- dst_major = err;
-
- err = dst_sysfs_init();
- if (err)
- goto err_out_unregister;
-
- err = cn_add_callback(&cn_dst_id, "DST", cn_dst_callback);
- if (err)
- goto err_out_sysfs_exit;
-
- printk(KERN_INFO "Distributed storage, '%s' release.\n", dst_name);
-
- return 0;
-
-err_out_sysfs_exit:
- dst_sysfs_exit();
-err_out_unregister:
- unregister_blkdev(dst_major, DST_NAME);
-err_out_export_exit:
- dst_export_exit();
-err_out_hashtable_exit:
- dst_hashtable_exit();
-err_out_exit:
- return err;
-}
-
-static void __exit dst_sys_exit(void)
-{
- cn_del_callback(&cn_dst_id);
- unregister_blkdev(dst_major, DST_NAME);
- dst_hashtable_exit();
- dst_sysfs_exit();
- dst_export_exit();
-}
-
-module_init(dst_sys_init);
-module_exit(dst_sys_exit);
-
-MODULE_DESCRIPTION("Distributed storage");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_LICENSE("GPL");
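
cn_dst_callback() above expects a struct dst_ctl (followed by command-specific data) wrapped in a connector message and sent over netlink. A rough userspace sketch of that framing follows; it is illustrative only: the payload is treated as an opaque buffer (its real layout lives in linux/dst.h), nl_sock is assumed to be an already-created and bound NETLINK_CONNECTOR socket, and the CN_DST_IDX/CN_DST_VAL fallback values are assumptions for headers that lack them.

#include <linux/connector.h>
#include <linux/netlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef CN_DST_IDX		/* normally provided by linux/connector.h */
#define CN_DST_IDX	0x6	/* assumed value */
#define CN_DST_VAL	0x1	/* assumed value */
#endif

/* Send one DST control command (struct dst_ctl + payload) to the kernel. */
static int dst_send_ctl(int nl_sock, const void *ctl, unsigned int len)
{
	char buf[NLMSG_SPACE(sizeof(struct cn_msg) + 1024)];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct cn_msg *msg = NLMSG_DATA(nlh);
	struct sockaddr_nl kern = { .nl_family = AF_NETLINK };

	if (len > 1024)
		return -1;

	memset(buf, 0, sizeof(buf));
	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*msg) + len);
	nlh->nlmsg_type = NLMSG_DONE;
	nlh->nlmsg_pid = getpid();

	msg->id.idx = CN_DST_IDX;	/* must match the cb_id registered above */
	msg->id.val = CN_DST_VAL;
	msg->len = len;
	memcpy(msg + 1, ctl, len);

	/* nl_pid 0 addresses the kernel-side connector. */
	return sendto(nl_sock, buf, nlh->nlmsg_len, 0,
		      (struct sockaddr *)&kern, sizeof(kern)) < 0 ? -1 : 0;
}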
diff --git a/drivers/staging/dst/export.c b/drivers/staging/dst/export.c
deleted file mode 100644
index c324230..0000000
--- a/drivers/staging/dst/export.c
+++ /dev/null
@@ -1,660 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-#include <linux/dst.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/socket.h>
-
-#include <net/sock.h>
-
-/*
- * Export bioset is used for server block IO requests.
- */
-static struct bio_set *dst_bio_set;
-
-int __init dst_export_init(void)
-{
- int err = -ENOMEM;
-
- dst_bio_set = bioset_create(32, sizeof(struct dst_export_priv));
- if (!dst_bio_set)
- goto err_out_exit;
-
- return 0;
-
-err_out_exit:
- return err;
-}
-
-void dst_export_exit(void)
-{
- bioset_free(dst_bio_set);
-}
-
-/*
- * When a client connects and autonegotiates with the server node,
- * its permissions are checked against the security attributes and sent
- * back.
- */
-static unsigned int dst_check_permissions(struct dst_state *main,
- struct dst_state *st)
-{
- struct dst_node *n = main->node;
- struct dst_secure *sentry;
- struct dst_secure_user *s;
- struct saddr *sa = &st->ctl.addr;
- unsigned int perm = 0;
-
- mutex_lock(&n->security_lock);
- list_for_each_entry(sentry, &n->security_list, sec_entry) {
- s = &sentry->sec;
-
- if (s->addr.sa_family != sa->sa_family)
- continue;
-
- if (s->addr.sa_data_len != sa->sa_data_len)
- continue;
-
- /*
- * This '2' below is a port field. This may be very wrong to do
- * in atalk for example though. If there is ever a need
- * to extend the protocol to something else, I can create
- * per-family helpers and use them instead of this memcmp.
- */
- if (memcmp(s->addr.sa_data + 2, sa->sa_data + 2,
- sa->sa_data_len - 2))
- continue;
-
- perm = s->permissions;
- }
- mutex_unlock(&n->security_lock);
-
- return perm;
-}
-
-/*
- * Accept new client: allocate appropriate network state and check permissions.
- */
-static struct dst_state *dst_accept_client(struct dst_state *st)
-{
- unsigned int revents = 0;
- unsigned int err_mask = POLLERR | POLLHUP | POLLRDHUP;
- unsigned int mask = err_mask | POLLIN;
- struct dst_node *n = st->node;
- int err = 0;
- struct socket *sock = NULL;
- struct dst_state *new;
-
- while (!err && !sock) {
- revents = dst_state_poll(st);
-
- if (!(revents & mask)) {
- DEFINE_WAIT(wait);
-
- for (;;) {
- prepare_to_wait(&st->thread_wait,
- &wait, TASK_INTERRUPTIBLE);
- if (!n->trans_scan_timeout || st->need_exit)
- break;
-
- revents = dst_state_poll(st);
-
- if (revents & mask)
- break;
-
- if (signal_pending(current))
- break;
-
- /*
- * Magic HZ? Polling check above is not safe in
- * all cases (like socket reset in BH context),
- * so it is simpler just to postpone it to the
- * process context instead of implementing
- * special locking there.
- */
- schedule_timeout(HZ);
- }
- finish_wait(&st->thread_wait, &wait);
- }
-
- err = -ECONNRESET;
- dst_state_lock(st);
-
- dprintk("%s: st: %p, revents: %x [err: %d, in: %d].\n",
- __func__, st, revents, revents & err_mask,
- revents & POLLIN);
-
- if (revents & err_mask) {
- dprintk("%s: revents: %x, socket: %p, err: %d.\n",
- __func__, revents, st->socket, err);
- err = -ECONNRESET;
- }
-
- if (!n->trans_scan_timeout || st->need_exit)
- err = -ENODEV;
-
- if (st->socket && (revents & POLLIN))
- err = kernel_accept(st->socket, &sock, 0);
-
- dst_state_unlock(st);
- }
-
- if (err)
- goto err_out_exit;
-
- new = dst_state_alloc(st->node);
- if (IS_ERR(new)) {
- err = -ENOMEM;
- goto err_out_release;
- }
- new->socket = sock;
-
- new->ctl.addr.sa_data_len = sizeof(struct sockaddr);
- err = kernel_getpeername(sock, (struct sockaddr *)&new->ctl.addr,
- (int *)&new->ctl.addr.sa_data_len);
- if (err)
- goto err_out_put;
-
- new->permissions = dst_check_permissions(st, new);
- if (new->permissions == 0) {
- err = -EPERM;
- dst_dump_addr(sock, (struct sockaddr *)&new->ctl.addr,
- "Client is not allowed to connect");
- goto err_out_put;
- }
-
- err = dst_poll_init(new);
- if (err)
- goto err_out_put;
-
- dst_dump_addr(sock, (struct sockaddr *)&new->ctl.addr,
- "Connected client");
-
- return new;
-
-err_out_put:
- dst_state_put(new);
-err_out_release:
- sock_release(sock);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-/*
- * Each server-side block request eventually finishes.
- * Usually this happens in the hard irq context of the appropriate controller,
- * so to play well with all cases we just queue the BIO onto a list
- * and wake up the processing thread, which takes the completed request and
- * sends it (encrypting if needed) back to the client (if it was a read
- * request), or sends back a reply that the write completed successfully.
- */
-static int dst_export_process_request_queue(struct dst_state *st)
-{
- unsigned long flags;
- struct dst_export_priv *p = NULL;
- struct bio *bio;
- int err = 0;
-
- while (!list_empty(&st->request_list)) {
- spin_lock_irqsave(&st->request_lock, flags);
- if (!list_empty(&st->request_list)) {
- p = list_first_entry(&st->request_list,
- struct dst_export_priv, request_entry);
- list_del(&p->request_entry);
- }
- spin_unlock_irqrestore(&st->request_lock, flags);
-
- if (!p)
- break;
-
- bio = p->bio;
-
- if (dst_need_crypto(st->node) && (bio_data_dir(bio) == READ))
- err = dst_export_crypto(st->node, bio);
- else
- err = dst_export_send_bio(bio);
-
- if (err)
- break;
- }
-
- return err;
-}
-
-/*
- * Cleanup export state.
- * It has to wait until all requests are finished,
- * and then free them all.
- */
-static void dst_state_cleanup_export(struct dst_state *st)
-{
- struct dst_export_priv *p;
- unsigned long flags;
-
- /*
- * This loop waits for all pending bios to be completed and freed.
- */
- while (atomic_read(&st->refcnt) > 1) {
- dprintk("%s: st: %p, refcnt: %d, list_empty: %d.\n",
- __func__, st, atomic_read(&st->refcnt),
- list_empty(&st->request_list));
- wait_event_timeout(st->thread_wait,
- (atomic_read(&st->refcnt) == 1) ||
- !list_empty(&st->request_list),
- HZ/2);
-
- while (!list_empty(&st->request_list)) {
- p = NULL;
- spin_lock_irqsave(&st->request_lock, flags);
- if (!list_empty(&st->request_list)) {
- p = list_first_entry(&st->request_list,
- struct dst_export_priv, request_entry);
- list_del(&p->request_entry);
- }
- spin_unlock_irqrestore(&st->request_lock, flags);
-
- if (p)
- bio_put(p->bio);
-
- dprintk("%s: st: %p, refcnt: %d, list_empty: %d, p: "
- "%p.\n", __func__, st, atomic_read(&st->refcnt),
- list_empty(&st->request_list), p);
- }
- }
-
- dst_state_put(st);
-}
-
-/*
- * Client accepting thread.
- * It not only accepts new connections, but also schedules the receiving thread
- * and performs the request completion described above.
- */
-static int dst_accept(void *init_data, void *schedule_data)
-{
- struct dst_state *main_st = schedule_data;
- struct dst_node *n = init_data;
- struct dst_state *st;
- int err;
-
- while (n->trans_scan_timeout && !main_st->need_exit) {
- dprintk("%s: main_st: %p, n: %p.\n", __func__, main_st, n);
- st = dst_accept_client(main_st);
- if (IS_ERR(st))
- continue;
-
- err = dst_state_schedule_receiver(st);
- if (!err) {
- while (n->trans_scan_timeout) {
- err = wait_event_interruptible_timeout(st->thread_wait,
- !list_empty(&st->request_list) ||
- !n->trans_scan_timeout ||
- st->need_exit,
- HZ);
-
- if (!n->trans_scan_timeout || st->need_exit)
- break;
-
- if (list_empty(&st->request_list))
- continue;
-
- err = dst_export_process_request_queue(st);
- if (err)
- break;
- }
-
- st->need_exit = 1;
- wake_up(&st->thread_wait);
- }
-
- dst_state_cleanup_export(st);
- }
-
- dprintk("%s: freeing listening socket st: %p.\n", __func__, main_st);
-
- dst_state_lock(main_st);
- dst_poll_exit(main_st);
- dst_state_socket_release(main_st);
- dst_state_unlock(main_st);
- dst_state_put(main_st);
- dprintk("%s: freed listening socket st: %p.\n", __func__, main_st);
-
- return 0;
-}
-
-int dst_start_export(struct dst_node *n)
-{
- if (list_empty(&n->security_list)) {
- printk(KERN_ERR "You are trying to export node '%s' "
- "without security attributes.\nNo clients will "
- "be allowed to connect. Exiting.\n", n->name);
- return -EINVAL;
- }
- return dst_node_trans_init(n, sizeof(struct dst_export_priv));
-}
-
-/*
- * Initialize listening state and schedule accepting thread.
- */
-int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le)
-{
- struct dst_state *st;
- int err = -ENOMEM;
- struct dst_network_ctl *ctl = &le->ctl;
-
- memcpy(&n->info->net, ctl, sizeof(struct dst_network_ctl));
-
- st = dst_state_alloc(n);
- if (IS_ERR(st)) {
- err = PTR_ERR(st);
- goto err_out_exit;
- }
- memcpy(&st->ctl, ctl, sizeof(struct dst_network_ctl));
-
- err = dst_state_socket_create(st);
- if (err)
- goto err_out_put;
-
- st->socket->sk->sk_reuse = 1;
-
- err = kernel_bind(st->socket, (struct sockaddr *)&ctl->addr,
- ctl->addr.sa_data_len);
- if (err)
- goto err_out_socket_release;
-
- err = kernel_listen(st->socket, 1024);
- if (err)
- goto err_out_socket_release;
- n->state = st;
-
- err = dst_poll_init(st);
- if (err)
- goto err_out_socket_release;
-
- dst_state_get(st);
-
- err = thread_pool_schedule(n->pool, dst_thread_setup,
- dst_accept, st, MAX_SCHEDULE_TIMEOUT);
- if (err)
- goto err_out_poll_exit;
-
- return 0;
-
-err_out_poll_exit:
- dst_poll_exit(st);
-err_out_socket_release:
- dst_state_socket_release(st);
-err_out_put:
- dst_state_put(st);
-err_out_exit:
- n->state = NULL;
- return err;
-}
-
-/*
- * Free bio and related private data.
- * Also drop a reference to the appropriate state,
- * which is waited on until there are no more block IOs in flight.
- */
-static void dst_bio_destructor(struct bio *bio)
-{
- struct bio_vec *bv;
- struct dst_export_priv *priv = bio->bi_private;
- int i;
-
- bio_for_each_segment(bv, bio, i) {
- if (!bv->bv_page)
- break;
-
- __free_page(bv->bv_page);
- }
-
- if (priv)
- dst_state_put(priv->state);
- bio_free(bio, dst_bio_set);
-}
-
-/*
- * Block IO completion. Queue request to be sent back to
- * the client (or just confirmation).
- */
-static void dst_bio_end_io(struct bio *bio, int err)
-{
- struct dst_export_priv *p = bio->bi_private;
- struct dst_state *st = p->state;
- unsigned long flags;
-
- spin_lock_irqsave(&st->request_lock, flags);
- list_add_tail(&p->request_entry, &st->request_list);
- spin_unlock_irqrestore(&st->request_lock, flags);
-
- wake_up(&st->thread_wait);
-}
-
-/*
- * Allocate read request for the server.
- */
-static int dst_export_read_request(struct bio *bio, unsigned int total_size)
-{
- unsigned int size;
- struct page *page;
- int err;
-
- while (total_size) {
- err = -ENOMEM;
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto err_out_exit;
-
- size = min_t(unsigned int, PAGE_SIZE, total_size);
-
- err = bio_add_page(bio, page, size, 0);
- dprintk("%s: bio: %llu/%u, size: %u, err: %d.\n",
- __func__, (u64)bio->bi_sector, bio->bi_size,
- size, err);
- if (err <= 0)
- goto err_out_free_page;
-
- total_size -= size;
- }
-
- return 0;
-
-err_out_free_page:
- __free_page(page);
-err_out_exit:
- return err;
-}
-
-/*
- * Allocate write request for the server.
- * It not only allocates pages, but also reads the data from the network.
- */
-static int dst_export_write_request(struct dst_state *st,
- struct bio *bio, unsigned int total_size)
-{
- unsigned int size;
- struct page *page;
- void *data;
- int err;
-
- while (total_size) {
- err = -ENOMEM;
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto err_out_exit;
-
- data = kmap(page);
- if (!data)
- goto err_out_free_page;
-
- size = min_t(unsigned int, PAGE_SIZE, total_size);
-
- err = dst_data_recv(st, data, size);
- if (err)
- goto err_out_unmap_page;
-
- err = bio_add_page(bio, page, size, 0);
- if (err <= 0)
- goto err_out_unmap_page;
-
- kunmap(page);
-
- total_size -= size;
- }
-
- return 0;
-
-err_out_unmap_page:
- kunmap(page);
-err_out_free_page:
- __free_page(page);
-err_out_exit:
- return err;
-}
-
-/*
- * Groovy, we've gotten an IO request from the client.
- * Allocate BIO from the bioset, private data from the mempool
- * and lots of pages for IO.
- */
-int dst_process_io(struct dst_state *st)
-{
- struct dst_node *n = st->node;
- struct dst_cmd *cmd = st->data;
- struct bio *bio;
- struct dst_export_priv *priv;
- int err = -ENOMEM;
-
- if (unlikely(!n->bdev)) {
- err = -EINVAL;
- goto err_out_exit;
- }
-
- bio = bio_alloc_bioset(GFP_KERNEL,
- PAGE_ALIGN(cmd->size) >> PAGE_SHIFT,
- dst_bio_set);
- if (!bio)
- goto err_out_exit;
-
- priv = (struct dst_export_priv *)(((void *)bio) -
- sizeof (struct dst_export_priv));
-
- priv->state = dst_state_get(st);
- priv->bio = bio;
-
- bio->bi_private = priv;
- bio->bi_end_io = dst_bio_end_io;
- bio->bi_destructor = dst_bio_destructor;
- bio->bi_bdev = n->bdev;
-
- /*
- * Server side is only interested in two low bits:
- * uptodate (set by itself actually) and rw block
- */
- bio->bi_flags |= cmd->flags & 3;
-
- bio->bi_rw = cmd->rw;
- bio->bi_size = 0;
- bio->bi_sector = cmd->sector;
-
- dst_bio_to_cmd(bio, &priv->cmd, DST_IO_RESPONSE, cmd->id);
-
- priv->cmd.flags = 0;
- priv->cmd.size = cmd->size;
-
- if (bio_data_dir(bio) == WRITE) {
- err = dst_recv_cdata(st, priv->cmd.hash);
- if (err)
- goto err_out_free;
-
- err = dst_export_write_request(st, bio, cmd->size);
- if (err)
- goto err_out_free;
-
- if (dst_need_crypto(n))
- return dst_export_crypto(n, bio);
- } else {
- err = dst_export_read_request(bio, cmd->size);
- if (err)
- goto err_out_free;
- }
-
- dprintk("%s: bio: %llu/%u, rw: %lu, dir: %lu, flags: %lx, phys: %d.\n",
- __func__, (u64)bio->bi_sector, bio->bi_size,
- bio->bi_rw, bio_data_dir(bio),
- bio->bi_flags, bio->bi_phys_segments);
-
- generic_make_request(bio);
-
- return 0;
-
-err_out_free:
- bio_put(bio);
-err_out_exit:
- return err;
-}
-
-/*
- * Ok, block IO is ready, let's send it back to the client...
- */
-int dst_export_send_bio(struct bio *bio)
-{
- struct dst_export_priv *p = bio->bi_private;
- struct dst_state *st = p->state;
- struct dst_cmd *cmd = &p->cmd;
- int err;
-
- dprintk("%s: id: %llu, bio: %llu/%u, csize: %u, flags: %lu, rw: %lu.\n",
- __func__, cmd->id, (u64)bio->bi_sector, bio->bi_size,
- cmd->csize, bio->bi_flags, bio->bi_rw);
-
- dst_convert_cmd(cmd);
-
- dst_state_lock(st);
- if (!st->socket) {
- err = -ECONNRESET;
- goto err_out_unlock;
- }
-
- if (bio_data_dir(bio) == WRITE) {
- /* ... or just confirmation that writing has completed. */
- cmd->size = cmd->csize = 0;
- err = dst_data_send_header(st->socket, cmd,
- sizeof(struct dst_cmd), 0);
- if (err)
- goto err_out_unlock;
- } else {
- err = dst_send_bio(st, cmd, bio);
- if (err)
- goto err_out_unlock;
- }
-
- dst_state_unlock(st);
-
- bio_put(bio);
- return 0;
-
-err_out_unlock:
- dst_state_unlock(st);
-
- bio_put(bio);
- return err;
-}
diff --git a/drivers/staging/dst/state.c b/drivers/staging/dst/state.c
deleted file mode 100644
index 02a05e6..0000000
--- a/drivers/staging/dst/state.c
+++ /dev/null
@@ -1,844 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/buffer_head.h>
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-#include <linux/connector.h>
-#include <linux/dst.h>
-#include <linux/device.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/socket.h>
-#include <linux/slab.h>
-
-#include <net/sock.h>
-
-/*
- * Polling machinery.
- */
-
-struct dst_poll_helper {
- poll_table pt;
- struct dst_state *st;
-};
-
-static int dst_queue_wake(wait_queue_t *wait, unsigned mode,
- int sync, void *key)
-{
- struct dst_state *st = container_of(wait, struct dst_state, wait);
-
- wake_up(&st->thread_wait);
- return 1;
-}
-
-static void dst_queue_func(struct file *file, wait_queue_head_t *whead,
- poll_table *pt)
-{
- struct dst_state *st = container_of(pt, struct dst_poll_helper, pt)->st;
-
- st->whead = whead;
- init_waitqueue_func_entry(&st->wait, dst_queue_wake);
- add_wait_queue(whead, &st->wait);
-}
-
-void dst_poll_exit(struct dst_state *st)
-{
- if (st->whead) {
- remove_wait_queue(st->whead, &st->wait);
- st->whead = NULL;
- }
-}
-
-int dst_poll_init(struct dst_state *st)
-{
- struct dst_poll_helper ph;
-
- ph.st = st;
- init_poll_funcptr(&ph.pt, &dst_queue_func);
-
- st->socket->ops->poll(NULL, st->socket, &ph.pt);
- return 0;
-}
-
-/*
- * Header receiving function - may block.
- */
-static int dst_data_recv_header(struct socket *sock,
- void *data, unsigned int size, int block)
-{
- struct msghdr msg;
- struct kvec iov;
- int err;
-
- iov.iov_base = data;
- iov.iov_len = size;
-
- msg.msg_iov = (struct iovec *)&iov;
- msg.msg_iovlen = 1;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = (block) ? MSG_WAITALL : MSG_DONTWAIT;
-
- err = kernel_recvmsg(sock, &msg, &iov, 1, iov.iov_len,
- msg.msg_flags);
- if (err != size)
- return -1;
-
- return 0;
-}
-
-/*
- * Header sending function - may block.
- */
-int dst_data_send_header(struct socket *sock,
- void *data, unsigned int size, int more)
-{
- struct msghdr msg;
- struct kvec iov;
- int err;
-
- iov.iov_base = data;
- iov.iov_len = size;
-
- msg.msg_iov = (struct iovec *)&iov;
- msg.msg_iovlen = 1;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = MSG_WAITALL | (more ? MSG_MORE : 0);
-
- err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
- if (err != size) {
- dprintk("%s: size: %u, more: %d, err: %d.\n",
- __func__, size, more, err);
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Block autoconfiguration: request the storage size and permissions.
- */
-static int dst_request_remote_config(struct dst_state *st)
-{
- struct dst_node *n = st->node;
- int err = -EINVAL;
- struct dst_cmd *cmd = st->data;
-
- memset(cmd, 0, sizeof(struct dst_cmd));
- cmd->cmd = DST_CFG;
-
- dst_convert_cmd(cmd);
-
- err = dst_data_send_header(st->socket, cmd, sizeof(struct dst_cmd), 0);
- if (err)
- goto out;
-
- err = dst_data_recv_header(st->socket, cmd, sizeof(struct dst_cmd), 1);
- if (err)
- goto out;
-
- dst_convert_cmd(cmd);
-
- if (cmd->cmd != DST_CFG) {
- err = -EINVAL;
- dprintk("%s: checking result: cmd: %d, size reported: %llu.\n",
- __func__, cmd->cmd, cmd->sector);
- goto out;
- }
-
- if (n->size != 0)
- n->size = min_t(loff_t, n->size, cmd->sector);
- else
- n->size = cmd->sector;
-
- n->info->size = n->size;
- st->permissions = cmd->rw;
-
-out:
- dprintk("%s: n: %p, err: %d, size: %llu, permission: %x.\n",
- __func__, n, err, n->size, st->permissions);
- return err;
-}
-
-/*
- * Socket machinery.
- */
-
-#define DST_DEFAULT_TIMEO 20000
-
-int dst_state_socket_create(struct dst_state *st)
-{
- int err;
- struct socket *sock;
- struct dst_network_ctl *ctl = &st->ctl;
-
- err = sock_create(ctl->addr.sa_family, ctl->type, ctl->proto, &sock);
- if (err < 0)
- return err;
-
- sock->sk->sk_sndtimeo = sock->sk->sk_rcvtimeo =
- msecs_to_jiffies(DST_DEFAULT_TIMEO);
- sock->sk->sk_allocation = GFP_NOIO;
-
- st->socket = st->read_socket = sock;
- return 0;
-}
-
-void dst_state_socket_release(struct dst_state *st)
-{
- dprintk("%s: st: %p, socket: %p, n: %p.\n",
- __func__, st, st->socket, st->node);
- if (st->socket) {
- sock_release(st->socket);
- st->socket = NULL;
- st->read_socket = NULL;
- }
-}
-
-void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str)
-{
- if (sk->ops->family == AF_INET) {
- struct sockaddr_in *sin = (struct sockaddr_in *)sa;
- printk(KERN_INFO "%s %u.%u.%u.%u:%d.\n", str,
- NIPQUAD(sin->sin_addr.s_addr), ntohs(sin->sin_port));
- } else if (sk->ops->family == AF_INET6) {
- struct sockaddr_in6 *sin = (struct sockaddr_in6 *)sa;
- printk(KERN_INFO "%s %pi6:%d",
- str, &sin->sin6_addr, ntohs(sin->sin6_port));
- }
-}
-
-void dst_state_exit_connected(struct dst_state *st)
-{
- if (st->socket) {
- dst_poll_exit(st);
- st->socket->ops->shutdown(st->socket, 2);
-
- dst_dump_addr(st->socket, (struct sockaddr *)&st->ctl.addr,
- "Disconnected peer");
- dst_state_socket_release(st);
- }
-}
-
-static int dst_state_init_connected(struct dst_state *st)
-{
- int err;
- struct dst_network_ctl *ctl = &st->ctl;
-
- err = dst_state_socket_create(st);
- if (err)
- goto err_out_exit;
-
- err = kernel_connect(st->socket, (struct sockaddr *)&st->ctl.addr,
- st->ctl.addr.sa_data_len, 0);
- if (err)
- goto err_out_release;
-
- err = dst_poll_init(st);
- if (err)
- goto err_out_release;
-
- dst_dump_addr(st->socket, (struct sockaddr *)&ctl->addr,
- "Connected to peer");
-
- return 0;
-
-err_out_release:
- dst_state_socket_release(st);
-err_out_exit:
- return err;
-}
-
-/*
- * State reset is used to reconnect to the remote peer.
- * May fail, but who cares, we will try again later.
- */
-static inline void dst_state_reset_nolock(struct dst_state *st)
-{
- dst_state_exit_connected(st);
- dst_state_init_connected(st);
-}
-
-static inline void dst_state_reset(struct dst_state *st)
-{
- dst_state_lock(st);
- dst_state_reset_nolock(st);
- dst_state_unlock(st);
-}
-
-/*
- * Basic network sending/receiving functions.
- * Blocking mode is used.
- */
-static int dst_data_recv_raw(struct dst_state *st, void *buf, u64 size)
-{
- struct msghdr msg;
- struct kvec iov;
- int err;
-
- BUG_ON(!size);
-
- iov.iov_base = buf;
- iov.iov_len = size;
-
- msg.msg_iov = (struct iovec *)&iov;
- msg.msg_iovlen = 1;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = MSG_DONTWAIT;
-
- err = kernel_recvmsg(st->socket, &msg, &iov, 1, iov.iov_len,
- msg.msg_flags);
- if (err <= 0) {
- dprintk("%s: failed to recv data: size: %llu, err: %d.\n",
- __func__, size, err);
- if (err == 0)
- err = -ECONNRESET;
-
- dst_state_exit_connected(st);
- }
-
- return err;
-}
-
-/*
- * Ping command to detect failed nodes early.
- */
-static int dst_send_ping(struct dst_state *st)
-{
- struct dst_cmd *cmd = st->data;
- int err = -ECONNRESET;
-
- dst_state_lock(st);
- if (st->socket) {
- memset(cmd, 0, sizeof(struct dst_cmd));
-
- cmd->cmd = __cpu_to_be32(DST_PING);
-
- err = dst_data_send_header(st->socket, cmd,
- sizeof(struct dst_cmd), 0);
- }
- dprintk("%s: st: %p, socket: %p, err: %d.\n", __func__,
- st, st->socket, err);
- dst_state_unlock(st);
-
- return err;
-}
-
-/*
- * Receiving function, which should either return an error or read the
- * whole block request. If there has been no traffic for one second,
- * send a ping, since the remote node may have died.
- */
-int dst_data_recv(struct dst_state *st, void *data, unsigned int size)
-{
- unsigned int revents = 0;
- unsigned int err_mask = POLLERR | POLLHUP | POLLRDHUP;
- unsigned int mask = err_mask | POLLIN;
- struct dst_node *n = st->node;
- int err = 0;
-
- while (size && !err) {
- revents = dst_state_poll(st);
-
- if (!(revents & mask)) {
- DEFINE_WAIT(wait);
-
- for (;;) {
- prepare_to_wait(&st->thread_wait, &wait,
- TASK_INTERRUPTIBLE);
- if (!n->trans_scan_timeout || st->need_exit)
- break;
-
- revents = dst_state_poll(st);
-
- if (revents & mask)
- break;
-
- if (signal_pending(current))
- break;
-
- if (!schedule_timeout(HZ)) {
- err = dst_send_ping(st);
- if (err)
- return err;
- }
-
- continue;
- }
- finish_wait(&st->thread_wait, &wait);
- }
-
- err = -ECONNRESET;
- dst_state_lock(st);
-
- if (st->socket && (st->read_socket == st->socket) &&
- (revents & POLLIN)) {
- err = dst_data_recv_raw(st, data, size);
- if (err > 0) {
- data += err;
- size -= err;
- err = 0;
- }
- }
-
- if (revents & err_mask || !st->socket) {
- dprintk("%s: revents: %x, socket: %p, size: %u, "
- "err: %d.\n", __func__, revents,
- st->socket, size, err);
- err = -ECONNRESET;
- }
-
- dst_state_unlock(st);
-
- if (!n->trans_scan_timeout)
- err = -ENODEV;
- }
-
- return err;
-}
-
-/*
- * Send block autoconf reply.
- */
-static int dst_process_cfg(struct dst_state *st)
-{
- struct dst_node *n = st->node;
- struct dst_cmd *cmd = st->data;
- int err;
-
- cmd->sector = n->size;
- cmd->rw = st->permissions;
-
- dst_convert_cmd(cmd);
-
- dst_state_lock(st);
- err = dst_data_send_header(st->socket, cmd, sizeof(struct dst_cmd), 0);
- dst_state_unlock(st);
-
- return err;
-}
-
-/*
- * Receive block IO from the network.
- */
-static int dst_recv_bio(struct dst_state *st, struct bio *bio,
- unsigned int total_size)
-{
- struct bio_vec *bv;
- int i, err;
- void *data;
- unsigned int sz;
-
- bio_for_each_segment(bv, bio, i) {
- sz = min(total_size, bv->bv_len);
-
- dprintk("%s: bio: %llu/%u, total: %u, len: %u, sz: %u, "
- "off: %u.\n", __func__, (u64)bio->bi_sector,
- bio->bi_size, total_size, bv->bv_len, sz,
- bv->bv_offset);
-
- data = kmap(bv->bv_page) + bv->bv_offset;
- err = dst_data_recv(st, data, sz);
- kunmap(bv->bv_page);
-
- bv->bv_len = sz;
-
- if (err)
- return err;
-
- total_size -= sz;
- if (total_size == 0)
- break;
- }
-
- return 0;
-}
-
-/*
- * Our block IO has just completed and arrived: get it.
- */
-static int dst_process_io_response(struct dst_state *st)
-{
- struct dst_node *n = st->node;
- struct dst_cmd *cmd = st->data;
- struct dst_trans *t;
- int err = 0;
- struct bio *bio;
-
- mutex_lock(&n->trans_lock);
- t = dst_trans_search(n, cmd->id);
- mutex_unlock(&n->trans_lock);
-
- if (!t)
- goto err_out_exit;
-
- bio = t->bio;
-
- dprintk("%s: bio: %llu/%u, cmd_size: %u, csize: %u, dir: %lu.\n",
- __func__, (u64)bio->bi_sector, bio->bi_size, cmd->size,
- cmd->csize, bio_data_dir(bio));
-
- if (bio_data_dir(bio) == READ) {
- if (bio->bi_size != cmd->size - cmd->csize)
- goto err_out_exit;
-
- if (dst_need_crypto(n)) {
- err = dst_recv_cdata(st, t->cmd.hash);
- if (err)
- goto err_out_exit;
- }
-
- err = dst_recv_bio(st, t->bio, bio->bi_size);
- if (err)
- goto err_out_exit;
-
- if (dst_need_crypto(n))
- return dst_trans_crypto(t);
- } else {
- err = -EBADMSG;
- if (cmd->size || cmd->csize)
- goto err_out_exit;
- }
-
- dst_trans_remove(t);
- dst_trans_put(t);
-
- return 0;
-
-err_out_exit:
- return err;
-}
-
-/*
- * Receive crypto data.
- */
-int dst_recv_cdata(struct dst_state *st, void *cdata)
-{
- struct dst_cmd *cmd = st->data;
- struct dst_node *n = st->node;
- struct dst_crypto_ctl *c = &n->crypto;
- int err;
-
- if (cmd->csize != c->crypto_attached_size) {
- dprintk("%s: cmd: cmd: %u, sector: %llu, size: %u, "
- "csize: %u != digest size %u.\n",
- __func__, cmd->cmd, cmd->sector, cmd->size,
- cmd->csize, c->crypto_attached_size);
- err = -EINVAL;
- goto err_out_exit;
- }
-
- err = dst_data_recv(st, cdata, cmd->csize);
- if (err)
- goto err_out_exit;
-
- cmd->size -= cmd->csize;
- return 0;
-
-err_out_exit:
- return err;
-}
-
-/*
- * Receive the command and start its processing.
- */
-static int dst_recv_processing(struct dst_state *st)
-{
- int err = -EINTR;
- struct dst_cmd *cmd = st->data;
-
- /*
-	 * If the socket is reset after this statement, dst_data_recv()
-	 * will simply fail and the loop will start again, so this can be
-	 * done without any locks.
-	 *
-	 * st->read_socket is needed to prevent the state machine from
-	 * breaking between this read and the subsequent one in the
-	 * protocol-specific functions during a connection reset. After a
-	 * reset we have to read the next command and must not expect data
-	 * for the old command to magically appear on the new connection.
- */
- st->read_socket = st->socket;
- err = dst_data_recv(st, cmd, sizeof(struct dst_cmd));
- if (err)
- goto out_exit;
-
- dst_convert_cmd(cmd);
-
- dprintk("%s: cmd: %u, size: %u, csize: %u, id: %llu, "
- "sector: %llu, flags: %llx, rw: %llx.\n",
- __func__, cmd->cmd, cmd->size,
- cmd->csize, cmd->id, cmd->sector,
- cmd->flags, cmd->rw);
-
- /*
- * This should catch protocol breakage and random garbage
- * instead of commands.
- */
- if (unlikely(cmd->csize > st->size - sizeof(struct dst_cmd))) {
- err = -EBADMSG;
- goto out_exit;
- }
-
- err = -EPROTO;
- switch (cmd->cmd) {
- case DST_IO_RESPONSE:
- err = dst_process_io_response(st);
- break;
- case DST_IO:
- err = dst_process_io(st);
- break;
- case DST_CFG:
- err = dst_process_cfg(st);
- break;
- case DST_PING:
- err = 0;
- break;
- default:
- break;
- }
-
-out_exit:
- return err;
-}
-
-/*
- * Receiving thread. For the client node we should try to reconnect,
- * for an accepted client we just drop the state and expect it to reconnect.
- */
-static int dst_recv(void *init_data, void *schedule_data)
-{
- struct dst_state *st = schedule_data;
- struct dst_node *n = init_data;
- int err = 0;
-
- dprintk("%s: start st: %p, n: %p, scan: %lu, need_exit: %d.\n",
- __func__, st, n, n->trans_scan_timeout, st->need_exit);
-
- while (n->trans_scan_timeout && !st->need_exit) {
- err = dst_recv_processing(st);
- if (err < 0) {
- if (!st->ctl.type)
- break;
-
- if (!n->trans_scan_timeout || st->need_exit)
- break;
-
- dst_state_reset(st);
- msleep(1000);
- }
- }
-
- st->need_exit = 1;
- wake_up(&st->thread_wait);
-
- dprintk("%s: freeing receiving socket st: %p.\n", __func__, st);
- dst_state_lock(st);
- dst_state_exit_connected(st);
- dst_state_unlock(st);
- dst_state_put(st);
-
- dprintk("%s: freed receiving socket st: %p.\n", __func__, st);
-
- return err;
-}
-
-/*
- * Network state dies here and is born again a couple of lines below.
- * This object is the main network state processing engine:
- * sending, receiving, reconnections, all network related
- * tasks are handled on behalf of the state.
- */
-static void dst_state_free(struct dst_state *st)
-{
- dprintk("%s: st: %p.\n", __func__, st);
- if (st->cleanup)
- st->cleanup(st);
- kfree(st->data);
- kfree(st);
-}
-
-struct dst_state *dst_state_alloc(struct dst_node *n)
-{
- struct dst_state *st;
- int err = -ENOMEM;
-
- st = kzalloc(sizeof(struct dst_state), GFP_KERNEL);
- if (!st)
- goto err_out_exit;
-
- st->node = n;
- st->need_exit = 0;
-
- st->size = PAGE_SIZE;
- st->data = kmalloc(st->size, GFP_KERNEL);
- if (!st->data)
- goto err_out_free;
-
- spin_lock_init(&st->request_lock);
- INIT_LIST_HEAD(&st->request_list);
-
- mutex_init(&st->state_lock);
- init_waitqueue_head(&st->thread_wait);
-
- /*
- * One for processing thread, another one for node itself.
- */
- atomic_set(&st->refcnt, 2);
-
- dprintk("%s: st: %p, n: %p.\n", __func__, st, st->node);
-
- return st;
-
-err_out_free:
- kfree(st);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-int dst_state_schedule_receiver(struct dst_state *st)
-{
- return thread_pool_schedule_private(st->node->pool, dst_thread_setup,
- dst_recv, st, MAX_SCHEDULE_TIMEOUT, st->node);
-}
-
-/*
- * Initialize client's connection to the remote peer: allocate state,
- * connect and perform block IO autoconfiguration.
- */
-int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r)
-{
- struct dst_state *st;
- int err = -ENOMEM;
-
- st = dst_state_alloc(n);
- if (IS_ERR(st)) {
- err = PTR_ERR(st);
- goto err_out_exit;
- }
- memcpy(&st->ctl, r, sizeof(struct dst_network_ctl));
-
- err = dst_state_init_connected(st);
- if (err)
- goto err_out_free_data;
-
- err = dst_request_remote_config(st);
- if (err)
- goto err_out_exit_connected;
- n->state = st;
-
- err = dst_state_schedule_receiver(st);
- if (err)
- goto err_out_exit_connected;
-
- return 0;
-
-err_out_exit_connected:
- dst_state_exit_connected(st);
-err_out_free_data:
- dst_state_free(st);
-err_out_exit:
- n->state = NULL;
- return err;
-}
-
-void dst_state_put(struct dst_state *st)
-{
- dprintk("%s: st: %p, refcnt: %d.\n",
- __func__, st, atomic_read(&st->refcnt));
- if (atomic_dec_and_test(&st->refcnt))
- dst_state_free(st);
-}
-
-/*
- * Send block IO to the network one by one using zero-copy ->sendpage().
- */
-int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio)
-{
- struct bio_vec *bv;
- struct dst_crypto_ctl *c = &st->node->crypto;
- int err, i = 0;
- int flags = MSG_WAITALL;
-
- err = dst_data_send_header(st->socket, cmd,
- sizeof(struct dst_cmd) + c->crypto_attached_size, bio->bi_vcnt);
- if (err)
- goto err_out_exit;
-
- bio_for_each_segment(bv, bio, i) {
- if (i < bio->bi_vcnt - 1)
- flags |= MSG_MORE;
-
- err = kernel_sendpage(st->socket, bv->bv_page, bv->bv_offset,
- bv->bv_len, flags);
- if (err <= 0)
- goto err_out_exit;
- }
-
- return 0;
-
-err_out_exit:
- dprintk("%s: %d/%d, flags: %x, err: %d.\n",
- __func__, i, bio->bi_vcnt, flags, err);
- return err;
-}
-
-/*
- * Send transaction to the remote peer.
- */
-int dst_trans_send(struct dst_trans *t)
-{
- int err;
- struct dst_state *st = t->n->state;
- struct bio *bio = t->bio;
-
- dst_convert_cmd(&t->cmd);
-
- dst_state_lock(st);
- if (!st->socket) {
- err = dst_state_init_connected(st);
- if (err)
- goto err_out_unlock;
- }
-
- if (bio_data_dir(bio) == WRITE) {
- err = dst_send_bio(st, &t->cmd, t->bio);
- } else {
- err = dst_data_send_header(st->socket, &t->cmd,
- sizeof(struct dst_cmd), 0);
- }
- if (err)
- goto err_out_reset;
-
- dst_state_unlock(st);
- return 0;
-
-err_out_reset:
- dst_state_reset_nolock(st);
-err_out_unlock:
- dst_state_unlock(st);
-
- return err;
-}
diff --git a/drivers/staging/dst/thread_pool.c b/drivers/staging/dst/thread_pool.c
deleted file mode 100644
index 29a82b2..0000000
--- a/drivers/staging/dst/thread_pool.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/dst.h>
-#include <linux/kthread.h>
-#include <linux/slab.h>
-
-/*
- * The thread pool abstraction allows work to be scheduled for execution
- * on behalf of a kernel thread. The user does not operate on threads
- * directly; instead, setup and cleanup callbacks are provided for the
- * pool itself, and action and cleanup callbacks for each submitted work
- * item.
- *
- * Each worker has private data initialized at creation time and data
- * provided by the user at scheduling time.
- *
- * While an action is being performed, the thread cannot be used by
- * other users; they will sleep until a free thread picks up their work.
- */
-struct thread_pool_worker {
- struct list_head worker_entry;
-
- struct task_struct *thread;
-
- struct thread_pool *pool;
-
- int error;
- int has_data;
- int need_exit;
- unsigned int id;
-
- wait_queue_head_t wait;
-
- void *private;
- void *schedule_data;
-
- int (*action)(void *private, void *schedule_data);
- void (*cleanup)(void *private);
-};
-
-static void thread_pool_exit_worker(struct thread_pool_worker *w)
-{
- kthread_stop(w->thread);
-
- w->cleanup(w->private);
- kfree(w);
-}
-
-/*
- * Called to mark a thread as ready and allow users to schedule new work.
- */
-static void thread_pool_worker_make_ready(struct thread_pool_worker *w)
-{
- struct thread_pool *p = w->pool;
-
- mutex_lock(&p->thread_lock);
-
- if (!w->need_exit) {
- list_move_tail(&w->worker_entry, &p->ready_list);
- w->has_data = 0;
- mutex_unlock(&p->thread_lock);
-
- wake_up(&p->wait);
- } else {
- p->thread_num--;
- list_del(&w->worker_entry);
- mutex_unlock(&p->thread_lock);
-
- thread_pool_exit_worker(w);
- }
-}
-
-/*
- * Thread action loop: waits until there is new work.
- */
-static int thread_pool_worker_func(void *data)
-{
- struct thread_pool_worker *w = data;
-
- while (!kthread_should_stop()) {
- wait_event_interruptible(w->wait,
- kthread_should_stop() || w->has_data);
-
- if (kthread_should_stop())
- break;
-
- if (!w->has_data)
- continue;
-
- w->action(w->private, w->schedule_data);
- thread_pool_worker_make_ready(w);
- }
-
- return 0;
-}
-
-/*
- * Remove a single worker without specifying which one.
- */
-void thread_pool_del_worker(struct thread_pool *p)
-{
- struct thread_pool_worker *w = NULL;
-
- while (!w && p->thread_num) {
- wait_event(p->wait, !list_empty(&p->ready_list) ||
- !p->thread_num);
-
- dprintk("%s: locking list_empty: %d, thread_num: %d.\n",
- __func__, list_empty(&p->ready_list),
- p->thread_num);
-
- mutex_lock(&p->thread_lock);
- if (!list_empty(&p->ready_list)) {
- w = list_first_entry(&p->ready_list,
- struct thread_pool_worker,
- worker_entry);
-
- dprintk("%s: deleting w: %p, thread_num: %d, "
- "list: %p [%p.%p].\n", __func__,
- w, p->thread_num, &p->ready_list,
- p->ready_list.prev, p->ready_list.next);
-
- p->thread_num--;
- list_del(&w->worker_entry);
- }
- mutex_unlock(&p->thread_lock);
- }
-
- if (w)
- thread_pool_exit_worker(w);
- dprintk("%s: deleted w: %p, thread_num: %d.\n",
- __func__, w, p->thread_num);
-}
-
-/*
- * Remove a worker with given ID.
- */
-void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id)
-{
- struct thread_pool_worker *w;
- int found = 0;
-
- mutex_lock(&p->thread_lock);
- list_for_each_entry(w, &p->ready_list, worker_entry) {
- if (w->id == id) {
- found = 1;
- p->thread_num--;
- list_del(&w->worker_entry);
- break;
- }
- }
-
- if (!found) {
- list_for_each_entry(w, &p->active_list, worker_entry) {
- if (w->id == id) {
- w->need_exit = 1;
- break;
- }
- }
- }
- mutex_unlock(&p->thread_lock);
-
- if (found)
- thread_pool_exit_worker(w);
-}
-
-/*
- * Add a new worker thread with the given parameters.
- * If the initialization callback fails, return an error.
- */
-int thread_pool_add_worker(struct thread_pool *p,
- char *name,
- unsigned int id,
- void *(*init)(void *private),
- void (*cleanup)(void *private),
- void *private)
-{
- struct thread_pool_worker *w;
- int err = -ENOMEM;
-
- w = kzalloc(sizeof(struct thread_pool_worker), GFP_KERNEL);
- if (!w)
- goto err_out_exit;
-
- w->pool = p;
- init_waitqueue_head(&w->wait);
- w->cleanup = cleanup;
- w->id = id;
-
- w->thread = kthread_run(thread_pool_worker_func, w, "%s", name);
- if (IS_ERR(w->thread)) {
- err = PTR_ERR(w->thread);
- goto err_out_free;
- }
-
- w->private = init(private);
- if (IS_ERR(w->private)) {
- err = PTR_ERR(w->private);
- goto err_out_stop_thread;
- }
-
- mutex_lock(&p->thread_lock);
- list_add_tail(&w->worker_entry, &p->ready_list);
- p->thread_num++;
- mutex_unlock(&p->thread_lock);
-
- return 0;
-
-err_out_stop_thread:
- kthread_stop(w->thread);
-err_out_free:
- kfree(w);
-err_out_exit:
- return err;
-}
-
-/*
- * Destroy the whole pool.
- */
-void thread_pool_destroy(struct thread_pool *p)
-{
- while (p->thread_num) {
- dprintk("%s: num: %d.\n", __func__, p->thread_num);
- thread_pool_del_worker(p);
- }
-
- kfree(p);
-}
-
-/*
- * Create a pool with the given number of threads.
- * They will have sequential IDs starting from zero.
- */
-struct thread_pool *thread_pool_create(int num, char *name,
- void *(*init)(void *private),
- void (*cleanup)(void *private),
- void *private)
-{
- struct thread_pool_worker *w, *tmp;
- struct thread_pool *p;
- int err = -ENOMEM;
- int i;
-
- p = kzalloc(sizeof(struct thread_pool), GFP_KERNEL);
- if (!p)
- goto err_out_exit;
-
- init_waitqueue_head(&p->wait);
- mutex_init(&p->thread_lock);
- INIT_LIST_HEAD(&p->ready_list);
- INIT_LIST_HEAD(&p->active_list);
- p->thread_num = 0;
-
- for (i = 0; i < num; ++i) {
- err = thread_pool_add_worker(p, name, i, init,
- cleanup, private);
- if (err)
- goto err_out_free_all;
- }
-
- return p;
-
-err_out_free_all:
- list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
- list_del(&w->worker_entry);
- thread_pool_exit_worker(w);
- }
- kfree(p);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-/*
- * Schedule execution of the action on a specific thread: the provided
- * ID pointer has to match the worker's previously stored private data.
- */
-int thread_pool_schedule_private(struct thread_pool *p,
- int (*setup)(void *private, void *data),
- int (*action)(void *private, void *data),
- void *data, long timeout, void *id)
-{
- struct thread_pool_worker *w, *tmp, *worker = NULL;
- int err = 0;
-
- while (!worker && !err) {
- timeout = wait_event_interruptible_timeout(p->wait,
- !list_empty(&p->ready_list),
- timeout);
-
- if (!timeout) {
- err = -ETIMEDOUT;
- break;
- }
-
- worker = NULL;
- mutex_lock(&p->thread_lock);
- list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
- if (id && id != w->private)
- continue;
-
- worker = w;
-
- list_move_tail(&w->worker_entry, &p->active_list);
-
- err = setup(w->private, data);
- if (!err) {
- w->schedule_data = data;
- w->action = action;
- w->has_data = 1;
- wake_up(&w->wait);
- } else {
- list_move_tail(&w->worker_entry,
- &p->ready_list);
- }
-
- break;
- }
- mutex_unlock(&p->thread_lock);
- }
-
- return err;
-}
-
-/*
- * Schedule execution on an arbitrary thread from the pool.
- */
-int thread_pool_schedule(struct thread_pool *p,
- int (*setup)(void *private, void *data),
- int (*action)(void *private, void *data),
- void *data, long timeout)
-{
- return thread_pool_schedule_private(p, setup,
- action, data, timeout, NULL);
-}
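
[Editorial note, not part of the patch: a minimal caller-side sketch of how the thread pool API removed above was meant to be driven. All example_* names below are hypothetical and the callbacks do no real work; this only illustrates the create/schedule/destroy flow and the setup/action callback pairing described in the comments.]

static void *example_init(void *private)
{
	/* Per-worker private data; returning the argument is enough here. */
	return private;
}

static void example_cleanup(void *private)
{
	/* Nothing to free in this sketch. */
}

static int example_setup(void *private, void *data)
{
	/* Runs in the scheduler's context before the worker is woken. */
	return 0;
}

static int example_action(void *private, void *data)
{
	/* Runs in the worker thread once it has been handed the work. */
	return 0;
}

static int example_use_pool(void)
{
	struct thread_pool *p;
	int err;

	p = thread_pool_create(2, "example", example_init,
			       example_cleanup, NULL);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* Sleeps until a ready worker picks the work up or the timeout hits. */
	err = thread_pool_schedule(p, example_setup, example_action,
				   NULL, MAX_SCHEDULE_TIMEOUT);

	thread_pool_destroy(p);
	return err;
}
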
diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
deleted file mode 100644
index 1c36a6b..0000000
--- a/drivers/staging/dst/trans.c
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/bio.h>
-#include <linux/dst.h>
-#include <linux/slab.h>
-#include <linux/mempool.h>
-
-/*
- * Transaction memory pool size.
- */
-static int dst_mempool_num = 32;
-module_param(dst_mempool_num, int, 0644);
-
-/*
- * Transaction tree management.
- */
-static inline int dst_trans_cmp(dst_gen_t gen, dst_gen_t new)
-{
- if (gen < new)
- return 1;
- if (gen > new)
- return -1;
- return 0;
-}
-
-struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen)
-{
- struct rb_root *root = &node->trans_root;
- struct rb_node *n = root->rb_node;
- struct dst_trans *t, *ret = NULL;
- int cmp;
-
- while (n) {
- t = rb_entry(n, struct dst_trans, trans_entry);
-
- cmp = dst_trans_cmp(t->gen, gen);
- if (cmp < 0)
- n = n->rb_left;
- else if (cmp > 0)
- n = n->rb_right;
- else {
- ret = t;
- break;
- }
- }
-
- dprintk("%s: %s transaction: id: %llu.\n", __func__,
- (ret) ? "found" : "not found", gen);
-
- return ret;
-}
-
-static int dst_trans_insert(struct dst_trans *new)
-{
- struct rb_root *root = &new->n->trans_root;
- struct rb_node **n = &root->rb_node, *parent = NULL;
- struct dst_trans *ret = NULL, *t;
- int cmp;
-
- while (*n) {
- parent = *n;
-
- t = rb_entry(parent, struct dst_trans, trans_entry);
-
- cmp = dst_trans_cmp(t->gen, new->gen);
- if (cmp < 0)
- n = &parent->rb_left;
- else if (cmp > 0)
- n = &parent->rb_right;
- else {
- ret = t;
- break;
- }
- }
-
- new->send_time = jiffies;
- if (ret) {
- printk(KERN_DEBUG "%s: exist: old: gen: %llu, bio: %llu/%u, "
- "send_time: %lu, new: gen: %llu, bio: %llu/%u, "
- "send_time: %lu.\n", __func__,
- ret->gen, (u64)ret->bio->bi_sector,
- ret->bio->bi_size, ret->send_time,
- new->gen, (u64)new->bio->bi_sector,
- new->bio->bi_size, new->send_time);
- return -EEXIST;
- }
-
- rb_link_node(&new->trans_entry, parent, n);
- rb_insert_color(&new->trans_entry, root);
-
- dprintk("%s: inserted: gen: %llu, bio: %llu/%u, send_time: %lu.\n",
- __func__, new->gen, (u64)new->bio->bi_sector,
- new->bio->bi_size, new->send_time);
-
- return 0;
-}
-
-int dst_trans_remove_nolock(struct dst_trans *t)
-{
- struct dst_node *n = t->n;
-
- if (t->trans_entry.rb_parent_color) {
- rb_erase(&t->trans_entry, &n->trans_root);
- t->trans_entry.rb_parent_color = 0;
- }
- return 0;
-}
-
-int dst_trans_remove(struct dst_trans *t)
-{
- int ret;
- struct dst_node *n = t->n;
-
- mutex_lock(&n->trans_lock);
- ret = dst_trans_remove_nolock(t);
- mutex_unlock(&n->trans_lock);
-
- return ret;
-}
-
-/*
- * When the transaction is completed and there are no more users,
- * we complete the appropriate block IO request with the given error status.
- */
-void dst_trans_put(struct dst_trans *t)
-{
- if (atomic_dec_and_test(&t->refcnt)) {
- struct bio *bio = t->bio;
-
- dprintk("%s: completed t: %p, gen: %llu, bio: %p.\n",
- __func__, t, t->gen, bio);
-
- bio_endio(bio, t->error);
- bio_put(bio);
-
- dst_node_put(t->n);
- mempool_free(t, t->n->trans_pool);
- }
-}
-
-/*
- * Process the given block IO request: allocate a transaction, insert it
- * into the tree and send it or schedule crypto processing.
- */
-int dst_process_bio(struct dst_node *n, struct bio *bio)
-{
- struct dst_trans *t;
- int err = -ENOMEM;
-
- t = mempool_alloc(n->trans_pool, GFP_NOFS);
- if (!t)
- goto err_out_exit;
-
- t->n = dst_node_get(n);
- t->bio = bio;
- t->error = 0;
- t->retries = 0;
- atomic_set(&t->refcnt, 1);
- t->gen = atomic_long_inc_return(&n->gen);
-
- t->enc = bio_data_dir(bio);
- dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
-
- mutex_lock(&n->trans_lock);
- err = dst_trans_insert(t);
- mutex_unlock(&n->trans_lock);
- if (err)
- goto err_out_free;
-
- dprintk("%s: gen: %llu, bio: %llu/%u, dir/enc: %d, need_crypto: %d.\n",
- __func__, t->gen, (u64)bio->bi_sector,
- bio->bi_size, t->enc, dst_need_crypto(n));
-
- if (dst_need_crypto(n) && t->enc)
- dst_trans_crypto(t);
- else
- dst_trans_send(t);
-
- return 0;
-
-err_out_free:
- dst_node_put(n);
- mempool_free(t, n->trans_pool);
-err_out_exit:
- bio_endio(bio, err);
- bio_put(bio);
- return err;
-}
-
-/*
- * Scan for timeout/stale transactions.
- * Each transaction is resent multiple times before it is completed with an error.
- */
-static void dst_trans_scan(struct work_struct *work)
-{
- struct dst_node *n = container_of(work, struct dst_node,
- trans_work.work);
- struct rb_node *rb_node;
- struct dst_trans *t;
- unsigned long timeout = n->trans_scan_timeout;
- int num = 10 * n->trans_max_retries;
-
- mutex_lock(&n->trans_lock);
-
- for (rb_node = rb_first(&n->trans_root); rb_node; ) {
- t = rb_entry(rb_node, struct dst_trans, trans_entry);
-
- if (timeout && time_after(t->send_time + timeout, jiffies)
- && t->retries == 0)
- break;
-#if 0
- dprintk("%s: t: %p, gen: %llu, n: %s, retries: %u, max: %u.\n",
- __func__, t, t->gen, n->name,
- t->retries, n->trans_max_retries);
-#endif
- if (--num == 0)
- break;
-
- dst_trans_get(t);
-
- rb_node = rb_next(rb_node);
-
- if (timeout && (++t->retries < n->trans_max_retries)) {
- dst_trans_send(t);
- } else {
- t->error = -ETIMEDOUT;
- dst_trans_remove_nolock(t);
- dst_trans_put(t);
- }
-
- dst_trans_put(t);
- }
-
- mutex_unlock(&n->trans_lock);
-
- /*
-	 * If no timeout is specified, the system is in the middle of the exit
-	 * process, so there is no need to reschedule the scanning work.
- */
- if (timeout) {
- if (!num)
- timeout = HZ;
- schedule_delayed_work(&n->trans_work, timeout);
- }
-}
-
-/*
- * Flush all transactions and mark them as timed out.
- * Destroy transaction pools.
- */
-void dst_node_trans_exit(struct dst_node *n)
-{
- struct dst_trans *t;
- struct rb_node *rb_node;
-
- if (!n->trans_cache)
- return;
-
- dprintk("%s: n: %p, cancelling the work.\n", __func__, n);
- cancel_delayed_work_sync(&n->trans_work);
- flush_scheduled_work();
- dprintk("%s: n: %p, work has been cancelled.\n", __func__, n);
-
- for (rb_node = rb_first(&n->trans_root); rb_node; ) {
- t = rb_entry(rb_node, struct dst_trans, trans_entry);
-
- dprintk("%s: t: %p, gen: %llu, n: %s.\n",
- __func__, t, t->gen, n->name);
-
- rb_node = rb_next(rb_node);
-
- t->error = -ETIMEDOUT;
- dst_trans_remove_nolock(t);
- dst_trans_put(t);
- }
-
- mempool_destroy(n->trans_pool);
- kmem_cache_destroy(n->trans_cache);
-}
-
-/*
- * Initialize transaction storage for the given node.
- * A transaction stores not only control information,
- * but also the network command and crypto data (if needed)
- * to reduce the number of allocations. Thus the transaction size
- * differs from node to node.
- */
-int dst_node_trans_init(struct dst_node *n, unsigned int size)
-{
- /*
-	 * We need this, since a node with the given name can be dropped from
-	 * the hash table while still being alive, so a subsequent creation of
-	 * a node with the same name may collide with the existing cache name.
- */
-
- snprintf(n->cache_name, sizeof(n->cache_name), "%s-%p", n->name, n);
-
- n->trans_cache = kmem_cache_create(n->cache_name,
- size + n->crypto.crypto_attached_size,
- 0, 0, NULL);
- if (!n->trans_cache)
- goto err_out_exit;
-
- n->trans_pool = mempool_create_slab_pool(dst_mempool_num,
- n->trans_cache);
- if (!n->trans_pool)
- goto err_out_cache_destroy;
-
- mutex_init(&n->trans_lock);
- n->trans_root = RB_ROOT;
-
- INIT_DELAYED_WORK(&n->trans_work, dst_trans_scan);
- schedule_delayed_work(&n->trans_work, n->trans_scan_timeout);
-
- dprintk("%s: n: %p, size: %u, crypto: %u.\n",
- __func__, n, size, n->crypto.crypto_attached_size);
-
- return 0;
-
-err_out_cache_destroy:
- kmem_cache_destroy(n->trans_cache);
-err_out_exit:
- return -ENOMEM;
-}
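
[Editorial note, not part of the patch: a rough illustration of the resend policy implemented by dst_trans_scan() above. A transaction is rescanned roughly every trans_scan_timeout jiffies and resent until trans_max_retries is reached, after which it is completed with -ETIMEDOUT, so the worst-case completion latency is approximately:]

static unsigned long dst_trans_worst_case_jiffies(struct dst_node *n)
{
	/* Illustrative upper bound only; the scan may also rerun sooner
	 * (after HZ jiffies) when a pass is cut short by the batch limit. */
	return n->trans_scan_timeout * n->trans_max_retries;
}
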
diff --git a/drivers/staging/et131x/et1310_address_map.h b/drivers/staging/et131x/et1310_address_map.h
index 6da843c..e715e4d 100644
--- a/drivers/staging/et131x/et1310_address_map.h
+++ b/drivers/staging/et131x/et1310_address_map.h
@@ -203,11 +203,14 @@ typedef struct _GLOBAL_t { /* Location: */
* 9-0: pr ndes
*/
-#define ET_DMA10_MASK 0x3FF /* 10 bit mask for DMA10W types */
-#define ET_DMA10_WRAP 0x400
-#define ET_DMA4_MASK 0x00F /* 4 bit mask for DMA4W types */
-#define ET_DMA4_WRAP 0x010
-
+#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */
+#define ET_DMA12_WRAP 0x1000
+#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */
+#define ET_DMA10_WRAP 0x0400
+#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */
+#define ET_DMA4_WRAP 0x0010
+
+#define INDEX12(x) ((x) & ET_DMA12_MASK)
#define INDEX10(x) ((x) & ET_DMA10_MASK)
#define INDEX4(x) ((x) & ET_DMA4_MASK)
@@ -216,6 +219,11 @@ extern inline void add_10bit(u32 *v, int n)
*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}
+extern inline void add_12bit(u32 *v, int n)
+{
+ *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
+}
+
/*
* 10bit DMA with wrap
* txdma tx queue write address reg in txdma address map at 0x1010
diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c
index 3ddc9b1..81c1a74 100644
--- a/drivers/staging/et131x/et1310_rx.c
+++ b/drivers/staging/et131x/et1310_rx.c
@@ -831,10 +831,10 @@ PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
/* Indicate that we have used this PSR entry. */
/* FIXME wrap 12 */
- rx_local->local_psr_full = (rx_local->local_psr_full + 1) & 0xFFF;
- if (rx_local->local_psr_full > rx_local->PsrNumEntries - 1) {
+ add_12bit(&rx_local->local_psr_full, 1);
+ if ((rx_local->local_psr_full & 0xFFF) > rx_local->PsrNumEntries - 1) {
/* Clear psr full and toggle the wrap bit */
- rx_local->local_psr_full &= 0xFFF;
+ rx_local->local_psr_full &= ~0xFFF;
rx_local->local_psr_full ^= 0x1000;
}
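
[Editorial sketch, not part of the patch: in the new scheme the low 12 bits of local_psr_full hold the ring index and bit 12 acts as a wrap toggle. add_12bit() advances the index modulo 4096 while preserving the wrap bit, and the hunk above then clears only the index and flips the wrap bit once the index passes the last PSR entry; the old code masked with 0xFFF, which discarded the wrap bit instead. Assuming a hypothetical ring of 1024 entries:]

static void psr_wrap_example(void)
{
	u32 v = 0x03FF;		/* index 1023, wrap bit (bit 12) clear      */

	add_12bit(&v, 1);	/* v == 0x0400: index 1024, wrap bit intact */

	if ((v & 0xFFF) > 1024 - 1) {
		v &= ~0xFFF;	/* clear the index, keep the wrap bit      */
		v ^= 0x1000;	/* toggle the wrap bit, so v == 0x1000     */
	}
}
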
diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
index c5b6613..c2809f2 100644
--- a/drivers/staging/hv/Hv.c
+++ b/drivers/staging/hv/Hv.c
@@ -386,7 +386,7 @@ u16 HvSignalEvent(void)
* retrieve the initialized message and event pages. Otherwise, we create and
* initialize the message and event pages.
*/
-int HvSynicInit(u32 irqVector)
+void HvSynicInit(void *irqarg)
{
u64 version;
union hv_synic_simp simp;
@@ -394,13 +394,14 @@ int HvSynicInit(u32 irqVector)
union hv_synic_sint sharedSint;
union hv_synic_scontrol sctrl;
u64 guestID;
- int ret = 0;
+ u32 irqVector = *((u32 *)(irqarg));
+ int cpu = smp_processor_id();
DPRINT_ENTER(VMBUS);
if (!gHvContext.HypercallPage) {
DPRINT_EXIT(VMBUS);
- return ret;
+ return;
}
/* Check the version */
@@ -425,27 +426,27 @@ int HvSynicInit(u32 irqVector)
*/
rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID);
if (guestID == HV_LINUX_GUEST_ID) {
- gHvContext.synICMessagePage[0] =
+ gHvContext.synICMessagePage[cpu] =
phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT);
- gHvContext.synICEventPage[0] =
+ gHvContext.synICEventPage[cpu] =
phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT);
} else {
DPRINT_ERR(VMBUS, "unknown guest id!!");
goto Cleanup;
}
DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Sifep: %p",
- gHvContext.synICMessagePage[0],
- gHvContext.synICEventPage[0]);
+ gHvContext.synICMessagePage[cpu],
+ gHvContext.synICEventPage[cpu]);
} else {
- gHvContext.synICMessagePage[0] = osd_PageAlloc(1);
- if (gHvContext.synICMessagePage[0] == NULL) {
+ gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (gHvContext.synICMessagePage[cpu] == NULL) {
DPRINT_ERR(VMBUS,
"unable to allocate SYNIC message page!!");
goto Cleanup;
}
- gHvContext.synICEventPage[0] = osd_PageAlloc(1);
- if (gHvContext.synICEventPage[0] == NULL) {
+ gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (gHvContext.synICEventPage[cpu] == NULL) {
DPRINT_ERR(VMBUS,
"unable to allocate SYNIC event page!!");
goto Cleanup;
@@ -454,7 +455,7 @@ int HvSynicInit(u32 irqVector)
/* Setup the Synic's message page */
rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
simp.SimpEnabled = 1;
- simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[0])
+ simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu])
>> PAGE_SHIFT;
DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx",
@@ -465,7 +466,7 @@ int HvSynicInit(u32 irqVector)
/* Setup the Synic's event page */
rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
siefp.SiefpEnabled = 1;
- siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[0])
+ siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu])
>> PAGE_SHIFT;
DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx",
@@ -501,32 +502,30 @@ int HvSynicInit(u32 irqVector)
DPRINT_EXIT(VMBUS);
- return ret;
+ return;
Cleanup:
- ret = -1;
-
if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
- if (gHvContext.synICEventPage[0])
- osd_PageFree(gHvContext.synICEventPage[0], 1);
+ if (gHvContext.synICEventPage[cpu])
+ osd_PageFree(gHvContext.synICEventPage[cpu], 1);
- if (gHvContext.synICMessagePage[0])
- osd_PageFree(gHvContext.synICMessagePage[0], 1);
+ if (gHvContext.synICMessagePage[cpu])
+ osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
}
DPRINT_EXIT(VMBUS);
-
- return ret;
+ return;
}
/**
* HvSynicCleanup - Cleanup routine for HvSynicInit().
*/
-void HvSynicCleanup(void)
+void HvSynicCleanup(void *arg)
{
union hv_synic_sint sharedSint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
+ int cpu = smp_processor_id();
DPRINT_ENTER(VMBUS);
@@ -539,6 +538,7 @@ void HvSynicCleanup(void)
sharedSint.Masked = 1;
+ /* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
@@ -560,8 +560,8 @@ void HvSynicCleanup(void)
wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
- osd_PageFree(gHvContext.synICMessagePage[0], 1);
- osd_PageFree(gHvContext.synICEventPage[0], 1);
+ osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
+ osd_PageFree(gHvContext.synICEventPage[cpu], 1);
}
DPRINT_EXIT(VMBUS);
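
[Editorial note, not part of the patch: on_each_cpu(func, info, wait) runs func on every online CPU, with remote CPUs executing it from IPI context, which is why HvSynicInit()/HvSynicCleanup() were reworked to the void (*)(void *) signature and why the per-CPU pages are now allocated with GFP_ATOMIC. A minimal sketch of the dispatch pattern, with illustrative names:]

static void example_init_on_cpu(void *info)
{
	u32 vector = *(u32 *)info;
	int cpu = smp_processor_id();

	/* per-CPU setup using 'vector' for this 'cpu' ... */
}

static void example_dispatch(u32 irqvector)
{
	/* Third argument 1 means: wait until all CPUs have finished. */
	on_each_cpu(example_init_on_cpu, &irqvector, 1);
}
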
diff --git a/drivers/staging/hv/Hv.h b/drivers/staging/hv/Hv.h
index 5379e4b..fce4b5c 100644
--- a/drivers/staging/hv/Hv.h
+++ b/drivers/staging/hv/Hv.h
@@ -93,7 +93,7 @@ static const struct hv_guid VMBUS_SERVICE_ID = {
},
};
-#define MAX_NUM_CPUS 1
+#define MAX_NUM_CPUS 32
struct hv_input_signal_event_buffer {
@@ -137,8 +137,8 @@ extern u16 HvPostMessage(union hv_connection_id connectionId,
extern u16 HvSignalEvent(void);
-extern int HvSynicInit(u32 irqVector);
+extern void HvSynicInit(void *irqarg);
-extern void HvSynicCleanup(void);
+extern void HvSynicCleanup(void *arg);
#endif /* __HV_H__ */
diff --git a/drivers/staging/hv/Vmbus.c b/drivers/staging/hv/Vmbus.c
index a4dd06f..35a023e 100644
--- a/drivers/staging/hv/Vmbus.c
+++ b/drivers/staging/hv/Vmbus.c
@@ -129,7 +129,7 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
/* strcpy(dev->name, "vmbus"); */
/* SynIC setup... */
- ret = HvSynicInit(*irqvector);
+ on_each_cpu(HvSynicInit, (void *)irqvector, 1);
/* Connect to VMBus in the root partition */
ret = VmbusConnect();
@@ -150,7 +150,7 @@ static int VmbusOnDeviceRemove(struct hv_device *dev)
DPRINT_ENTER(VMBUS);
VmbusChannelReleaseUnattachedChannels();
VmbusDisconnect();
- HvSynicCleanup();
+ on_each_cpu(HvSynicCleanup, NULL, 1);
DPRINT_EXIT(VMBUS);
return ret;
@@ -173,7 +173,8 @@ static void VmbusOnCleanup(struct hv_driver *drv)
*/
static void VmbusOnMsgDPC(struct hv_driver *drv)
{
- void *page_addr = gHvContext.synICMessagePage[0];
+ int cpu = smp_processor_id();
+ void *page_addr = gHvContext.synICMessagePage[cpu];
struct hv_message *msg = (struct hv_message *)page_addr +
VMBUS_MESSAGE_SINT;
struct hv_message *copied;
@@ -230,11 +231,12 @@ static void VmbusOnEventDPC(struct hv_driver *drv)
static int VmbusOnISR(struct hv_driver *drv)
{
int ret = 0;
+ int cpu = smp_processor_id();
void *page_addr;
struct hv_message *msg;
union hv_synic_event_flags *event;
- page_addr = gHvContext.synICMessagePage[0];
+ page_addr = gHvContext.synICMessagePage[cpu];
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
DPRINT_ENTER(VMBUS);
@@ -248,7 +250,7 @@ static int VmbusOnISR(struct hv_driver *drv)
}
/* TODO: Check if there are events to be process */
- page_addr = gHvContext.synICEventPage[0];
+ page_addr = gHvContext.synICEventPage[cpu];
event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
/* Since we are a child, we only need to check bit 0 */
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index f0b86f0..fd677f0 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -29,7 +29,6 @@
* driver requests - some may support multiple options */
-#include <linux/autoconf.h>
#include "iio.h"
#include "ring_generic.h"
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 536e238..638ad6b 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,7 +1,8 @@
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
depends on CPU_CAVIUM_OCTEON
- select MII
+ select PHYLIB
+ select MDIO_OCTEON
help
This driver supports the builtin ethernet ports on Cavium
Networks' products in the Octeon family. This driver supports the
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 31a58e5..05a5cc0 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -26,7 +26,8 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
+
#include <net/dst.h>
#include <asm/octeon/octeon.h>
@@ -34,86 +35,12 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mdio.h"
+#include "ethernet-util.h"
#include "cvmx-helper-board.h"
#include "cvmx-smix-defs.h"
-DECLARE_MUTEX(mdio_sem);
-
-/**
- * Perform an MII read. Called by the generic MII routines
- *
- * @dev: Device to perform read for
- * @phy_id: The MII phy id
- * @location: Register location to read
- * Returns Result from the read or zero on failure
- */
-static int cvm_oct_mdio_read(struct net_device *dev, int phy_id, int location)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_rd_dat smi_rd;
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = 1;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
-
- do {
- if (!in_interrupt())
- yield();
- smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
- } while (smi_rd.s.pending);
-
- if (smi_rd.s.val)
- return smi_rd.s.dat;
- else
- return 0;
-}
-
-static int cvm_oct_mdio_dummy_read(struct net_device *dev, int phy_id,
- int location)
-{
- return 0xffff;
-}
-
-/**
- * Perform an MII write. Called by the generic MII routines
- *
- * @dev: Device to perform write for
- * @phy_id: The MII phy id
- * @location: Register location to write
- * @val: Value to write
- */
-static void cvm_oct_mdio_write(struct net_device *dev, int phy_id, int location,
- int val)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = val;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(0), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = 0;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
-
- do {
- if (!in_interrupt())
- yield();
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(0));
- } while (smi_wr.s.pending);
-}
-
-static void cvm_oct_mdio_dummy_write(struct net_device *dev, int phy_id,
- int location, int val)
-{
-}
-
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -125,49 +52,37 @@ static void cvm_oct_get_drvinfo(struct net_device *dev,
static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- int ret;
- down(&mdio_sem);
- ret = mii_ethtool_gset(&priv->mii_info, cmd);
- up(&mdio_sem);
+ if (priv->phydev)
+ return phy_ethtool_gset(priv->phydev, cmd);
- return ret;
+ return -EINVAL;
}
static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- int ret;
- down(&mdio_sem);
- ret = mii_ethtool_sset(&priv->mii_info, cmd);
- up(&mdio_sem);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (priv->phydev)
+ return phy_ethtool_sset(priv->phydev, cmd);
- return ret;
+ return -EINVAL;
}
static int cvm_oct_nway_reset(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- int ret;
- down(&mdio_sem);
- ret = mii_nway_restart(&priv->mii_info);
- up(&mdio_sem);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
- return ret;
-}
+ if (priv->phydev)
+ return phy_start_aneg(priv->phydev);
-static u32 cvm_oct_get_link(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- u32 ret;
-
- down(&mdio_sem);
- ret = mii_link_ok(&priv->mii_info);
- up(&mdio_sem);
-
- return ret;
+ return -EINVAL;
}
const struct ethtool_ops cvm_oct_ethtool_ops = {
@@ -175,7 +90,7 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
.get_settings = cvm_oct_get_settings,
.set_settings = cvm_oct_set_settings,
.nway_reset = cvm_oct_nway_reset,
- .get_link = cvm_oct_get_link,
+ .get_link = ethtool_op_get_link,
.get_sg = ethtool_op_get_sg,
.get_tx_csum = ethtool_op_get_tx_csum,
};
@@ -191,41 +106,78 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(rq);
- unsigned int duplex_chg;
- int ret;
- down(&mdio_sem);
- ret = generic_mii_ioctl(&priv->mii_info, data, cmd, &duplex_chg);
- up(&mdio_sem);
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+}
- return ret;
+static void cvm_oct_adjust_link(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cvmx_helper_link_info_t link_info;
+
+ if (priv->last_link != priv->phydev->link) {
+ priv->last_link = priv->phydev->link;
+ link_info.u64 = 0;
+ link_info.s.link_up = priv->last_link ? 1 : 0;
+ link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
+ link_info.s.speed = priv->phydev->speed;
+ cvmx_helper_link_set( priv->port, link_info);
+ if (priv->last_link) {
+ netif_carrier_on(dev);
+ if (priv->queue != -1)
+ DEBUGPRINT("%s: %u Mbps %s duplex, "
+ "port %2d, queue %2d\n",
+ dev->name, priv->phydev->speed,
+ priv->phydev->duplex ?
+ "Full" : "Half",
+ priv->port, priv->queue);
+ else
+ DEBUGPRINT("%s: %u Mbps %s duplex, "
+ "port %2d, POW\n",
+ dev->name, priv->phydev->speed,
+ priv->phydev->duplex ?
+ "Full" : "Half",
+ priv->port);
+ } else {
+ netif_carrier_off(dev);
+ DEBUGPRINT("%s: Link down\n", dev->name);
+ }
+ }
}
+
/**
- * Setup the MDIO device structures
+ * Setup the PHY
*
* @dev: Device to setup
*
* Returns Zero on success, negative on failure
*/
-int cvm_oct_mdio_setup_device(struct net_device *dev)
+int cvm_oct_phy_setup_device(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- int phy_id = cvmx_helper_board_get_mii_address(priv->port);
- if (phy_id != -1) {
- priv->mii_info.dev = dev;
- priv->mii_info.phy_id = phy_id;
- priv->mii_info.phy_id_mask = 0xff;
- priv->mii_info.supports_gmii = 1;
- priv->mii_info.reg_num_mask = 0x1f;
- priv->mii_info.mdio_read = cvm_oct_mdio_read;
- priv->mii_info.mdio_write = cvm_oct_mdio_write;
- } else {
- /* Supply dummy MDIO routines so the kernel won't crash
- if the user tries to read them */
- priv->mii_info.mdio_read = cvm_oct_mdio_dummy_read;
- priv->mii_info.mdio_write = cvm_oct_mdio_dummy_write;
+
+ int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
+ if (phy_addr != -1) {
+ char phy_id[20];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr);
+
+ priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+
+ if (IS_ERR(priv->phydev)) {
+ priv->phydev = NULL;
+ return -1;
+ }
+ priv->last_link = 0;
+ phy_start_aneg(priv->phydev);
}
return 0;
}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index b3328ae..55d0614a 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -43,4 +43,4 @@
extern const struct ethtool_ops cvm_oct_ethtool_ops;
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-int cvm_oct_mdio_setup_device(struct net_device *dev);
+int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
index 8fa88fc..16308d4 100644
--- a/drivers/staging/octeon/ethernet-proc.c
+++ b/drivers/staging/octeon/ethernet-proc.c
@@ -25,7 +25,6 @@
* Contact Cavium Networks for more information
**********************************************************************/
#include <linux/kernel.h>
-#include <linux/mii.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <net/dst.h>
@@ -38,112 +37,6 @@
#include "cvmx-helper.h"
#include "cvmx-pip.h"
-static unsigned long long cvm_oct_stats_read_switch(struct net_device *dev,
- int phy_id, int offset)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- priv->mii_info.mdio_write(dev, phy_id, 0x1d, 0xcc00 | offset);
- return ((uint64_t) priv->mii_info.
- mdio_read(dev, phy_id,
- 0x1e) << 16) | (uint64_t) priv->mii_info.
- mdio_read(dev, phy_id, 0x1f);
-}
-
-static int cvm_oct_stats_switch_show(struct seq_file *m, void *v)
-{
- static const int ports[] = { 0, 1, 2, 3, 9, -1 };
- struct net_device *dev = cvm_oct_device[0];
- int index = 0;
-
- while (ports[index] != -1) {
-
- /* Latch port */
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- priv->mii_info.mdio_write(dev, 0x1b, 0x1d,
- 0xdc00 | ports[index]);
- seq_printf(m, "\nSwitch Port %d\n", ports[index]);
- seq_printf(m, "InGoodOctets: %12llu\t"
- "OutOctets: %12llu\t"
- "64 Octets: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b,
- 0x00) |
- (cvm_oct_stats_read_switch(dev, 0x1b, 0x01) << 32),
- cvm_oct_stats_read_switch(dev, 0x1b,
- 0x0E) |
- (cvm_oct_stats_read_switch(dev, 0x1b, 0x0F) << 32),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x08));
-
- seq_printf(m, "InBadOctets: %12llu\t"
- "OutUnicast: %12llu\t"
- "65-127 Octets: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x02),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x10),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x09));
-
- seq_printf(m, "InUnicast: %12llu\t"
- "OutBroadcasts: %12llu\t"
- "128-255 Octets: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x04),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x13),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x0A));
-
- seq_printf(m, "InBroadcasts: %12llu\t"
- "OutMulticasts: %12llu\t"
- "256-511 Octets: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x06),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x12),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x0B));
-
- seq_printf(m, "InMulticasts: %12llu\t"
- "OutPause: %12llu\t"
- "512-1023 Octets:%12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x07),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x15),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x0C));
-
- seq_printf(m, "InPause: %12llu\t"
- "Excessive: %12llu\t"
- "1024-Max Octets:%12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x16),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x11),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x0D));
-
- seq_printf(m, "InUndersize: %12llu\t"
- "Collisions: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x18),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1E));
-
- seq_printf(m, "InFragments: %12llu\t"
- "Deferred: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x19),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x05));
-
- seq_printf(m, "InOversize: %12llu\t"
- "Single: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1A),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x14));
-
- seq_printf(m, "InJabber: %12llu\t"
- "Multiple: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1B),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x17));
-
- seq_printf(m, "In RxErr: %12llu\t"
- "OutFCSErr: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1C),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x03));
-
- seq_printf(m, "InFCSErr: %12llu\t"
- "Late: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1D),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1F));
- index++;
- }
- return 0;
-}
-
/**
* User is reading /proc/octeon_ethernet_stats
*
@@ -215,11 +108,6 @@ static int cvm_oct_stats_show(struct seq_file *m, void *v)
}
}
- if (cvm_oct_device[0]) {
- priv = netdev_priv(cvm_oct_device[0]);
- if (priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
- cvm_oct_stats_switch_show(m, v);
- }
return 0;
}
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index fbaa465..3820f1e 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -147,32 +147,36 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
gmxx_rxx_int_reg.u64);
}
-
- link_info = cvmx_helper_link_autoconf(priv->port);
- priv->link_info = link_info.u64;
+ if (priv->phydev == NULL) {
+ link_info = cvmx_helper_link_autoconf(priv->port);
+ priv->link_info = link_info.u64;
+ }
spin_unlock_irqrestore(&global_register_lock, flags);
- /* Tell Linux */
- if (link_info.s.link_up) {
-
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
- if (priv->queue != -1)
- DEBUGPRINT
- ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
- dev->name, link_info.s.speed,
- (link_info.s.full_duplex) ? "Full" : "Half",
- priv->port, priv->queue);
- else
- DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
- dev->name, link_info.s.speed,
- (link_info.s.full_duplex) ? "Full" : "Half",
- priv->port);
- } else {
-
- if (netif_carrier_ok(dev))
- netif_carrier_off(dev);
- DEBUGPRINT("%s: Link down\n", dev->name);
+ if (priv->phydev == NULL) {
+ /* Tell core. */
+ if (link_info.s.link_up) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ if (priv->queue != -1)
+ DEBUGPRINT("%s: %u Mbps %s duplex, "
+ "port %2d, queue %2d\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ?
+ "Full" : "Half",
+ priv->port, priv->queue);
+ else
+ DEBUGPRINT("%s: %u Mbps %s duplex, "
+ "port %2d, POW\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ?
+ "Full" : "Half",
+ priv->port);
+ } else {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ DEBUGPRINT("%s: Link down\n", dev->name);
+ }
}
}
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 2b54996..6061d01 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -113,7 +113,7 @@ int cvm_oct_sgmii_init(struct net_device *dev)
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
dev->netdev_ops->ndo_stop(dev);
- if (!octeon_is_simulation())
+ if (!octeon_is_simulation() && priv->phydev == NULL)
priv->poll = cvm_oct_sgmii_poll;
/* FIXME: Need autoneg logic */
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index 0c2e7cc..ee3dc41 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -112,7 +112,7 @@ int cvm_oct_xaui_init(struct net_device *dev)
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
dev->netdev_ops->ndo_stop(dev);
- if (!octeon_is_simulation())
+ if (!octeon_is_simulation() && priv->phydev == NULL)
priv->poll = cvm_oct_xaui_poll;
return 0;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 492c502..4cfd4b1 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -30,7 +30,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
#include <net/dst.h>
@@ -132,8 +132,6 @@ static struct timer_list cvm_oct_poll_timer;
*/
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
-extern struct semaphore mdio_sem;
-
/**
* Periodic timer tick for slow management operations
*
@@ -160,13 +158,8 @@ static void cvm_do_timer(unsigned long arg)
goto out;
priv = netdev_priv(cvm_oct_device[port]);
- if (priv->poll) {
- /* skip polling if we don't get the lock */
- if (!down_trylock(&mdio_sem)) {
- priv->poll(cvm_oct_device[port]);
- up(&mdio_sem);
- }
- }
+ if (priv->poll)
+ priv->poll(cvm_oct_device[port]);
queues_per_port = cvmx_pko_get_num_queues(port);
/* Drain any pending packets in the free list */
@@ -524,7 +517,7 @@ int cvm_oct_common_init(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
- cvm_oct_mdio_setup_device(dev);
+ cvm_oct_phy_setup_device(dev);
dev->netdev_ops->ndo_set_mac_address(dev, &sa);
dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
@@ -540,7 +533,10 @@ int cvm_oct_common_init(struct net_device *dev)
void cvm_oct_common_uninit(struct net_device *dev)
{
- /* Currently nothing to do */
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
}
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
@@ -627,6 +623,8 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
#endif
};
+extern void octeon_mdiobus_force_mod_depencency(void);
+
/**
* Module/ driver initialization. Creates the linux network
* devices.
@@ -640,6 +638,7 @@ static int __init cvm_oct_init_module(void)
int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
int qos;
+ octeon_mdiobus_force_mod_depencency();
pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
if (OCTEON_IS_MODEL(OCTEON_CN52XX))
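
The hunks above drop the driver's private MII polling and mdio semaphore in favour of the kernel PHY abstraction layer: octeon-ethernet.h gains a phydev pointer and last_link field, cvm_oct_common_init() now calls cvm_oct_phy_setup_device(), and uninit disconnects the PHY. For readers unfamiliar with phylib, a minimal sketch of the kind of link-change handler phylib drives is shown below; the handler name is hypothetical and this is an editor's illustration of the pattern, not code from this commit.

/* Hypothetical phylib link-change handler (illustration only, not from
 * this commit).  phylib invokes the handler registered at phy_connect()
 * time whenever the PHY state changes; the driver mirrors that state
 * into the net_device and its private bookkeeping.
 */
static void cvm_oct_adjust_link_sketch(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev->link == priv->last_link)
		return;
	priv->last_link = priv->phydev->link;

	if (priv->phydev->link) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
		pr_info("%s: %d Mbps %s duplex\n", dev->name,
			priv->phydev->speed,
			priv->phydev->duplex ? "Full" : "Half");
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
		pr_info("%s: Link down\n", dev->name);
	}
}
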
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 3aef987..402a15b 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -50,9 +50,9 @@ struct octeon_ethernet {
/* List of outstanding tx buffers per queue */
struct sk_buff_head tx_free_list[16];
/* Device statistics */
- struct net_device_stats stats
-; /* Generic MII info structure */
- struct mii_if_info mii_info;
+ struct net_device_stats stats;
+ struct phy_device *phydev;
+ unsigned int last_link;
/* Last negotiated link state */
uint64_t link_info;
/* Called periodically to check link status */
diff --git a/drivers/staging/panel/Kconfig b/drivers/staging/panel/Kconfig
index 3abe7c9..3defa01 100644
--- a/drivers/staging/panel/Kconfig
+++ b/drivers/staging/panel/Kconfig
@@ -47,7 +47,7 @@ config PANEL_PROFILE
config PANEL_KEYPAD
depends on PANEL && PANEL_PROFILE="0"
int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
- range 0 4
+ range 0 3
default 0
---help---
This enables and configures a keypad connected to the parallel port.
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 4ce399b..95c93e8 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -55,7 +55,7 @@
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/io.h>
#include <asm/uaccess.h>
@@ -378,7 +378,7 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
#ifdef CONFIG_PANEL_LCD_CHARSET
#undef DEFAULT_LCD_CHARSET
-#define DEFAULT_LCD_CHARSET
+#define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET
#endif
#endif /* DEFAULT_PROFILE == 0 */
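
The panel.c hunk above restores the value of DEFAULT_LCD_CHARSET: the old redefinition expanded to nothing, so every use of the macro silently produced an empty token instead of the charset chosen in Kconfig. A small stand-alone illustration of that pitfall (all names below are invented for the demo):

/* Demo of the empty-macro pitfall fixed above; names are hypothetical. */
#include <stdio.h>

#define CONFIG_DEMO_CHARSET "KS_0066"

/* Broken form, as before the patch: the value is dropped entirely,
 * so DEMO_CHARSET expands to nothing. */
#define DEMO_CHARSET

/* Fixed form, mirroring the patch: carry the Kconfig value through. */
#undef DEMO_CHARSET
#define DEMO_CHARSET CONFIG_DEMO_CHARSET

int main(void)
{
	/* With the broken define this initializer would be empty and the
	 * file would not compile; with the fix it is the Kconfig string. */
	const char *charset = DEMO_CHARSET;

	printf("charset: %s\n", charset);
	return 0;
}
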
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
index 6c5b261..aacd25b 100644
--- a/drivers/staging/pohmelfs/dir.c
+++ b/drivers/staging/pohmelfs/dir.c
@@ -722,8 +722,6 @@ static int pohmelfs_remove_entry(struct inode *dir, struct dentry *dentry)
if (inode->i_nlink)
inode_dec_link_count(inode);
}
- dprintk("%s: inode: %p, lock: %ld, unhashed: %d.\n",
- __func__, pi, inode->i_state & I_LOCK, hlist_unhashed(&inode->i_hash));
return err;
}
diff --git a/drivers/staging/ramzswap/TODO b/drivers/staging/ramzswap/TODO
index bac40d6..8d64e28 100644
--- a/drivers/staging/ramzswap/TODO
+++ b/drivers/staging/ramzswap/TODO
@@ -1,6 +1,5 @@
TODO:
- Add support for swap notifiers
- - Remove CONFIG_ARM hack
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
Nitin Gupta <ngupta@vflare.org>
diff --git a/drivers/staging/ramzswap/ramzswap_drv.c b/drivers/staging/ramzswap/ramzswap_drv.c
index b839f05..989fac5 100644
--- a/drivers/staging/ramzswap/ramzswap_drv.c
+++ b/drivers/staging/ramzswap/ramzswap_drv.c
@@ -222,28 +222,6 @@ out:
return ret;
}
-static void ramzswap_flush_dcache_page(struct page *page)
-{
-#ifdef CONFIG_ARM
- int flag = 0;
- /*
- * Ugly hack to get flush_dcache_page() work on ARM.
- * page_mapping(page) == NULL after clearing this swap cache flag.
- * Without clearing this flag, flush_dcache_page() will simply set
- * "PG_dcache_dirty" bit and return.
- */
- if (PageSwapCache(page)) {
- flag = 1;
- ClearPageSwapCache(page);
- }
-#endif
- flush_dcache_page(page);
-#ifdef CONFIG_ARM
- if (flag)
- SetPageSwapCache(page);
-#endif
-}
-
void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
struct ramzswap_ioctl_stats *s)
{
@@ -655,7 +633,7 @@ static int handle_zero_page(struct bio *bio)
memset(user_mem, 0, PAGE_SIZE);
kunmap_atomic(user_mem, KM_USER0);
- ramzswap_flush_dcache_page(page);
+ flush_dcache_page(page);
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio_endio(bio, 0);
@@ -679,7 +657,7 @@ static int handle_uncompressed_page(struct ramzswap *rzs, struct bio *bio)
kunmap_atomic(user_mem, KM_USER0);
kunmap_atomic(cmem, KM_USER1);
- ramzswap_flush_dcache_page(page);
+ flush_dcache_page(page);
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio_endio(bio, 0);
@@ -779,7 +757,7 @@ static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
goto out;
}
- ramzswap_flush_dcache_page(page);
+ flush_dcache_page(page);
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio_endio(bio, 0);
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 3222c22..0d490c1 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -1318,13 +1318,13 @@ extern int ieee80211_encrypt_fragment(
struct sk_buff *frag,
int hdr_len);
-extern int ieee80211_xmit(struct sk_buff *skb,
+extern int ieee80211_rtl_xmit(struct sk_buff *skb,
struct net_device *dev);
extern void ieee80211_txb_free(struct ieee80211_txb *);
/* ieee80211_rx.c */
-extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats);
extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header,
@@ -1376,8 +1376,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
-extern void ieee80211_wake_queue(struct ieee80211_device *ieee);
-extern void ieee80211_stop_queue(struct ieee80211_device *ieee);
+extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
+extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
@@ -1385,7 +1385,7 @@ extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct
extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
extern void SendDisassociation(struct ieee80211_device *ieee,u8* asSta,u8 asRsn);
-extern void ieee80211_start_scan(struct ieee80211_device *ieee);
+extern void ieee80211_rtl_start_scan(struct ieee80211_device *ieee);
//Add for RF power on power off by lizhaoming 080512
extern void SendDisassociation(struct ieee80211_device *ieee,
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index f882dd8..9128c18 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -469,7 +469,7 @@ drop:
/* All received frames are sent to this function. @skb contains the frame in
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ). */
-int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats)
{
struct net_device *dev = ieee->dev;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 1fe19c3..c7c645a 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -689,7 +689,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
}
/* called with ieee->lock held */
-void ieee80211_start_scan(struct ieee80211_device *ieee)
+void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
{
if(IS_DOT11D_ENABLE(ieee) )
{
@@ -1196,7 +1196,7 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
}
}
-void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
+void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
{
u8 *c;
struct sk_buff *skb;
@@ -1898,7 +1898,7 @@ associate_complete:
ieee80211_associate_step2(ieee);
}else{
- ieee80211_auth_challenge(ieee, challenge, chlen);
+ ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
}
}else{
ieee->softmac_stats.rx_auth_rs_err++;
@@ -2047,7 +2047,7 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee)
}
-void ieee80211_wake_queue(struct ieee80211_device *ieee)
+void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -2089,7 +2089,7 @@ exit :
}
-void ieee80211_stop_queue(struct ieee80211_device *ieee)
+void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
{
//unsigned long flags;
//spin_lock_irqsave(&ieee->lock,flags);
@@ -2301,7 +2301,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
//#else
if (ieee->state == IEEE80211_NOLINK){
ieee->actscanning = true;
- ieee80211_start_scan(ieee);
+ ieee80211_rtl_start_scan(ieee);
}
//#endif
spin_unlock_irqrestore(&ieee->lock, flags);
@@ -2357,7 +2357,7 @@ void ieee80211_associate_retry_wq(struct work_struct *work)
if(ieee->state == IEEE80211_NOLINK){
ieee->beinretry = false;
ieee->actscanning = true;
- ieee80211_start_scan(ieee);
+ ieee80211_rtl_start_scan(ieee);
}
//YJ,add,080828, notify os here
if(ieee->state == IEEE80211_NOLINK)
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
index dde1f2e..69bd021 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
@@ -304,7 +304,7 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
}
/* SKBs are added to the ieee->tx_queue. */
-int ieee80211_xmit(struct sk_buff *skb,
+int ieee80211_rtl_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 57c62b0..e0f13ef 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1811,7 +1811,7 @@ void rtl8180_rx(struct net_device *dev)
if(priv->rx_skb->len > 4)
skb_trim(priv->rx_skb,priv->rx_skb->len-4);
#ifndef RX_DONT_PASS_UL
- if(!ieee80211_rx(priv->ieee80211,
+ if(!ieee80211_rtl_rx(priv->ieee80211,
priv->rx_skb, &stats)){
#endif // RX_DONT_PASS_UL
@@ -1917,11 +1917,11 @@ rate)
if (!check_nic_enought_desc(dev, priority)){
DMESGW("Error: no descriptor left by previous TX (avail %d) ",
get_curr_tx_free_desc(dev, priority));
- ieee80211_stop_queue(priv->ieee80211);
+ ieee80211_rtl_stop_queue(priv->ieee80211);
}
rtl8180_tx(dev, skb->data, skb->len, priority, morefrag,0,rate);
if (!check_nic_enought_desc(dev, priority))
- ieee80211_stop_queue(priv->ieee80211);
+ ieee80211_rtl_stop_queue(priv->ieee80211);
spin_unlock_irqrestore(&priv->tx_lock,flags);
}
@@ -3680,7 +3680,7 @@ static const struct net_device_ops rtl8180_netdev_ops = {
.ndo_set_mac_address = r8180_set_mac_adr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
- .ndo_start_xmit = ieee80211_xmit,
+ .ndo_start_xmit = ieee80211_rtl_xmit,
};
static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
@@ -3900,7 +3900,7 @@ void rtl8180_try_wake_queue(struct net_device *dev, int pri)
spin_unlock_irqrestore(&priv->tx_lock,flags);
if(enough_desc)
- ieee80211_wake_queue(priv->ieee80211);
+ ieee80211_rtl_wake_queue(priv->ieee80211);
}
void rtl8180_tx_isr(struct net_device *dev, int pri,short error)
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 536cb6e..124cde3 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -377,7 +377,7 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
// queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq);
//printk("start scan============================>\n");
ieee80211_softmac_ips_scan_syncro(priv->ieee80211);
-//ieee80211_start_scan(priv->ieee80211);
+//ieee80211_rtl_start_scan(priv->ieee80211);
/* intentionally forget to up sem */
// up(&priv->ieee80211->wx_sem);
ret = 0;
diff --git a/drivers/staging/rtl8192e/ieee80211.h b/drivers/staging/rtl8192e/ieee80211.h
index 97137dd..3ba9e9e 100644
--- a/drivers/staging/rtl8192e/ieee80211.h
+++ b/drivers/staging/rtl8192e/ieee80211.h
@@ -303,8 +303,8 @@ enum _ReasonCode{
#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl
#define ieee80211_get_beacon ieee80211_get_beacon_rsl
-#define ieee80211_wake_queue ieee80211_wake_queue_rsl
-#define ieee80211_stop_queue ieee80211_stop_queue_rsl
+#define ieee80211_rtl_wake_queue ieee80211_rtl_wake_queue_rsl
+#define ieee80211_rtl_stop_queue ieee80211_rtl_stop_queue_rsl
#define ieee80211_reset_queue ieee80211_reset_queue_rsl
#define ieee80211_softmac_stop_protocol ieee80211_softmac_stop_protocol_rsl
#define ieee80211_softmac_start_protocol ieee80211_softmac_start_protocol_rsl
@@ -2435,13 +2435,13 @@ extern int ieee80211_encrypt_fragment(
struct sk_buff *frag,
int hdr_len);
-extern int ieee80211_xmit(struct sk_buff *skb,
+extern int ieee80211_rtl_xmit(struct sk_buff *skb,
struct net_device *dev);
extern void ieee80211_txb_free(struct ieee80211_txb *);
/* ieee80211_rx.c */
-extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats);
extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header,
@@ -2502,8 +2502,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
-extern void ieee80211_wake_queue(struct ieee80211_device *ieee);
-extern void ieee80211_stop_queue(struct ieee80211_device *ieee);
+extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
+extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211.h b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
index 83c8452..aa76390 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
@@ -333,8 +333,8 @@ enum _ReasonCode{
#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl
#define ieee80211_get_beacon ieee80211_get_beacon_rsl
-#define ieee80211_wake_queue ieee80211_wake_queue_rsl
-#define ieee80211_stop_queue ieee80211_stop_queue_rsl
+#define ieee80211_rtl_wake_queue ieee80211_rtl_wake_queue_rsl
+#define ieee80211_rtl_stop_queue ieee80211_rtl_stop_queue_rsl
#define ieee80211_reset_queue ieee80211_reset_queue_rsl
#define ieee80211_softmac_stop_protocol ieee80211_softmac_stop_protocol_rsl
#define ieee80211_softmac_start_protocol ieee80211_softmac_start_protocol_rsl
@@ -2546,13 +2546,13 @@ extern int ieee80211_encrypt_fragment(
struct sk_buff *frag,
int hdr_len);
-extern int ieee80211_xmit(struct sk_buff *skb,
+extern int ieee80211_rtl_xmit(struct sk_buff *skb,
struct net_device *dev);
extern void ieee80211_txb_free(struct ieee80211_txb *);
/* ieee80211_rx.c */
-extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats);
extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header,
@@ -2613,8 +2613,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
-extern void ieee80211_wake_queue(struct ieee80211_device *ieee);
-extern void ieee80211_stop_queue(struct ieee80211_device *ieee);
+extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
+extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
index 2644155..f43a7db 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
@@ -119,7 +119,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
ieee = (struct ieee80211_device *)dev->priv;
#endif
#if 0
- dev->hard_start_xmit = ieee80211_xmit;
+ dev->hard_start_xmit = ieee80211_rtl_xmit;
#endif
memset(ieee, 0, sizeof(struct ieee80211_device)+sizeof_priv);
@@ -333,7 +333,7 @@ extern void ieee80211_crypto_ccmp_exit(void);
extern int ieee80211_crypto_wep_init(void);
extern void ieee80211_crypto_wep_exit(void);
-int __init ieee80211_init(void)
+int __init ieee80211_rtl_init(void)
{
struct proc_dir_entry *e;
int retval;
@@ -389,7 +389,7 @@ int __init ieee80211_init(void)
return 0;
}
-void __exit ieee80211_exit(void)
+void __exit ieee80211_rtl_exit(void)
{
if (ieee80211_proc) {
remove_proc_entry("debug_level", ieee80211_proc);
@@ -412,8 +412,8 @@ module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
-//module_exit(ieee80211_exit);
-//module_init(ieee80211_init);
+//module_exit(ieee80211_rtl_exit);
+//module_init(ieee80211_rtl_init);
#endif
#endif
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
index 5dc478b..06d9171 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
@@ -923,7 +923,7 @@ u8 parse_subframe(struct sk_buff *skb,
/* All received frames are sent to this function. @skb contains the frame in
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ). */
-int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats)
{
struct net_device *dev = ieee->dev;
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
index 593d228..6d1ddec 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
@@ -684,7 +684,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
}
/* called with ieee->lock held */
-void ieee80211_start_scan(struct ieee80211_device *ieee)
+void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
{
#ifdef ENABLE_DOT11D
if(IS_DOT11D_ENABLE(ieee) )
@@ -1430,7 +1430,7 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
}
}
-void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
+void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
{
u8 *c;
struct sk_buff *skb;
@@ -2262,7 +2262,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
ieee80211_associate_step2(ieee);
}else{
- ieee80211_auth_challenge(ieee, challenge, chlen);
+ ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
}
}else{
ieee->softmac_stats.rx_auth_rs_err++;
@@ -2376,7 +2376,7 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
* to check it any more.
* */
//printk("error:no descriptor left@queue_index %d\n", queue_index);
- //ieee80211_stop_queue(ieee);
+ //ieee80211_rtl_stop_queue(ieee);
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
skb_queue_tail(&ieee->skb_drv_aggQ[queue_index], txb->fragments[i]);
#else
@@ -2440,7 +2440,7 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee)
}
-void ieee80211_wake_queue(struct ieee80211_device *ieee)
+void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -2481,7 +2481,7 @@ exit :
}
-void ieee80211_stop_queue(struct ieee80211_device *ieee)
+void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
{
//unsigned long flags;
//spin_lock_irqsave(&ieee->lock,flags);
@@ -2706,7 +2706,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
if (ieee->state == IEEE80211_NOLINK){
ieee->actscanning = true;
- ieee80211_start_scan(ieee);
+ ieee80211_rtl_start_scan(ieee);
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -2775,7 +2775,7 @@ void ieee80211_associate_retry_wq(struct ieee80211_device *ieee)
{
ieee->is_roaming= false;
ieee->actscanning = true;
- ieee80211_start_scan(ieee);
+ ieee80211_rtl_start_scan(ieee);
}
spin_unlock_irqrestore(&ieee->lock, flags);
@@ -3497,8 +3497,8 @@ void notify_wx_assoc_event(struct ieee80211_device *ieee)
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
//EXPORT_SYMBOL(ieee80211_get_beacon);
-//EXPORT_SYMBOL(ieee80211_wake_queue);
-//EXPORT_SYMBOL(ieee80211_stop_queue);
+//EXPORT_SYMBOL(ieee80211_rtl_wake_queue);
+//EXPORT_SYMBOL(ieee80211_rtl_stop_queue);
//EXPORT_SYMBOL(ieee80211_reset_queue);
//EXPORT_SYMBOL(ieee80211_softmac_stop_protocol);
//EXPORT_SYMBOL(ieee80211_softmac_start_protocol);
@@ -3518,8 +3518,8 @@ void notify_wx_assoc_event(struct ieee80211_device *ieee)
//EXPORT_SYMBOL(ieee80211_start_scan_syncro);
#else
EXPORT_SYMBOL_NOVERS(ieee80211_get_beacon);
-EXPORT_SYMBOL_NOVERS(ieee80211_wake_queue);
-EXPORT_SYMBOL_NOVERS(ieee80211_stop_queue);
+EXPORT_SYMBOL_NOVERS(ieee80211_rtl_wake_queue);
+EXPORT_SYMBOL_NOVERS(ieee80211_rtl_stop_queue);
EXPORT_SYMBOL_NOVERS(ieee80211_reset_queue);
EXPORT_SYMBOL_NOVERS(ieee80211_softmac_stop_protocol);
EXPORT_SYMBOL_NOVERS(ieee80211_softmac_start_protocol);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
index 103b33c..798fb41 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
@@ -604,7 +604,7 @@ void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u
}
}
-int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
+int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
struct ieee80211_device *ieee = netdev_priv(dev);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
index 4e34a1f4..3441b72 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
@@ -976,7 +976,7 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
{
if (len != ie[1]+2)
{
- printk("len:%d, ie:%d\n", len, ie[1]);
+ printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
buf = kmalloc(len, GFP_KERNEL);
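
This hunk, and the similar ones in the BA-processing files below, change printk conversion specifiers from %d to %zu because the arguments (len and the sizeof expressions) have type size_t, which is not guaranteed to match int in width. A stand-alone reminder of the rule:

/* size_t arguments take the %zu conversion specifier. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t len = strlen("information element") + 2;

	/* "%d" with a size_t argument is undefined behaviour wherever
	 * size_t is wider than int; "%zu" is the portable specifier. */
	printf("len:%zu, ie:%d\n", len, 42);
	return 0;
}
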
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c
index 98b3bb6..e41e8a0 100644
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c
@@ -382,7 +382,7 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
return -1;
}
@@ -481,7 +481,7 @@ int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
return -1;
}
rsp = ( struct ieee80211_hdr_3addr*)skb->data;
@@ -611,7 +611,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6));
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6));
return -1;
}
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index ff8fe7e..0ca5d8b 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -5795,7 +5795,7 @@ static void rtl8192_rx(struct net_device *dev)
stats.fragoffset = 0;
stats.ntotalfrag = 1;
- if(!ieee80211_rx(priv->ieee80211, skb, &stats)){
+ if(!ieee80211_rtl_rx(priv->ieee80211, skb, &stats)){
dev_kfree_skb_any(skb);
} else {
priv->stats.rxok++;
@@ -5837,7 +5837,7 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_do_ioctl = rtl8192_ioctl,
.ndo_set_multicast_list = r8192_set_multicast,
.ndo_set_mac_address = r8192_set_mac_adr,
- .ndo_start_xmit = ieee80211_xmit,
+ .ndo_start_xmit = ieee80211_rtl_xmit,
};
/****************************************************************************
@@ -6121,14 +6121,14 @@ static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev)
RT_TRACE(COMP_DOWN, "wlan driver removed\n");
}
-extern int ieee80211_init(void);
-extern void ieee80211_exit(void);
+extern int ieee80211_rtl_init(void);
+extern void ieee80211_rtl_exit(void);
static int __init rtl8192_pci_module_init(void)
{
int retval;
- retval = ieee80211_init();
+ retval = ieee80211_rtl_init();
if (retval)
return retval;
@@ -6153,7 +6153,7 @@ static void __exit rtl8192_pci_module_exit(void)
RT_TRACE(COMP_DOWN, "Exiting");
rtl8192_proc_module_remove();
- ieee80211_exit();
+ ieee80211_rtl_exit();
}
//warning message WB
@@ -6313,7 +6313,7 @@ void rtl8192_try_wake_queue(struct net_device *dev, int pri)
spin_unlock_irqrestore(&priv->tx_lock,flags);
if(enough_desc)
- ieee80211_wake_queue(priv->ieee80211);
+ ieee80211_rtl_wake_queue(priv->ieee80211);
#endif
}
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211.h b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
index f22d024..9a4c858 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
@@ -1721,13 +1721,13 @@ extern int ieee80211_encrypt_fragment(
struct sk_buff *frag,
int hdr_len);
-extern int rtl8192_ieee80211_xmit(struct sk_buff *skb,
+extern int rtl8192_ieee80211_rtl_xmit(struct sk_buff *skb,
struct net_device *dev);
extern void ieee80211_txb_free(struct ieee80211_txb *);
/* ieee80211_rx.c */
-extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats);
extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header,
@@ -1783,8 +1783,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
-extern void ieee80211_wake_queue(struct ieee80211_device *ieee);
-extern void ieee80211_stop_queue(struct ieee80211_device *ieee);
+extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
+extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
index ac223ce..fecfa12 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
@@ -208,7 +208,7 @@ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
*
* Responsible for handling management control frames
*
- * Called by ieee80211_rx */
+ * Called by ieee80211_rtl_rx */
static inline int
ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats, u16 type,
@@ -289,7 +289,7 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
return 0;
}
-/* Called only as a tasklet (software IRQ), by ieee80211_rx */
+/* Called only as a tasklet (software IRQ), by ieee80211_rtl_rx */
static inline int
ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
struct ieee80211_crypt_data *crypt)
@@ -858,7 +858,7 @@ u8 parse_subframe(struct sk_buff *skb,
/* All received frames are sent to this function. @skb contains the frame in
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ). */
-int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats)
{
struct net_device *dev = ieee->dev;
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
index 203c0a5..95d4f84 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
@@ -610,7 +610,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
}
/* called with ieee->lock held */
-void ieee80211_start_scan(struct ieee80211_device *ieee)
+void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
{
if(IS_DOT11D_ENABLE(ieee) )
{
@@ -1281,7 +1281,7 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
}
}
-void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
+void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
{
u8 *c;
struct sk_buff *skb;
@@ -2054,7 +2054,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
ieee80211_associate_step2(ieee);
}else{
- ieee80211_auth_challenge(ieee, challenge, chlen);
+ ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
}
}else{
ieee->softmac_stats.rx_auth_rs_err++;
@@ -2162,7 +2162,7 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
* to check it any more.
* */
//printk("error:no descriptor left@queue_index %d, %d, %d\n", queue_index, skb_queue_len(&ieee->skb_waitQ[queue_index]), ieee->check_nic_enough_desc(ieee->dev,queue_index));
- //ieee80211_stop_queue(ieee);
+ //ieee80211_rtl_stop_queue(ieee);
skb_queue_tail(&ieee->skb_waitQ[queue_index], txb->fragments[i]);
}else{
ieee->softmac_data_hard_start_xmit(
@@ -2222,7 +2222,7 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee)
}
-void ieee80211_wake_queue(struct ieee80211_device *ieee)
+void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -2263,7 +2263,7 @@ exit :
}
-void ieee80211_stop_queue(struct ieee80211_device *ieee)
+void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
{
//unsigned long flags;
//spin_lock_irqsave(&ieee->lock,flags);
@@ -2479,7 +2479,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
if (ieee->state == IEEE80211_NOLINK){
ieee->actscanning = true;
- ieee80211_start_scan(ieee);
+ ieee80211_rtl_start_scan(ieee);
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -2552,7 +2552,7 @@ void ieee80211_associate_retry_wq(struct work_struct *work)
if(ieee->state == IEEE80211_NOLINK)
{
ieee->actscanning = true;
- ieee80211_start_scan(ieee);
+ ieee80211_rtl_start_scan(ieee);
}
spin_unlock_irqrestore(&ieee->lock, flags);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
index 60621d6..4d54e1e 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
@@ -604,7 +604,7 @@ void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u
}
}
-int rtl8192_ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
+int rtl8192_ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct ieee80211_txb *txb = NULL;
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
index 66274d7..ccb9d5b 100644
--- a/drivers/staging/rtl8192su/r8192U_core.c
+++ b/drivers/staging/rtl8192su/r8192U_core.c
@@ -126,6 +126,8 @@ static struct usb_device_id rtl8192_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x3301)},
/* Zinwell */
{USB_DEVICE(0x5a57, 0x0290)},
+ /* Guillemot */
+ {USB_DEVICE(0x06f8, 0xe031)},
//92SU
{USB_DEVICE(0x0bda, 0x8172)},
{}
@@ -1501,7 +1503,7 @@ static void rtl8192_rx_isr(struct urb *urb)
urb->context = skb;
skb_queue_tail(&priv->rx_queue, skb);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if(err && err != EPERM)
+ if(err && err != -EPERM)
printk("can not submit rxurb, err is %x,URB status is %x\n",err,urb->status);
}
@@ -7155,7 +7157,7 @@ void rtl8192SU_rx_nomal(struct sk_buff* skb)
unicast_packet = true;
}
- if(!ieee80211_rx(priv->ieee80211,skb, &stats)) {
+ if(!ieee80211_rtl_rx(priv->ieee80211,skb, &stats)) {
dev_kfree_skb_any(skb);
} else {
// priv->stats.rxoktotal++; //YJ,test,090108
@@ -7426,7 +7428,7 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_set_mac_address = r8192_set_mac_adr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
- .ndo_start_xmit = rtl8192_ieee80211_xmit,
+ .ndo_start_xmit = rtl8192_ieee80211_rtl_xmit,
};
static int __devinit rtl8192_usb_probe(struct usb_interface *intf,
@@ -7619,7 +7621,7 @@ void rtl8192_try_wake_queue(struct net_device *dev, int pri)
spin_unlock_irqrestore(&priv->tx_lock,flags);
if(enough_desc)
- ieee80211_wake_queue(priv->ieee80211);
+ ieee80211_rtl_wake_queue(priv->ieee80211);
}
void EnableHWSecurityConfig8192(struct net_device *dev)
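
The rtl8192_rx_isr() hunk above fixes an error check that compared against the positive EPERM constant: in-kernel calls such as usb_submit_urb() return 0 on success or a negative errno value, so the old test could never match and the message fired even for the expected -EPERM case. A minimal stand-alone sketch of the convention, with a mock function standing in for the real USB core call:

/* Demo of the negative-errno return convention; mock_submit() is a
 * made-up stand-in for a kernel API such as usb_submit_urb(). */
#include <errno.h>
#include <stdio.h>

static int mock_submit(int allowed)
{
	return allowed ? 0 : -EPERM;	/* kernel APIs return -errno */
}

int main(void)
{
	int err = mock_submit(0);

	if (err && err != -EPERM)	/* the patched check uses -EPERM */
		printf("can not submit urb, err is %d\n", err);
	else if (err == -EPERM)
		printf("submission rejected with -EPERM\n");
	return 0;
}
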
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index d397f1d..5f12d62 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -845,7 +845,7 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
{
if (len != ie[1]+2)
{
- printk("len:%d, ie:%d\n", len, ie[1]);
+ printk("len:%zu, ie:%d\n", len, ie[1]);
return -EINVAL;
}
buf = kmalloc(len, GFP_KERNEL);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 26af43b..512a57a 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -340,7 +340,7 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
return -1;
}
@@ -439,7 +439,7 @@ int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
return -1;
}
rsp = ( struct ieee80211_hdr_3addr*)skb->data;
@@ -569,7 +569,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6)
{
- IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6));
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6));
return -1;
}
diff --git a/drivers/staging/sm7xx/Kconfig b/drivers/staging/sm7xx/Kconfig
new file mode 100644
index 0000000..204dbfc
--- /dev/null
+++ b/drivers/staging/sm7xx/Kconfig
@@ -0,0 +1,15 @@
+config FB_SM7XX
+ tristate "Silicon Motion SM7XX Frame Buffer Support"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Frame Buffer driver for the Silicon Motion SM7XX serial graphic card.
+
+config FB_SM7XX_ACCEL
+ bool "Siliconmotion Acceleration functions (EXPERIMENTAL)"
+ depends on FB_SM7XX && EXPERIMENTAL
+ help
+ This will compile the Trident frame buffer device with
+ acceleration functions.
diff --git a/drivers/staging/sm7xx/Makefile b/drivers/staging/sm7xx/Makefile
new file mode 100644
index 0000000..f43cb91
--- /dev/null
+++ b/drivers/staging/sm7xx/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_FB_SM7XX) += sm7xx.o
+
+sm7xx-y := smtcfb.o
diff --git a/drivers/staging/sm7xx/TODO b/drivers/staging/sm7xx/TODO
new file mode 100644
index 0000000..1f61f5e
--- /dev/null
+++ b/drivers/staging/sm7xx/TODO
@@ -0,0 +1,10 @@
+TODO:
+- Dual head support
+- use kernel coding style
+- checkpatch.pl clean
+- refine the code and remove unused code
+- use kernel framebuffer mode setting instead of hard code
+- move it to drivers/video/sm7xx/ or make it be drivers/video/sm7xxfb.c
+
+Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and
+Teddy Wang <teddy.wang@siliconmotion.com.cn>.
diff --git a/drivers/staging/sm7xx/smtc2d.c b/drivers/staging/sm7xx/smtc2d.c
new file mode 100644
index 0000000..133b86c
--- /dev/null
+++ b/drivers/staging/sm7xx/smtc2d.c
@@ -0,0 +1,979 @@
+/*
+ * Silicon Motion SM7XX 2D drawing engine functions.
+ *
+ * Copyright (C) 2006 Silicon Motion Technology Corp.
+ * Author: Boyod boyod.yang@siliconmotion.com.cn
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Version 0.10.26192.21.01
+ * - Add PowerPC support
+ * - Add 2D support for Lynx -
+ * Verified on 2.6.19.2
+ * Boyod.yang <boyod.yang@siliconmotion.com.cn>
+ */
+
+unsigned char smtc_de_busy;
+
+void SMTC_write2Dreg(unsigned long nOffset, unsigned long nData)
+{
+ writel(nData, smtc_2DBaseAddress + nOffset);
+}
+
+unsigned long SMTC_read2Dreg(unsigned long nOffset)
+{
+ return readl(smtc_2DBaseAddress + nOffset);
+}
+
+void SMTC_write2Ddataport(unsigned long nOffset, unsigned long nData)
+{
+ writel(nData, smtc_2Ddataport + nOffset);
+}
+
+/**********************************************************************
+ *
+ * deInit
+ *
+ * Purpose
+ * Drawing engine initialization.
+ *
+ **********************************************************************/
+
+void deInit(unsigned int nModeWidth, unsigned int nModeHeight,
+ unsigned int bpp)
+{
+ /* Get current power configuration. */
+ unsigned char clock;
+ clock = smtc_seqr(0x21);
+
+ /* initialize global 'mutex lock' variable */
+ smtc_de_busy = 0;
+
+ /* Enable 2D Drawing Engine */
+ smtc_seqw(0x21, clock & 0xF8);
+
+ SMTC_write2Dreg(DE_CLIP_TL,
+ FIELD_VALUE(0, DE_CLIP_TL, TOP, 0) |
+ FIELD_SET(0, DE_CLIP_TL, STATUS, DISABLE) |
+ FIELD_SET(0, DE_CLIP_TL, INHIBIT, OUTSIDE) |
+ FIELD_VALUE(0, DE_CLIP_TL, LEFT, 0));
+
+ if (bpp >= 24) {
+ SMTC_write2Dreg(DE_PITCH,
+ FIELD_VALUE(0, DE_PITCH, DESTINATION,
+ nModeWidth * 3) | FIELD_VALUE(0,
+ DE_PITCH,
+ SOURCE,
+ nModeWidth
+ * 3));
+ } else {
+ SMTC_write2Dreg(DE_PITCH,
+ FIELD_VALUE(0, DE_PITCH, DESTINATION,
+ nModeWidth) | FIELD_VALUE(0,
+ DE_PITCH,
+ SOURCE,
+ nModeWidth));
+ }
+
+ SMTC_write2Dreg(DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
+ nModeWidth) | FIELD_VALUE(0,
+ DE_WINDOW_WIDTH,
+ SOURCE,
+ nModeWidth));
+
+ switch (bpp) {
+ case 8:
+ SMTC_write2Dreg(DE_STRETCH_FORMAT,
+ FIELD_SET(0, DE_STRETCH_FORMAT, PATTERN_XY,
+ NORMAL) | FIELD_VALUE(0,
+ DE_STRETCH_FORMAT,
+ PATTERN_Y,
+ 0) |
+ FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_X,
+ 0) | FIELD_SET(0, DE_STRETCH_FORMAT,
+ PIXEL_FORMAT,
+ 8) | FIELD_SET(0,
+ DE_STRETCH_FORMAT,
+ ADDRESSING,
+ XY) |
+ FIELD_VALUE(0, DE_STRETCH_FORMAT,
+ SOURCE_HEIGHT, 3));
+ break;
+ case 24:
+ SMTC_write2Dreg(DE_STRETCH_FORMAT,
+ FIELD_SET(0, DE_STRETCH_FORMAT, PATTERN_XY,
+ NORMAL) | FIELD_VALUE(0,
+ DE_STRETCH_FORMAT,
+ PATTERN_Y,
+ 0) |
+ FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_X,
+ 0) | FIELD_SET(0, DE_STRETCH_FORMAT,
+ PIXEL_FORMAT,
+ 24) | FIELD_SET(0,
+ DE_STRETCH_FORMAT,
+ ADDRESSING,
+ XY) |
+ FIELD_VALUE(0, DE_STRETCH_FORMAT,
+ SOURCE_HEIGHT, 3));
+ break;
+ case 16:
+ default:
+ SMTC_write2Dreg(DE_STRETCH_FORMAT,
+ FIELD_SET(0, DE_STRETCH_FORMAT, PATTERN_XY,
+ NORMAL) | FIELD_VALUE(0,
+ DE_STRETCH_FORMAT,
+ PATTERN_Y,
+ 0) |
+ FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_X,
+ 0) | FIELD_SET(0, DE_STRETCH_FORMAT,
+ PIXEL_FORMAT,
+ 16) | FIELD_SET(0,
+ DE_STRETCH_FORMAT,
+ ADDRESSING,
+ XY) |
+ FIELD_VALUE(0, DE_STRETCH_FORMAT,
+ SOURCE_HEIGHT, 3));
+ break;
+ }
+
+ SMTC_write2Dreg(DE_MASKS,
+ FIELD_VALUE(0, DE_MASKS, BYTE_MASK, 0xFFFF) |
+ FIELD_VALUE(0, DE_MASKS, BIT_MASK, 0xFFFF));
+ SMTC_write2Dreg(DE_COLOR_COMPARE_MASK,
+ FIELD_VALUE(0, DE_COLOR_COMPARE_MASK, MASKS, \
+ 0xFFFFFF));
+ SMTC_write2Dreg(DE_COLOR_COMPARE,
+ FIELD_VALUE(0, DE_COLOR_COMPARE, COLOR, 0xFFFFFF));
+}
+
+void deVerticalLine(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long nX,
+ unsigned long nY,
+ unsigned long dst_height, unsigned long nColor)
+{
+ deWaitForNotBusy();
+
+ SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
+ FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE, ADDRESS,
+ dst_base));
+
+ SMTC_write2Dreg(DE_PITCH,
+ FIELD_VALUE(0, DE_PITCH, DESTINATION, dst_pitch) |
+ FIELD_VALUE(0, DE_PITCH, SOURCE, dst_pitch));
+
+ SMTC_write2Dreg(DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0, DE_WINDOW_WIDTH,
+ SOURCE,
+ dst_pitch));
+
+ SMTC_write2Dreg(DE_FOREGROUND,
+ FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
+
+ SMTC_write2Dreg(DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_DESTINATION, X, nX) |
+ FIELD_VALUE(0, DE_DESTINATION, Y, nY));
+
+ SMTC_write2Dreg(DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X, 1) |
+ FIELD_VALUE(0, DE_DIMENSION, Y_ET, dst_height));
+
+ SMTC_write2Dreg(DE_CONTROL,
+ FIELD_SET(0, DE_CONTROL, STATUS, START) |
+ FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT) |
+ FIELD_SET(0, DE_CONTROL, MAJOR, Y) |
+ FIELD_SET(0, DE_CONTROL, STEP_X, NEGATIVE) |
+ FIELD_SET(0, DE_CONTROL, STEP_Y, POSITIVE) |
+ FIELD_SET(0, DE_CONTROL, LAST_PIXEL, OFF) |
+ FIELD_SET(0, DE_CONTROL, COMMAND, SHORT_STROKE) |
+ FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
+ FIELD_VALUE(0, DE_CONTROL, ROP, 0x0C));
+
+ smtc_de_busy = 1;
+}
+
+void deHorizontalLine(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long nX,
+ unsigned long nY,
+ unsigned long dst_width, unsigned long nColor)
+{
+ deWaitForNotBusy();
+
+ SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
+ FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE, ADDRESS,
+ dst_base));
+
+ SMTC_write2Dreg(DE_PITCH,
+ FIELD_VALUE(0, DE_PITCH, DESTINATION, dst_pitch) |
+ FIELD_VALUE(0, DE_PITCH, SOURCE, dst_pitch));
+
+ SMTC_write2Dreg(DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0, DE_WINDOW_WIDTH,
+ SOURCE,
+ dst_pitch));
+ SMTC_write2Dreg(DE_FOREGROUND,
+ FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
+ SMTC_write2Dreg(DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP,
+ DISABLE) | FIELD_VALUE(0, DE_DESTINATION, X,
+ nX) | FIELD_VALUE(0,
+ DE_DESTINATION,
+ Y,
+ nY));
+ SMTC_write2Dreg(DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X,
+ dst_width) | FIELD_VALUE(0, DE_DIMENSION,
+ Y_ET, 1));
+ SMTC_write2Dreg(DE_CONTROL,
+ FIELD_SET(0, DE_CONTROL, STATUS, START) | FIELD_SET(0,
+ DE_CONTROL,
+ DIRECTION,
+ RIGHT_TO_LEFT)
+ | FIELD_SET(0, DE_CONTROL, MAJOR, X) | FIELD_SET(0,
+ DE_CONTROL,
+ STEP_X,
+ POSITIVE)
+ | FIELD_SET(0, DE_CONTROL, STEP_Y,
+ NEGATIVE) | FIELD_SET(0, DE_CONTROL,
+ LAST_PIXEL,
+ OFF) | FIELD_SET(0,
+ DE_CONTROL,
+ COMMAND,
+ SHORT_STROKE)
+ | FIELD_SET(0, DE_CONTROL, ROP_SELECT,
+ ROP2) | FIELD_VALUE(0, DE_CONTROL, ROP,
+ 0x0C));
+
+ smtc_de_busy = 1;
+}
+
+void deLine(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long nX1,
+ unsigned long nY1,
+ unsigned long nX2, unsigned long nY2, unsigned long nColor)
+{
+ unsigned long nCommand =
+ FIELD_SET(0, DE_CONTROL, STATUS, START) |
+ FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT) |
+ FIELD_SET(0, DE_CONTROL, MAJOR, X) |
+ FIELD_SET(0, DE_CONTROL, STEP_X, POSITIVE) |
+ FIELD_SET(0, DE_CONTROL, STEP_Y, POSITIVE) |
+ FIELD_SET(0, DE_CONTROL, LAST_PIXEL, OFF) |
+ FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
+ FIELD_VALUE(0, DE_CONTROL, ROP, 0x0C);
+ unsigned long DeltaX;
+ unsigned long DeltaY;
+
+ /* Calculate delta X */
+ if (nX1 <= nX2)
+ DeltaX = nX2 - nX1;
+ else {
+ DeltaX = nX1 - nX2;
+ nCommand = FIELD_SET(nCommand, DE_CONTROL, STEP_X, NEGATIVE);
+ }
+
+ /* Calculate delta Y */
+ if (nY1 <= nY2)
+ DeltaY = nY2 - nY1;
+ else {
+ DeltaY = nY1 - nY2;
+ nCommand = FIELD_SET(nCommand, DE_CONTROL, STEP_Y, NEGATIVE);
+ }
+
+ /* Determine the major axis */
+ if (DeltaX < DeltaY)
+ nCommand = FIELD_SET(nCommand, DE_CONTROL, MAJOR, Y);
+
+ /* Vertical line? */
+ if (nX1 == nX2)
+ deVerticalLine(dst_base, dst_pitch, nX1, nY1, DeltaY, nColor);
+
+ /* Horizontal line? */
+ else if (nY1 == nY2)
+ deHorizontalLine(dst_base, dst_pitch, nX1, nY1, \
+ DeltaX, nColor);
+
+ /* Diagonal line? */
+ else if (DeltaX == DeltaY) {
+ deWaitForNotBusy();
+
+ SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
+ FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE,
+ ADDRESS, dst_base));
+
+ SMTC_write2Dreg(DE_PITCH,
+ FIELD_VALUE(0, DE_PITCH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0,
+ DE_PITCH,
+ SOURCE,
+ dst_pitch));
+
+ SMTC_write2Dreg(DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0,
+ DE_WINDOW_WIDTH,
+ SOURCE,
+ dst_pitch));
+
+ SMTC_write2Dreg(DE_FOREGROUND,
+ FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
+
+ SMTC_write2Dreg(DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_DESTINATION, X, 1) |
+ FIELD_VALUE(0, DE_DESTINATION, Y, nY1));
+
+ SMTC_write2Dreg(DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X, 1) |
+ FIELD_VALUE(0, DE_DIMENSION, Y_ET, DeltaX));
+
+ SMTC_write2Dreg(DE_CONTROL,
+ FIELD_SET(nCommand, DE_CONTROL, COMMAND,
+ SHORT_STROKE));
+ }
+
+ /* Generic line */
+ else {
+ unsigned int k1, k2, et, w;
+ if (DeltaX < DeltaY) {
+ k1 = 2 * DeltaX;
+ et = k1 - DeltaY;
+ k2 = et - DeltaY;
+ w = DeltaY + 1;
+ } else {
+ k1 = 2 * DeltaY;
+ et = k1 - DeltaX;
+ k2 = et - DeltaX;
+ w = DeltaX + 1;
+ }
+
+ deWaitForNotBusy();
+
+ SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
+ FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE,
+ ADDRESS, dst_base));
+
+ SMTC_write2Dreg(DE_PITCH,
+ FIELD_VALUE(0, DE_PITCH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0,
+ DE_PITCH,
+ SOURCE,
+ dst_pitch));
+
+ SMTC_write2Dreg(DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0,
+ DE_WINDOW_WIDTH,
+ SOURCE,
+ dst_pitch));
+
+ SMTC_write2Dreg(DE_FOREGROUND,
+ FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
+
+ SMTC_write2Dreg(DE_SOURCE,
+ FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_SOURCE, X_K1, k1) |
+ FIELD_VALUE(0, DE_SOURCE, Y_K2, k2));
+
+ SMTC_write2Dreg(DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_DESTINATION, X, nX1) |
+ FIELD_VALUE(0, DE_DESTINATION, Y, nY1));
+
+ SMTC_write2Dreg(DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X, w) |
+ FIELD_VALUE(0, DE_DIMENSION, Y_ET, et));
+
+ SMTC_write2Dreg(DE_CONTROL,
+ FIELD_SET(nCommand, DE_CONTROL, COMMAND,
+ LINE_DRAW));
+ }
+
+ smtc_de_busy = 1;
+}
+
+void deFillRect(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long dst_X,
+ unsigned long dst_Y,
+ unsigned long dst_width,
+ unsigned long dst_height, unsigned long nColor)
+{
+ deWaitForNotBusy();
+
+ SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
+ FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE, ADDRESS,
+ dst_base));
+
+ if (dst_pitch) {
+ SMTC_write2Dreg(DE_PITCH,
+ FIELD_VALUE(0, DE_PITCH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0,
+ DE_PITCH,
+ SOURCE,
+ dst_pitch));
+
+ SMTC_write2Dreg(DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0,
+ DE_WINDOW_WIDTH,
+ SOURCE,
+ dst_pitch));
+ }
+
+ SMTC_write2Dreg(DE_FOREGROUND,
+ FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
+
+ SMTC_write2Dreg(DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_DESTINATION, X, dst_X) |
+ FIELD_VALUE(0, DE_DESTINATION, Y, dst_Y));
+
+ SMTC_write2Dreg(DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X, dst_width) |
+ FIELD_VALUE(0, DE_DIMENSION, Y_ET, dst_height));
+
+ SMTC_write2Dreg(DE_CONTROL,
+ FIELD_SET(0, DE_CONTROL, STATUS, START) |
+ FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT) |
+ FIELD_SET(0, DE_CONTROL, LAST_PIXEL, OFF) |
+ FIELD_SET(0, DE_CONTROL, COMMAND, RECTANGLE_FILL) |
+ FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
+ FIELD_VALUE(0, DE_CONTROL, ROP, 0x0C));
+
+ smtc_de_busy = 1;
+}
+
+/**********************************************************************
+ *
+ * deRotatePattern
+ *
+ * Purpose
+ * Rotate the given pattern if necessary
+ *
+ * Parameters
+ * [in]
+ * pPattern - Pointer to DE_SURFACE structure containing
+ * pattern attributes
+ * patternX - X position (0-7) of pattern origin
+ * patternY - Y position (0-7) of pattern origin
+ *
+ * [out]
+ * pattern_dstaddr - Pointer to pre-allocated buffer containing
+ * rotated pattern
+ *
+ **********************************************************************/
+void deRotatePattern(unsigned char *pattern_dstaddr,
+ unsigned long pattern_src_addr,
+ unsigned long pattern_BPP,
+ unsigned long pattern_stride, int patternX, int patternY)
+{
+ unsigned int i;
+ unsigned long pattern[PATTERN_WIDTH * PATTERN_HEIGHT];
+ unsigned int x, y;
+ unsigned char *pjPatByte;
+
+ if (pattern_dstaddr != NULL) {
+ deWaitForNotBusy();
+
+ if (patternX || patternY) {
+ /* Rotate pattern */
+ pjPatByte = (unsigned char *)pattern;
+
+ switch (pattern_BPP) {
+ case 8:
+ {
+ for (y = 0; y < 8; y++) {
+ unsigned char *pjBuffer =
+ pattern_dstaddr +
+ ((patternY + y) & 7) * 8;
+ for (x = 0; x < 8; x++) {
+ pjBuffer[(patternX +
+ x) & 7] =
+ pjPatByte[x];
+ }
+ pjPatByte += pattern_stride;
+ }
+ break;
+ }
+
+ case 16:
+ {
+ for (y = 0; y < 8; y++) {
+ unsigned short *pjBuffer =
+ (unsigned short *)
+ pattern_dstaddr +
+ ((patternY + y) & 7) * 8;
+ for (x = 0; x < 8; x++) {
+ pjBuffer[(patternX +
+ x) & 7] =
+ ((unsigned short *)
+ pjPatByte)[x];
+ }
+ pjPatByte += pattern_stride;
+ }
+ break;
+ }
+
+ case 32:
+ {
+ for (y = 0; y < 8; y++) {
+ unsigned long *pjBuffer =
+ (unsigned long *)
+ pattern_dstaddr +
+ ((patternY + y) & 7) * 8;
+ for (x = 0; x < 8; x++) {
+ pjBuffer[(patternX +
+ x) & 7] =
+ ((unsigned long *)
+ pjPatByte)[x];
+ }
+ pjPatByte += pattern_stride;
+ }
+ break;
+ }
+ }
+ } else {
+ /*Don't rotate,just copy pattern into pattern_dstaddr*/
+ for (i = 0; i < (pattern_BPP * 2); i++) {
+ ((unsigned long *)pattern_dstaddr)[i] =
+ pattern[i];
+ }
+ }
+
+ }
+}
+
+/**********************************************************************
+ *
+ * deCopy
+ *
+ * Purpose
+ * Copy a rectangular area of the source surface to a destination surface
+ *
+ * Remarks
+ * Source bitmap must have the same color depth (BPP) as the destination
+ * bitmap.
+ *
+**********************************************************************/
+void deCopy(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long dst_BPP,
+ unsigned long dst_X,
+ unsigned long dst_Y,
+ unsigned long dst_width,
+ unsigned long dst_height,
+ unsigned long src_base,
+ unsigned long src_pitch,
+ unsigned long src_X,
+ unsigned long src_Y, pTransparent pTransp, unsigned char nROP2)
+{
+ unsigned long nDirection = 0;
+ unsigned long nTransparent = 0;
+ /* Direction of ROP2 operation:
+ * 1 = Left to Right,
+ * (-1) = Right to Left
+ */
+ unsigned long opSign = 1;
+ /* xWidth is in pixels */
+ unsigned long xWidth = 192 / (dst_BPP / 8);
+ unsigned long de_ctrl = 0;
+
+ deWaitForNotBusy();
+
+ SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
+ FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE, ADDRESS,
+ dst_base));
+
+ SMTC_write2Dreg(DE_WINDOW_SOURCE_BASE,
+ FIELD_VALUE(0, DE_WINDOW_SOURCE_BASE, ADDRESS,
+ src_base));
+
+ if (dst_pitch && src_pitch) {
+ SMTC_write2Dreg(DE_PITCH,
+ FIELD_VALUE(0, DE_PITCH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0,
+ DE_PITCH,
+ SOURCE,
+ src_pitch));
+
+ SMTC_write2Dreg(DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
+ dst_pitch) | FIELD_VALUE(0,
+ DE_WINDOW_WIDTH,
+ SOURCE,
+ src_pitch));
+ }
+
+ /* Set transparent bits if necessary */
+ if (pTransp != NULL) {
+ nTransparent =
+ pTransp->match | pTransp->select | pTransp->control;
+
+ /* Set color compare register */
+ SMTC_write2Dreg(DE_COLOR_COMPARE,
+ FIELD_VALUE(0, DE_COLOR_COMPARE, COLOR,
+ pTransp->color));
+ }
+
+ /* Determine direction of operation */
+ if (src_Y < dst_Y) {
+ /* +----------+
+ |S |
+ | +----------+
+ | | | |
+ | | | |
+ +---|------+ |
+ | D |
+ +----------+ */
+
+ nDirection = BOTTOM_TO_TOP;
+ } else if (src_Y > dst_Y) {
+ /* +----------+
+ |D |
+ | +----------+
+ | | | |
+ | | | |
+ +---|------+ |
+ | S |
+ +----------+ */
+
+ nDirection = TOP_TO_BOTTOM;
+ } else {
+ /* src_Y == dst_Y */
+
+ if (src_X <= dst_X) {
+ /* +------+---+------+
+ |S | | D|
+ | | | |
+ | | | |
+ | | | |
+ +------+---+------+ */
+
+ nDirection = RIGHT_TO_LEFT;
+ } else {
+ /* src_X > dst_X */
+
+ /* +------+---+------+
+ |D | | S|
+ | | | |
+ | | | |
+ | | | |
+ +------+---+------+ */
+
+ nDirection = LEFT_TO_RIGHT;
+ }
+ }
+
+ if ((nDirection == BOTTOM_TO_TOP) || (nDirection == RIGHT_TO_LEFT)) {
+ src_X += dst_width - 1;
+ src_Y += dst_height - 1;
+ dst_X += dst_width - 1;
+ dst_Y += dst_height - 1;
+ opSign = (-1);
+ }
+
+ if (dst_BPP >= 24) {
+ src_X *= 3;
+ src_Y *= 3;
+ dst_X *= 3;
+ dst_Y *= 3;
+ dst_width *= 3;
+ if ((nDirection == BOTTOM_TO_TOP)
+ || (nDirection == RIGHT_TO_LEFT)) {
+ src_X += 2;
+ dst_X += 2;
+ }
+ }
+
+ /* Workaround for 192 byte hw bug */
+ if ((nROP2 != 0x0C) && ((dst_width * (dst_BPP / 8)) >= 192)) {
+ /*
+ * Perform the ROP2 operation in chunks of (xWidth *
+ * dst_height)
+ */
+ while (1) {
+ deWaitForNotBusy();
+
+ SMTC_write2Dreg(DE_SOURCE,
+ FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_SOURCE, X_K1, src_X) |
+ FIELD_VALUE(0, DE_SOURCE, Y_K2, src_Y));
+
+ SMTC_write2Dreg(DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP,
+ DISABLE) | FIELD_VALUE(0,
+ DE_DESTINATION,
+ X,
+ dst_X)
+ | FIELD_VALUE(0, DE_DESTINATION, Y,
+ dst_Y));
+
+ SMTC_write2Dreg(DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X,
+ xWidth) | FIELD_VALUE(0,
+ DE_DIMENSION,
+ Y_ET,
+ dst_height));
+
+ de_ctrl =
+ FIELD_VALUE(0, DE_CONTROL, ROP,
+ nROP2) | nTransparent | FIELD_SET(0,
+ DE_CONTROL,
+ ROP_SELECT,
+ ROP2)
+ | FIELD_SET(0, DE_CONTROL, COMMAND,
+ BITBLT) | ((nDirection ==
+ 1) ? FIELD_SET(0,
+ DE_CONTROL,
+ DIRECTION,
+ RIGHT_TO_LEFT)
+ : FIELD_SET(0, DE_CONTROL,
+ DIRECTION,
+ LEFT_TO_RIGHT)) |
+ FIELD_SET(0, DE_CONTROL, STATUS, START);
+
+ SMTC_write2Dreg(DE_CONTROL, de_ctrl);
+
+ src_X += (opSign * xWidth);
+ dst_X += (opSign * xWidth);
+ dst_width -= xWidth;
+
+ if (dst_width <= 0) {
+ /* ROP2 operation is complete */
+ break;
+ }
+
+ if (xWidth > dst_width)
+ xWidth = dst_width;
+ }
+ } else {
+ deWaitForNotBusy();
+ SMTC_write2Dreg(DE_SOURCE,
+ FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_SOURCE, X_K1, src_X) |
+ FIELD_VALUE(0, DE_SOURCE, Y_K2, src_Y));
+
+ SMTC_write2Dreg(DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_DESTINATION, X, dst_X) |
+ FIELD_VALUE(0, DE_DESTINATION, Y, dst_Y));
+
+ SMTC_write2Dreg(DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X, dst_width) |
+ FIELD_VALUE(0, DE_DIMENSION, Y_ET, dst_height));
+
+ de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, nROP2) |
+ nTransparent |
+ FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
+ FIELD_SET(0, DE_CONTROL, COMMAND, BITBLT) |
+ ((nDirection == 1) ? FIELD_SET(0, DE_CONTROL, DIRECTION,
+ RIGHT_TO_LEFT)
+ : FIELD_SET(0, DE_CONTROL, DIRECTION,
+ LEFT_TO_RIGHT)) | FIELD_SET(0, DE_CONTROL,
+ STATUS, START);
+ SMTC_write2Dreg(DE_CONTROL, de_ctrl);
+ }
+
+ smtc_de_busy = 1;
+}
+
+/*
+ * This function sets the pixel format that will apply to the 2D Engine.
+ */
+void deSetPixelFormat(unsigned long bpp)
+{
+ unsigned long de_format;
+
+ de_format = SMTC_read2Dreg(DE_STRETCH_FORMAT);
+
+ switch (bpp) {
+ case 8:
+ de_format =
+ FIELD_SET(de_format, DE_STRETCH_FORMAT, PIXEL_FORMAT, 8);
+ break;
+ default:
+ case 16:
+ de_format =
+ FIELD_SET(de_format, DE_STRETCH_FORMAT, PIXEL_FORMAT, 16);
+ break;
+ case 32:
+ de_format =
+ FIELD_SET(de_format, DE_STRETCH_FORMAT, PIXEL_FORMAT, 32);
+ break;
+ }
+
+ SMTC_write2Dreg(DE_STRETCH_FORMAT, de_format);
+}
+
+/*
+ * System memory to Video memory monochrome expansion.
+ *
+ * Source is monochrome image in system memory. This function expands the
+ * monochrome data to color image in video memory.
+ */
+
+long deSystemMem2VideoMemMonoBlt(const char *pSrcbuf,
+ long srcDelta,
+ unsigned long startBit,
+ unsigned long dBase,
+ unsigned long dPitch,
+ unsigned long bpp,
+ unsigned long dx, unsigned long dy,
+ unsigned long width, unsigned long height,
+ unsigned long fColor,
+ unsigned long bColor,
+				 unsigned long rop2)
+{
+ unsigned long bytePerPixel;
+ unsigned long ulBytesPerScan;
+ unsigned long ul4BytesPerScan;
+ unsigned long ulBytesRemain;
+ unsigned long de_ctrl = 0;
+ unsigned char ajRemain[4];
+ long i, j;
+
+ bytePerPixel = bpp / 8;
+
+ /* Just make sure the start bit is within legal range */
+ startBit &= 7;
+
+ ulBytesPerScan = (width + startBit + 7) / 8;
+ ul4BytesPerScan = ulBytesPerScan & ~3;
+ ulBytesRemain = ulBytesPerScan & 3;
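+	/*
+	 * (width + startBit + 7) / 8 rounds the mono scanline up to whole
+	 * bytes; full 32-bit words are pushed straight through the data
+	 * port and any 1-3 leftover bytes are staged in ajRemain below.
+	 */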
+
+ if (smtc_de_busy)
+ deWaitForNotBusy();
+
+ /*
+ * 2D Source Base. Use 0 for HOST Blt.
+ */
+
+ SMTC_write2Dreg(DE_WINDOW_SOURCE_BASE, 0);
+
+ /*
+ * 2D Destination Base.
+ *
+ * It is an address offset (128 bit aligned) from the beginning of
+ * frame buffer.
+ */
+
+ SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE, dBase);
+
+ if (dPitch) {
+
+ /*
+ * Program pitch (distance between the 1st points of two
+ * adjacent lines).
+ *
+		 * Note that the input pitch is a BYTE value, but the 2D Pitch
+		 * register uses pixel values; a byte-to-pixel conversion is needed.
+ */
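+		/*
+		 * For example, a dPitch of 1600 bytes at 16 bpp
+		 * (bytePerPixel == 2) programs a pitch of 800 pixels.
+		 */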
+
+ SMTC_write2Dreg(DE_PITCH,
+ FIELD_VALUE(0, DE_PITCH, DESTINATION,
+ dPitch /
+ bytePerPixel) | FIELD_VALUE(0,
+ DE_PITCH,
+ SOURCE,
+ dPitch /
+ bytePerPixel));
+
+ /* Screen Window width in Pixels.
+ *
+ * 2D engine uses this value to calculate the linear address in
+ * frame buffer for a given point.
+ */
+
+ SMTC_write2Dreg(DE_WINDOW_WIDTH,
+ FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
+ (dPitch /
+ bytePerPixel)) | FIELD_VALUE(0,
+ DE_WINDOW_WIDTH,
+ SOURCE,
+ (dPitch
+ /
+ bytePerPixel)));
+ }
+ /* Note: For 2D Source in Host Write, only X_K1 field is needed, and
+ * Y_K2 field is not used. For mono bitmap, use startBit for X_K1.
+ */
+
+ SMTC_write2Dreg(DE_SOURCE,
+ FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_SOURCE, X_K1, startBit) |
+ FIELD_VALUE(0, DE_SOURCE, Y_K2, 0));
+
+ SMTC_write2Dreg(DE_DESTINATION,
+ FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+ FIELD_VALUE(0, DE_DESTINATION, X, dx) |
+ FIELD_VALUE(0, DE_DESTINATION, Y, dy));
+
+ SMTC_write2Dreg(DE_DIMENSION,
+ FIELD_VALUE(0, DE_DIMENSION, X, width) |
+ FIELD_VALUE(0, DE_DIMENSION, Y_ET, height));
+
+ SMTC_write2Dreg(DE_FOREGROUND, fColor);
+ SMTC_write2Dreg(DE_BACKGROUND, bColor);
+
+	/* Set the pixel format of the destination */
+	if (bpp)
+		deSetPixelFormat(bpp);
+
+ de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
+ FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
+ FIELD_SET(0, DE_CONTROL, COMMAND, HOST_WRITE) |
+ FIELD_SET(0, DE_CONTROL, HOST, MONO) |
+ FIELD_SET(0, DE_CONTROL, STATUS, START);
+
+ SMTC_write2Dreg(DE_CONTROL, de_ctrl | deGetTransparency());
+
+ /* Write MONO data (line by line) to 2D Engine data port */
+ for (i = 0; i < height; i++) {
+ /* For each line, send the data in chunks of 4 bytes */
+ for (j = 0; j < (ul4BytesPerScan / 4); j++)
+ SMTC_write2Ddataport(0,
+ *(unsigned long *)(pSrcbuf +
+ (j * 4)));
+
+ if (ulBytesRemain) {
+ memcpy(ajRemain, pSrcbuf + ul4BytesPerScan,
+ ulBytesRemain);
+ SMTC_write2Ddataport(0, *(unsigned long *)ajRemain);
+ }
+
+ pSrcbuf += srcDelta;
+ }
+ smtc_de_busy = 1;
+
+ return 0;
+}
+
+/*
+ * This function gets the transparency status from the DE_CONTROL register.
+ * It returns a double word with the transparency fields set and all
+ * other fields cleared to 0.
+ */
+unsigned long deGetTransparency(void)
+{
+ unsigned long de_ctrl;
+
+ de_ctrl = SMTC_read2Dreg(DE_CONTROL);
+
+ de_ctrl &=
+ FIELD_MASK(DE_CONTROL_TRANSPARENCY_MATCH) |
+ FIELD_MASK(DE_CONTROL_TRANSPARENCY_SELECT) |
+ FIELD_MASK(DE_CONTROL_TRANSPARENCY);
+
+ return de_ctrl;
+}
diff --git a/drivers/staging/sm7xx/smtc2d.h b/drivers/staging/sm7xx/smtc2d.h
new file mode 100644
index 0000000..38d0c33
--- /dev/null
+++ b/drivers/staging/sm7xx/smtc2d.h
@@ -0,0 +1,530 @@
+/*
+ * Silicon Motion SM712 2D drawing engine functions.
+ *
+ * Copyright (C) 2006 Silicon Motion Technology Corp.
+ * Author: Ge Wang, gewang@siliconmotion.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+/* Internal macros */
+
+#define _F_START(f) (0 ? f)
+#define _F_END(f) (1 ? f)
+#define _F_SIZE(f) (1 + _F_END(f) - _F_START(f))
+#define _F_MASK(f) (((1ULL << _F_SIZE(f)) - 1) << _F_START(f))
+#define _F_NORMALIZE(v, f) (((v) & _F_MASK(f)) >> _F_START(f))
+#define _F_DENORMALIZE(v, f) (((v) << _F_START(f)) & _F_MASK(f))
+
+/* Global macros */
+
+#define FIELD_GET(x, reg, field) \
+( \
+ _F_NORMALIZE((x), reg ## _ ## field) \
+)
+
+#define FIELD_SET(x, reg, field, value) \
+( \
+ (x & ~_F_MASK(reg ## _ ## field)) \
+ | _F_DENORMALIZE(reg ## _ ## field ## _ ## value, reg ## _ ## field) \
+)
+
+#define FIELD_VALUE(x, reg, field, value) \
+( \
+ (x & ~_F_MASK(reg ## _ ## field)) \
+ | _F_DENORMALIZE(value, reg ## _ ## field) \
+)
+
+#define FIELD_CLEAR(reg, field) \
+( \
+ ~_F_MASK(reg ## _ ## field) \
+)
+
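+/*
+ * Fields in this file are written as "high_bit : low_bit", so _F_START()
+ * picks the low bit via (0 ? f) and _F_END() the high bit via (1 ? f).
+ * As an illustration, with DE_SOURCE_X_K1 defined below as 29 : 16,
+ * FIELD_VALUE(0, DE_SOURCE, X_K1, 5) evaluates to (5 << 16).
+ */
+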
+/* Field Macros */
+
+#define FIELD_START(field) (0 ? field)
+#define FIELD_END(field) (1 ? field)
+#define FIELD_SIZE(field) \
+ (1 + FIELD_END(field) - FIELD_START(field))
+
+#define FIELD_MASK(field) \
+ (((1 << (FIELD_SIZE(field)-1)) \
+ | ((1 << (FIELD_SIZE(field)-1)) - 1)) \
+ << FIELD_START(field))
+
+#define FIELD_NORMALIZE(reg, field) \
+ (((reg) & FIELD_MASK(field)) >> FIELD_START(field))
+
+#define FIELD_DENORMALIZE(field, value) \
+ (((value) << FIELD_START(field)) & FIELD_MASK(field))
+
+#define FIELD_INIT(reg, field, value) \
+ FIELD_DENORMALIZE(reg ## _ ## field, \
+ reg ## _ ## field ## _ ## value)
+
+#define FIELD_INIT_VAL(reg, field, value) \
+ (FIELD_DENORMALIZE(reg ## _ ## field, value))
+
+#define FIELD_VAL_SET(x, r, f, v) ({ \
+ x = (x & ~FIELD_MASK(r ## _ ## f)) \
+ | FIELD_DENORMALIZE(r ## _ ## f, r ## _ ## f ## _ ## v) \
+})
+
+#define RGB(r, g, b) ((unsigned long)(((r) << 16) | ((g) << 8) | (b)))
+
+/* Transparent info definition */
+typedef struct {
+ unsigned long match; /* Matching pixel is OPAQUE/TRANSPARENT */
+ unsigned long select; /* Transparency controlled by SRC/DST */
+ unsigned long control; /* ENABLE/DISABLE transparency */
+ unsigned long color; /* Transparent color */
+} Transparent, *pTransparent;
+
+#define PIXEL_DEPTH_1_BP 0 /* 1 bit per pixel */
+#define PIXEL_DEPTH_8_BPP 1 /* 8 bits per pixel */
+#define PIXEL_DEPTH_16_BPP 2 /* 16 bits per pixel */
+#define PIXEL_DEPTH_32_BPP 3 /* 32 bits per pixel */
+#define PIXEL_DEPTH_YUV422 8 /* 16 bits per pixel YUV422 */
+#define PIXEL_DEPTH_YUV420 9 /* 16 bits per pixel YUV420 */
+
+#define PATTERN_WIDTH 8
+#define PATTERN_HEIGHT 8
+
+#define TOP_TO_BOTTOM 0
+#define BOTTOM_TO_TOP 1
+#define RIGHT_TO_LEFT BOTTOM_TO_TOP
+#define LEFT_TO_RIGHT TOP_TO_BOTTOM
+
+/* Constants used in Transparent structure */
+#define MATCH_OPAQUE 0x00000000
+#define MATCH_TRANSPARENT 0x00000400
+#define SOURCE 0x00000000
+#define DESTINATION 0x00000200
+
+/* 2D registers. */
+
+#define DE_SOURCE 0x000000
+#define DE_SOURCE_WRAP 31 : 31
+#define DE_SOURCE_WRAP_DISABLE 0
+#define DE_SOURCE_WRAP_ENABLE 1
+#define DE_SOURCE_X_K1 29 : 16
+#define DE_SOURCE_Y_K2 15 : 0
+
+#define DE_DESTINATION 0x000004
+#define DE_DESTINATION_WRAP 31 : 31
+#define DE_DESTINATION_WRAP_DISABLE 0
+#define DE_DESTINATION_WRAP_ENABLE 1
+#define DE_DESTINATION_X 28 : 16
+#define DE_DESTINATION_Y 15 : 0
+
+#define DE_DIMENSION 0x000008
+#define DE_DIMENSION_X 28 : 16
+#define DE_DIMENSION_Y_ET 15 : 0
+
+#define DE_CONTROL 0x00000C
+#define DE_CONTROL_STATUS 31 : 31
+#define DE_CONTROL_STATUS_STOP 0
+#define DE_CONTROL_STATUS_START 1
+#define DE_CONTROL_PATTERN 30 : 30
+#define DE_CONTROL_PATTERN_MONO 0
+#define DE_CONTROL_PATTERN_COLOR 1
+#define DE_CONTROL_UPDATE_DESTINATION_X 29 : 29
+#define DE_CONTROL_UPDATE_DESTINATION_X_DISABLE 0
+#define DE_CONTROL_UPDATE_DESTINATION_X_ENABLE 1
+#define DE_CONTROL_QUICK_START 28 : 28
+#define DE_CONTROL_QUICK_START_DISABLE 0
+#define DE_CONTROL_QUICK_START_ENABLE 1
+#define DE_CONTROL_DIRECTION 27 : 27
+#define DE_CONTROL_DIRECTION_LEFT_TO_RIGHT 0
+#define DE_CONTROL_DIRECTION_RIGHT_TO_LEFT 1
+#define DE_CONTROL_MAJOR 26 : 26
+#define DE_CONTROL_MAJOR_X 0
+#define DE_CONTROL_MAJOR_Y 1
+#define DE_CONTROL_STEP_X 25 : 25
+#define DE_CONTROL_STEP_X_POSITIVE 1
+#define DE_CONTROL_STEP_X_NEGATIVE 0
+#define DE_CONTROL_STEP_Y 24 : 24
+#define DE_CONTROL_STEP_Y_POSITIVE 1
+#define DE_CONTROL_STEP_Y_NEGATIVE 0
+#define DE_CONTROL_STRETCH 23 : 23
+#define DE_CONTROL_STRETCH_DISABLE 0
+#define DE_CONTROL_STRETCH_ENABLE 1
+#define DE_CONTROL_HOST 22 : 22
+#define DE_CONTROL_HOST_COLOR 0
+#define DE_CONTROL_HOST_MONO 1
+#define DE_CONTROL_LAST_PIXEL 21 : 21
+#define DE_CONTROL_LAST_PIXEL_OFF 0
+#define DE_CONTROL_LAST_PIXEL_ON 1
+#define DE_CONTROL_COMMAND 20 : 16
+#define DE_CONTROL_COMMAND_BITBLT 0
+#define DE_CONTROL_COMMAND_RECTANGLE_FILL 1
+#define DE_CONTROL_COMMAND_DE_TILE 2
+#define DE_CONTROL_COMMAND_TRAPEZOID_FILL 3
+#define DE_CONTROL_COMMAND_ALPHA_BLEND 4
+#define DE_CONTROL_COMMAND_RLE_STRIP 5
+#define DE_CONTROL_COMMAND_SHORT_STROKE 6
+#define DE_CONTROL_COMMAND_LINE_DRAW 7
+#define DE_CONTROL_COMMAND_HOST_WRITE 8
+#define DE_CONTROL_COMMAND_HOST_READ 9
+#define DE_CONTROL_COMMAND_HOST_WRITE_BOTTOM_UP 10
+#define DE_CONTROL_COMMAND_ROTATE 11
+#define DE_CONTROL_COMMAND_FONT 12
+#define DE_CONTROL_COMMAND_TEXTURE_LOAD 15
+#define DE_CONTROL_ROP_SELECT 15 : 15
+#define DE_CONTROL_ROP_SELECT_ROP3 0
+#define DE_CONTROL_ROP_SELECT_ROP2 1
+#define DE_CONTROL_ROP2_SOURCE 14 : 14
+#define DE_CONTROL_ROP2_SOURCE_BITMAP 0
+#define DE_CONTROL_ROP2_SOURCE_PATTERN 1
+#define DE_CONTROL_MONO_DATA 13 : 12
+#define DE_CONTROL_MONO_DATA_NOT_PACKED 0
+#define DE_CONTROL_MONO_DATA_8_PACKED 1
+#define DE_CONTROL_MONO_DATA_16_PACKED 2
+#define DE_CONTROL_MONO_DATA_32_PACKED 3
+#define DE_CONTROL_REPEAT_ROTATE 11 : 11
+#define DE_CONTROL_REPEAT_ROTATE_DISABLE 0
+#define DE_CONTROL_REPEAT_ROTATE_ENABLE 1
+#define DE_CONTROL_TRANSPARENCY_MATCH 10 : 10
+#define DE_CONTROL_TRANSPARENCY_MATCH_OPAQUE 0
+#define DE_CONTROL_TRANSPARENCY_MATCH_TRANSPARENT 1
+#define DE_CONTROL_TRANSPARENCY_SELECT 9 : 9
+#define DE_CONTROL_TRANSPARENCY_SELECT_SOURCE 0
+#define DE_CONTROL_TRANSPARENCY_SELECT_DESTINATION 1
+#define DE_CONTROL_TRANSPARENCY 8 : 8
+#define DE_CONTROL_TRANSPARENCY_DISABLE 0
+#define DE_CONTROL_TRANSPARENCY_ENABLE 1
+#define DE_CONTROL_ROP 7 : 0
+
+/* Pseudo fields. */
+
+#define DE_CONTROL_SHORT_STROKE_DIR 27 : 24
+#define DE_CONTROL_SHORT_STROKE_DIR_225 0
+#define DE_CONTROL_SHORT_STROKE_DIR_135 1
+#define DE_CONTROL_SHORT_STROKE_DIR_315 2
+#define DE_CONTROL_SHORT_STROKE_DIR_45 3
+#define DE_CONTROL_SHORT_STROKE_DIR_270 4
+#define DE_CONTROL_SHORT_STROKE_DIR_90 5
+#define DE_CONTROL_SHORT_STROKE_DIR_180 8
+#define DE_CONTROL_SHORT_STROKE_DIR_0 10
+#define DE_CONTROL_ROTATION 25 : 24
+#define DE_CONTROL_ROTATION_0 0
+#define DE_CONTROL_ROTATION_270 1
+#define DE_CONTROL_ROTATION_90 2
+#define DE_CONTROL_ROTATION_180 3
+
+#define DE_PITCH 0x000010
+#define DE_PITCH_DESTINATION 28 : 16
+#define DE_PITCH_SOURCE 12 : 0
+
+#define DE_FOREGROUND 0x000014
+#define DE_FOREGROUND_COLOR 31 : 0
+
+#define DE_BACKGROUND 0x000018
+#define DE_BACKGROUND_COLOR 31 : 0
+
+#define DE_STRETCH_FORMAT 0x00001C
+#define DE_STRETCH_FORMAT_PATTERN_XY 30 : 30
+#define DE_STRETCH_FORMAT_PATTERN_XY_NORMAL 0
+#define DE_STRETCH_FORMAT_PATTERN_XY_OVERWRITE 1
+#define DE_STRETCH_FORMAT_PATTERN_Y 29 : 27
+#define DE_STRETCH_FORMAT_PATTERN_X 25 : 23
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT 21 : 20
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_8 0
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_16 1
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_24 3
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_32 2
+#define DE_STRETCH_FORMAT_ADDRESSING 19 : 16
+#define DE_STRETCH_FORMAT_ADDRESSING_XY 0
+#define DE_STRETCH_FORMAT_ADDRESSING_LINEAR 15
+#define DE_STRETCH_FORMAT_SOURCE_HEIGHT 11 : 0
+
+#define DE_COLOR_COMPARE 0x000020
+#define DE_COLOR_COMPARE_COLOR 23 : 0
+
+#define DE_COLOR_COMPARE_MASK 0x000024
+#define DE_COLOR_COMPARE_MASK_MASKS 23 : 0
+
+#define DE_MASKS 0x000028
+#define DE_MASKS_BYTE_MASK 31 : 16
+#define DE_MASKS_BIT_MASK 15 : 0
+
+#define DE_CLIP_TL 0x00002C
+#define DE_CLIP_TL_TOP 31 : 16
+#define DE_CLIP_TL_STATUS 13 : 13
+#define DE_CLIP_TL_STATUS_DISABLE 0
+#define DE_CLIP_TL_STATUS_ENABLE 1
+#define DE_CLIP_TL_INHIBIT 12 : 12
+#define DE_CLIP_TL_INHIBIT_OUTSIDE 0
+#define DE_CLIP_TL_INHIBIT_INSIDE 1
+#define DE_CLIP_TL_LEFT 11 : 0
+
+#define DE_CLIP_BR 0x000030
+#define DE_CLIP_BR_BOTTOM 31 : 16
+#define DE_CLIP_BR_RIGHT 12 : 0
+
+#define DE_MONO_PATTERN_LOW 0x000034
+#define DE_MONO_PATTERN_LOW_PATTERN 31 : 0
+
+#define DE_MONO_PATTERN_HIGH 0x000038
+#define DE_MONO_PATTERN_HIGH_PATTERN 31 : 0
+
+#define DE_WINDOW_WIDTH 0x00003C
+#define DE_WINDOW_WIDTH_DESTINATION 28 : 16
+#define DE_WINDOW_WIDTH_SOURCE 12 : 0
+
+#define DE_WINDOW_SOURCE_BASE 0x000040
+#define DE_WINDOW_SOURCE_BASE_EXT 27 : 27
+#define DE_WINDOW_SOURCE_BASE_EXT_LOCAL 0
+#define DE_WINDOW_SOURCE_BASE_EXT_EXTERNAL 1
+#define DE_WINDOW_SOURCE_BASE_CS 26 : 26
+#define DE_WINDOW_SOURCE_BASE_CS_0 0
+#define DE_WINDOW_SOURCE_BASE_CS_1 1
+#define DE_WINDOW_SOURCE_BASE_ADDRESS 25 : 0
+
+#define DE_WINDOW_DESTINATION_BASE 0x000044
+#define DE_WINDOW_DESTINATION_BASE_EXT 27 : 27
+#define DE_WINDOW_DESTINATION_BASE_EXT_LOCAL 0
+#define DE_WINDOW_DESTINATION_BASE_EXT_EXTERNAL 1
+#define DE_WINDOW_DESTINATION_BASE_CS 26 : 26
+#define DE_WINDOW_DESTINATION_BASE_CS_0 0
+#define DE_WINDOW_DESTINATION_BASE_CS_1 1
+#define DE_WINDOW_DESTINATION_BASE_ADDRESS 25 : 0
+
+#define DE_ALPHA 0x000048
+#define DE_ALPHA_VALUE 7 : 0
+
+#define DE_WRAP 0x00004C
+#define DE_WRAP_X 31 : 16
+#define DE_WRAP_Y 15 : 0
+
+#define DE_STATUS 0x000050
+#define DE_STATUS_CSC 1 : 1
+#define DE_STATUS_CSC_CLEAR 0
+#define DE_STATUS_CSC_NOT_ACTIVE 0
+#define DE_STATUS_CSC_ACTIVE 1
+#define DE_STATUS_2D 0 : 0
+#define DE_STATUS_2D_CLEAR 0
+#define DE_STATUS_2D_NOT_ACTIVE 0
+#define DE_STATUS_2D_ACTIVE 1
+
+/* Color Space Conversion registers. */
+
+#define CSC_Y_SOURCE_BASE 0x0000C8
+#define CSC_Y_SOURCE_BASE_EXT 27 : 27
+#define CSC_Y_SOURCE_BASE_EXT_LOCAL 0
+#define CSC_Y_SOURCE_BASE_EXT_EXTERNAL 1
+#define CSC_Y_SOURCE_BASE_CS 26 : 26
+#define CSC_Y_SOURCE_BASE_CS_0 0
+#define CSC_Y_SOURCE_BASE_CS_1 1
+#define CSC_Y_SOURCE_BASE_ADDRESS 25 : 0
+
+#define CSC_CONSTANTS 0x0000CC
+#define CSC_CONSTANTS_Y 31 : 24
+#define CSC_CONSTANTS_R 23 : 16
+#define CSC_CONSTANTS_G 15 : 8
+#define CSC_CONSTANTS_B 7 : 0
+
+#define CSC_Y_SOURCE_X 0x0000D0
+#define CSC_Y_SOURCE_X_INTEGER 26 : 16
+#define CSC_Y_SOURCE_X_FRACTION 15 : 3
+
+#define CSC_Y_SOURCE_Y 0x0000D4
+#define CSC_Y_SOURCE_Y_INTEGER 27 : 16
+#define CSC_Y_SOURCE_Y_FRACTION 15 : 3
+
+#define CSC_U_SOURCE_BASE 0x0000D8
+#define CSC_U_SOURCE_BASE_EXT 27 : 27
+#define CSC_U_SOURCE_BASE_EXT_LOCAL 0
+#define CSC_U_SOURCE_BASE_EXT_EXTERNAL 1
+#define CSC_U_SOURCE_BASE_CS 26 : 26
+#define CSC_U_SOURCE_BASE_CS_0 0
+#define CSC_U_SOURCE_BASE_CS_1 1
+#define CSC_U_SOURCE_BASE_ADDRESS 25 : 0
+
+#define CSC_V_SOURCE_BASE 0x0000DC
+#define CSC_V_SOURCE_BASE_EXT 27 : 27
+#define CSC_V_SOURCE_BASE_EXT_LOCAL 0
+#define CSC_V_SOURCE_BASE_EXT_EXTERNAL 1
+#define CSC_V_SOURCE_BASE_CS 26 : 26
+#define CSC_V_SOURCE_BASE_CS_0 0
+#define CSC_V_SOURCE_BASE_CS_1 1
+#define CSC_V_SOURCE_BASE_ADDRESS 25 : 0
+
+#define CSC_SOURCE_DIMENSION 0x0000E0
+#define CSC_SOURCE_DIMENSION_X 31 : 16
+#define CSC_SOURCE_DIMENSION_Y 15 : 0
+
+#define CSC_SOURCE_PITCH 0x0000E4
+#define CSC_SOURCE_PITCH_Y 31 : 16
+#define CSC_SOURCE_PITCH_UV 15 : 0
+
+#define CSC_DESTINATION 0x0000E8
+#define CSC_DESTINATION_WRAP 31 : 31
+#define CSC_DESTINATION_WRAP_DISABLE 0
+#define CSC_DESTINATION_WRAP_ENABLE 1
+#define CSC_DESTINATION_X 27 : 16
+#define CSC_DESTINATION_Y 11 : 0
+
+#define CSC_DESTINATION_DIMENSION 0x0000EC
+#define CSC_DESTINATION_DIMENSION_X 31 : 16
+#define CSC_DESTINATION_DIMENSION_Y 15 : 0
+
+#define CSC_DESTINATION_PITCH 0x0000F0
+#define CSC_DESTINATION_PITCH_X 31 : 16
+#define CSC_DESTINATION_PITCH_Y 15 : 0
+
+#define CSC_SCALE_FACTOR 0x0000F4
+#define CSC_SCALE_FACTOR_HORIZONTAL 31 : 16
+#define CSC_SCALE_FACTOR_VERTICAL 15 : 0
+
+#define CSC_DESTINATION_BASE 0x0000F8
+#define CSC_DESTINATION_BASE_EXT 27 : 27
+#define CSC_DESTINATION_BASE_EXT_LOCAL 0
+#define CSC_DESTINATION_BASE_EXT_EXTERNAL 1
+#define CSC_DESTINATION_BASE_CS 26 : 26
+#define CSC_DESTINATION_BASE_CS_0 0
+#define CSC_DESTINATION_BASE_CS_1 1
+#define CSC_DESTINATION_BASE_ADDRESS 25 : 0
+
+#define CSC_CONTROL 0x0000FC
+#define CSC_CONTROL_STATUS 31 : 31
+#define CSC_CONTROL_STATUS_STOP 0
+#define CSC_CONTROL_STATUS_START 1
+#define CSC_CONTROL_SOURCE_FORMAT 30 : 28
+#define CSC_CONTROL_SOURCE_FORMAT_YUV422 0
+#define CSC_CONTROL_SOURCE_FORMAT_YUV420I 1
+#define CSC_CONTROL_SOURCE_FORMAT_YUV420 2
+#define CSC_CONTROL_SOURCE_FORMAT_YVU9 3
+#define CSC_CONTROL_SOURCE_FORMAT_IYU1 4
+#define CSC_CONTROL_SOURCE_FORMAT_IYU2 5
+#define CSC_CONTROL_SOURCE_FORMAT_RGB565 6
+#define CSC_CONTROL_SOURCE_FORMAT_RGB8888 7
+#define CSC_CONTROL_DESTINATION_FORMAT 27 : 26
+#define CSC_CONTROL_DESTINATION_FORMAT_RGB565 0
+#define CSC_CONTROL_DESTINATION_FORMAT_RGB8888 1
+#define CSC_CONTROL_HORIZONTAL_FILTER 25 : 25
+#define CSC_CONTROL_HORIZONTAL_FILTER_DISABLE 0
+#define CSC_CONTROL_HORIZONTAL_FILTER_ENABLE 1
+#define CSC_CONTROL_VERTICAL_FILTER 24 : 24
+#define CSC_CONTROL_VERTICAL_FILTER_DISABLE 0
+#define CSC_CONTROL_VERTICAL_FILTER_ENABLE 1
+#define CSC_CONTROL_BYTE_ORDER 23 : 23
+#define CSC_CONTROL_BYTE_ORDER_YUYV 0
+#define CSC_CONTROL_BYTE_ORDER_UYVY 1
+
+#define DE_DATA_PORT_501 0x110000
+#define DE_DATA_PORT_712 0x400000
+#define DE_DATA_PORT_722 0x6000
+
+/* points to the virtual memory-mapped IO starting address */
+extern char *smtc_RegBaseAddress;
+/* points to the virtual video memory starting address */
+extern char *smtc_VRAMBaseAddress;
+extern unsigned char smtc_de_busy;
+
+extern unsigned long memRead32(unsigned long nOffset);
+extern void memWrite32(unsigned long nOffset, unsigned long nData);
+extern unsigned long SMTC_read2Dreg(unsigned long nOffset);
+
+/* 2D functions */
+extern void deInit(unsigned int nModeWidth, unsigned int nModeHeight,
+ unsigned int bpp);
+
+extern void deWaitForNotBusy(void);
+
+extern void deVerticalLine(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long nX,
+ unsigned long nY,
+ unsigned long dst_height,
+ unsigned long nColor);
+
+extern void deHorizontalLine(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long nX,
+ unsigned long nY,
+ unsigned long dst_width,
+ unsigned long nColor);
+
+extern void deLine(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long nX1,
+ unsigned long nY1,
+ unsigned long nX2,
+ unsigned long nY2,
+ unsigned long nColor);
+
+extern void deFillRect(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long dst_X,
+ unsigned long dst_Y,
+ unsigned long dst_width,
+ unsigned long dst_height,
+ unsigned long nColor);
+
+extern void deRotatePattern(unsigned char *pattern_dstaddr,
+ unsigned long pattern_src_addr,
+ unsigned long pattern_BPP,
+ unsigned long pattern_stride,
+ int patternX,
+ int patternY);
+
+extern void deCopy(unsigned long dst_base,
+ unsigned long dst_pitch,
+ unsigned long dst_BPP,
+ unsigned long dst_X,
+ unsigned long dst_Y,
+ unsigned long dst_width,
+ unsigned long dst_height,
+ unsigned long src_base,
+ unsigned long src_pitch,
+ unsigned long src_X,
+ unsigned long src_Y,
+ pTransparent pTransp,
+ unsigned char nROP2);
+
+/*
+ * System memory to Video memory monochrome expansion.
+ *
+ * Source is monochrome image in system memory. This function expands the
+ * monochrome data to color image in video memory.
+ *
+ * @pSrcbuf: pointer to start of source buffer in system memory
+ * @srcDelta: Pitch value (in bytes) of the source buffer, +ve means top
+ * down and -ve means bottom up
+ * @startBit: Mono data can start at any bit in a byte, this value should
+ * be 0 to 7
+ * @dBase: Address of destination : offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @bpp: Color depth of destination surface
+ * @dx, dy: Starting coordinate of destination surface
+ * @width, height: width and height of the rectangle in pixels
+ * @fColor,bColor: Foreground, Background color (corresponding to a 1, 0 in
+ * the monochrome data)
+ * @rop2: ROP value
+ */
+
+extern long deSystemMem2VideoMemMonoBlt(
+ const char *pSrcbuf,
+ long srcDelta,
+ unsigned long startBit,
+ unsigned long dBase,
+ unsigned long dPitch,
+ unsigned long bpp,
+ unsigned long dx, unsigned long dy,
+ unsigned long width, unsigned long height,
+ unsigned long fColor,
+ unsigned long bColor,
+ unsigned long rop2);
+
+extern unsigned long deGetTransparency(void);
+extern void deSetPixelFormat(unsigned long bpp);
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
new file mode 100644
index 0000000..161dbc9
--- /dev/null
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -0,0 +1,1253 @@
+/*
+ * Silicon Motion SM7XX frame buffer device
+ *
+ * Copyright (C) 2006 Silicon Motion Technology Corp.
+ * Authors: Ge Wang, gewang@siliconmotion.com
+ * Boyod boyod.yang@siliconmotion.com.cn
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Version 0.10.26192.21.01
+ * - Add PowerPC/Big endian support
+ * - Add 2D support for Lynx
+ * - Verified on 2.6.19.2  Boyod.yang <boyod.yang@siliconmotion.com.cn>
+ *
+ * Version 0.09.2621.00.01
+ * - Only supports Linux kernel version 2.6.21.
+ * Boyod.yang <boyod.yang@siliconmotion.com.cn>
+ *
+ * Version 0.09
+ * - Only supports Linux kernel version 2.6.12.
+ * Boyod.yang <boyod.yang@siliconmotion.com.cn>
+ */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+
+#include <linux/io.h>
+#include <linux/fb.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+
+#ifdef CONFIG_PM
+#include <linux/pm.h>
+#endif
+
+struct screen_info smtc_screen_info;
+
+#include "smtcfb.h"
+#include "smtc2d.h"
+
+#ifdef DEBUG
+#define smdbg(format, arg...) printk(KERN_DEBUG format , ## arg)
+#else
+#define smdbg(format, arg...)
+#endif
+
+/*
+ * Private structure
+ */
+struct smtcfb_info {
+ /*
+ * The following is a pointer to be passed into the
+ * functions below. The modules outside the main
+ * voyager.c driver have no knowledge as to what
+ * is within this structure.
+ */
+ struct fb_info fb;
+ struct display_switch *dispsw;
+ struct pci_dev *dev;
+ signed int currcon;
+
+ struct {
+ u8 red, green, blue;
+ } palette[NR_RGB];
+
+ u_int palette_size;
+};
+
+struct par_info {
+ /*
+ * Hardware
+ */
+ u16 chipID;
+ unsigned char __iomem *m_pMMIO;
+ char __iomem *m_pLFB;
+ char *m_pDPR;
+ char *m_pVPR;
+ char *m_pCPR;
+
+ u_int width;
+ u_int height;
+ u_int hz;
+ u_long BaseAddressInVRAM;
+ u8 chipRevID;
+};
+
+struct vesa_mode_table {
+ char mode_index[6];
+ u16 lfb_width;
+ u16 lfb_height;
+ u16 lfb_depth;
+};
+
+static struct vesa_mode_table vesa_mode[] = {
+ {"0x301", 640, 480, 8},
+ {"0x303", 800, 600, 8},
+ {"0x305", 1024, 768, 8},
+ {"0x307", 1280, 1024, 8},
+
+ {"0x311", 640, 480, 16},
+ {"0x314", 800, 600, 16},
+ {"0x317", 1024, 768, 16},
+ {"0x31A", 1280, 1024, 16},
+
+ {"0x312", 640, 480, 24},
+ {"0x315", 800, 600, 24},
+ {"0x318", 1024, 768, 24},
+ {"0x31B", 1280, 1024, 24},
+};
+
+char __iomem *smtc_RegBaseAddress; /* Memory Map IO starting address */
+char __iomem *smtc_VRAMBaseAddress; /* video memory starting address */
+
+char *smtc_2DBaseAddress; /* 2D engine starting address */
+char *smtc_2Ddataport; /* 2D data port offset */
+short smtc_2Dacceleration;
+
+static u32 colreg[17];
+static struct par_info hw; /* hardware information */
+
+u16 smtc_ChipIDs[] = {
+ 0x710,
+ 0x712,
+ 0x720
+};
+
+#define numSMTCchipIDs (sizeof(smtc_ChipIDs) / sizeof(u16))
+
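+/*
+ * Poll sequencer register 0x16 until bit 4 is set and bit 3 is clear,
+ * which appears to be the "drawing engine idle" indication, giving up
+ * after 0x1000000 reads; the software busy flag is then cleared.
+ */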
+void deWaitForNotBusy(void)
+{
+ unsigned long i = 0x1000000;
+ while (i--) {
+ if ((smtc_seqr(0x16) & 0x18) == 0x10)
+ break;
+ }
+ smtc_de_busy = 0;
+}
+
+static void sm712_set_timing(struct smtcfb_info *sfb,
+ struct par_info *ppar_info)
+{
+ int i = 0, j = 0;
+ u32 m_nScreenStride;
+
+ smdbg("\nppar_info->width = %d ppar_info->height = %d"
+	      " sfb->fb.var.bits_per_pixel = %d ppar_info->hz = %d\n",
+ ppar_info->width, ppar_info->height,
+ sfb->fb.var.bits_per_pixel, ppar_info->hz);
+
+ for (j = 0; j < numVGAModes; j++) {
+ if (VGAMode[j].mmSizeX == ppar_info->width &&
+ VGAMode[j].mmSizeY == ppar_info->height &&
+ VGAMode[j].bpp == sfb->fb.var.bits_per_pixel &&
+ VGAMode[j].hz == ppar_info->hz) {
+
+ smdbg("\nVGAMode[j].mmSizeX = %d VGAMode[j].mmSizeY ="
+			      " %d VGAMode[j].bpp = %d"
+			      " VGAMode[j].hz = %d\n",
+ VGAMode[j].mmSizeX, VGAMode[j].mmSizeY,
+ VGAMode[j].bpp, VGAMode[j].hz);
+
+ smdbg("VGAMode index=%d\n", j);
+
+ smtc_mmiowb(0x0, 0x3c6);
+
+ smtc_seqw(0, 0x1);
+
+ smtc_mmiowb(VGAMode[j].Init_MISC, 0x3c2);
+
+ /* init SEQ register SR00 - SR04 */
+ for (i = 0; i < SIZE_SR00_SR04; i++)
+ smtc_seqw(i, VGAMode[j].Init_SR00_SR04[i]);
+
+ /* init SEQ register SR10 - SR24 */
+ for (i = 0; i < SIZE_SR10_SR24; i++)
+ smtc_seqw(i + 0x10,
+ VGAMode[j].Init_SR10_SR24[i]);
+
+ /* init SEQ register SR30 - SR75 */
+ for (i = 0; i < SIZE_SR30_SR75; i++)
+ if (((i + 0x30) != 0x62) \
+ && ((i + 0x30) != 0x6a) \
+ && ((i + 0x30) != 0x6b))
+ smtc_seqw(i + 0x30,
+ VGAMode[j].Init_SR30_SR75[i]);
+
+ /* init SEQ register SR80 - SR93 */
+ for (i = 0; i < SIZE_SR80_SR93; i++)
+ smtc_seqw(i + 0x80,
+ VGAMode[j].Init_SR80_SR93[i]);
+
+ /* init SEQ register SRA0 - SRAF */
+ for (i = 0; i < SIZE_SRA0_SRAF; i++)
+ smtc_seqw(i + 0xa0,
+ VGAMode[j].Init_SRA0_SRAF[i]);
+
+ /* init Graphic register GR00 - GR08 */
+ for (i = 0; i < SIZE_GR00_GR08; i++)
+ smtc_grphw(i, VGAMode[j].Init_GR00_GR08[i]);
+
+ /* init Attribute register AR00 - AR14 */
+ for (i = 0; i < SIZE_AR00_AR14; i++)
+ smtc_attrw(i, VGAMode[j].Init_AR00_AR14[i]);
+
+ /* init CRTC register CR00 - CR18 */
+ for (i = 0; i < SIZE_CR00_CR18; i++)
+ smtc_crtcw(i, VGAMode[j].Init_CR00_CR18[i]);
+
+ /* init CRTC register CR30 - CR4D */
+ for (i = 0; i < SIZE_CR30_CR4D; i++)
+ smtc_crtcw(i + 0x30,
+ VGAMode[j].Init_CR30_CR4D[i]);
+
+ /* init CRTC register CR90 - CRA7 */
+ for (i = 0; i < SIZE_CR90_CRA7; i++)
+ smtc_crtcw(i + 0x90,
+ VGAMode[j].Init_CR90_CRA7[i]);
+ }
+ }
+ smtc_mmiowb(0x67, 0x3c2);
+
+ /* set VPR registers */
+ writel(0x0, ppar_info->m_pVPR + 0x0C);
+ writel(0x0, ppar_info->m_pVPR + 0x40);
+
+ /* set data width */
+ m_nScreenStride =
+ (ppar_info->width * sfb->fb.var.bits_per_pixel) / 64;
+ switch (sfb->fb.var.bits_per_pixel) {
+ case 8:
+ writel(0x0, ppar_info->m_pVPR + 0x0);
+ break;
+ case 16:
+ writel(0x00020000, ppar_info->m_pVPR + 0x0);
+ break;
+ case 24:
+ writel(0x00040000, ppar_info->m_pVPR + 0x0);
+ break;
+ case 32:
+ writel(0x00030000, ppar_info->m_pVPR + 0x0);
+ break;
+ }
+ writel((u32) (((m_nScreenStride + 2) << 16) | m_nScreenStride),
+ ppar_info->m_pVPR + 0x10);
+
+}
+
+static void sm712_setpalette(int regno, unsigned red, unsigned green,
+ unsigned blue, struct fb_info *info)
+{
+ struct par_info *cur_par = (struct par_info *)info->par;
+
+ if (cur_par->BaseAddressInVRAM)
+ /*
+ * second display palette for dual head. Enable CRT RAM, 6-bit
+ * RAM
+ */
+ smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x20);
+ else
+ /* primary display palette. Enable LCD RAM only, 6-bit RAM */
+ smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10);
+ smtc_mmiowb(regno, dac_reg);
+ smtc_mmiowb(red >> 10, dac_val);
+ smtc_mmiowb(green >> 10, dac_val);
+ smtc_mmiowb(blue >> 10, dac_val);
+}
+
+static void smtc_set_timing(struct smtcfb_info *sfb,
+			    struct par_info *ppar_info)
+{
+ switch (ppar_info->chipID) {
+ case 0x710:
+ case 0x712:
+ case 0x720:
+ sm712_set_timing(sfb, ppar_info);
+ break;
+ }
+}
+
+static struct fb_var_screeninfo smtcfb_var = {
+ .xres = 1024,
+ .yres = 600,
+ .xres_virtual = 1024,
+ .yres_virtual = 600,
+ .bits_per_pixel = 16,
+ .red = {16, 8, 0},
+ .green = {8, 8, 0},
+ .blue = {0, 8, 0},
+ .activate = FB_ACTIVATE_NOW,
+ .height = -1,
+ .width = -1,
+ .vmode = FB_VMODE_NONINTERLACED,
+};
+
+static struct fb_fix_screeninfo smtcfb_fix = {
+ .id = "sm712fb",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .line_length = 800 * 3,
+ .accel = FB_ACCEL_SMI_LYNX,
+};
+
+/* chan_to_field
+ *
+ * convert a colour value into a field position
+ *
+ * from pxafb.c
+ */
+
+static inline unsigned int chan_to_field(unsigned int chan,
+ struct fb_bitfield *bf)
+{
+ chan &= 0xffff;
+ chan >>= 16 - bf->length;
+ return chan << bf->offset;
+}
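+
+/*
+ * Example: for the 5-bit red field at offset 11 used by the default
+ * 16 bpp mode, chan_to_field(0xffff, &red) yields 0x1f << 11 == 0xf800.
+ */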
+
+static int smtcfb_blank(int blank_mode, struct fb_info *info)
+{
+ /* clear DPMS setting */
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK:
+ /* Screen On: HSync: On, VSync : On */
+ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
+ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+ smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
+ break;
+ case FB_BLANK_NORMAL:
+ /* Screen Off: HSync: On, VSync : On Soft blank */
+ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ /* Screen On: HSync: On, VSync : Off */
+ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ /* Screen On: HSync: Off, VSync : On */
+ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ break;
+ case FB_BLANK_POWERDOWN:
+ /* Screen On: HSync: Off, VSync : Off */
+ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned trans, struct fb_info *info)
+{
+ struct smtcfb_info *sfb = (struct smtcfb_info *)info;
+ u32 val;
+
+ if (regno > 255)
+ return 1;
+
+ switch (sfb->fb.fix.visual) {
+ case FB_VISUAL_DIRECTCOLOR:
+ case FB_VISUAL_TRUECOLOR:
+ /*
+		 * 16/32 bit true-colour, use pseudo-palette for the 16 base colors
+ */
+ if (regno < 16) {
+ if (sfb->fb.var.bits_per_pixel == 16) {
+ u32 *pal = sfb->fb.pseudo_palette;
+ val = chan_to_field(red, &sfb->fb.var.red);
+ val |= chan_to_field(green, \
+ &sfb->fb.var.green);
+ val |= chan_to_field(blue, &sfb->fb.var.blue);
+#ifdef __BIG_ENDIAN
+ pal[regno] =
+ ((red & 0xf800) >> 8) |
+ ((green & 0xe000) >> 13) |
+ ((green & 0x1c00) << 3) |
+ ((blue & 0xf800) >> 3);
+#else
+ pal[regno] = val;
+#endif
+ } else {
+ u32 *pal = sfb->fb.pseudo_palette;
+ val = chan_to_field(red, &sfb->fb.var.red);
+ val |= chan_to_field(green, \
+ &sfb->fb.var.green);
+ val |= chan_to_field(blue, &sfb->fb.var.blue);
+#ifdef __BIG_ENDIAN
+ val =
+					    ((val & 0xff00ff00) >> 8) |
+					    ((val & 0x00ff00ff) << 8);
+#endif
+ pal[regno] = val;
+ }
+ }
+ break;
+
+ case FB_VISUAL_PSEUDOCOLOR:
+ /* color depth 8 bit */
+ sm712_setpalette(regno, red, green, blue, info);
+ break;
+
+ default:
+ return 1; /* unknown type */
+ }
+
+ return 0;
+
+}
+
+#ifdef __BIG_ENDIAN
+static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+ unsigned long p = *ppos;
+
+ u32 *buffer, *dst;
+ u32 __iomem *src;
+ int c, i, cnt = 0, err = 0;
+ unsigned long total_size;
+
+ if (!info || !info->screen_base)
+ return -ENODEV;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return -EPERM;
+
+ total_size = info->screen_size;
+
+ if (total_size == 0)
+ total_size = info->fix.smem_len;
+
+ if (p >= total_size)
+ return 0;
+
+ if (count >= total_size)
+ count = total_size;
+
+ if (count + p > total_size)
+ count = total_size - p;
+
+ buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ src = (u32 __iomem *) (info->screen_base + p);
+
+ if (info->fbops->fb_sync)
+ info->fbops->fb_sync(info);
+
+ while (count) {
+ c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+ dst = buffer;
+ for (i = c >> 2; i--;) {
+ *dst = fb_readl(src++);
+ *dst =
+			    ((*dst & 0xff00ff00) >> 8) |
+			    ((*dst & 0x00ff00ff) << 8);
+ dst++;
+ }
+ if (c & 3) {
+ u8 *dst8 = (u8 *) dst;
+ u8 __iomem *src8 = (u8 __iomem *) src;
+
+ for (i = c & 3; i--;) {
+ if (i & 1) {
+ *dst8++ = fb_readb(++src8);
+ } else {
+ *dst8++ = fb_readb(--src8);
+ src8 += 2;
+ }
+ }
+ src = (u32 __iomem *) src8;
+ }
+
+ if (copy_to_user(buf, buffer, c)) {
+ err = -EFAULT;
+ break;
+ }
+ *ppos += c;
+ buf += c;
+ cnt += c;
+ count -= c;
+ }
+
+ kfree(buffer);
+
+ return (err) ? err : cnt;
+}
+
+static ssize_t
+smtcfb_write(struct fb_info *info, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned long p = *ppos;
+
+ u32 *buffer, *src;
+ u32 __iomem *dst;
+ int c, i, cnt = 0, err = 0;
+ unsigned long total_size;
+
+ if (!info || !info->screen_base)
+ return -ENODEV;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return -EPERM;
+
+ total_size = info->screen_size;
+
+ if (total_size == 0)
+ total_size = info->fix.smem_len;
+
+ if (p > total_size)
+ return -EFBIG;
+
+ if (count > total_size) {
+ err = -EFBIG;
+ count = total_size;
+ }
+
+ if (count + p > total_size) {
+ if (!err)
+ err = -ENOSPC;
+
+ count = total_size - p;
+ }
+
+ buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ dst = (u32 __iomem *) (info->screen_base + p);
+
+ if (info->fbops->fb_sync)
+ info->fbops->fb_sync(info);
+
+ while (count) {
+ c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+ src = buffer;
+
+ if (copy_from_user(src, buf, c)) {
+ err = -EFAULT;
+ break;
+ }
+
+ for (i = c >> 2; i--;) {
+			fb_writel(((*src & 0xff00ff00) >> 8) |
+				  ((*src & 0x00ff00ff) << 8), dst++);
+ src++;
+ }
+ if (c & 3) {
+ u8 *src8 = (u8 *) src;
+ u8 __iomem *dst8 = (u8 __iomem *) dst;
+
+ for (i = c & 3; i--;) {
+ if (i & 1) {
+ fb_writeb(*src8++, ++dst8);
+ } else {
+ fb_writeb(*src8++, --dst8);
+ dst8 += 2;
+ }
+ }
+ dst = (u32 __iomem *) dst8;
+ }
+
+ *ppos += c;
+ buf += c;
+ cnt += c;
+ count -= c;
+ }
+
+ kfree(buffer);
+
+ return (cnt) ? cnt : err;
+}
+#endif	/* __BIG_ENDIAN */
+
+#include "smtc2d.c"
+
+void smtcfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct par_info *p = (struct par_info *)info->par;
+
+ if (smtc_2Dacceleration) {
+ if (!area->width || !area->height)
+ return;
+
+ deCopy(p->BaseAddressInVRAM, 0, info->var.bits_per_pixel,
+ area->dx, area->dy, area->width, area->height,
+ p->BaseAddressInVRAM, 0, area->sx, area->sy, 0, 0xC);
+
+ } else
+ cfb_copyarea(info, area);
+}
+
+void smtcfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct par_info *p = (struct par_info *)info->par;
+
+ if (smtc_2Dacceleration) {
+ if (!rect->width || !rect->height)
+ return;
+ if (info->var.bits_per_pixel >= 24)
+ deFillRect(p->BaseAddressInVRAM, 0, rect->dx * 3,
+ rect->dy * 3, rect->width * 3, rect->height,
+ rect->color);
+ else
+ deFillRect(p->BaseAddressInVRAM, 0, rect->dx, rect->dy,
+ rect->width, rect->height, rect->color);
+ } else
+ cfb_fillrect(info, rect);
+}
+
+void smtcfb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct par_info *p = (struct par_info *)info->par;
+ u32 bg_col = 0, fg_col = 0;
+
+ if ((smtc_2Dacceleration) && (image->depth == 1)) {
+ if (smtc_de_busy)
+ deWaitForNotBusy();
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ bg_col = image->bg_color;
+ fg_col = image->fg_color;
+ break;
+ case 16:
+ bg_col =
+ ((u32 *) (info->pseudo_palette))[image->bg_color];
+ fg_col =
+ ((u32 *) (info->pseudo_palette))[image->fg_color];
+ break;
+ case 32:
+ bg_col =
+ ((u32 *) (info->pseudo_palette))[image->bg_color];
+ fg_col =
+ ((u32 *) (info->pseudo_palette))[image->fg_color];
+ break;
+ }
+
+ deSystemMem2VideoMemMonoBlt(
+ image->data,
+ image->width / 8,
+ 0,
+ p->BaseAddressInVRAM,
+ 0,
+ 0,
+ image->dx, image->dy,
+ image->width, image->height,
+ fg_col, bg_col,
+ 0x0C);
+
+ } else
+ cfb_imageblit(info, image);
+}
+
+static struct fb_ops smtcfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_setcolreg = smtc_setcolreg,
+ .fb_blank = smtcfb_blank,
+ .fb_fillrect = smtcfb_fillrect,
+ .fb_imageblit = smtcfb_imageblit,
+ .fb_copyarea = smtcfb_copyarea,
+#ifdef __BIG_ENDIAN
+ .fb_read = smtcfb_read,
+ .fb_write = smtcfb_write,
+#endif
+
+};
+
+void smtcfb_setmode(struct smtcfb_info *sfb)
+{
+ switch (sfb->fb.var.bits_per_pixel) {
+ case 32:
+ sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
+ sfb->fb.fix.line_length = sfb->fb.var.xres * 4;
+ sfb->fb.var.red.length = 8;
+ sfb->fb.var.green.length = 8;
+ sfb->fb.var.blue.length = 8;
+ sfb->fb.var.red.offset = 16;
+ sfb->fb.var.green.offset = 8;
+ sfb->fb.var.blue.offset = 0;
+
+ break;
+ case 8:
+ sfb->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ sfb->fb.fix.line_length = sfb->fb.var.xres;
+ sfb->fb.var.red.offset = 5;
+ sfb->fb.var.red.length = 3;
+ sfb->fb.var.green.offset = 2;
+ sfb->fb.var.green.length = 3;
+ sfb->fb.var.blue.offset = 0;
+ sfb->fb.var.blue.length = 2;
+ break;
+ case 24:
+ sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
+ sfb->fb.fix.line_length = sfb->fb.var.xres * 3;
+ sfb->fb.var.red.length = 8;
+ sfb->fb.var.green.length = 8;
+ sfb->fb.var.blue.length = 8;
+
+ sfb->fb.var.red.offset = 16;
+ sfb->fb.var.green.offset = 8;
+ sfb->fb.var.blue.offset = 0;
+
+ break;
+ case 16:
+ default:
+ sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
+ sfb->fb.fix.line_length = sfb->fb.var.xres * 2;
+
+ sfb->fb.var.red.length = 5;
+ sfb->fb.var.green.length = 6;
+ sfb->fb.var.blue.length = 5;
+
+ sfb->fb.var.red.offset = 11;
+ sfb->fb.var.green.offset = 5;
+ sfb->fb.var.blue.offset = 0;
+
+ break;
+ }
+
+ hw.width = sfb->fb.var.xres;
+ hw.height = sfb->fb.var.yres;
+ hw.hz = 60;
+ smtc_set_timing(sfb, &hw);
+ if (smtc_2Dacceleration) {
+		printk(KERN_INFO "2D acceleration enabled!\n");
+ /* Init smtc drawing engine */
+ deInit(sfb->fb.var.xres, sfb->fb.var.yres,
+ sfb->fb.var.bits_per_pixel);
+ }
+}
+
+/*
+ * Alloc struct smtcfb_info and assign the default value
+ */
+static struct smtcfb_info *smtc_alloc_fb_info(struct pci_dev *dev,
+ char *name)
+{
+ struct smtcfb_info *sfb;
+
+	sfb = kzalloc(sizeof(struct smtcfb_info), GFP_KERNEL);
+
+	if (!sfb)
+		return NULL;
+
+ sfb->currcon = -1;
+ sfb->dev = dev;
+
+ /*** Init sfb->fb with default value ***/
+ sfb->fb.flags = FBINFO_FLAG_DEFAULT;
+ sfb->fb.fbops = &smtcfb_ops;
+ sfb->fb.var = smtcfb_var;
+ sfb->fb.fix = smtcfb_fix;
+
+ strcpy(sfb->fb.fix.id, name);
+
+ sfb->fb.fix.type = FB_TYPE_PACKED_PIXELS;
+ sfb->fb.fix.type_aux = 0;
+ sfb->fb.fix.xpanstep = 0;
+ sfb->fb.fix.ypanstep = 0;
+ sfb->fb.fix.ywrapstep = 0;
+ sfb->fb.fix.accel = FB_ACCEL_SMI_LYNX;
+
+ sfb->fb.var.nonstd = 0;
+ sfb->fb.var.activate = FB_ACTIVATE_NOW;
+ sfb->fb.var.height = -1;
+ sfb->fb.var.width = -1;
+ /* text mode acceleration */
+ sfb->fb.var.accel_flags = FB_ACCELF_TEXT;
+ sfb->fb.var.vmode = FB_VMODE_NONINTERLACED;
+ sfb->fb.par = &hw;
+ sfb->fb.pseudo_palette = colreg;
+
+ return sfb;
+}
+
+/*
+ * Unmap in the memory mapped IO registers
+ */
+
+static void smtc_unmap_mmio(struct smtcfb_info *sfb)
+{
+ if (sfb && smtc_RegBaseAddress)
+ smtc_RegBaseAddress = NULL;
+}
+
+/*
+ * Map in the screen memory
+ */
+
+static int smtc_map_smem(struct smtcfb_info *sfb,
+ struct pci_dev *dev, u_long smem_len)
+{
+ if (sfb->fb.var.bits_per_pixel == 32) {
+#ifdef __BIG_ENDIAN
+ sfb->fb.fix.smem_start = pci_resource_start(dev, 0)
+ + 0x800000;
+#else
+ sfb->fb.fix.smem_start = pci_resource_start(dev, 0);
+#endif
+ } else {
+ sfb->fb.fix.smem_start = pci_resource_start(dev, 0);
+ }
+
+ sfb->fb.fix.smem_len = smem_len;
+
+ sfb->fb.screen_base = smtc_VRAMBaseAddress;
+
+ if (!sfb->fb.screen_base) {
+ printk(KERN_INFO "%s: unable to map screen memory\n",
+ sfb->fb.fix.id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Unmap in the screen memory
+ *
+ */
+static void smtc_unmap_smem(struct smtcfb_info *sfb)
+{
+ if (sfb && sfb->fb.screen_base) {
+ iounmap(sfb->fb.screen_base);
+ sfb->fb.screen_base = NULL;
+ }
+}
+
+/*
+ * We need to wake up the LynxEM+, and make sure it's in linear memory mode.
+ */
+static inline void sm7xx_init_hw(void)
+{
+ outb_p(0x18, 0x3c4);
+ outb_p(0x11, 0x3c5);
+}
+
+static void smtc_free_fb_info(struct smtcfb_info *sfb)
+{
+ if (sfb) {
+ fb_alloc_cmap(&sfb->fb.cmap, 0, 0);
+ kfree(sfb);
+ }
+}
+
+/*
+ * sm712vga_setup - process command line options, get vga parameter
+ * @options: string of options
+ * Returns zero on success, a negative value otherwise.
+ */
+static int __init __maybe_unused sm712vga_setup(char *options)
+{
+ int index;
+
+ if (!options || !*options) {
+ smdbg("\n No vga parameter\n");
+ return -EINVAL;
+ }
+
+ smtc_screen_info.lfb_width = 0;
+ smtc_screen_info.lfb_height = 0;
+ smtc_screen_info.lfb_depth = 0;
+
+ smdbg("\nsm712vga_setup = %s\n", options);
+
+ for (index = 0;
+ index < (sizeof(vesa_mode) / sizeof(struct vesa_mode_table));
+ index++) {
+ if (strstr(options, vesa_mode[index].mode_index)) {
+ smtc_screen_info.lfb_width = vesa_mode[index].lfb_width;
+ smtc_screen_info.lfb_height =
+ vesa_mode[index].lfb_height;
+ smtc_screen_info.lfb_depth = vesa_mode[index].lfb_depth;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+__setup("vga=", sm712vga_setup);
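+
+/*
+ * For example, booting with "vga=0x31A" selects the 1280x1024, 16 bpp
+ * entry of vesa_mode[] above.
+ */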
+
+/* Jason (08/13/2009)
+ * Original init function changed to probe method to be used by pci_drv
+ * process used to detect chips replaced with kernel process in pci_drv
+ */
+static int __init smtcfb_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct smtcfb_info *sfb;
+ u_long smem_size = 0x00800000; /* default 8MB */
+ char name[16];
+ int err;
+ unsigned long pFramebufferPhysical;
+
+ printk(KERN_INFO
+ "Silicon Motion display driver " SMTC_LINUX_FB_VERSION "\n");
+
+ err = pci_enable_device(pdev); /* enable SMTC chip */
+
+ if (err)
+ return err;
+ err = -ENOMEM;
+
+ hw.chipID = ent->device;
+ sprintf(name, "sm%Xfb", hw.chipID);
+
+ sfb = smtc_alloc_fb_info(pdev, name);
+
+ if (!sfb)
+ goto failed;
+ /* Jason (08/13/2009)
+ * Store fb_info to be further used when suspending and resuming
+ */
+ pci_set_drvdata(pdev, sfb);
+
+ sm7xx_init_hw();
+
+	/* get mode parameter from smtc_screen_info */
+ if (smtc_screen_info.lfb_width != 0) {
+ sfb->fb.var.xres = smtc_screen_info.lfb_width;
+ sfb->fb.var.yres = smtc_screen_info.lfb_height;
+ sfb->fb.var.bits_per_pixel = smtc_screen_info.lfb_depth;
+ } else {
+ /* default resolution 1024x600 16bit mode */
+ sfb->fb.var.xres = SCREEN_X_RES;
+ sfb->fb.var.yres = SCREEN_Y_RES;
+ sfb->fb.var.bits_per_pixel = SCREEN_BPP;
+ }
+
+#ifdef __BIG_ENDIAN
+ if (sfb->fb.var.bits_per_pixel == 24)
+ sfb->fb.var.bits_per_pixel = (smtc_screen_info.lfb_depth = 32);
+#endif
+ /* Map address and memory detection */
+ pFramebufferPhysical = pci_resource_start(pdev, 0);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw.chipRevID);
+
+ switch (hw.chipID) {
+ case 0x710:
+ case 0x712:
+ sfb->fb.fix.mmio_start = pFramebufferPhysical + 0x00400000;
+ sfb->fb.fix.mmio_len = 0x00400000;
+ smem_size = SM712_VIDEOMEMORYSIZE;
+#ifdef __BIG_ENDIAN
+ hw.m_pLFB = (smtc_VRAMBaseAddress =
+ ioremap(pFramebufferPhysical, 0x00c00000));
+#else
+ hw.m_pLFB = (smtc_VRAMBaseAddress =
+ ioremap(pFramebufferPhysical, 0x00800000));
+#endif
+ hw.m_pMMIO = (smtc_RegBaseAddress =
+ smtc_VRAMBaseAddress + 0x00700000);
+ smtc_2DBaseAddress = (hw.m_pDPR =
+ smtc_VRAMBaseAddress + 0x00408000);
+ smtc_2Ddataport = smtc_VRAMBaseAddress + DE_DATA_PORT_712;
+ hw.m_pVPR = hw.m_pLFB + 0x0040c000;
+#ifdef __BIG_ENDIAN
+ if (sfb->fb.var.bits_per_pixel == 32) {
+ smtc_VRAMBaseAddress += 0x800000;
+ hw.m_pLFB += 0x800000;
+ printk(KERN_INFO
+ "\nsmtc_VRAMBaseAddress=%p hw.m_pLFB=%p\n",
+ smtc_VRAMBaseAddress, hw.m_pLFB);
+ }
+#endif
+ if (!smtc_RegBaseAddress) {
+ printk(KERN_INFO
+ "%s: unable to map memory mapped IO\n",
+ sfb->fb.fix.id);
+ return -ENOMEM;
+ }
+
+ /* set MCLK = 14.31818 * (0x16 / 0x2) */
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x62, 0x3e);
+ /* enable PCI burst */
+ smtc_seqw(0x17, 0x20);
+ /* enable word swap */
+#ifdef __BIG_ENDIAN
+ if (sfb->fb.var.bits_per_pixel == 32)
+ smtc_seqw(0x17, 0x30);
+#endif
+#ifdef CONFIG_FB_SM7XX_ACCEL
+ smtc_2Dacceleration = 1;
+#endif
+ break;
+ case 0x720:
+ sfb->fb.fix.mmio_start = pFramebufferPhysical;
+ sfb->fb.fix.mmio_len = 0x00200000;
+ smem_size = SM722_VIDEOMEMORYSIZE;
+ smtc_2DBaseAddress = (hw.m_pDPR =
+ ioremap(pFramebufferPhysical, 0x00a00000));
+ hw.m_pLFB = (smtc_VRAMBaseAddress =
+ smtc_2DBaseAddress + 0x00200000);
+ hw.m_pMMIO = (smtc_RegBaseAddress =
+ smtc_2DBaseAddress + 0x000c0000);
+ smtc_2Ddataport = smtc_2DBaseAddress + DE_DATA_PORT_722;
+ hw.m_pVPR = smtc_2DBaseAddress + 0x800;
+
+ smtc_seqw(0x62, 0xff);
+ smtc_seqw(0x6a, 0x0d);
+ smtc_seqw(0x6b, 0x02);
+ smtc_2Dacceleration = 0;
+ break;
+ default:
+ printk(KERN_INFO
+ "No valid Silicon Motion display chip was detected!\n");
+
+ smtc_free_fb_info(sfb);
+ return err;
+ }
+
+	/* 15 bpp is not supported; promote it to 16 bpp */
+ if (15 == sfb->fb.var.bits_per_pixel)
+ sfb->fb.var.bits_per_pixel = 16;
+
+ sfb->fb.var.xres_virtual = sfb->fb.var.xres;
+ sfb->fb.var.yres_virtual = sfb->fb.var.yres;
+ err = smtc_map_smem(sfb, pdev, smem_size);
+ if (err)
+ goto failed;
+
+ smtcfb_setmode(sfb);
+	/* Primary display starting from position 0 */
+ hw.BaseAddressInVRAM = 0;
+ sfb->fb.par = &hw;
+
+ err = register_framebuffer(&sfb->fb);
+ if (err < 0)
+ goto failed;
+
+	printk(KERN_INFO "Silicon Motion SM%X Rev%X primary display mode "
+	       "%dx%d-%d Init Complete.\n", hw.chipID, hw.chipRevID,
+ sfb->fb.var.xres, sfb->fb.var.yres,
+ sfb->fb.var.bits_per_pixel);
+
+ return 0;
+
+ failed:
+ printk(KERN_INFO "Silicon Motion, Inc. primary display init fail\n");
+
+ smtc_unmap_smem(sfb);
+ smtc_unmap_mmio(sfb);
+ smtc_free_fb_info(sfb);
+
+ return err;
+}
+
+
+/* Jason (08/11/2009) PCI_DRV wrapper essential structs */
+static struct pci_device_id smtcfb_pci_table[] = {
+ {0x126f, 0x710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x126f, 0x712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0x126f, 0x720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+
+
+/* Jason (08/14/2009)
+ * do some clean up when the driver module is removed
+ */
+static void __devexit smtcfb_pci_remove(struct pci_dev *pdev)
+{
+ struct smtcfb_info *sfb;
+
+ sfb = pci_get_drvdata(pdev);
+ pci_set_drvdata(pdev, NULL);
+ smtc_unmap_smem(sfb);
+ smtc_unmap_mmio(sfb);
+ unregister_framebuffer(&sfb->fb);
+ smtc_free_fb_info(sfb);
+}
+
+/* Jason (08/14/2009)
+ * suspend function, called when the suspend event is triggered
+ */
+static int __maybe_unused smtcfb_suspend(struct pci_dev *pdev, pm_message_t msg)
+{
+ struct smtcfb_info *sfb;
+ int retv;
+
+ sfb = pci_get_drvdata(pdev);
+
+	/* put the hw in sleep mode, use external clock and self memory refresh
+	 * so that we can turn off the internal PLLs later on
+ */
+ smtc_seqw(0x20, (smtc_seqr(0x20) | 0xc0));
+ smtc_seqw(0x69, (smtc_seqr(0x69) & 0xf7));
+
+ switch (msg.event) {
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_PRETHAW:
+ pdev->dev.power.power_state = msg;
+ return 0;
+ }
+
+ /* when doing suspend, call fb apis and pci apis */
+ if (msg.event == PM_EVENT_SUSPEND) {
+ acquire_console_sem();
+ fb_set_suspend(&sfb->fb, 1);
+ release_console_sem();
+ retv = pci_save_state(pdev);
+ pci_disable_device(pdev);
+ retv = pci_choose_state(pdev, msg);
+ retv = pci_set_power_state(pdev, retv);
+ }
+
+ pdev->dev.power.power_state = msg;
+
+	/* additionally turn off all function blocks including internal PLLs */
+ smtc_seqw(0x21, 0xff);
+
+ return 0;
+}
+
+static int __maybe_unused smtcfb_resume(struct pci_dev *pdev)
+{
+ struct smtcfb_info *sfb;
+ int retv;
+
+ sfb = pci_get_drvdata(pdev);
+
+ /* when resuming, restore pci data and fb cursor */
+ if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) {
+ retv = pci_set_power_state(pdev, PCI_D0);
+ retv = pci_restore_state(pdev);
+ if (pci_enable_device(pdev))
+ return -1;
+ pci_set_master(pdev);
+ }
+
+ /* reinit hardware */
+ sm7xx_init_hw();
+ switch (hw.chipID) {
+ case 0x710:
+ case 0x712:
+ /* set MCLK = 14.31818 * (0x16 / 0x2) */
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ smtc_seqw(0x62, 0x3e);
+ /* enable PCI burst */
+ smtc_seqw(0x17, 0x20);
+#ifdef __BIG_ENDIAN
+ if (sfb->fb.var.bits_per_pixel == 32)
+ smtc_seqw(0x17, 0x30);
+#endif
+ break;
+ case 0x720:
+ smtc_seqw(0x62, 0xff);
+ smtc_seqw(0x6a, 0x0d);
+ smtc_seqw(0x6b, 0x02);
+ break;
+ }
+
+ smtc_seqw(0x34, (smtc_seqr(0x34) | 0xc0));
+ smtc_seqw(0x33, ((smtc_seqr(0x33) | 0x08) & 0xfb));
+
+ smtcfb_setmode(sfb);
+
+ acquire_console_sem();
+ fb_set_suspend(&sfb->fb, 0);
+ release_console_sem();
+
+ return 0;
+}
+
+/* Jason (08/13/2009)
+ * pci_driver struct used to wrap the original driver
+ * so that it can be registered into the kernel and
+ * the proper method would be called when suspending and resuming
+ */
+static struct pci_driver smtcfb_driver = {
+ .name = "smtcfb",
+ .id_table = smtcfb_pci_table,
+ .probe = smtcfb_pci_probe,
+ .remove = __devexit_p(smtcfb_pci_remove),
+#ifdef CONFIG_PM
+ .suspend = smtcfb_suspend,
+ .resume = smtcfb_resume,
+#endif
+};
+
+static int __init smtcfb_init(void)
+{
+ return pci_register_driver(&smtcfb_driver);
+}
+
+static void __exit smtcfb_exit(void)
+{
+ pci_unregister_driver(&smtcfb_driver);
+}
+
+module_init(smtcfb_init);
+module_exit(smtcfb_exit);
+
+MODULE_AUTHOR("Siliconmotion ");
+MODULE_DESCRIPTION("Framebuffer driver for SMI Graphic Cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
new file mode 100644
index 0000000..7f2c341
--- /dev/null
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -0,0 +1,793 @@
+/*
+ * Silicon Motion SM712 frame buffer device
+ *
+ * Copyright (C) 2006 Silicon Motion Technology Corp.
+ * Authors: Ge Wang, gewang@siliconmotion.com
+ * Boyod boyod.yang@siliconmotion.com.cn
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#define SMTC_LINUX_FB_VERSION "version 0.11.2619.21.01 July 27, 2008"
+
+#define NR_PALETTE 256
+#define NR_RGB 2
+
+#define FB_ACCEL_SMI_LYNX 88
+
+#ifdef __BIG_ENDIAN
+#define PC_VGA 0
+#else
+#define PC_VGA 1
+#endif
+
+#define SCREEN_X_RES 1024
+#define SCREEN_Y_RES 600
+#define SCREEN_BPP 16
+
+#ifndef FIELD_OFFSET
+#define FIELD_OFFSET(type, field) \
+ ((unsigned long) (PUCHAR) & (((type *)0)->field))
+#endif
+
+/* Assume the SM712 graphics chip has 4MB VRAM */
+#define SM712_VIDEOMEMORYSIZE 0x00400000
+/* Assume the SM722 graphics chip has 8MB VRAM */
+#define SM722_VIDEOMEMORYSIZE 0x00800000
+
+#define dac_reg (0x3c8)
+#define dac_val (0x3c9)
+
+extern char *smtc_RegBaseAddress;
+#define smtc_mmiowb(dat, reg) writeb(dat, smtc_RegBaseAddress + reg)
+#define smtc_mmioww(dat, reg) writew(dat, smtc_RegBaseAddress + reg)
+#define smtc_mmiowl(dat, reg) writel(dat, smtc_RegBaseAddress + reg)
+
+#define smtc_mmiorb(reg) readb(smtc_RegBaseAddress + reg)
+#define smtc_mmiorw(reg) readw(smtc_RegBaseAddress + reg)
+#define smtc_mmiorl(reg) readl(smtc_RegBaseAddress + reg)
+
+#define SIZE_SR00_SR04 (0x04 - 0x00 + 1)
+#define SIZE_SR10_SR24 (0x24 - 0x10 + 1)
+#define SIZE_SR30_SR75 (0x75 - 0x30 + 1)
+#define SIZE_SR80_SR93 (0x93 - 0x80 + 1)
+#define SIZE_SRA0_SRAF (0xAF - 0xA0 + 1)
+#define SIZE_GR00_GR08 (0x08 - 0x00 + 1)
+#define SIZE_AR00_AR14 (0x14 - 0x00 + 1)
+#define SIZE_CR00_CR18 (0x18 - 0x00 + 1)
+#define SIZE_CR30_CR4D (0x4D - 0x30 + 1)
+#define SIZE_CR90_CRA7 (0xA7 - 0x90 + 1)
+#define SIZE_VPR (0x6C + 1)
+#define SIZE_DPR (0x44 + 1)
+
+static inline void smtc_crtcw(int reg, int val)
+{
+ smtc_mmiowb(reg, 0x3d4);
+ smtc_mmiowb(val, 0x3d5);
+}
+
+static inline unsigned int smtc_crtcr(int reg)
+{
+ smtc_mmiowb(reg, 0x3d4);
+ return smtc_mmiorb(0x3d5);
+}
+
+static inline void smtc_grphw(int reg, int val)
+{
+ smtc_mmiowb(reg, 0x3ce);
+ smtc_mmiowb(val, 0x3cf);
+}
+
+static inline unsigned int smtc_grphr(int reg)
+{
+ smtc_mmiowb(reg, 0x3ce);
+ return smtc_mmiorb(0x3cf);
+}
+
+static inline void smtc_attrw(int reg, int val)
+{
+ smtc_mmiorb(0x3da);
+ smtc_mmiowb(reg, 0x3c0);
+ smtc_mmiorb(0x3c1);
+ smtc_mmiowb(val, 0x3c0);
+}
+
+static inline void smtc_seqw(int reg, int val)
+{
+ smtc_mmiowb(reg, 0x3c4);
+ smtc_mmiowb(val, 0x3c5);
+}
+
+static inline unsigned int smtc_seqr(int reg)
+{
+ smtc_mmiowb(reg, 0x3c4);
+ return smtc_mmiorb(0x3c5);
+}
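+
+/*
+ * The helpers above use the usual indexed VGA access scheme: write the
+ * register index to the index port (0x3d4 for CRTC, 0x3ce for graphics,
+ * 0x3c4 for the sequencer), then read or write the value through the
+ * matching data port.  The attribute controller additionally resets its
+ * index/data flip-flop by reading port 0x3da first.
+ */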
+
+/*
+ * The next structure holds all information relevant for a specific video mode.
+ */
+
+struct ModeInit {
+ int mmSizeX;
+ int mmSizeY;
+ int bpp;
+ int hz;
+ unsigned char Init_MISC;
+ unsigned char Init_SR00_SR04[SIZE_SR00_SR04];
+ unsigned char Init_SR10_SR24[SIZE_SR10_SR24];
+ unsigned char Init_SR30_SR75[SIZE_SR30_SR75];
+ unsigned char Init_SR80_SR93[SIZE_SR80_SR93];
+ unsigned char Init_SRA0_SRAF[SIZE_SRA0_SRAF];
+ unsigned char Init_GR00_GR08[SIZE_GR00_GR08];
+ unsigned char Init_AR00_AR14[SIZE_AR00_AR14];
+ unsigned char Init_CR00_CR18[SIZE_CR00_CR18];
+ unsigned char Init_CR30_CR4D[SIZE_CR30_CR4D];
+ unsigned char Init_CR90_CRA7[SIZE_CR90_CRA7];
+};
+
+/**********************************************************************
+ SM712 Mode table.
+ **********************************************************************/
+struct ModeInit VGAMode[] = {
+ {
+ /* mode#0: 640 x 480 16Bpp 60Hz */
+ 640, 480, 16, 60,
+ /* Init_MISC */
+ 0xE3,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x00, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
+ 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
+ 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
+ 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
+ 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
+ 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
+ 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
+ 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
+ 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
+ },
+ },
+ {
+ /* mode#1: 640 x 480 24Bpp 60Hz */
+ 640, 480, 24, 60,
+ /* Init_MISC */
+ 0xE3,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x00, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
+ 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
+ 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
+ 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
+ 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
+ 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
+ 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
+ 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
+ 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
+ },
+ },
+ {
+ /* mode#2: 640 x 480 32Bpp 60Hz */
+ 640, 480, 32, 60,
+ /* Init_MISC */
+ 0xE3,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x00, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
+ 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
+ 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
+ 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
+ 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
+ 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
+ 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
+ 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
+ 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
+ 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
+ },
+ },
+
+ { /* mode#3: 800 x 600 16Bpp 60Hz */
+ 800, 600, 16, 60,
+ /* Init_MISC */
+ 0x2B,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
+ 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
+ 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
+ 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
+ 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
+ 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
+ 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
+ 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
+ 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
+ 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
+ },
+ },
+ { /* mode#4: 800 x 600 24Bpp 60Hz */
+ 800, 600, 24, 60,
+ 0x2B,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x36, 0x03, 0x20, 0x09, 0xC0, 0x36, 0x36, 0x36,
+ 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x36, 0x36, 0x36,
+ 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
+ 0x04, 0x55, 0x59, 0x36, 0x36, 0x00, 0x00, 0x36,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
+ 0x02, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x36,
+ 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x36, 0x36,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
+ 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
+ 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
+ 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
+ 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
+ },
+ },
+ { /* mode#5: 800 x 600 32Bpp 60Hz */
+ 800, 600, 32, 60,
+ /* Init_MISC */
+ 0x2B,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
+ 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
+ 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
+ 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
+ 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
+ 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+ 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
+ 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
+ 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
+ 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
+ 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
+ },
+ },
+ /* Use the 1024x768 timing table to drive the 1024x600 panel on Lemote machines */
+ { /* mode#6: 1024 x 600 16Bpp 60Hz */
+ 1024, 600, 16, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x00, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xC8, 0x40, 0x14, 0x60, 0x00, 0x0A, 0x17, 0x20,
+ 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x00, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x22, 0x03, 0x24, 0x09, 0xC0, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x22, 0x22, 0x22,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x22, 0x22, 0x00, 0x00, 0x22,
+ 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x16, 0x02, 0x0D, 0x82, 0x09, 0x02,
+ 0x04, 0x45, 0x3F, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0xA3, 0x7F, 0x00, 0x82, 0x0b, 0x6f, 0x57, 0x00,
+ 0x5c, 0x0f, 0xE0, 0xe0, 0x7F, 0x57,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+ { /* mode#7: 1024 x 768 24Bpp 60Hz */
+ 1024, 768, 24, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+ { /* mode#8: 1024 x 768 32Bpp 60Hz */
+ 1024, 768, 32, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x32, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+ { /* mode#9: 320 x 240 16Bpp 60Hz */
+ 320, 240, 16, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x32, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
+ 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+
+ { /* mode#10: 320 x 240 32Bpp 60Hz */
+ 320, 240, 32, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x32, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
+ 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
+};
+
+#define numVGAModes (sizeof(VGAMode) / sizeof(struct ModeInit))
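Each VGAMode[] entry couples a resolution/depth/refresh tuple with the complete register image needed to program it. A driver would normally scan the table for an exact match before loading the registers; a minimal lookup sketch, where the helper name and fallback behaviour are assumptions for illustration rather than part of this driver:

static struct ModeInit *smtc_find_mode_demo(int xres, int yres, int bpp, int hz)
{
	int i;

	for (i = 0; i < numVGAModes; i++) {
		if (VGAMode[i].mmSizeX == xres &&
		    VGAMode[i].mmSizeY == yres &&
		    VGAMode[i].bpp == bpp &&
		    VGAMode[i].hz == hz)
			return &VGAMode[i];
	}
	return NULL;	/* no exact match; the caller would fall back to a default */
}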
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index 825bbc4..061e730 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,6 +1,6 @@
config VT6655
tristate "VIA Technologies VT6655 support"
- depends on PCI
+ depends on PCI && WLAN
select WIRELESS_EXT
select WEXT_PRIV
---help---
diff --git a/drivers/staging/vt6656/Kconfig b/drivers/staging/vt6656/Kconfig
index 87bcd26..1055b52 100644
--- a/drivers/staging/vt6656/Kconfig
+++ b/drivers/staging/vt6656/Kconfig
@@ -1,6 +1,6 @@
config VT6656
tristate "VIA Technologies VT6656 support"
- depends on USB
+ depends on USB && WLAN
select WIRELESS_EXT
select WEXT_PRIV
---help---
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 7d76a7f..aaa70ed 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -439,7 +439,7 @@ void free_chunks(imgchunk_t *fchunk, unsigned int *nfchunks)
}
}
*nfchunks = 0;
- memset(fchunk, 0, sizeof(fchunk));
+ memset(fchunk, 0, sizeof(*fchunk));
}
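The prism2fw hunk above fixes a common sizeof slip: sizeof(fchunk) is the size of the pointer, so the memset cleared only 4 or 8 bytes instead of the structure it points at. A standalone illustration of the two forms, with a hypothetical structure standing in for imgchunk_t:

#include <string.h>

struct chunk_demo {
	unsigned int addr;
	unsigned int len;
	unsigned char *data;
};

static void clear_chunk_demo(struct chunk_demo *fchunk)
{
	memset(fchunk, 0, sizeof(fchunk));	/* wrong: clears only sizeof(void *) bytes */
	memset(fchunk, 0, sizeof(*fchunk));	/* right: clears the whole structure */
}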
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 6f8d8f97..5066de5 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -225,6 +225,12 @@ passive_store(struct device *dev, struct device_attribute *attr,
if (!sscanf(buf, "%d\n", &state))
return -EINVAL;
+ /* sanity check: values below 1000 millicelsius don't make sense
+ * and can cause the system to go into a thermal heart attack
+ */
+ if (state && state < 1000)
+ return -EINVAL;
+
if (state && !tz->forced_passive) {
mutex_lock(&thermal_list_lock);
list_for_each_entry(cdev, &thermal_cdev_list, node) {
@@ -235,6 +241,8 @@ passive_store(struct device *dev, struct device_attribute *attr,
cdev);
}
mutex_unlock(&thermal_list_lock);
+ if (!tz->passive_delay)
+ tz->passive_delay = 1000;
} else if (!state && tz->forced_passive) {
mutex_lock(&thermal_list_lock);
list_for_each_entry(cdev, &thermal_cdev_list, node) {
@@ -245,17 +253,12 @@ passive_store(struct device *dev, struct device_attribute *attr,
cdev);
}
mutex_unlock(&thermal_list_lock);
+ tz->passive_delay = 0;
}
tz->tc1 = 1;
tz->tc2 = 1;
- if (!tz->passive_delay)
- tz->passive_delay = 1000;
-
- if (!tz->polling_delay)
- tz->polling_delay = 10000;
-
tz->forced_passive = state;
thermal_zone_device_update(tz);
@@ -374,7 +377,7 @@ thermal_cooling_device_cur_state_store(struct device *dev,
if (!sscanf(buf, "%ld\n", &state))
return -EINVAL;
- if (state < 0)
+ if ((long)state < 0)
return -EINVAL;
result = cdev->ops->set_cur_state(cdev, state);
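The (long)state cast above matters because an unsigned value can never compare below zero, so the original range check was a no-op; the cast only makes sense if state is declared unsigned in this function, which is the assumption behind this illustration:

#include <linux/kernel.h>

static void check_state_demo(unsigned long state)
{
	if (state < 0)			/* never true: an unsigned value is always >= 0 */
		pr_info("negative (unsigned check)\n");

	if ((long)state < 0)		/* true once the sign bit is set, as intended */
		pr_info("negative (signed check)\n");
}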
@@ -1016,6 +1019,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
thermal_zone_device_set_polling(tz, tz->passive_delay);
else if (tz->polling_delay)
thermal_zone_device_set_polling(tz, tz->polling_delay);
+ else
+ thermal_zone_device_set_polling(tz, 0);
mutex_unlock(&tz->lock);
}
EXPORT_SYMBOL(thermal_zone_device_update);
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 473aa1a..be3c9b8 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -44,5 +44,3 @@ obj-y += early/
obj-$(CONFIG_USB_ATM) += atm/
obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
-
-obj-$(CONFIG_USB_ULPI) += otg/
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 96f1171..355dffc 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -494,7 +494,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
return 0;
/* allocate 2^1 pages = 8K (on i386);
* should be more than enough for one device */
- pages_start = (char *)__get_free_pages(GFP_KERNEL, 1);
+ pages_start = (char *)__get_free_pages(GFP_NOIO, 1);
if (!pages_start)
return -ENOMEM;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6e8bcdf..a678186 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
- if (as->userbuffer)
+ if (as->userbuffer && urb->actual_length)
if (copy_to_user(as->userbuffer, urb->transfer_buffer,
- urb->transfer_buffer_length))
+ urb->actual_length))
goto err_out;
if (put_user(as->status, &userurb->status))
goto err_out;
@@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg)
}
}
- free_async(as);
-
if (put_user(addr, (void __user * __user *)arg))
return -EFAULT;
return 0;
err_out:
- free_async(as);
return -EFAULT;
}
@@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps)
static int proc_reapurb(struct dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
- if (as)
- return processcompl(as, (void __user * __user *)arg);
+ if (as) {
+ int retval = processcompl(as, (void __user * __user *)arg);
+ free_async(as);
+ return retval;
+ }
if (signal_pending(current))
return -EINTR;
return -EIO;
@@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
{
+ int retval;
struct async *as;
- if (!(as = async_getcompleted(ps)))
- return -EAGAIN;
- return processcompl(as, (void __user * __user *)arg);
+ as = async_getcompleted(ps);
+ retval = -EAGAIN;
+ if (as) {
+ retval = processcompl(as, (void __user * __user *)arg);
+ free_async(as);
+ }
+ return retval;
}
#ifdef CONFIG_COMPAT
@@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
- if (as->userbuffer)
+ if (as->userbuffer && urb->actual_length)
if (copy_to_user(as->userbuffer, urb->transfer_buffer,
- urb->transfer_buffer_length))
+ urb->actual_length))
return -EFAULT;
if (put_user(as->status, &userurb->status))
return -EFAULT;
@@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
}
}
- free_async(as);
if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
return -EFAULT;
return 0;
@@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
- if (as)
- return processcompl_compat(as, (void __user * __user *)arg);
+ if (as) {
+ int retval = processcompl_compat(as, (void __user * __user *)arg);
+ free_async(as);
+ return retval;
+ }
if (signal_pending(current))
return -EINTR;
return -EIO;
@@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
{
+ int retval;
struct async *as;
- if (!(as = async_getcompleted(ps)))
- return -EAGAIN;
- return processcompl_compat(as, (void __user * __user *)arg);
+ retval = -EAGAIN;
+ as = async_getcompleted(ps);
+ if (as) {
+ retval = processcompl_compat(as, (void __user * __user *)arg);
+ free_async(as);
+ }
+ return retval;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6dac3b8..80995ef 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1597,7 +1597,9 @@ rescan:
}
/**
- * Check whether a new bandwidth setting exceeds the bus bandwidth.
+ * usb_hcd_alloc_bandwidth - check whether a new bandwidth setting exceeds
+ * the bus bandwidth
+ * @udev: target &usb_device
* @new_config: new configuration to install
* @cur_alt: the current alternate interface setting
* @new_alt: alternate interface setting that is being installed
@@ -1682,6 +1684,24 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
}
}
if (cur_alt && new_alt) {
+ struct usb_interface *iface = usb_ifnum_to_if(udev,
+ cur_alt->desc.bInterfaceNumber);
+
+ if (iface->resetting_device) {
+ /*
+ * The USB core just reset the device, so the xHCI host
+ * and the device will think alt setting 0 is installed.
+ * However, the USB core will pass in the alternate
+ * setting installed before the reset as cur_alt. Dig
+ * out the alternate setting 0 structure, or the first
+ * alternate setting if a broken device doesn't have alt
+ * setting 0.
+ */
+ cur_alt = usb_altnum_to_altsetting(iface, 0);
+ if (!cur_alt)
+ cur_alt = &iface->altsetting[0];
+ }
+
/* Drop all the endpoints in the current alt setting */
for (i = 0; i < cur_alt->desc.bNumEndpoints; i++) {
ret = hcd->driver->drop_endpoint(hcd, udev,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 06af970..35cc8b9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1658,12 +1658,12 @@ static inline void announce_device(struct usb_device *udev) { }
#endif
/**
- * usb_configure_device_otg - FIXME (usbcore-internal)
+ * usb_enumerate_device_otg - FIXME (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
- * Do configuration for On-The-Go devices
+ * Finish enumeration for On-The-Go devices
*/
-static int usb_configure_device_otg(struct usb_device *udev)
+static int usb_enumerate_device_otg(struct usb_device *udev)
{
int err = 0;
@@ -1734,7 +1734,7 @@ fail:
/**
- * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal)
+ * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
* This is only called by usb_new_device() and usb_authorize_device()
@@ -1745,7 +1745,7 @@ fail:
* the string descriptors, as they will be errored out by the device
* until it has been authorized.
*/
-static int usb_configure_device(struct usb_device *udev)
+static int usb_enumerate_device(struct usb_device *udev)
{
int err;
@@ -1769,7 +1769,7 @@ static int usb_configure_device(struct usb_device *udev)
udev->descriptor.iManufacturer);
udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
}
- err = usb_configure_device_otg(udev);
+ err = usb_enumerate_device_otg(udev);
fail:
return err;
}
@@ -1779,8 +1779,8 @@ fail:
* usb_new_device - perform initial device setup (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
- * This is called with devices which have been enumerated, but not yet
- * configured. The device descriptor is available, but not descriptors
+ * This is called with devices which have been detected but not fully
+ * enumerated. The device descriptor is available, but not descriptors
* for any device configuration. The caller must have locked either
* the parent hub (if udev is a normal device) or else the
* usb_bus_list_lock (if udev is a root hub). The parent's pointer to
@@ -1803,8 +1803,8 @@ int usb_new_device(struct usb_device *udev)
if (udev->parent)
usb_autoresume_device(udev->parent);
- usb_detect_quirks(udev); /* Determine quirks */
- err = usb_configure_device(udev); /* detect & probe dev/intfs */
+ usb_detect_quirks(udev);
+ err = usb_enumerate_device(udev); /* Read descriptors */
if (err < 0)
goto fail;
dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
@@ -1849,21 +1849,23 @@ fail:
*/
int usb_deauthorize_device(struct usb_device *usb_dev)
{
- unsigned cnt;
usb_lock_device(usb_dev);
if (usb_dev->authorized == 0)
goto out_unauthorized;
+
usb_dev->authorized = 0;
usb_set_configuration(usb_dev, -1);
+
+ kfree(usb_dev->product);
usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+ kfree(usb_dev->manufacturer);
usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
+ kfree(usb_dev->serial);
usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
- kfree(usb_dev->config);
- usb_dev->config = NULL;
- for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++)
- kfree(usb_dev->rawdescriptors[cnt]);
+
+ usb_destroy_configuration(usb_dev);
usb_dev->descriptor.bNumConfigurations = 0;
- kfree(usb_dev->rawdescriptors);
+
out_unauthorized:
usb_unlock_device(usb_dev);
return 0;
@@ -1873,15 +1875,11 @@ out_unauthorized:
int usb_authorize_device(struct usb_device *usb_dev)
{
int result = 0, c;
+
usb_lock_device(usb_dev);
if (usb_dev->authorized == 1)
goto out_authorized;
- kfree(usb_dev->product);
- usb_dev->product = NULL;
- kfree(usb_dev->manufacturer);
- usb_dev->manufacturer = NULL;
- kfree(usb_dev->serial);
- usb_dev->serial = NULL;
+
result = usb_autoresume_device(usb_dev);
if (result < 0) {
dev_err(&usb_dev->dev,
@@ -1894,10 +1892,18 @@ int usb_authorize_device(struct usb_device *usb_dev)
"authorization: %d\n", result);
goto error_device_descriptor;
}
+
+ kfree(usb_dev->product);
+ usb_dev->product = NULL;
+ kfree(usb_dev->manufacturer);
+ usb_dev->manufacturer = NULL;
+ kfree(usb_dev->serial);
+ usb_dev->serial = NULL;
+
usb_dev->authorized = 1;
- result = usb_configure_device(usb_dev);
+ result = usb_enumerate_device(usb_dev);
if (result < 0)
- goto error_configure;
+ goto error_enumerate;
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
*/
@@ -1912,8 +1918,10 @@ int usb_authorize_device(struct usb_device *usb_dev)
}
}
dev_info(&usb_dev->dev, "authorized to connect\n");
-error_configure:
+
+error_enumerate:
error_device_descriptor:
+ usb_autosuspend_device(usb_dev);
error_autoresume:
out_authorized:
usb_unlock_device(usb_dev); // complements locktree
@@ -3339,6 +3347,9 @@ static void hub_events(void)
USB_PORT_FEAT_C_SUSPEND);
udev = hdev->children[i-1];
if (udev) {
+ /* TRSMRCY = 10 msec */
+ msleep(10);
+
usb_lock_device(udev);
ret = remote_wakeup(hdev->
children[i-1]);
@@ -3684,19 +3695,14 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
usb_enable_interface(udev, intf, true);
ret = 0;
} else {
- /* We've just reset the device, so it will think alt
- * setting 0 is installed. For usb_set_interface() to
- * work properly, we need to set the current alternate
- * interface setting to 0 (or the first alt setting, if
- * the device doesn't have alt setting 0).
+ /* Let the bandwidth allocation function know that this
+ * device has been reset, and it will have to use
+ * alternate setting 0 as the current alternate setting.
*/
- intf->cur_altsetting =
- usb_find_alt_setting(config, i, 0);
- if (!intf->cur_altsetting)
- intf->cur_altsetting =
- &config->intf_cache[i]->altsetting[0];
+ intf->resetting_device = 1;
ret = usb_set_interface(udev, desc->bInterfaceNumber,
desc->bAlternateSetting);
+ intf->resetting_device = 0;
}
if (ret < 0) {
dev_err(&udev->dev, "failed to restore interface %d "
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 1b99484..9bc95fe 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -906,11 +906,11 @@ char *usb_cache_string(struct usb_device *udev, int index)
if (index <= 0)
return NULL;
- buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
+ buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
if (buf) {
len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
if (len > 0) {
- smallbuf = kmalloc(++len, GFP_KERNEL);
+ smallbuf = kmalloc(++len, GFP_NOIO);
if (!smallbuf)
return buf;
memcpy(smallbuf, buf, len);
@@ -1731,7 +1731,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
if (cp) {
nintf = cp->desc.bNumInterfaces;
new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
- GFP_KERNEL);
+ GFP_NOIO);
if (!new_interfaces) {
dev_err(&dev->dev, "Out of memory\n");
return -ENOMEM;
@@ -1740,7 +1740,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
for (; n < nintf; ++n) {
new_interfaces[n] = kzalloc(
sizeof(struct usb_interface),
- GFP_KERNEL);
+ GFP_NOIO);
if (!new_interfaces[n]) {
dev_err(&dev->dev, "Out of memory\n");
ret = -ENOMEM;
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 1547700..5f3908f 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -82,9 +82,13 @@ static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_device *udev; \
+ int retval; \
\
udev = to_usb_device(dev); \
- return sprintf(buf, "%s\n", udev->name); \
+ usb_lock_device(udev); \
+ retval = sprintf(buf, "%s\n", udev->name); \
+ usb_unlock_device(udev); \
+ return retval; \
} \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
@@ -111,6 +115,12 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
case USB_SPEED_HIGH:
speed = "480";
break;
+ case USB_SPEED_VARIABLE:
+ speed = "480";
+ break;
+ case USB_SPEED_SUPER:
+ speed = "5000";
+ break;
default:
speed = "unknown";
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 2fb4204..0daff0d 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -66,9 +66,9 @@ MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
/**
* usb_find_alt_setting() - Given a configuration, find the alternate setting
* for the given interface.
- * @config - the configuration to search (not necessarily the current config).
- * @iface_num - interface number to search in
- * @alt_num - alternate interface setting number to search for.
+ * @config: the configuration to search (not necessarily the current config).
+ * @iface_num: interface number to search in
+ * @alt_num: alternate interface setting number to search for.
*
* Search the configuration's interface cache for the given alt setting.
*/
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 1206a26..2958a12 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -613,7 +613,7 @@ err:
}
EXPORT_SYMBOL_GPL(dbgp_external_startup);
-static int __init ehci_reset_port(int port)
+static int ehci_reset_port(int port)
{
u32 portsc;
u32 delay_time, delay;
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index 58f2203..a62af7b 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -158,6 +158,7 @@ fail:
static int __exit audio_unbind(struct usb_composite_dev *cdev)
{
+ gaudio_cleanup();
return 0;
}
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index c43c89f..df77f61 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -56,13 +56,16 @@ static struct usb_interface_descriptor ac_interface_desc __initdata = {
DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
+ + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
/* B.3.2 Class-Specific AC Interface Descriptor */
static struct uac_ac_header_descriptor_2 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_LENGTH,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_HEADER,
.bcdADC = __constant_cpu_to_le16(0x0100),
- .wTotalLength = __constant_cpu_to_le16(UAC_DT_AC_HEADER_LENGTH),
+ .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
.bInCollection = F_AUDIO_NUM_INTERFACES,
.baInterfaceNr = {
[0] = F_AUDIO_AC_INTERFACE,
@@ -252,12 +255,12 @@ static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
if (!copy_buf)
- return (struct f_audio_buf *)-ENOMEM;
+ return ERR_PTR(-ENOMEM);
copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
if (!copy_buf->buf) {
kfree(copy_buf);
- return (struct f_audio_buf *)-ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
return copy_buf;
@@ -332,7 +335,7 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
list_add_tail(&copy_buf->list, &audio->play_queue);
schedule_work(&audio->playback_work);
copy_buf = f_audio_buffer_alloc(audio_buf_size);
- if (copy_buf < 0)
+ if (IS_ERR(copy_buf))
return -ENOMEM;
}
@@ -576,6 +579,8 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
usb_ep_enable(out_ep, audio->out_desc);
out_ep->driver_data = audio;
audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
+ if (IS_ERR(audio->copy_buf))
+ return -ENOMEM;
/*
* allocate a bunch of read buffers
@@ -787,7 +792,7 @@ int __init audio_bind_config(struct usb_configuration *c)
return status;
add_fail:
- gaudio_cleanup(&audio->card);
+ gaudio_cleanup();
setup_fail:
kfree(audio);
return status;
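The f_audio.c hunks above replace casting -ENOMEM to a pointer, which callers were testing with an ordinary < 0 comparison that cannot work on pointer values, with the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention from <linux/err.h>. A minimal sketch of that idiom; the structure and helper names are invented for illustration:

#include <linux/err.h>
#include <linux/slab.h>

struct buf_demo {
	void *data;
	int len;
};

static struct buf_demo *buf_demo_alloc(int buf_size)
{
	struct buf_demo *buf = kzalloc(sizeof(*buf), GFP_ATOMIC);

	if (!buf)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	buf->data = kzalloc(buf_size, GFP_ATOMIC);
	if (!buf->data) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}
	buf->len = buf_size;
	return buf;
}

static int buf_demo_caller(void)
{
	struct buf_demo *buf = buf_demo_alloc(128);

	if (IS_ERR(buf))
		return PTR_ERR(buf);		/* recover the negative errno */
	/* ... use buf ... */
	kfree(buf->data);
	kfree(buf);
	return 0;
}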
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 0a577d5..d4f0db5 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -358,7 +358,7 @@ done:
* b15: bmType (0 == data)
*/
len = skb->len;
- put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2));
+ put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
/* add a zero-length EEM packet, if needed */
if (padlen)
@@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port,
}
/* validate CRC */
- crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN);
if (header & BIT(14)) {
crc = get_unaligned_le32(skb->data + len
- ETH_FCS_LEN);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 4295601..76496f5 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -29,7 +29,7 @@
#if defined USB_ETH_RNDIS
# undef USB_ETH_RNDIS
#endif
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef CONFIG_USB_G_MULTI_RNDIS
# define USB_ETH_RNDIS y
#endif
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index e220fb8..8b45145 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 4b5dbd0..5fc80a1 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2582,6 +2582,7 @@ err:
hsotg->gadget.dev.driver = NULL;
return ret;
}
+EXPORT_SYMBOL(usb_gadget_register_driver);
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
diff --git a/drivers/usb/gadget/u_audio.c b/drivers/usb/gadget/u_audio.c
index 8252595d..35e0930 100644
--- a/drivers/usb/gadget/u_audio.c
+++ b/drivers/usb/gadget/u_audio.c
@@ -288,6 +288,7 @@ static int gaudio_close_snd_dev(struct gaudio *gau)
return 0;
}
+static struct gaudio *the_card;
/**
* gaudio_setup - setup ALSA interface and preparing for USB transfer
*
@@ -303,6 +304,9 @@ int __init gaudio_setup(struct gaudio *card)
if (ret)
ERROR(card, "we need at least one control device\n");
+ if (!the_card)
+ the_card = card;
+
return ret;
}
@@ -312,9 +316,11 @@ int __init gaudio_setup(struct gaudio *card)
*
* This is called to free all resources allocated by @gaudio_setup().
*/
-void gaudio_cleanup(struct gaudio *card)
+void gaudio_cleanup(void)
{
- if (card)
- gaudio_close_snd_dev(card);
+ if (the_card) {
+ gaudio_close_snd_dev(the_card);
+ the_card = NULL;
+ }
}
diff --git a/drivers/usb/gadget/u_audio.h b/drivers/usb/gadget/u_audio.h
index cc8d159..08ffce3 100644
--- a/drivers/usb/gadget/u_audio.h
+++ b/drivers/usb/gadget/u_audio.h
@@ -51,6 +51,6 @@ struct gaudio {
};
int gaudio_setup(struct gaudio *card);
-void gaudio_cleanup(struct gaudio *card);
+void gaudio_cleanup(void);
#endif /* __U_AUDIO_H */
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 5859522..1ec3857 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -787,9 +787,10 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* start 20 msec resume signaling from this port,
* and make khubd collect PORT_STAT_C_SUSPEND to
- * stop that signaling.
+ * stop that signaling. Use 5 ms extra for safety,
+ * like usb_port_resume() does.
*/
- ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
+ ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
}
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 2c6571c..1937267 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -120,9 +120,26 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
del_timer_sync(&ehci->watchdog);
del_timer_sync(&ehci->iaa_watchdog);
- port = HCS_N_PORTS (ehci->hcs_params);
spin_lock_irq (&ehci->lock);
+ /* Once the controller is stopped, port resumes that are already
+ * in progress won't complete. Hence if remote wakeup is enabled
+ * for the root hub and any ports are in the middle of a resume or
+ * remote wakeup, we must fail the suspend.
+ */
+ if (hcd->self.root_hub->do_remote_wakeup) {
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ if (ehci->reset_done[port] != 0) {
+ spin_unlock_irq(&ehci->lock);
+ ehci_dbg(ehci, "suspend failed because "
+ "port %d is resuming\n",
+ port + 1);
+ return -EBUSY;
+ }
+ }
+ }
+
/* stop schedules, clean any completed work */
if (HC_IS_RUNNING(hcd->state)) {
ehci_quiesce (ehci);
@@ -138,6 +155,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
*/
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
+ port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
@@ -178,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
if (hostpc_reg) {
u32 t3;
+ spin_unlock_irq(&ehci->lock);
msleep(5);/* 5ms for HCD enter low pwr mode */
+ spin_lock_irq(&ehci->lock);
t3 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
t3 = ehci_readl(ehci, hostpc_reg);
@@ -886,17 +906,18 @@ static int ehci_hub_control (
if ((temp & PORT_PE) == 0
|| (temp & PORT_RESET) != 0)
goto error;
- ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
/* After above check the port must be connected.
* Set appropriate bit thus could put phy into low power
* mode if we have hostpc feature
*/
+ temp &= ~PORT_WKCONN_E;
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
if (hostpc_reg) {
- temp &= ~PORT_WKCONN_E;
- temp |= (PORT_WKDISC_E | PORT_WKOC_E);
- ehci_writel(ehci, temp | PORT_SUSPEND,
- status_reg);
+ spin_unlock_irqrestore(&ehci->lock, flags);
msleep(5);/* 5ms for HCD enter low pwr mode */
+ spin_lock_irqsave(&ehci->lock, flags);
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp1 | HOSTPC_PHCD,
hostpc_reg);
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 12f1ad2..74d07f4 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -37,7 +37,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio.h>
-#include <mach/usb.h>
+#include <plat/usb.h>
/*
* OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index a427d3b..8952177 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -849,9 +849,10 @@ qh_make (
* But interval 1 scheduling is simpler, and
* includes high bandwidth.
*/
- dbg ("intr period %d uframes, NYET!",
- urb->interval);
- goto done;
+ urb->interval = 1;
+ } else if (qh->period > ehci->periodic_size) {
+ qh->period = ehci->periodic_size;
+ urb->interval = qh->period << 3;
}
} else {
int think_time;
@@ -874,6 +875,10 @@ qh_make (
usb_calc_bus_time (urb->dev->speed,
is_input, 0, max_packet (maxp)));
qh->period = urb->interval;
+ if (qh->period > ehci->periodic_size) {
+ qh->period = ehci->periodic_size;
+ urb->interval = qh->period;
+ }
}
}
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 0951818..78e7c3c 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -242,9 +242,10 @@ err:
static void fhci_usb_free(void *lld)
{
struct fhci_usb *usb = lld;
- struct fhci_hcd *fhci = usb->fhci;
+ struct fhci_hcd *fhci;
if (usb) {
+ fhci = usb->fhci;
fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
fhci_ep0_free(usb);
kfree(usb->actual_frame);
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index 00a2985..ff43747 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -37,7 +37,7 @@ static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
pkt->info = 0;
pkt->priv_data = NULL;
- cq_put(usb->ep0->empty_frame_Q, pkt);
+ cq_put(&usb->ep0->empty_frame_Q, pkt);
}
/* confirm submitted packet */
@@ -57,7 +57,7 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
if ((td->data + td->actual_len) && trans_len)
memcpy(td->data + td->actual_len, pkt->data,
trans_len);
- cq_put(usb->ep0->dummy_packets_Q, pkt->data);
+ cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
}
recycle_frame(usb, pkt);
@@ -213,7 +213,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
}
/* update frame object fields before transmitting */
- pkt = cq_get(usb->ep0->empty_frame_Q);
+ pkt = cq_get(&usb->ep0->empty_frame_Q);
if (!pkt) {
fhci_dbg(usb->fhci, "there is no empty frame\n");
return -1;
@@ -222,7 +222,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
pkt->info = 0;
if (data == NULL) {
- data = cq_get(usb->ep0->dummy_packets_Q);
+ data = cq_get(&usb->ep0->dummy_packets_Q);
BUG_ON(!data);
pkt->info = PKT_DUMMY_PACKET;
}
@@ -246,7 +246,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
list_del_init(&td->frame_lh);
td->status = USB_TD_OK;
if (pkt->info & PKT_DUMMY_PACKET)
- cq_put(usb->ep0->dummy_packets_Q, pkt->data);
+ cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
recycle_frame(usb, pkt);
usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD);
fhci_err(usb->fhci, "host transaction failed\n");
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index b403322..e123289 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -105,34 +105,34 @@ void fhci_ep0_free(struct fhci_usb *usb)
if (ep->td_base)
cpm_muram_free(cpm_muram_offset(ep->td_base));
- if (ep->conf_frame_Q) {
- size = cq_howmany(ep->conf_frame_Q);
+ if (kfifo_initialized(&ep->conf_frame_Q)) {
+ size = cq_howmany(&ep->conf_frame_Q);
for (; size; size--) {
- struct packet *pkt = cq_get(ep->conf_frame_Q);
+ struct packet *pkt = cq_get(&ep->conf_frame_Q);
kfree(pkt);
}
- cq_delete(ep->conf_frame_Q);
+ cq_delete(&ep->conf_frame_Q);
}
- if (ep->empty_frame_Q) {
- size = cq_howmany(ep->empty_frame_Q);
+ if (kfifo_initialized(&ep->empty_frame_Q)) {
+ size = cq_howmany(&ep->empty_frame_Q);
for (; size; size--) {
- struct packet *pkt = cq_get(ep->empty_frame_Q);
+ struct packet *pkt = cq_get(&ep->empty_frame_Q);
kfree(pkt);
}
- cq_delete(ep->empty_frame_Q);
+ cq_delete(&ep->empty_frame_Q);
}
- if (ep->dummy_packets_Q) {
- size = cq_howmany(ep->dummy_packets_Q);
+ if (kfifo_initialized(&ep->dummy_packets_Q)) {
+ size = cq_howmany(&ep->dummy_packets_Q);
for (; size; size--) {
- u8 *buff = cq_get(ep->dummy_packets_Q);
+ u8 *buff = cq_get(&ep->dummy_packets_Q);
kfree(buff);
}
- cq_delete(ep->dummy_packets_Q);
+ cq_delete(&ep->dummy_packets_Q);
}
kfree(ep);
@@ -175,10 +175,9 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
ep->td_base = cpm_muram_addr(ep_offset);
/* zero all queue pointers */
- ep->conf_frame_Q = cq_new(ring_len + 2);
- ep->empty_frame_Q = cq_new(ring_len + 2);
- ep->dummy_packets_Q = cq_new(ring_len + 2);
- if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) {
+ if (cq_new(&ep->conf_frame_Q, ring_len + 2) ||
+ cq_new(&ep->empty_frame_Q, ring_len + 2) ||
+ cq_new(&ep->dummy_packets_Q, ring_len + 2)) {
err_for = "frame_queues";
goto err;
}
@@ -199,8 +198,8 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
err_for = "buffer";
goto err;
}
- cq_put(ep->empty_frame_Q, pkt);
- cq_put(ep->dummy_packets_Q, buff);
+ cq_put(&ep->empty_frame_Q, pkt);
+ cq_put(&ep->dummy_packets_Q, buff);
}
/* we put the endpoint parameter RAM right behind the TD ring */
@@ -319,7 +318,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb)
if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W))
continue;
- pkt = cq_get(ep->conf_frame_Q);
+ pkt = cq_get(&ep->conf_frame_Q);
if (!pkt)
fhci_err(usb->fhci, "no frame to confirm\n");
@@ -460,9 +459,9 @@ u32 fhci_host_transaction(struct fhci_usb *usb,
out_be16(&td->length, pkt->len);
/* put the frame to the confirmation queue */
- cq_put(ep->conf_frame_Q, pkt);
+ cq_put(&ep->conf_frame_Q, pkt);
- if (cq_howmany(ep->conf_frame_Q) == 1)
+ if (cq_howmany(&ep->conf_frame_Q) == 1)
out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
return 0;
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 7116284..72dae1c 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -423,9 +423,9 @@ struct endpoint {
struct usb_td __iomem *td_base; /* first TD in the ring */
struct usb_td __iomem *conf_td; /* next TD for confirm after transac */
struct usb_td __iomem *empty_td;/* next TD for new transaction req. */
- struct kfifo *empty_frame_Q; /* Empty frames list to use */
- struct kfifo *conf_frame_Q; /* frames passed to TDs,waiting for tx */
- struct kfifo *dummy_packets_Q;/* dummy packets for the CRC overun */
+ struct kfifo empty_frame_Q; /* Empty frames list to use */
+ struct kfifo conf_frame_Q; /* frames passed to TDs, waiting for tx */
+ struct kfifo dummy_packets_Q; /* dummy packets for the CRC overrun */
bool already_pushed_dummy_bd;
};
@@ -493,9 +493,9 @@ static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci)
}
/* fifo of pointers */
-static inline struct kfifo *cq_new(int size)
+static inline int cq_new(struct kfifo *fifo, int size)
{
- return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL);
+ return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL);
}
static inline void cq_delete(struct kfifo *kfifo)
@@ -505,19 +505,19 @@ static inline void cq_delete(struct kfifo *kfifo)
static inline unsigned int cq_howmany(struct kfifo *kfifo)
{
- return __kfifo_len(kfifo) / sizeof(void *);
+ return kfifo_len(kfifo) / sizeof(void *);
}
static inline int cq_put(struct kfifo *kfifo, void *p)
{
- return __kfifo_put(kfifo, (void *)&p, sizeof(p));
+ return kfifo_in(kfifo, (void *)&p, sizeof(p));
}
static inline void *cq_get(struct kfifo *kfifo)
{
void *p = NULL;
- __kfifo_get(kfifo, (void *)&p, sizeof(p));
+ kfifo_out(kfifo, (void *)&p, sizeof(p));
return p;
}
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 73352f3..4297165 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2270,10 +2270,10 @@ static int isp1362_mem_config(struct usb_hcd *hcd)
dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
istl_size / 2, istl_size, 0, istl_size / 2);
- dev_info(hcd->self.controller, " INTL: %4d * (%3lu+8): %4d @ $%04x\n",
+ dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
intl_size, istl_size);
- dev_info(hcd->self.controller, " ATL : %4d * (%3lu+8): %4d @ $%04x\n",
+ dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
atl_buffers, atl_blksize - PTD_HEADER_SIZE,
atl_size, istl_size + intl_size);
dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
@@ -2697,6 +2697,8 @@ static int __init isp1362_probe(struct platform_device *pdev)
void __iomem *data_reg;
int irq;
int retval = 0;
+ struct resource *irq_res;
+ unsigned int irq_flags = 0;
/* basic sanity checks first. board-specific init logic should
* have initialized this the three resources and probably board
@@ -2710,11 +2712,12 @@ static int __init isp1362_probe(struct platform_device *pdev)
data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- irq = platform_get_irq(pdev, 0);
- if (!addr || !data || irq < 0) {
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!addr || !data || !irq_res) {
retval = -ENODEV;
goto err1;
}
+ irq = irq_res->start;
#ifdef CONFIG_USB_HCD_DMA
if (pdev->dev.dma_mask) {
@@ -2781,12 +2784,16 @@ static int __init isp1362_probe(struct platform_device *pdev)
}
#endif
-#ifdef CONFIG_ARM
- if (isp1362_hcd->board)
- set_irq_type(irq, isp1362_hcd->board->int_act_high ? IRQT_RISING : IRQT_FALLING);
-#endif
+ if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
+ irq_flags |= IRQF_TRIGGER_RISING;
+ if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
+ irq_flags |= IRQF_TRIGGER_FALLING;
+ if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
+ irq_flags |= IRQF_TRIGGER_HIGH;
+ if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
+ irq_flags |= IRQF_TRIGGER_LOW;
- retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
if (retval != 0)
goto err6;
pr_info("%s, irq %d\n", hcd->product_desc, irq);
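
With the change above, isp1362_probe() no longer hard-codes IRQF_TRIGGER_LOW (or the ARM-only set_irq_type() call) and instead translates the IORESOURCE_IRQ_* polarity bits of the IRQ resource into IRQF_TRIGGER_* flags for usb_add_hcd(). The board code therefore has to advertise the trigger in its resource table; a hypothetical example, with the IRQ number and resource layout invented for illustration:

static struct resource isp1362_hcd_resources[] = {
	/* [0] data register window and [1] address register window omitted */
	[2] = {
		.start	= 42,		/* hypothetical interrupt line */
		.end	= 42,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	},
};
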
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 9600a58..27b8f7c 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1039,12 +1039,12 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
if (!nakcount && (dw3 & DW3_QTD_ACTIVE)) {
u32 buffstatus;
- /* XXX
+ /*
* NAKs are handled in HW by the chip. Usually if the
* device is not able to send data fast enough.
- * This did not trigger for a long time now.
+ * This happens mostly on slower hardware.
*/
- printk(KERN_ERR "Reloading ptd %p/%p... qh %p readed: "
+ printk(KERN_NOTICE "Reloading ptd %p/%p... qh %p read: "
"%d of %zu done: %08x cur: %08x\n", qtd,
urb, qh, PTD_XFERRED_LENGTH(dw3),
qtd->length, done_map,
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index b7a661c..bee558ae 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -35,7 +35,9 @@
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/irq.h>
+#include <asm/cacheflush.h>
#include "../core/hcd.h"
#include "r8a66597.h"
@@ -216,8 +218,17 @@ static void disable_controller(struct r8a66597 *r8a66597)
{
int port;
+ /* disable interrupts */
r8a66597_write(r8a66597, 0, INTENB0);
- r8a66597_write(r8a66597, 0, INTSTS0);
+ r8a66597_write(r8a66597, 0, INTENB1);
+ r8a66597_write(r8a66597, 0, BRDYENB);
+ r8a66597_write(r8a66597, 0, BEMPENB);
+ r8a66597_write(r8a66597, 0, NRDYENB);
+
+ /* clear status */
+ r8a66597_write(r8a66597, 0, BRDYSTS);
+ r8a66597_write(r8a66597, 0, NRDYSTS);
+ r8a66597_write(r8a66597, 0, BEMPSTS);
for (port = 0; port < r8a66597->max_root_hub; port++)
r8a66597_disable_port(r8a66597, port);
@@ -811,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
}
+static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
+ int status)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+ if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+ void *ptr;
+
+ for (ptr = urb->transfer_buffer;
+ ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+ ptr += PAGE_SIZE)
+ flush_dcache_page(virt_to_page(ptr));
+ }
+
+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
+ spin_unlock(&r8a66597->lock);
+ usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
+ spin_lock(&r8a66597->lock);
+}
+
/* this function must be called with interrupt disabled */
static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
{
@@ -829,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
list_del(&td->queue);
kfree(td);
- if (urb) {
- usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597),
- urb);
+ if (urb)
+ r8a66597_urb_done(r8a66597, urb, -ENODEV);
- spin_unlock(&r8a66597->lock);
- usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
- -ENODEV);
- spin_lock(&r8a66597->lock);
- }
break;
}
}
@@ -997,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
/* this function must be called with interrupt disabled */
static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
u16 syssts)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
{
if (syssts == SE0) {
r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1014,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
}
+ spin_unlock(&r8a66597->lock);
usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
+ spin_lock(&r8a66597->lock);
}
/* this function must be called with interrupt disabled */
@@ -1274,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
if (usb_pipeisoc(urb->pipe))
urb->start_frame = r8a66597_get_frame(hcd);
- usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
- spin_unlock(&r8a66597->lock);
- usb_hcd_giveback_urb(hcd, urb, status);
- spin_lock(&r8a66597->lock);
+ r8a66597_urb_done(r8a66597, urb, status);
}
if (restart) {
@@ -2466,6 +2492,12 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
r8a66597->rh_timer.data = (unsigned long)r8a66597;
r8a66597->reg = (unsigned long)reg;
+ /* make sure no interrupts are pending */
+ ret = r8a66597_clock_enable(r8a66597);
+ if (ret < 0)
+ goto clean_up3;
+ disable_controller(r8a66597);
+
for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
init_timer(&r8a66597->td_timer[i]);
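
Both r8a66597_urb_done() and r8a66597_check_syssts() above drop r8a66597->lock around the usbcore callback and re-take it before returning; the __releases()/__acquires() markers are sparse lock-context annotations documenting that temporary imbalance. A generic sketch of the idiom with invented names (struct my_hcd_priv, giveback_locked), not code from this driver:

struct my_hcd_priv {			/* hypothetical private data */
	spinlock_t lock;
	struct usb_hcd *hcd;
};

/* Entered with priv->lock held; the lock is dropped around the callback
 * because the URB completion handler may call back into the HCD. */
static void giveback_locked(struct my_hcd_priv *priv, struct urb *urb,
			    int status)
__releases(priv->lock)
__acquires(priv->lock)
{
	usb_hcd_unlink_urb_from_ep(priv->hcd, urb);
	spin_unlock(&priv->lock);
	usb_hcd_giveback_urb(priv->hcd, urb, status);
	spin_lock(&priv->lock);
}
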
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 5cd0e48..99cd00f 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -749,7 +749,20 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
spin_lock_irq(&uhci->lock);
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
rc = -ESHUTDOWN;
- else if (!uhci->dead)
+ else if (uhci->dead)
+ ; /* Dead controllers tell no tales */
+
+ /* Once the controller is stopped, port resumes that are already
+ * in progress won't complete. Hence if remote wakeup is enabled
+ * for the root hub and any ports are in the middle of a resume or
+ * remote wakeup, we must fail the suspend.
+ */
+ else if (hcd->self.root_hub->do_remote_wakeup &&
+ uhci->resuming_ports) {
+ dev_dbg(uhci_dev(uhci), "suspend failed because a port "
+ "is resuming\n");
+ rc = -EBUSY;
+ } else
suspend_rh(uhci, UHCI_RH_SUSPENDED);
spin_unlock_irq(&uhci->lock);
return rc;
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 885b585..8270055 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
/* Port received a wakeup request */
set_bit(port, &uhci->resuming_ports);
uhci->ports_timeout = jiffies +
- msecs_to_jiffies(20);
+ msecs_to_jiffies(25);
/* Make sure we see the port again
* after the resuming period is over. */
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1d8e39a..1eb9e41 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -60,6 +60,7 @@
static struct usb_device_id appledisplay_table [] = {
{ APPLEDISPLAY_DEVICE(0x9218) },
{ APPLEDISPLAY_DEVICE(0x9219) },
+ { APPLEDISPLAY_DEVICE(0x921c) },
{ APPLEDISPLAY_DEVICE(0x921d) },
/* Terminating entry */
@@ -72,8 +73,8 @@ struct appledisplay {
struct usb_device *udev; /* usb device */
struct urb *urb; /* usb request block */
struct backlight_device *bd; /* backlight device */
- char *urbdata; /* interrupt URB data buffer */
- char *msgdata; /* control message data buffer */
+ u8 *urbdata; /* interrupt URB data buffer */
+ u8 *msgdata; /* control message data buffer */
struct delayed_work work;
int button_pressed;
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 602ee05..59860b3 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -167,7 +167,7 @@ static int emi62_load_firmware (struct usb_device *dev)
err("%s - error loading firmware: error = %d", __func__, err);
goto wraperr;
}
- } while (i > 0);
+ } while (rec);
/* Assert reset (stop the CPU in the EMI) */
err = emi62_set_reset(dev,1);
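
The emi62 change above terminates the firmware download loop on the record pointer itself instead of a leftover byte counter, so the walk stops exactly at the last record. Assuming the download iterates ihex records via the <linux/ihex.h> helpers, with fw being the struct firmware returned by request_ihex_firmware() (context not shown in this hunk), the corrected pattern is roughly:

	const struct ihex_binrec *rec = (const struct ihex_binrec *)fw->data;

	do {
		/* ... transfer be16_to_cpu(rec->len) bytes from rec->data ... */
		rec = ihex_next_binrec(rec);	/* NULL after the last record */
	} while (rec);		/* was "while (i > 0)", which walked past the end */
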
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 0025847..8b37a4b 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = {
{ USB_DEVICE(0x0711, 0x0902) },
{ USB_DEVICE(0x0711, 0x0903) },
{ USB_DEVICE(0x0711, 0x0918) },
+ { USB_DEVICE(0x0711, 0x0920) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index fe4934d..ad26e65 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -29,6 +29,8 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{
void __iomem *fifo = hw_ep->fifo;
void __iomem *epio = hw_ep->regs;
+ u8 epnum = hw_ep->epnum;
+ u16 dma_reg = 0;
prefetch((u8 *)src);
@@ -39,67 +41,113 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
dump_fifo_data(src, len);
- if (unlikely((unsigned long)src & 0x01))
- outsw_8((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
- else
- outsw((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
-}
+ if (!ANOMALY_05000380 && epnum != 0) {
+ flush_dcache_range((unsigned int)src,
+ (unsigned int)(src + len));
+
+ /* Setup DMA address register */
+ dma_reg = (u16) ((u32) src & 0xFFFF);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
+ SSYNC();
+
+ dma_reg = (u16) (((u32) src >> 16) & 0xFFFF);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
+ SSYNC();
+
+ /* Setup DMA count register */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
+ SSYNC();
+
+ /* Enable the DMA */
+ dma_reg = (epnum << 4) | DMA_ENA | INT_ENA | DIRECTION;
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
+ SSYNC();
+
+ /* Wait for completion */
+ while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
+ cpu_relax();
+
+ /* acknowledge dma interrupt */
+ bfin_write_USB_DMA_INTERRUPT(1 << epnum);
+ SSYNC();
+
+ /* Reset DMA */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
+ SSYNC();
+ } else {
+ SSYNC();
+
+ if (unlikely((unsigned long)src & 0x01))
+ outsw_8((unsigned long)fifo, src,
+ len & 0x01 ? (len >> 1) + 1 : len >> 1);
+ else
+ outsw((unsigned long)fifo, src,
+ len & 0x01 ? (len >> 1) + 1 : len >> 1);
+ }
+}
/*
* Unload an endpoint's FIFO
*/
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
void __iomem *fifo = hw_ep->fifo;
-
-#ifdef CONFIG_BF52x
u8 epnum = hw_ep->epnum;
u16 dma_reg = 0;
- invalidate_dcache_range((unsigned int)dst,
- (unsigned int)(dst + len));
+ if (ANOMALY_05000467 && epnum != 0) {
- /* Setup DMA address register */
- dma_reg = (u16) ((u32) dst & 0xFFFF);
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
- SSYNC();
+ invalidate_dcache_range((unsigned int)dst,
+ (unsigned int)(dst + len));
- dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF);
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
- SSYNC();
+ /* Setup DMA address register */
+ dma_reg = (u16) ((u32) dst & 0xFFFF);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
+ SSYNC();
- /* Setup DMA count register */
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
- SSYNC();
+ dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
+ SSYNC();
- /* Enable the DMA */
- dma_reg = (epnum << 4) | DMA_ENA | INT_ENA;
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
- SSYNC();
+ /* Setup DMA count register */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
+ SSYNC();
- /* Wait for compelete */
- while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
- cpu_relax();
+ /* Enable the DMA */
+ dma_reg = (epnum << 4) | DMA_ENA | INT_ENA;
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
+ SSYNC();
- /* acknowledge dma interrupt */
- bfin_write_USB_DMA_INTERRUPT(1 << epnum);
- SSYNC();
+ /* Wait for completion */
+ while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
+ cpu_relax();
- /* Reset DMA */
- bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
- SSYNC();
-#else
- if (unlikely((unsigned long)dst & 0x01))
- insw_8((unsigned long)fifo, dst,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
- else
- insw((unsigned long)fifo, dst,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
-#endif
+ /* acknowledge dma interrupt */
+ bfin_write_USB_DMA_INTERRUPT(1 << epnum);
+ SSYNC();
+ /* Reset DMA */
+ bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
+ SSYNC();
+ } else {
+ SSYNC();
+ /* Read the last byte of packet with odd size from address fifo + 4
+ * to trigger 1 byte access to EP0 FIFO.
+ */
+ if (len == 1)
+ *dst = (u8)inw((unsigned long)fifo + 4);
+ else {
+ if (unlikely((unsigned long)dst & 0x01))
+ insw_8((unsigned long)fifo, dst, len >> 1);
+ else
+ insw((unsigned long)fifo, dst, len >> 1);
+
+ if (len & 0x01)
+ *(dst + len - 1) = (u8)inw((unsigned long)fifo + 4);
+ }
+ }
DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
'R', hw_ep->epnum, fifo, len, dst);
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
index 10b7d75..bd9352a 100644
--- a/drivers/usb/musb/blackfin.h
+++ b/drivers/usb/musb/blackfin.h
@@ -69,7 +69,6 @@ static void dump_fifo_data(u8 *buf, u16 len)
#define dump_fifo_data(buf, len) do {} while (0)
#endif
-#ifdef CONFIG_BF52x
#define USB_DMA_BASE USB_DMA_INTERRUPT
#define USB_DMAx_CTRL 0x04
@@ -79,7 +78,6 @@ static void dump_fifo_data(u8 *buf, u16 len)
#define USB_DMAx_COUNT_HIGH 0x14
#define USB_DMA_REG(ep, reg) (USB_DMA_BASE + 0x20 * ep + reg)
-#endif
/* Almost 1 second */
#define TIMER_DELAY (1 * HZ)
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index ef2332a..a44a450 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1154,8 +1154,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
struct musb_hw_ep *hw_ep = NULL;
u32 rx, tx;
int i, index;
+ unsigned long flags;
cppi = container_of(musb->dma_controller, struct cppi, controller);
+ if (cppi->irq)
+ spin_lock_irqsave(&musb->lock, flags);
tibase = musb->ctrl_base;
@@ -1285,6 +1288,9 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
/* write to CPPI EOI register to re-enable interrupts */
musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+ if (cppi->irq)
+ spin_unlock_irqrestore(&musb->lock, flags);
+
return IRQ_HANDLED;
}
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index e16ff60..6691381 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -42,7 +42,7 @@
#include "musb_core.h"
#ifdef CONFIG_MACH_DAVINCI_EVM
-#define GPIO_nVBUS_DRV 144
+#define GPIO_nVBUS_DRV 160
#endif
#include "davinci.h"
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index bfe08f4..5eb9318 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1319,7 +1319,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
#endif
u8 reg;
char *type;
- char aInfo[78], aRevision[32], aDate[12];
+ char aInfo[90], aRevision[32], aDate[12];
void __iomem *mbase = musb->mregs;
int status = 0;
int i;
@@ -1521,6 +1521,14 @@ irqreturn_t musb_interrupt(struct musb *musb)
(devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
musb->int_usb, musb->int_tx, musb->int_rx);
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (is_otg_enabled(musb) || is_peripheral_enabled(musb))
+ if (!musb->gadget_driver) {
+ DBG(5, "No gadget driver loaded\n");
+ return IRQ_HANDLED;
+ }
+#endif
+
/* the core can interrupt us for multiple reasons; docs have
* a generic interrupt flowchart to follow
*/
@@ -2139,7 +2147,7 @@ static int __init musb_probe(struct platform_device *pdev)
return musb_init_controller(dev, irq, base);
}
-static int __devexit musb_remove(struct platform_device *pdev)
+static int __exit musb_remove(struct platform_device *pdev)
{
struct musb *musb = dev_to_musb(&pdev->dev);
void __iomem *ctrl_base = musb->ctrl_base;
@@ -2231,7 +2239,7 @@ static struct platform_driver musb_driver = {
.owner = THIS_MODULE,
.pm = MUSB_DEV_PM_OPS,
},
- .remove = __devexit_p(musb_remove),
+ .remove = __exit_p(musb_remove),
.shutdown = musb_shutdown,
};
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index c49b9ba..cbcf14a2 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -309,7 +309,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
size_t request_size;
/* setup DMA, then program endpoint CSR */
- request_size = min(request->length,
+ request_size = min_t(size_t, request->length,
musb_ep->dma->max_len);
if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
@@ -319,7 +319,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
musb_ep->dma->desired_mode,
- request->dma, request_size);
+ request->dma + request->actual, request_size);
if (use_dma) {
if (musb_ep->dma->desired_mode == 0) {
/*
@@ -515,12 +515,12 @@ void musb_g_tx(struct musb *musb, u8 epnum)
if (csr & MUSB_TXCSR_FIFONOTEMPTY)
return;
- if (!musb_ep->desc) {
+ request = musb_ep->desc ? next_request(musb_ep) : NULL;
+ if (!request) {
DBG(4, "%s idle now\n",
musb_ep->end_point.name);
return;
- } else
- request = next_request(musb_ep);
+ }
}
txstate(musb, to_musb_request(request));
@@ -746,6 +746,8 @@ void musb_g_rx(struct musb *musb, u8 epnum)
musb_ep_select(mbase, epnum);
request = next_request(musb_ep);
+ if (!request)
+ return;
csr = musb_readw(epio, MUSB_RXCSR);
dma = is_dma_capable() ? musb_ep->dma : NULL;
@@ -1731,6 +1733,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
spin_lock_irqsave(&musb->lock, flags);
otg_set_peripheral(musb->xceiv, &musb->g);
+ musb->xceiv->state = OTG_STATE_B_IDLE;
musb->is_active = 1;
/* FIXME this ignores the softconnect flag. Drivers are
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 8fba3f1..53d0645 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -664,7 +664,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
break;
default:
- ERR("SetupEnd came in a wrong ep0stage %s",
+ ERR("SetupEnd came in a wrong ep0stage %s\n",
decode_ep0stage(musb->ep0_state));
}
csr = musb_readw(regs, MUSB_CSR0);
@@ -787,12 +787,18 @@ setup:
handled = service_zero_data_request(
musb, &setup);
+ /*
+ * We're expecting no data in any case, so
+ * always set the DATAEND bit -- doing this
+ * here helps avoid SetupEnd interrupt coming
+ * in the idle stage when we're stalling...
+ */
+ musb->ackpend |= MUSB_CSR0_P_DATAEND;
+
/* status stage might be immediate */
- if (handled > 0) {
- musb->ackpend |= MUSB_CSR0_P_DATAEND;
+ if (handled > 0)
musb->ep0_state =
MUSB_EP0_STAGE_STATUSIN;
- }
break;
/* sequence #1 (IN to host), includes GET_STATUS
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index de56b3d..3d2d3e5 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -44,6 +44,7 @@ config ISP1301_OMAP
config USB_ULPI
bool "Generic ULPI Transceiver Driver"
depends on ARM
+ select USB_OTG_UTILS
help
Enable this to support ULPI connected USB OTG transceivers which
are likely found on embedded boards.
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index d54460a..78a2097 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -843,7 +843,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
static struct platform_device *otg_dev;
-static int otg_init(struct isp1301 *isp)
+static int isp1301_otg_init(struct isp1301 *isp)
{
u32 l;
@@ -1275,7 +1275,7 @@ static int __exit isp1301_remove(struct i2c_client *i2c)
static int isp1301_otg_enable(struct isp1301 *isp)
{
power_up(isp);
- otg_init(isp);
+ isp1301_otg_init(isp);
/* NOTE: since we don't change this, this provides
* a few more interrupts than are strictly needed.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index f99498f..7638828 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -44,12 +44,13 @@
#include <linux/serial.h>
#include <linux/usb/serial.h>
#include "ftdi_sio.h"
+#include "ftdi_sio_ids.h"
/*
* Version Information
*/
#define DRIVER_VERSION "v1.5.0"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
static int debug;
@@ -144,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
+/*
+ * Device ID not listed? Test via module params product/vendor or
+ * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
+ */
static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -551,9 +557,16 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
/*
- * Due to many user requests for multiple ELV devices we enable
- * them by default.
+ * ELV devices:
*/
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
@@ -570,11 +583,17 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -696,6 +715,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 4586a24..b0e0d64 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -1,7 +1,10 @@
/*
- * Definitions for the FTDI USB Single Port Serial Converter -
+ * Driver definitions for the FTDI USB Single Port Serial Converter -
* known as FTDI_SIO (Serial Input/Output application of the chipset)
*
+ * For USB vendor/product IDs (VID/PID), please see ftdi_sio_ids.h
+ *
+ *
* The example I have is known as the USC-1000 which is available from
* http://www.dse.co.nz - cat no XH4214 It looks similar to this:
* http://www.dansdata.com/usbser.htm but I can't be sure There are other
@@ -17,880 +20,7 @@
* Bill Ryder - bryder@sgi.com formerly of Silicon Graphics, Inc.- wrote the
* FTDI_SIO implementation.
*
- * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
- * from Rudolf Gugler
- *
- */
-
-#define FTDI_VID 0x0403 /* Vendor Id */
-#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
-#define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */
-#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
-#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
-#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
-#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
-#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
-#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */
-#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
-#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 */
-
-/* Larsen and Brusgaard AltiTrack/USBtrack */
-#define LARSENBRUSGAARD_VID 0x0FD8
-#define LB_ALTITRACK_PID 0x0001
-
-/* www.canusb.com Lawicel CANUSB device */
-#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
-
-/* AlphaMicro Components AMC-232USB01 device */
-#define FTDI_AMC232_PID 0xFF00 /* Product Id */
-
-/* www.candapter.com Ewert Energy Systems CANdapter device */
-#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
-
-/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
-/* the VID is the standard ftdi vid (FTDI_VID) */
-#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
-#define FTDI_SCS_DEVICE_1_PID 0xD011 /* SCS Tracker / DSP TNC */
-#define FTDI_SCS_DEVICE_2_PID 0xD012
-#define FTDI_SCS_DEVICE_3_PID 0xD013
-#define FTDI_SCS_DEVICE_4_PID 0xD014
-#define FTDI_SCS_DEVICE_5_PID 0xD015
-#define FTDI_SCS_DEVICE_6_PID 0xD016
-#define FTDI_SCS_DEVICE_7_PID 0xD017
-
-/* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */
-#define FTDI_ACTZWAVE_PID 0xF2D0
-
-
-/* www.starting-point-systems.com µChameleon device */
-#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */
-
-/* www.irtrans.de device */
-#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
-
-
-/* www.thoughttechnology.com/ TT-USB provide with procomp use ftdi_sio */
-#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
-
-/* iPlus device */
-#define FTDI_IPLUS_PID 0xD070 /* Product Id */
-#define FTDI_IPLUS2_PID 0xD071 /* Product Id */
-
-/* DMX4ALL DMX Interfaces */
-#define FTDI_DMX4ALL 0xC850
-
-/* OpenDCC (www.opendcc.de) product id */
-#define FTDI_OPENDCC_PID 0xBFD8
-#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
-#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
-#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
-
-/* Sprog II (Andrew Crosland's SprogII DCC interface) */
-#define FTDI_SPROG_II 0xF0C8
-
-/* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */
-/* they use the ftdi chipset for the USB interface and the vendor id is the same */
-#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
-#define FTDI_XF_634_PID 0xFC09 /* 634: 20x4 Character Display */
-#define FTDI_XF_547_PID 0xFC0A /* 547: Two line Display */
-#define FTDI_XF_633_PID 0xFC0B /* 633: 16x2 Character Display with Keys */
-#define FTDI_XF_631_PID 0xFC0C /* 631: 20x2 Character Display */
-#define FTDI_XF_635_PID 0xFC0D /* 635: 20x4 Character Display */
-#define FTDI_XF_640_PID 0xFC0E /* 640: Two line Display */
-#define FTDI_XF_642_PID 0xFC0F /* 642: Two line Display */
-
-/* Video Networks Limited / Homechoice in the UK use an ftdi-based device for their 1Mb */
-/* broadband internet service. The following PID is exhibited by the usb device supplied */
-/* (the VID is the standard ftdi vid (FTDI_VID) */
-#define FTDI_VNHCPCUSB_D_PID 0xfe38 /* Product Id */
-
-/*
- * PCDJ use ftdi based dj-controllers. The following PID is for their DAC-2 device
- * http://www.pcdjhardware.com/DAC2.asp (PID sent by Wouter Paesen)
- * (the VID is the standard ftdi vid (FTDI_VID) */
-#define FTDI_PCDJ_DAC2_PID 0xFA88
-
-/*
- * The following are the values for the Matrix Orbital LCD displays,
- * which are the FT232BM ( similar to the 8U232AM )
- */
-#define FTDI_MTXORB_0_PID 0xFA00 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_1_PID 0xFA01 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_2_PID 0xFA02 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_3_PID 0xFA03 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_4_PID 0xFA04 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_5_PID 0xFA05 /* Matrix Orbital Product Id */
-#define FTDI_MTXORB_6_PID 0xFA06 /* Matrix Orbital Product Id */
-
-/* OOCDlink by Joern Kaipf <joernk@web.de>
- * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
-#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
-
-/*
- * The following are the values for the Matrix Orbital FTDI Range
- * Anything in this range will use an FT232RL.
- */
-#define MTXORB_VID 0x1B3D
-#define MTXORB_FTDI_RANGE_0100_PID 0x0100
-#define MTXORB_FTDI_RANGE_0101_PID 0x0101
-#define MTXORB_FTDI_RANGE_0102_PID 0x0102
-#define MTXORB_FTDI_RANGE_0103_PID 0x0103
-#define MTXORB_FTDI_RANGE_0104_PID 0x0104
-#define MTXORB_FTDI_RANGE_0105_PID 0x0105
-#define MTXORB_FTDI_RANGE_0106_PID 0x0106
-#define MTXORB_FTDI_RANGE_0107_PID 0x0107
-#define MTXORB_FTDI_RANGE_0108_PID 0x0108
-#define MTXORB_FTDI_RANGE_0109_PID 0x0109
-#define MTXORB_FTDI_RANGE_010A_PID 0x010A
-#define MTXORB_FTDI_RANGE_010B_PID 0x010B
-#define MTXORB_FTDI_RANGE_010C_PID 0x010C
-#define MTXORB_FTDI_RANGE_010D_PID 0x010D
-#define MTXORB_FTDI_RANGE_010E_PID 0x010E
-#define MTXORB_FTDI_RANGE_010F_PID 0x010F
-#define MTXORB_FTDI_RANGE_0110_PID 0x0110
-#define MTXORB_FTDI_RANGE_0111_PID 0x0111
-#define MTXORB_FTDI_RANGE_0112_PID 0x0112
-#define MTXORB_FTDI_RANGE_0113_PID 0x0113
-#define MTXORB_FTDI_RANGE_0114_PID 0x0114
-#define MTXORB_FTDI_RANGE_0115_PID 0x0115
-#define MTXORB_FTDI_RANGE_0116_PID 0x0116
-#define MTXORB_FTDI_RANGE_0117_PID 0x0117
-#define MTXORB_FTDI_RANGE_0118_PID 0x0118
-#define MTXORB_FTDI_RANGE_0119_PID 0x0119
-#define MTXORB_FTDI_RANGE_011A_PID 0x011A
-#define MTXORB_FTDI_RANGE_011B_PID 0x011B
-#define MTXORB_FTDI_RANGE_011C_PID 0x011C
-#define MTXORB_FTDI_RANGE_011D_PID 0x011D
-#define MTXORB_FTDI_RANGE_011E_PID 0x011E
-#define MTXORB_FTDI_RANGE_011F_PID 0x011F
-#define MTXORB_FTDI_RANGE_0120_PID 0x0120
-#define MTXORB_FTDI_RANGE_0121_PID 0x0121
-#define MTXORB_FTDI_RANGE_0122_PID 0x0122
-#define MTXORB_FTDI_RANGE_0123_PID 0x0123
-#define MTXORB_FTDI_RANGE_0124_PID 0x0124
-#define MTXORB_FTDI_RANGE_0125_PID 0x0125
-#define MTXORB_FTDI_RANGE_0126_PID 0x0126
-#define MTXORB_FTDI_RANGE_0127_PID 0x0127
-#define MTXORB_FTDI_RANGE_0128_PID 0x0128
-#define MTXORB_FTDI_RANGE_0129_PID 0x0129
-#define MTXORB_FTDI_RANGE_012A_PID 0x012A
-#define MTXORB_FTDI_RANGE_012B_PID 0x012B
-#define MTXORB_FTDI_RANGE_012C_PID 0x012C
-#define MTXORB_FTDI_RANGE_012D_PID 0x012D
-#define MTXORB_FTDI_RANGE_012E_PID 0x012E
-#define MTXORB_FTDI_RANGE_012F_PID 0x012F
-#define MTXORB_FTDI_RANGE_0130_PID 0x0130
-#define MTXORB_FTDI_RANGE_0131_PID 0x0131
-#define MTXORB_FTDI_RANGE_0132_PID 0x0132
-#define MTXORB_FTDI_RANGE_0133_PID 0x0133
-#define MTXORB_FTDI_RANGE_0134_PID 0x0134
-#define MTXORB_FTDI_RANGE_0135_PID 0x0135
-#define MTXORB_FTDI_RANGE_0136_PID 0x0136
-#define MTXORB_FTDI_RANGE_0137_PID 0x0137
-#define MTXORB_FTDI_RANGE_0138_PID 0x0138
-#define MTXORB_FTDI_RANGE_0139_PID 0x0139
-#define MTXORB_FTDI_RANGE_013A_PID 0x013A
-#define MTXORB_FTDI_RANGE_013B_PID 0x013B
-#define MTXORB_FTDI_RANGE_013C_PID 0x013C
-#define MTXORB_FTDI_RANGE_013D_PID 0x013D
-#define MTXORB_FTDI_RANGE_013E_PID 0x013E
-#define MTXORB_FTDI_RANGE_013F_PID 0x013F
-#define MTXORB_FTDI_RANGE_0140_PID 0x0140
-#define MTXORB_FTDI_RANGE_0141_PID 0x0141
-#define MTXORB_FTDI_RANGE_0142_PID 0x0142
-#define MTXORB_FTDI_RANGE_0143_PID 0x0143
-#define MTXORB_FTDI_RANGE_0144_PID 0x0144
-#define MTXORB_FTDI_RANGE_0145_PID 0x0145
-#define MTXORB_FTDI_RANGE_0146_PID 0x0146
-#define MTXORB_FTDI_RANGE_0147_PID 0x0147
-#define MTXORB_FTDI_RANGE_0148_PID 0x0148
-#define MTXORB_FTDI_RANGE_0149_PID 0x0149
-#define MTXORB_FTDI_RANGE_014A_PID 0x014A
-#define MTXORB_FTDI_RANGE_014B_PID 0x014B
-#define MTXORB_FTDI_RANGE_014C_PID 0x014C
-#define MTXORB_FTDI_RANGE_014D_PID 0x014D
-#define MTXORB_FTDI_RANGE_014E_PID 0x014E
-#define MTXORB_FTDI_RANGE_014F_PID 0x014F
-#define MTXORB_FTDI_RANGE_0150_PID 0x0150
-#define MTXORB_FTDI_RANGE_0151_PID 0x0151
-#define MTXORB_FTDI_RANGE_0152_PID 0x0152
-#define MTXORB_FTDI_RANGE_0153_PID 0x0153
-#define MTXORB_FTDI_RANGE_0154_PID 0x0154
-#define MTXORB_FTDI_RANGE_0155_PID 0x0155
-#define MTXORB_FTDI_RANGE_0156_PID 0x0156
-#define MTXORB_FTDI_RANGE_0157_PID 0x0157
-#define MTXORB_FTDI_RANGE_0158_PID 0x0158
-#define MTXORB_FTDI_RANGE_0159_PID 0x0159
-#define MTXORB_FTDI_RANGE_015A_PID 0x015A
-#define MTXORB_FTDI_RANGE_015B_PID 0x015B
-#define MTXORB_FTDI_RANGE_015C_PID 0x015C
-#define MTXORB_FTDI_RANGE_015D_PID 0x015D
-#define MTXORB_FTDI_RANGE_015E_PID 0x015E
-#define MTXORB_FTDI_RANGE_015F_PID 0x015F
-#define MTXORB_FTDI_RANGE_0160_PID 0x0160
-#define MTXORB_FTDI_RANGE_0161_PID 0x0161
-#define MTXORB_FTDI_RANGE_0162_PID 0x0162
-#define MTXORB_FTDI_RANGE_0163_PID 0x0163
-#define MTXORB_FTDI_RANGE_0164_PID 0x0164
-#define MTXORB_FTDI_RANGE_0165_PID 0x0165
-#define MTXORB_FTDI_RANGE_0166_PID 0x0166
-#define MTXORB_FTDI_RANGE_0167_PID 0x0167
-#define MTXORB_FTDI_RANGE_0168_PID 0x0168
-#define MTXORB_FTDI_RANGE_0169_PID 0x0169
-#define MTXORB_FTDI_RANGE_016A_PID 0x016A
-#define MTXORB_FTDI_RANGE_016B_PID 0x016B
-#define MTXORB_FTDI_RANGE_016C_PID 0x016C
-#define MTXORB_FTDI_RANGE_016D_PID 0x016D
-#define MTXORB_FTDI_RANGE_016E_PID 0x016E
-#define MTXORB_FTDI_RANGE_016F_PID 0x016F
-#define MTXORB_FTDI_RANGE_0170_PID 0x0170
-#define MTXORB_FTDI_RANGE_0171_PID 0x0171
-#define MTXORB_FTDI_RANGE_0172_PID 0x0172
-#define MTXORB_FTDI_RANGE_0173_PID 0x0173
-#define MTXORB_FTDI_RANGE_0174_PID 0x0174
-#define MTXORB_FTDI_RANGE_0175_PID 0x0175
-#define MTXORB_FTDI_RANGE_0176_PID 0x0176
-#define MTXORB_FTDI_RANGE_0177_PID 0x0177
-#define MTXORB_FTDI_RANGE_0178_PID 0x0178
-#define MTXORB_FTDI_RANGE_0179_PID 0x0179
-#define MTXORB_FTDI_RANGE_017A_PID 0x017A
-#define MTXORB_FTDI_RANGE_017B_PID 0x017B
-#define MTXORB_FTDI_RANGE_017C_PID 0x017C
-#define MTXORB_FTDI_RANGE_017D_PID 0x017D
-#define MTXORB_FTDI_RANGE_017E_PID 0x017E
-#define MTXORB_FTDI_RANGE_017F_PID 0x017F
-#define MTXORB_FTDI_RANGE_0180_PID 0x0180
-#define MTXORB_FTDI_RANGE_0181_PID 0x0181
-#define MTXORB_FTDI_RANGE_0182_PID 0x0182
-#define MTXORB_FTDI_RANGE_0183_PID 0x0183
-#define MTXORB_FTDI_RANGE_0184_PID 0x0184
-#define MTXORB_FTDI_RANGE_0185_PID 0x0185
-#define MTXORB_FTDI_RANGE_0186_PID 0x0186
-#define MTXORB_FTDI_RANGE_0187_PID 0x0187
-#define MTXORB_FTDI_RANGE_0188_PID 0x0188
-#define MTXORB_FTDI_RANGE_0189_PID 0x0189
-#define MTXORB_FTDI_RANGE_018A_PID 0x018A
-#define MTXORB_FTDI_RANGE_018B_PID 0x018B
-#define MTXORB_FTDI_RANGE_018C_PID 0x018C
-#define MTXORB_FTDI_RANGE_018D_PID 0x018D
-#define MTXORB_FTDI_RANGE_018E_PID 0x018E
-#define MTXORB_FTDI_RANGE_018F_PID 0x018F
-#define MTXORB_FTDI_RANGE_0190_PID 0x0190
-#define MTXORB_FTDI_RANGE_0191_PID 0x0191
-#define MTXORB_FTDI_RANGE_0192_PID 0x0192
-#define MTXORB_FTDI_RANGE_0193_PID 0x0193
-#define MTXORB_FTDI_RANGE_0194_PID 0x0194
-#define MTXORB_FTDI_RANGE_0195_PID 0x0195
-#define MTXORB_FTDI_RANGE_0196_PID 0x0196
-#define MTXORB_FTDI_RANGE_0197_PID 0x0197
-#define MTXORB_FTDI_RANGE_0198_PID 0x0198
-#define MTXORB_FTDI_RANGE_0199_PID 0x0199
-#define MTXORB_FTDI_RANGE_019A_PID 0x019A
-#define MTXORB_FTDI_RANGE_019B_PID 0x019B
-#define MTXORB_FTDI_RANGE_019C_PID 0x019C
-#define MTXORB_FTDI_RANGE_019D_PID 0x019D
-#define MTXORB_FTDI_RANGE_019E_PID 0x019E
-#define MTXORB_FTDI_RANGE_019F_PID 0x019F
-#define MTXORB_FTDI_RANGE_01A0_PID 0x01A0
-#define MTXORB_FTDI_RANGE_01A1_PID 0x01A1
-#define MTXORB_FTDI_RANGE_01A2_PID 0x01A2
-#define MTXORB_FTDI_RANGE_01A3_PID 0x01A3
-#define MTXORB_FTDI_RANGE_01A4_PID 0x01A4
-#define MTXORB_FTDI_RANGE_01A5_PID 0x01A5
-#define MTXORB_FTDI_RANGE_01A6_PID 0x01A6
-#define MTXORB_FTDI_RANGE_01A7_PID 0x01A7
-#define MTXORB_FTDI_RANGE_01A8_PID 0x01A8
-#define MTXORB_FTDI_RANGE_01A9_PID 0x01A9
-#define MTXORB_FTDI_RANGE_01AA_PID 0x01AA
-#define MTXORB_FTDI_RANGE_01AB_PID 0x01AB
-#define MTXORB_FTDI_RANGE_01AC_PID 0x01AC
-#define MTXORB_FTDI_RANGE_01AD_PID 0x01AD
-#define MTXORB_FTDI_RANGE_01AE_PID 0x01AE
-#define MTXORB_FTDI_RANGE_01AF_PID 0x01AF
-#define MTXORB_FTDI_RANGE_01B0_PID 0x01B0
-#define MTXORB_FTDI_RANGE_01B1_PID 0x01B1
-#define MTXORB_FTDI_RANGE_01B2_PID 0x01B2
-#define MTXORB_FTDI_RANGE_01B3_PID 0x01B3
-#define MTXORB_FTDI_RANGE_01B4_PID 0x01B4
-#define MTXORB_FTDI_RANGE_01B5_PID 0x01B5
-#define MTXORB_FTDI_RANGE_01B6_PID 0x01B6
-#define MTXORB_FTDI_RANGE_01B7_PID 0x01B7
-#define MTXORB_FTDI_RANGE_01B8_PID 0x01B8
-#define MTXORB_FTDI_RANGE_01B9_PID 0x01B9
-#define MTXORB_FTDI_RANGE_01BA_PID 0x01BA
-#define MTXORB_FTDI_RANGE_01BB_PID 0x01BB
-#define MTXORB_FTDI_RANGE_01BC_PID 0x01BC
-#define MTXORB_FTDI_RANGE_01BD_PID 0x01BD
-#define MTXORB_FTDI_RANGE_01BE_PID 0x01BE
-#define MTXORB_FTDI_RANGE_01BF_PID 0x01BF
-#define MTXORB_FTDI_RANGE_01C0_PID 0x01C0
-#define MTXORB_FTDI_RANGE_01C1_PID 0x01C1
-#define MTXORB_FTDI_RANGE_01C2_PID 0x01C2
-#define MTXORB_FTDI_RANGE_01C3_PID 0x01C3
-#define MTXORB_FTDI_RANGE_01C4_PID 0x01C4
-#define MTXORB_FTDI_RANGE_01C5_PID 0x01C5
-#define MTXORB_FTDI_RANGE_01C6_PID 0x01C6
-#define MTXORB_FTDI_RANGE_01C7_PID 0x01C7
-#define MTXORB_FTDI_RANGE_01C8_PID 0x01C8
-#define MTXORB_FTDI_RANGE_01C9_PID 0x01C9
-#define MTXORB_FTDI_RANGE_01CA_PID 0x01CA
-#define MTXORB_FTDI_RANGE_01CB_PID 0x01CB
-#define MTXORB_FTDI_RANGE_01CC_PID 0x01CC
-#define MTXORB_FTDI_RANGE_01CD_PID 0x01CD
-#define MTXORB_FTDI_RANGE_01CE_PID 0x01CE
-#define MTXORB_FTDI_RANGE_01CF_PID 0x01CF
-#define MTXORB_FTDI_RANGE_01D0_PID 0x01D0
-#define MTXORB_FTDI_RANGE_01D1_PID 0x01D1
-#define MTXORB_FTDI_RANGE_01D2_PID 0x01D2
-#define MTXORB_FTDI_RANGE_01D3_PID 0x01D3
-#define MTXORB_FTDI_RANGE_01D4_PID 0x01D4
-#define MTXORB_FTDI_RANGE_01D5_PID 0x01D5
-#define MTXORB_FTDI_RANGE_01D6_PID 0x01D6
-#define MTXORB_FTDI_RANGE_01D7_PID 0x01D7
-#define MTXORB_FTDI_RANGE_01D8_PID 0x01D8
-#define MTXORB_FTDI_RANGE_01D9_PID 0x01D9
-#define MTXORB_FTDI_RANGE_01DA_PID 0x01DA
-#define MTXORB_FTDI_RANGE_01DB_PID 0x01DB
-#define MTXORB_FTDI_RANGE_01DC_PID 0x01DC
-#define MTXORB_FTDI_RANGE_01DD_PID 0x01DD
-#define MTXORB_FTDI_RANGE_01DE_PID 0x01DE
-#define MTXORB_FTDI_RANGE_01DF_PID 0x01DF
-#define MTXORB_FTDI_RANGE_01E0_PID 0x01E0
-#define MTXORB_FTDI_RANGE_01E1_PID 0x01E1
-#define MTXORB_FTDI_RANGE_01E2_PID 0x01E2
-#define MTXORB_FTDI_RANGE_01E3_PID 0x01E3
-#define MTXORB_FTDI_RANGE_01E4_PID 0x01E4
-#define MTXORB_FTDI_RANGE_01E5_PID 0x01E5
-#define MTXORB_FTDI_RANGE_01E6_PID 0x01E6
-#define MTXORB_FTDI_RANGE_01E7_PID 0x01E7
-#define MTXORB_FTDI_RANGE_01E8_PID 0x01E8
-#define MTXORB_FTDI_RANGE_01E9_PID 0x01E9
-#define MTXORB_FTDI_RANGE_01EA_PID 0x01EA
-#define MTXORB_FTDI_RANGE_01EB_PID 0x01EB
-#define MTXORB_FTDI_RANGE_01EC_PID 0x01EC
-#define MTXORB_FTDI_RANGE_01ED_PID 0x01ED
-#define MTXORB_FTDI_RANGE_01EE_PID 0x01EE
-#define MTXORB_FTDI_RANGE_01EF_PID 0x01EF
-#define MTXORB_FTDI_RANGE_01F0_PID 0x01F0
-#define MTXORB_FTDI_RANGE_01F1_PID 0x01F1
-#define MTXORB_FTDI_RANGE_01F2_PID 0x01F2
-#define MTXORB_FTDI_RANGE_01F3_PID 0x01F3
-#define MTXORB_FTDI_RANGE_01F4_PID 0x01F4
-#define MTXORB_FTDI_RANGE_01F5_PID 0x01F5
-#define MTXORB_FTDI_RANGE_01F6_PID 0x01F6
-#define MTXORB_FTDI_RANGE_01F7_PID 0x01F7
-#define MTXORB_FTDI_RANGE_01F8_PID 0x01F8
-#define MTXORB_FTDI_RANGE_01F9_PID 0x01F9
-#define MTXORB_FTDI_RANGE_01FA_PID 0x01FA
-#define MTXORB_FTDI_RANGE_01FB_PID 0x01FB
-#define MTXORB_FTDI_RANGE_01FC_PID 0x01FC
-#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
-#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
-#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
-
-
-
-/* Interbiometrics USB I/O Board */
-/* Developed for Interbiometrics by Rudolf Gugler */
-#define INTERBIOMETRICS_VID 0x1209
-#define INTERBIOMETRICS_IOBOARD_PID 0x1002
-#define INTERBIOMETRICS_MINI_IOBOARD_PID 0x1006
-
-/*
- * The following are the values for the Perle Systems
- * UltraPort USB serial converters
- */
-#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0 /* Perle UltraPort Product Id */
-
-/*
- * The following are the values for the Sealevel SeaLINK+ adapters.
- * (Original list sent by Tuan Hoang. Ian Abbott renamed the macros and
- * removed some PIDs that don't seem to match any existing products.)
- */
-#define SEALEVEL_VID 0x0c52 /* Sealevel Vendor ID */
-#define SEALEVEL_2101_PID 0x2101 /* SeaLINK+232 (2101/2105) */
-#define SEALEVEL_2102_PID 0x2102 /* SeaLINK+485 (2102) */
-#define SEALEVEL_2103_PID 0x2103 /* SeaLINK+232I (2103) */
-#define SEALEVEL_2104_PID 0x2104 /* SeaLINK+485I (2104) */
-#define SEALEVEL_2106_PID 0x9020 /* SeaLINK+422 (2106) */
-#define SEALEVEL_2201_1_PID 0x2211 /* SeaPORT+2/232 (2201) Port 1 */
-#define SEALEVEL_2201_2_PID 0x2221 /* SeaPORT+2/232 (2201) Port 2 */
-#define SEALEVEL_2202_1_PID 0x2212 /* SeaPORT+2/485 (2202) Port 1 */
-#define SEALEVEL_2202_2_PID 0x2222 /* SeaPORT+2/485 (2202) Port 2 */
-#define SEALEVEL_2203_1_PID 0x2213 /* SeaPORT+2 (2203) Port 1 */
-#define SEALEVEL_2203_2_PID 0x2223 /* SeaPORT+2 (2203) Port 2 */
-#define SEALEVEL_2401_1_PID 0x2411 /* SeaPORT+4/232 (2401) Port 1 */
-#define SEALEVEL_2401_2_PID 0x2421 /* SeaPORT+4/232 (2401) Port 2 */
-#define SEALEVEL_2401_3_PID 0x2431 /* SeaPORT+4/232 (2401) Port 3 */
-#define SEALEVEL_2401_4_PID 0x2441 /* SeaPORT+4/232 (2401) Port 4 */
-#define SEALEVEL_2402_1_PID 0x2412 /* SeaPORT+4/485 (2402) Port 1 */
-#define SEALEVEL_2402_2_PID 0x2422 /* SeaPORT+4/485 (2402) Port 2 */
-#define SEALEVEL_2402_3_PID 0x2432 /* SeaPORT+4/485 (2402) Port 3 */
-#define SEALEVEL_2402_4_PID 0x2442 /* SeaPORT+4/485 (2402) Port 4 */
-#define SEALEVEL_2403_1_PID 0x2413 /* SeaPORT+4 (2403) Port 1 */
-#define SEALEVEL_2403_2_PID 0x2423 /* SeaPORT+4 (2403) Port 2 */
-#define SEALEVEL_2403_3_PID 0x2433 /* SeaPORT+4 (2403) Port 3 */
-#define SEALEVEL_2403_4_PID 0x2443 /* SeaPORT+4 (2403) Port 4 */
-#define SEALEVEL_2801_1_PID 0X2811 /* SeaLINK+8/232 (2801) Port 1 */
-#define SEALEVEL_2801_2_PID 0X2821 /* SeaLINK+8/232 (2801) Port 2 */
-#define SEALEVEL_2801_3_PID 0X2831 /* SeaLINK+8/232 (2801) Port 3 */
-#define SEALEVEL_2801_4_PID 0X2841 /* SeaLINK+8/232 (2801) Port 4 */
-#define SEALEVEL_2801_5_PID 0X2851 /* SeaLINK+8/232 (2801) Port 5 */
-#define SEALEVEL_2801_6_PID 0X2861 /* SeaLINK+8/232 (2801) Port 6 */
-#define SEALEVEL_2801_7_PID 0X2871 /* SeaLINK+8/232 (2801) Port 7 */
-#define SEALEVEL_2801_8_PID 0X2881 /* SeaLINK+8/232 (2801) Port 8 */
-#define SEALEVEL_2802_1_PID 0X2812 /* SeaLINK+8/485 (2802) Port 1 */
-#define SEALEVEL_2802_2_PID 0X2822 /* SeaLINK+8/485 (2802) Port 2 */
-#define SEALEVEL_2802_3_PID 0X2832 /* SeaLINK+8/485 (2802) Port 3 */
-#define SEALEVEL_2802_4_PID 0X2842 /* SeaLINK+8/485 (2802) Port 4 */
-#define SEALEVEL_2802_5_PID 0X2852 /* SeaLINK+8/485 (2802) Port 5 */
-#define SEALEVEL_2802_6_PID 0X2862 /* SeaLINK+8/485 (2802) Port 6 */
-#define SEALEVEL_2802_7_PID 0X2872 /* SeaLINK+8/485 (2802) Port 7 */
-#define SEALEVEL_2802_8_PID 0X2882 /* SeaLINK+8/485 (2802) Port 8 */
-#define SEALEVEL_2803_1_PID 0X2813 /* SeaLINK+8 (2803) Port 1 */
-#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
-#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
-#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
-#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
-#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
-#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
-#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
-
-/*
- * The following are the values for two KOBIL chipcard terminals.
- */
-#define KOBIL_VID 0x0d46 /* KOBIL Vendor ID */
-#define KOBIL_CONV_B1_PID 0x2020 /* KOBIL Konverter for B1 */
-#define KOBIL_CONV_KAAN_PID 0x2021 /* KOBIL_Konverter for KAAN */
-
-/*
- * Icom ID-1 digital transceiver
- */
-
-#define ICOM_ID1_VID 0x0C26
-#define ICOM_ID1_PID 0x0004
-
-/*
- * ASK.fr devices
- */
-#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
-
-/*
- * FTDI USB UART chips used in construction projects from the
- * Elektor Electronics magazine (http://elektor-electronics.co.uk)
- */
-#define ELEKTOR_VID 0x0C7D
-#define ELEKTOR_FT323R_PID 0x0005 /* RFID-Reader, issue 09-2006 */
-
-/*
- * DSS-20 Sync Station for Sony Ericsson P800
- */
-#define FTDI_DSS20_PID 0xFC82
-
-/*
- * Home Electronics (www.home-electro.com) USB gadgets
- */
-#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */
-
-/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */
-/* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */
-#define FTDI_USB_UIRT_PID 0xF850 /* Product Id */
-
-/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */
-
-#define FTDI_TNC_X_PID 0xEBE0
-
-/*
- * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
- * All of these devices use FTDI's vendor ID (0x0403).
- *
- * The previously included PID for the UO 100 module was incorrect.
- * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
- *
- * Armin Laeuger originally sent the PID for the UM 100 module.
- */
-#define FTDI_R2000KU_TRUE_RNG 0xFB80 /* R2000KU TRUE RNG */
-#define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */
-#define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */
-#define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */
-#define FTDI_ELV_ALC8500_PID 0xF06E /* ALC 8500 Expert */
-/* Additional ELV PIDs that default to using the FTDI D2XX drivers on
- * MS Windows, rather than the FTDI Virtual Com Port drivers.
- * Maybe these will be easier to use with the libftdi/libusb user-space
- * drivers, or possibly the Comedi drivers in some cases. */
-#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
-#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
-#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */
-#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */
-#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
-#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
-#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
-#define FTDI_ELV_USI2_PID 0xF06A /* USB-Schrittmotoren-Interface (USI 2) */
-#define FTDI_ELV_T1100_PID 0xF06B /* Thermometer (T 1100) */
-#define FTDI_ELV_PCD200_PID 0xF06C /* PC-Datenlogger (PCD 200) */
-#define FTDI_ELV_ULA200_PID 0xF06D /* USB-LCD-Ansteuerung (ULA 200) */
-#define FTDI_ELV_FHZ1000PC_PID 0xF06F /* FHZ 1000 PC */
-#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
-#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
-#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
-#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
-#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
-#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
-#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
-#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
-#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
-#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
-#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
-
-/*
- * Definitions for ID TECH (www.idt-net.com) devices
- */
-#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
-#define IDTECH_IDT1221U_PID 0x0300 /* IDT1221U USB to RS-232 adapter */
-
-/*
- * Definitions for Omnidirectional Control Technology, Inc. devices
- */
-#define OCT_VID 0x0B39 /* OCT vendor ID */
-/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
-/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
-/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
-#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
-
-/* an infrared receiver for user access control with IR tags */
-#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
-
-/*
- * Definitions for Artemis astronomical USB based cameras
- * Check it at http://www.artemisccd.co.uk/
- */
-#define FTDI_ARTEMIS_PID 0xDF28 /* All Artemis Cameras */
-
-/*
- * Definitions for ATIK Instruments astronomical USB based cameras
- * Check it at http://www.atik-instruments.com/
- */
-#define FTDI_ATIK_ATK16_PID 0xDF30 /* ATIK ATK-16 Grayscale Camera */
-#define FTDI_ATIK_ATK16C_PID 0xDF32 /* ATIK ATK-16C Colour Camera */
-#define FTDI_ATIK_ATK16HR_PID 0xDF31 /* ATIK ATK-16HR Grayscale Camera */
-#define FTDI_ATIK_ATK16HRC_PID 0xDF33 /* ATIK ATK-16HRC Colour Camera */
-#define FTDI_ATIK_ATK16IC_PID 0xDF35 /* ATIK ATK-16IC Grayscale Camera */
-
-/*
- * Protego product ids
- */
-#define PROTEGO_SPECIAL_1 0xFC70 /* special/unknown device */
-#define PROTEGO_R2X0 0xFC71 /* R200-USB TRNG unit (R210, R220, and R230) */
-#define PROTEGO_SPECIAL_3 0xFC72 /* special/unknown device */
-#define PROTEGO_SPECIAL_4 0xFC73 /* special/unknown device */
-
-/*
- * Gude Analog- und Digitalsysteme GmbH
- */
-#define FTDI_GUDEADS_E808_PID 0xE808
-#define FTDI_GUDEADS_E809_PID 0xE809
-#define FTDI_GUDEADS_E80A_PID 0xE80A
-#define FTDI_GUDEADS_E80B_PID 0xE80B
-#define FTDI_GUDEADS_E80C_PID 0xE80C
-#define FTDI_GUDEADS_E80D_PID 0xE80D
-#define FTDI_GUDEADS_E80E_PID 0xE80E
-#define FTDI_GUDEADS_E80F_PID 0xE80F
-#define FTDI_GUDEADS_E888_PID 0xE888 /* Expert ISDN Control USB */
-#define FTDI_GUDEADS_E889_PID 0xE889 /* USB RS-232 OptoBridge */
-#define FTDI_GUDEADS_E88A_PID 0xE88A
-#define FTDI_GUDEADS_E88B_PID 0xE88B
-#define FTDI_GUDEADS_E88C_PID 0xE88C
-#define FTDI_GUDEADS_E88D_PID 0xE88D
-#define FTDI_GUDEADS_E88E_PID 0xE88E
-#define FTDI_GUDEADS_E88F_PID 0xE88F
-
-/*
- * Linx Technologies product ids
- */
-#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
-#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
-#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
-#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
-#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
-
-/* CCS Inc. ICDU/ICDU40 product ID - the FT232BM is used in an in-circuit-debugger */
-/* unit for PIC16's/PIC18's */
-#define FTDI_CCSICDU20_0_PID 0xF9D0
-#define FTDI_CCSICDU40_1_PID 0xF9D1
-#define FTDI_CCSMACHX_2_PID 0xF9D2
-#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
-#define FTDI_CCSICDU64_4_PID 0xF9D4
-#define FTDI_CCSPRIME8_5_PID 0xF9D5
-
-/* Inside Accesso contactless reader (http://www.insidefr.com) */
-#define INSIDE_ACCESSO 0xFAD0
-
-/*
- * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
- */
-#define INTREPID_VID 0x093C
-#define INTREPID_VALUECAN_PID 0x0601
-#define INTREPID_NEOVI_PID 0x0701
-
-/*
- * Falcom Wireless Communications GmbH
- */
-#define FALCOM_VID 0x0F94 /* Vendor Id */
-#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
-#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
-
-/*
- * SUUNTO product ids
- */
-#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */
-
-/*
- * Oceanic product ids
- */
-#define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */
-
-/*
- * TTi (Thurlby Thandar Instruments)
- */
-#define TTI_VID 0x103E /* Vendor Id */
-#define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */
-
-/*
- * Definitions for B&B Electronics products.
- */
-#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
-#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
-#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
-#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
-#define BANDB_USOPTL4_PID 0xAC11
-#define BANDB_USPTL4_PID 0xAC12
-#define BANDB_USO9ML2DR_2_PID 0xAC16
-#define BANDB_USO9ML2DR_PID 0xAC17
-#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
-#define BANDB_USOPTL4DR_PID 0xAC19
-#define BANDB_485USB9F_2W_PID 0xAC25
-#define BANDB_485USB9F_4W_PID 0xAC26
-#define BANDB_232USB9M_PID 0xAC27
-#define BANDB_485USBTB_2W_PID 0xAC33
-#define BANDB_485USBTB_4W_PID 0xAC34
-#define BANDB_TTL5USB9M_PID 0xAC49
-#define BANDB_TTL3USB9M_PID 0xAC50
-#define BANDB_ZZ_PROG1_USB_PID 0xBA02
-
-/*
- * RM Michaelides CANview USB (http://www.rmcan.com)
- * CAN fieldbus interface adapter, added by port GmbH www.port.de)
- * Ian Abbott changed the macro names for consistency.
- */
-#define FTDI_RM_CANVIEW_PID 0xfd60 /* Product Id */
-
-/*
- * EVER Eco Pro UPS (http://www.ever.com.pl/)
- */
-
-#define EVER_ECO_PRO_CDS 0xe520 /* RS-232 converter */
-
-/*
- * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
- * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
- * and I'm not entirely sure which are used by which.
- */
-#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
-#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
-
-/*
- * Mobility Electronics products.
- */
-#define MOBILITY_VID 0x1342
-#define MOBILITY_USB_SERIAL_PID 0x0202 /* EasiDock USB 200 serial */
-
-/*
- * microHAM product IDs (http://www.microham.com).
- * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>
- * and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
- * Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
- */
-#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
-#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
-#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
-#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
-#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
-#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
-#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
-#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
-
-/*
- * Active Robots product ids.
- */
-#define FTDI_ACTIVE_ROBOTS_PID 0xE548 /* USB comms board */
-
-/*
- * Xsens Technologies BV products (http://www.xsens.com).
- */
-#define XSENS_CONVERTER_0_PID 0xD388
-#define XSENS_CONVERTER_1_PID 0xD389
-#define XSENS_CONVERTER_2_PID 0xD38A
-#define XSENS_CONVERTER_3_PID 0xD38B
-#define XSENS_CONVERTER_4_PID 0xD38C
-#define XSENS_CONVERTER_5_PID 0xD38D
-#define XSENS_CONVERTER_6_PID 0xD38E
-#define XSENS_CONVERTER_7_PID 0xD38F
-
-/*
- * Teratronik product ids.
- * Submitted by O. Wölfelschneider.
- */
-#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */
-#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */
-
-/*
- * Evolution Robotics products (http://www.evolution.com/).
- * Submitted by Shawn M. Lavelle.
- */
-#define EVOLUTION_VID 0xDEEE /* Vendor ID */
-#define EVOLUTION_ER1_PID 0x0300 /* ER1 Control Module */
-#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
-#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
-#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
-
-/* Pyramid Computer GmbH */
-#define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */
-
-/*
- * NDI (www.ndigital.com) product ids
- */
-#define FTDI_NDI_HUC_PID 0xDA70 /* NDI Host USB Converter */
-#define FTDI_NDI_SPECTRA_SCU_PID 0xDA71 /* NDI Spectra SCU */
-#define FTDI_NDI_FUTURE_2_PID 0xDA72 /* NDI future device #2 */
-#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
-#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
-
-/*
- * Posiflex inc retail equipment (http://www.posiflex.com.tw)
- */
-#define POSIFLEX_VID 0x0d3a /* Vendor ID */
-#define POSIFLEX_PP7000_PID 0x0300 /* PP-7000II thermal printer */
-
-/*
- * Westrex International devices submitted by Cory Lee
- */
-#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
-#define FTDI_WESTREX_MODEL_8900F_PID 0xDC01 /* Model 8900F */
-
-/*
- * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
- */
-#define FTDI_RRCIRKITS_LOCOBUFFER_PID 0xc7d0 /* LocoBuffer USB */
-
-/*
- * Eclo (http://www.eclo.pt/) product IDs.
- * PID 0xEA90 submitted by Martin Grill.
- */
-#define FTDI_ECLO_COM_1WIRE_PID 0xEA90 /* COM to 1-Wire USB adaptor */
-
-/*
- * Papouch products (http://www.papouch.com/)
- * Submitted by Folkert van Heusden
- */
-
-#define PAPOUCH_VID 0x5050 /* Vendor ID */
-#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
-#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
-
-/*
- * ACG Identification Technologies GmbH products (http://www.acg.de/).
- * Submitted by anton -at- goto10 -dot- org.
*/
-#define FTDI_ACG_HFDUAL_PID 0xDD20 /* HF Dual ISO Reader (RFID) */
-
-/*
- * Yost Engineering, Inc. products (www.yostengineering.com).
- * PID 0xE050 submitted by Aaron Prose.
- */
-#define FTDI_YEI_SERVOCENTER31_PID 0xE050 /* YEI ServoCenter3.1 USB */
-
-/*
- * ThorLabs USB motor drivers
- */
-#define FTDI_THORLABS_PID 0xfaf0 /* ThorLabs USB motor drivers */
-
-/*
- * Testo products (http://www.testo.com/)
- * Submitted by Colin Leroy
- */
-#define TESTO_VID 0x128D
-#define TESTO_USB_INTERFACE_PID 0x0001
-
-/*
- * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
- */
-#define FTDI_GAMMA_SCOUT_PID 0xD678 /* Gamma Scout online */
-
-/*
- * Tactrix OpenPort (ECU) devices.
- * OpenPort 1.3M submitted by Donour Sizemore.
- * OpenPort 1.3S and 1.3U submitted by Ian Abbott.
- */
-#define FTDI_TACTRIX_OPENPORT_13M_PID 0xCC48 /* OpenPort 1.3 Mitsubishi */
-#define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */
-#define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */
-
-/*
- * Telldus Technologies
- */
-#define TELLDUS_VID 0x1781 /* Vendor ID */
-#define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
-
-/*
- * IBS elektronik product ids
- * Submitted by Thomas Schleusener
- */
-#define FTDI_IBS_US485_PID 0xff38 /* IBS US485 (USB<-->RS422/485 interface) */
-#define FTDI_IBS_PICPRO_PID 0xff39 /* IBS PIC-Programmer */
-#define FTDI_IBS_PCMCIA_PID 0xff3a /* IBS Card reader for PCMCIA SRAM-cards */
-#define FTDI_IBS_PK1_PID 0xff3b /* IBS PK1 - Particel counter */
-#define FTDI_IBS_RS232MON_PID 0xff3c /* IBS RS232 - Monitor */
-#define FTDI_IBS_APP70_PID 0xff3d /* APP 70 (dust monitoring system) */
-#define FTDI_IBS_PEDO_PID 0xff3e /* IBS PEDO-Modem (RF modem 868.35 MHz) */
-#define FTDI_IBS_PROD_PID 0xff3f /* future device */
-
-/*
- * MaxStream devices www.maxstream.net
- */
-#define FTDI_MAXSTREAM_PID 0xEE18 /* Xbee PKG-U Module */
-
-/* Olimex */
-#define OLIMEX_VID 0x15BA
-#define OLIMEX_ARM_USB_OCD_PID 0x0003
-
-/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
-/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
-#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
-#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
-
-/* www.elsterelectricity.com Elster Unicom III Optical Probe */
-#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
-
-/*
- * The Mobility Lab (TML)
- * Submitted by Pierre Castella
- */
-#define TML_VID 0x1B91 /* Vendor ID */
-#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
-
-/* Propox devices */
-#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
-
-/* Rig Expert Ukraine devices */
-#define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */
-
-/* Domintell products http://www.domintell.com */
-#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
-#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */
-
-/* Alti-2 products http://www.alti-2.com */
-#define ALTI2_VID 0x1BC9
-#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
/* Commands */
#define FTDI_SIO_RESET 0 /* Reset the port */
@@ -910,86 +40,6 @@
#define INTERFACE_C 3
#define INTERFACE_D 4
-/*
- * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
- * Submitted by Harald Welte <laforge@openmoko.org>
- */
-#define FIC_VID 0x1457
-#define FIC_NEO1973_DEBUG_PID 0x5118
-
-/*
- * RATOC REX-USB60F
- */
-#define RATOC_VENDOR_ID 0x0584
-#define RATOC_PRODUCT_ID_USB60F 0xb020
-
-/*
- * DIEBOLD BCS SE923
- */
-#define DIEBOLD_BCS_SE923_PID 0xfb99
-
-/*
- * Atmel STK541
- */
-#define ATMEL_VID 0x03eb /* Vendor ID */
-#define STK541_PID 0x2109 /* Zigbee Controller */
-
-/*
- * Dresden Elektronic Sensor Terminal Board
- */
-#define DE_VID 0x1cf1 /* Vendor ID */
-#define STB_PID 0x0001 /* Sensor Terminal Board */
-#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
-
-/*
- * Blackfin gnICE JTAG
- * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
- */
-#define ADI_VID 0x0456
-#define ADI_GNICE_PID 0xF000
-#define ADI_GNICEPLUS_PID 0xF001
-
-/*
- * JETI SPECTROMETER SPECBOS 1201
- * http://www.jeti.com/products/sys/scb/scb1201.php
- */
-#define JETI_VID 0x0c6c
-#define JETI_SPC1201_PID 0x04b2
-
-/*
- * Marvell SheevaPlug
- */
-#define MARVELL_VID 0x9e88
-#define MARVELL_SHEEVAPLUG_PID 0x9e8f
-
-#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
-
-/*
- * GN Otometrics (http://www.otometrics.com)
- * Submitted by Ville Sundberg.
- */
-#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */
-#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */
-
-/*
- * Bayer Ascensia Contour blood glucose meter USB-converter cable.
- * http://winglucofacts.com/cables/
- */
-#define BAYER_VID 0x1A79
-#define BAYER_CONTOUR_CABLE_PID 0x6001
-
-/*
- * Marvell OpenRD Base, Client
- * http://www.open-rd.org
- * OpenRD Base, Client use VID 0x0403
- */
-#define MARVELL_OPENRD_PID 0x9e90
-
-/*
- * Hameg HO820 and HO870 interface (using VID 0x0403)
- */
-#define HAMEG_HO820_PID 0xed74
-#define HAMEG_HO870_PID 0xed71
/*
* BmRequestType: 1100 0000b
@@ -1504,4 +554,3 @@ typedef enum {
* B2..7 Length of message - (not including Byte 0)
*
*/
-
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
new file mode 100644
index 0000000..c8951ae
--- /dev/null
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -0,0 +1,1004 @@
+/*
+ * vendor/product IDs (VID/PID) of devices using FTDI USB serial converters.
+ * Please keep numerically sorted within individual areas, thanks!
+ *
+ * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
+ * from Rudolf Gugler
+ *
+ */
+
+
+/**********************************/
+/***** devices using FTDI VID *****/
+/**********************************/
+
+
+#define FTDI_VID 0x0403 /* Vendor Id */
+
+
+/*** "original" FTDI device PIDs ***/
+
+#define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */
+#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
+#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
+#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
+#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
+#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
+
+
+/*** third-party PIDs (using FTDI_VID) ***/
+
+/*
+ * Marvell OpenRD Base, Client
+ * http://www.open-rd.org
+ * OpenRD Base, Client use VID 0x0403
+ */
+#define MARVELL_OPENRD_PID 0x9e90
+
+/* www.candapter.com Ewert Energy Systems CANdapter device */
+#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+
+#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+
+/* OOCDlink by Joern Kaipf <joernk@web.de>
+ * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
+#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
+
+/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
+/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
+#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
+#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
+
+#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
+
+/* OpenDCC (www.opendcc.de) product id */
+#define FTDI_OPENDCC_PID 0xBFD8
+#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
+#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
+#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
+
+/*
+ * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
+ */
+#define FTDI_RRCIRKITS_LOCOBUFFER_PID 0xc7d0 /* LocoBuffer USB */
+
+/* DMX4ALL DMX Interfaces */
+#define FTDI_DMX4ALL 0xC850
+
+/*
+ * ASK.fr devices
+ */
+#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
+
+/* www.starting-point-systems.com µChameleon device */
+#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */
+
+/*
+ * Tactrix OpenPort (ECU) devices.
+ * OpenPort 1.3M submitted by Donour Sizemore.
+ * OpenPort 1.3S and 1.3U submitted by Ian Abbott.
+ */
+#define FTDI_TACTRIX_OPENPORT_13M_PID 0xCC48 /* OpenPort 1.3 Mitsubishi */
+#define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */
+#define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */
+
+/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
+/* the VID is the standard ftdi vid (FTDI_VID) */
+#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
+#define FTDI_SCS_DEVICE_1_PID 0xD011 /* SCS Tracker / DSP TNC */
+#define FTDI_SCS_DEVICE_2_PID 0xD012
+#define FTDI_SCS_DEVICE_3_PID 0xD013
+#define FTDI_SCS_DEVICE_4_PID 0xD014
+#define FTDI_SCS_DEVICE_5_PID 0xD015
+#define FTDI_SCS_DEVICE_6_PID 0xD016
+#define FTDI_SCS_DEVICE_7_PID 0xD017
+
+/* iPlus device */
+#define FTDI_IPLUS_PID 0xD070 /* Product Id */
+#define FTDI_IPLUS2_PID 0xD071 /* Product Id */
+
+/*
+ * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
+ */
+#define FTDI_GAMMA_SCOUT_PID 0xD678 /* Gamma Scout online */
+
+/* Propox devices */
+#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+
+/*
+ * Xsens Technologies BV products (http://www.xsens.com).
+ */
+#define XSENS_CONVERTER_0_PID 0xD388
+#define XSENS_CONVERTER_1_PID 0xD389
+#define XSENS_CONVERTER_2_PID 0xD38A
+#define XSENS_CONVERTER_3_PID 0xD38B
+#define XSENS_CONVERTER_4_PID 0xD38C
+#define XSENS_CONVERTER_5_PID 0xD38D
+#define XSENS_CONVERTER_6_PID 0xD38E
+#define XSENS_CONVERTER_7_PID 0xD38F
+
+/*
+ * NDI (www.ndigital.com) product ids
+ */
+#define FTDI_NDI_HUC_PID 0xDA70 /* NDI Host USB Converter */
+#define FTDI_NDI_SPECTRA_SCU_PID 0xDA71 /* NDI Spectra SCU */
+#define FTDI_NDI_FUTURE_2_PID 0xDA72 /* NDI future device #2 */
+#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
+#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
+
+/*
+ * Westrex International devices submitted by Cory Lee
+ */
+#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
+#define FTDI_WESTREX_MODEL_8900F_PID 0xDC01 /* Model 8900F */
+
+/*
+ * ACG Identification Technologies GmbH products (http://www.acg.de/).
+ * Submitted by anton -at- goto10 -dot- org.
+ */
+#define FTDI_ACG_HFDUAL_PID 0xDD20 /* HF Dual ISO Reader (RFID) */
+
+/*
+ * Definitions for Artemis astronomical USB based cameras
+ * Check it at http://www.artemisccd.co.uk/
+ */
+#define FTDI_ARTEMIS_PID 0xDF28 /* All Artemis Cameras */
+
+/*
+ * Definitions for ATIK Instruments astronomical USB based cameras
+ * Check it at http://www.atik-instruments.com/
+ */
+#define FTDI_ATIK_ATK16_PID 0xDF30 /* ATIK ATK-16 Grayscale Camera */
+#define FTDI_ATIK_ATK16C_PID 0xDF32 /* ATIK ATK-16C Colour Camera */
+#define FTDI_ATIK_ATK16HR_PID 0xDF31 /* ATIK ATK-16HR Grayscale Camera */
+#define FTDI_ATIK_ATK16HRC_PID 0xDF33 /* ATIK ATK-16HRC Colour Camera */
+#define FTDI_ATIK_ATK16IC_PID 0xDF35 /* ATIK ATK-16IC Grayscale Camera */
+
+/*
+ * Yost Engineering, Inc. products (www.yostengineering.com).
+ * PID 0xE050 submitted by Aaron Prose.
+ */
+#define FTDI_YEI_SERVOCENTER31_PID 0xE050 /* YEI ServoCenter3.1 USB */
+
+/*
+ * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
+ * All of these devices use FTDI's vendor ID (0x0403).
+ * Further IDs taken from ELV Windows .inf file.
+ *
+ * The previously included PID for the UO 100 module was incorrect.
+ * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
+ *
+ * Armin Laeuger originally sent the PID for the UM 100 module.
+ */
+#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
+#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
+#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
+#define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */
+#define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */
+#define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */
+#define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */
+#define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */
+#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
+#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
+#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
+#define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */
+#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
+#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
+#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
+#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */
+#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
+#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
+#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
+#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
+#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
+#define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */
+#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
+#define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */
+#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
+#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
+#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
+#define FTDI_ELV_USI2_PID 0xF06A /* USB-Schrittmotoren-Interface (USI 2) */
+#define FTDI_ELV_T1100_PID 0xF06B /* Thermometer (T 1100) */
+#define FTDI_ELV_PCD200_PID 0xF06C /* PC-Datenlogger (PCD 200) */
+#define FTDI_ELV_ULA200_PID 0xF06D /* USB-LCD-Ansteuerung (ULA 200) */
+#define FTDI_ELV_ALC8500_PID 0xF06E /* ALC 8500 Expert */
+#define FTDI_ELV_FHZ1000PC_PID 0xF06F /* FHZ 1000 PC */
+#define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */
+#define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */
+#define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */
+/* Additional ELV PIDs that default to using the FTDI D2XX drivers on
+ * MS Windows, rather than the FTDI Virtual Com Port drivers.
+ * Maybe these will be easier to use with the libftdi/libusb user-space
+ * drivers, or possibly the Comedi drivers in some cases. */
+#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
+#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
+#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperatur-Feuchte Messgeraet (TFM 100) */
+#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkuhr (UDF 77) */
+#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
+
+/*
+ * EVER Eco Pro UPS (http://www.ever.com.pl/)
+ */
+
+#define EVER_ECO_PRO_CDS 0xe520 /* RS-232 converter */
+
+/*
+ * Active Robots product ids.
+ */
+#define FTDI_ACTIVE_ROBOTS_PID 0xE548 /* USB comms board */
+
+/* Pyramid Computer GmbH */
+#define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */
+
+/* www.elsterelectricity.com Elster Unicom III Optical Probe */
+#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
+
+/*
+ * Gude Analog- und Digitalsysteme GmbH
+ */
+#define FTDI_GUDEADS_E808_PID 0xE808
+#define FTDI_GUDEADS_E809_PID 0xE809
+#define FTDI_GUDEADS_E80A_PID 0xE80A
+#define FTDI_GUDEADS_E80B_PID 0xE80B
+#define FTDI_GUDEADS_E80C_PID 0xE80C
+#define FTDI_GUDEADS_E80D_PID 0xE80D
+#define FTDI_GUDEADS_E80E_PID 0xE80E
+#define FTDI_GUDEADS_E80F_PID 0xE80F
+#define FTDI_GUDEADS_E888_PID 0xE888 /* Expert ISDN Control USB */
+#define FTDI_GUDEADS_E889_PID 0xE889 /* USB RS-232 OptoBridge */
+#define FTDI_GUDEADS_E88A_PID 0xE88A
+#define FTDI_GUDEADS_E88B_PID 0xE88B
+#define FTDI_GUDEADS_E88C_PID 0xE88C
+#define FTDI_GUDEADS_E88D_PID 0xE88D
+#define FTDI_GUDEADS_E88E_PID 0xE88E
+#define FTDI_GUDEADS_E88F_PID 0xE88F
+
+/*
+ * Eclo (http://www.eclo.pt/) product IDs.
+ * PID 0xEA90 submitted by Martin Grill.
+ */
+#define FTDI_ECLO_COM_1WIRE_PID 0xEA90 /* COM to 1-Wire USB adaptor */
+
+/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */
+#define FTDI_TNC_X_PID 0xEBE0
+
+/*
+ * Teratronik product ids.
+ * Submitted by O. Wölfelschneider.
+ */
+#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */
+#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */
+
+/* Rig Expert Ukraine devices */
+#define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */
+
+/*
+ * Hameg HO820 and HO870 interface (using VID 0x0403)
+ */
+#define HAMEG_HO820_PID 0xed74
+#define HAMEG_HO870_PID 0xed71
+
+/*
+ * MaxStream devices www.maxstream.net
+ */
+#define FTDI_MAXSTREAM_PID 0xEE18 /* Xbee PKG-U Module */
+
+/*
+ * microHAM product IDs (http://www.microham.com).
+ * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>
+ * and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
+ * Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
+ */
+#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
+#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
+#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
+#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
+#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
+#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
+#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
+#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
+
+/* Domintell products http://www.domintell.com */
+#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
+#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */
+
+/*
+ * The following are the values for the Perle Systems
+ * UltraPort USB serial converters
+ */
+#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0 /* Perle UltraPort Product Id */
+
+/* Sprog II (Andrew Crosland's SprogII DCC interface) */
+#define FTDI_SPROG_II 0xF0C8
+
+/* an infrared receiver for user access control with IR tags */
+#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
+
+/* ACT Solutions HomePro ZWave interface
+ (http://www.act-solutions.com/HomePro.htm) */
+#define FTDI_ACTZWAVE_PID 0xF2D0
+
+/*
+ * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
+ * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
+ * and I'm not entirely sure which are used by which.
+ */
+#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
+#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
+
+/*
+ * Linx Technologies product ids
+ */
+#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
+#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
+#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
+#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
+#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
+
+/*
+ * Oceanic product ids
+ */
+#define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */
+
+/*
+ * SUUNTO product ids
+ */
+#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */
+
+/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */
+/* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */
+#define FTDI_USB_UIRT_PID 0xF850 /* Product Id */
+
+/* CCS Inc. ICDU/ICDU40 product ID -
+ * the FT232BM is used in an in-circuit-debugger unit for PIC16's/PIC18's */
+#define FTDI_CCSICDU20_0_PID 0xF9D0
+#define FTDI_CCSICDU40_1_PID 0xF9D1
+#define FTDI_CCSMACHX_2_PID 0xF9D2
+#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
+#define FTDI_CCSICDU64_4_PID 0xF9D4
+#define FTDI_CCSPRIME8_5_PID 0xF9D5
+
+/*
+ * The following are the values for the Matrix Orbital LCD displays,
+ * which are the FT232BM ( similar to the 8U232AM )
+ */
+#define FTDI_MTXORB_0_PID 0xFA00 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_1_PID 0xFA01 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_2_PID 0xFA02 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_3_PID 0xFA03 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_4_PID 0xFA04 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_5_PID 0xFA05 /* Matrix Orbital Product Id */
+#define FTDI_MTXORB_6_PID 0xFA06 /* Matrix Orbital Product Id */
+
+/*
+ * Home Electronics (www.home-electro.com) USB gadgets
+ */
+#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */
+
+/* Inside Accesso contactless reader (http://www.insidefr.com) */
+#define INSIDE_ACCESSO 0xFAD0
+
+/*
+ * ThorLabs USB motor drivers
+ */
+#define FTDI_THORLABS_PID 0xfaf0 /* ThorLabs USB motor drivers */
+
+/*
+ * Protego product ids
+ */
+#define PROTEGO_SPECIAL_1 0xFC70 /* special/unknown device */
+#define PROTEGO_R2X0 0xFC71 /* R200-USB TRNG unit (R210, R220, and R230) */
+#define PROTEGO_SPECIAL_3 0xFC72 /* special/unknown device */
+#define PROTEGO_SPECIAL_4 0xFC73 /* special/unknown device */
+
+/*
+ * DSS-20 Sync Station for Sony Ericsson P800
+ */
+#define FTDI_DSS20_PID 0xFC82
+
+/* www.irtrans.de device */
+#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
+
+/*
+ * RM Michaelides CANview USB (http://www.rmcan.com) (FTDI_VID)
+ * CAN fieldbus interface adapter, added by port GmbH (www.port.de)
+ * Ian Abbott changed the macro names for consistency.
+ */
+#define FTDI_RM_CANVIEW_PID 0xfd60 /* Product Id */
+/* www.thoughttechnology.com/ TT-USB, provided with ProComp units, uses ftdi_sio */
+#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
+
+#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 (FTDI_VID) */
+
+#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
+
+/*
+ * PCDJ use ftdi based dj-controllers. The following PID is
+ * for their DAC-2 device http://www.pcdjhardware.com/DAC2.asp
+ * (the VID is the standard ftdi vid (FTDI_VID), PID sent by Wouter Paesen)
+ */
+#define FTDI_PCDJ_DAC2_PID 0xFA88
+
+#define FTDI_R2000KU_TRUE_RNG 0xFB80 /* R2000KU TRUE RNG (FTDI_VID) */
+
+/*
+ * DIEBOLD BCS SE923 (FTDI_VID)
+ */
+#define DIEBOLD_BCS_SE923_PID 0xfb99
+
+/* www.crystalfontz.com devices
+ * - thanks for providing free devices for evaluation!
+ * they use the ftdi chipset for the USB interface
+ * and the vendor id is the same
+ */
+#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
+#define FTDI_XF_634_PID 0xFC09 /* 634: 20x4 Character Display */
+#define FTDI_XF_547_PID 0xFC0A /* 547: Two line Display */
+#define FTDI_XF_633_PID 0xFC0B /* 633: 16x2 Character Display with Keys */
+#define FTDI_XF_631_PID 0xFC0C /* 631: 20x2 Character Display */
+#define FTDI_XF_635_PID 0xFC0D /* 635: 20x4 Character Display */
+#define FTDI_XF_640_PID 0xFC0E /* 640: Two line Display */
+#define FTDI_XF_642_PID 0xFC0F /* 642: Two line Display */
+
+/*
+ * Video Networks Limited / Homechoice in the UK use an ftdi-based device
+ * for their 1Mb broadband internet service. The following PID is exhibited
+ * by the usb device supplied (the VID is the standard ftdi vid (FTDI_VID))
+ */
+#define FTDI_VNHCPCUSB_D_PID 0xfe38 /* Product Id */
+
+/* AlphaMicro Components AMC-232USB01 device (FTDI_VID) */
+#define FTDI_AMC232_PID 0xFF00 /* Product Id */
+
+/*
+ * IBS elektronik product ids (FTDI_VID)
+ * Submitted by Thomas Schleusener
+ */
+#define FTDI_IBS_US485_PID 0xff38 /* IBS US485 (USB<-->RS422/485 interface) */
+#define FTDI_IBS_PICPRO_PID 0xff39 /* IBS PIC-Programmer */
+#define FTDI_IBS_PCMCIA_PID 0xff3a /* IBS Card reader for PCMCIA SRAM-cards */
+#define FTDI_IBS_PK1_PID 0xff3b /* IBS PK1 - Particle counter */
+#define FTDI_IBS_RS232MON_PID 0xff3c /* IBS RS232 - Monitor */
+#define FTDI_IBS_APP70_PID 0xff3d /* APP 70 (dust monitoring system) */
+#define FTDI_IBS_PEDO_PID 0xff3e /* IBS PEDO-Modem (RF modem 868.35 MHz) */
+#define FTDI_IBS_PROD_PID 0xff3f /* future device */
+/* www.canusb.com Lawicel CANUSB device (FTDI_VID) */
+#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
+
+
+
+/********************************/
+/** third-party VID/PID combos **/
+/********************************/
+
+
+
+/*
+ * Atmel STK541
+ */
+#define ATMEL_VID 0x03eb /* Vendor ID */
+#define STK541_PID 0x2109 /* Zigbee Controller */
+
+/*
+ * Blackfin gnICE JTAG
+ * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
+ */
+#define ADI_VID 0x0456
+#define ADI_GNICE_PID 0xF000
+#define ADI_GNICEPLUS_PID 0xF001
+
+/*
+ * RATOC REX-USB60F
+ */
+#define RATOC_VENDOR_ID 0x0584
+#define RATOC_PRODUCT_ID_USB60F 0xb020
+
+/*
+ * Definitions for B&B Electronics products.
+ */
+#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
+#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
+#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
+#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
+#define BANDB_USOPTL4_PID 0xAC11
+#define BANDB_USPTL4_PID 0xAC12
+#define BANDB_USO9ML2DR_2_PID 0xAC16
+#define BANDB_USO9ML2DR_PID 0xAC17
+#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
+#define BANDB_USOPTL4DR_PID 0xAC19
+#define BANDB_485USB9F_2W_PID 0xAC25
+#define BANDB_485USB9F_4W_PID 0xAC26
+#define BANDB_232USB9M_PID 0xAC27
+#define BANDB_485USBTB_2W_PID 0xAC33
+#define BANDB_485USBTB_4W_PID 0xAC34
+#define BANDB_TTL5USB9M_PID 0xAC49
+#define BANDB_TTL3USB9M_PID 0xAC50
+#define BANDB_ZZ_PROG1_USB_PID 0xBA02
+
+/*
+ * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
+ */
+#define INTREPID_VID 0x093C
+#define INTREPID_VALUECAN_PID 0x0601
+#define INTREPID_NEOVI_PID 0x0701
+
+/*
+ * Definitions for ID TECH (www.idt-net.com) devices
+ */
+#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
+#define IDTECH_IDT1221U_PID 0x0300 /* IDT1221U USB to RS-232 adapter */
+
+/*
+ * Definitions for Omnidirectional Control Technology, Inc. devices
+ */
+#define OCT_VID 0x0B39 /* OCT vendor ID */
+/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
+/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
+/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
+#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
+
+/*
+ * Icom ID-1 digital transceiver
+ */
+
+#define ICOM_ID1_VID 0x0C26
+#define ICOM_ID1_PID 0x0004
+
+/*
+ * GN Otometrics (http://www.otometrics.com)
+ * Submitted by Ville Sundberg.
+ */
+#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */
+#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */
+
+/*
+ * The following are the values for the Sealevel SeaLINK+ adapters.
+ * (Original list sent by Tuan Hoang. Ian Abbott renamed the macros and
+ * removed some PIDs that don't seem to match any existing products.)
+ */
+#define SEALEVEL_VID 0x0c52 /* Sealevel Vendor ID */
+#define SEALEVEL_2101_PID 0x2101 /* SeaLINK+232 (2101/2105) */
+#define SEALEVEL_2102_PID 0x2102 /* SeaLINK+485 (2102) */
+#define SEALEVEL_2103_PID 0x2103 /* SeaLINK+232I (2103) */
+#define SEALEVEL_2104_PID 0x2104 /* SeaLINK+485I (2104) */
+#define SEALEVEL_2106_PID 0x9020 /* SeaLINK+422 (2106) */
+#define SEALEVEL_2201_1_PID 0x2211 /* SeaPORT+2/232 (2201) Port 1 */
+#define SEALEVEL_2201_2_PID 0x2221 /* SeaPORT+2/232 (2201) Port 2 */
+#define SEALEVEL_2202_1_PID 0x2212 /* SeaPORT+2/485 (2202) Port 1 */
+#define SEALEVEL_2202_2_PID 0x2222 /* SeaPORT+2/485 (2202) Port 2 */
+#define SEALEVEL_2203_1_PID 0x2213 /* SeaPORT+2 (2203) Port 1 */
+#define SEALEVEL_2203_2_PID 0x2223 /* SeaPORT+2 (2203) Port 2 */
+#define SEALEVEL_2401_1_PID 0x2411 /* SeaPORT+4/232 (2401) Port 1 */
+#define SEALEVEL_2401_2_PID 0x2421 /* SeaPORT+4/232 (2401) Port 2 */
+#define SEALEVEL_2401_3_PID 0x2431 /* SeaPORT+4/232 (2401) Port 3 */
+#define SEALEVEL_2401_4_PID 0x2441 /* SeaPORT+4/232 (2401) Port 4 */
+#define SEALEVEL_2402_1_PID 0x2412 /* SeaPORT+4/485 (2402) Port 1 */
+#define SEALEVEL_2402_2_PID 0x2422 /* SeaPORT+4/485 (2402) Port 2 */
+#define SEALEVEL_2402_3_PID 0x2432 /* SeaPORT+4/485 (2402) Port 3 */
+#define SEALEVEL_2402_4_PID 0x2442 /* SeaPORT+4/485 (2402) Port 4 */
+#define SEALEVEL_2403_1_PID 0x2413 /* SeaPORT+4 (2403) Port 1 */
+#define SEALEVEL_2403_2_PID 0x2423 /* SeaPORT+4 (2403) Port 2 */
+#define SEALEVEL_2403_3_PID 0x2433 /* SeaPORT+4 (2403) Port 3 */
+#define SEALEVEL_2403_4_PID 0x2443 /* SeaPORT+4 (2403) Port 4 */
+#define SEALEVEL_2801_1_PID 0X2811 /* SeaLINK+8/232 (2801) Port 1 */
+#define SEALEVEL_2801_2_PID 0X2821 /* SeaLINK+8/232 (2801) Port 2 */
+#define SEALEVEL_2801_3_PID 0X2831 /* SeaLINK+8/232 (2801) Port 3 */
+#define SEALEVEL_2801_4_PID 0X2841 /* SeaLINK+8/232 (2801) Port 4 */
+#define SEALEVEL_2801_5_PID 0X2851 /* SeaLINK+8/232 (2801) Port 5 */
+#define SEALEVEL_2801_6_PID 0X2861 /* SeaLINK+8/232 (2801) Port 6 */
+#define SEALEVEL_2801_7_PID 0X2871 /* SeaLINK+8/232 (2801) Port 7 */
+#define SEALEVEL_2801_8_PID 0X2881 /* SeaLINK+8/232 (2801) Port 8 */
+#define SEALEVEL_2802_1_PID 0X2812 /* SeaLINK+8/485 (2802) Port 1 */
+#define SEALEVEL_2802_2_PID 0X2822 /* SeaLINK+8/485 (2802) Port 2 */
+#define SEALEVEL_2802_3_PID 0X2832 /* SeaLINK+8/485 (2802) Port 3 */
+#define SEALEVEL_2802_4_PID 0X2842 /* SeaLINK+8/485 (2802) Port 4 */
+#define SEALEVEL_2802_5_PID 0X2852 /* SeaLINK+8/485 (2802) Port 5 */
+#define SEALEVEL_2802_6_PID 0X2862 /* SeaLINK+8/485 (2802) Port 6 */
+#define SEALEVEL_2802_7_PID 0X2872 /* SeaLINK+8/485 (2802) Port 7 */
+#define SEALEVEL_2802_8_PID 0X2882 /* SeaLINK+8/485 (2802) Port 8 */
+#define SEALEVEL_2803_1_PID 0X2813 /* SeaLINK+8 (2803) Port 1 */
+#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
+#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
+#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
+#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
+#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
+#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
+#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
+
+/*
+ * JETI SPECTROMETER SPECBOS 1201
+ * http://www.jeti.com/products/sys/scb/scb1201.php
+ */
+#define JETI_VID 0x0c6c
+#define JETI_SPC1201_PID 0x04b2
+
+/*
+ * FTDI USB UART chips used in construction projects from the
+ * Elektor Electronics magazine (http://elektor-electronics.co.uk)
+ */
+#define ELEKTOR_VID 0x0C7D
+#define ELEKTOR_FT323R_PID 0x0005 /* RFID-Reader, issue 09-2006 */
+
+/*
+ * Posiflex inc retail equipment (http://www.posiflex.com.tw)
+ */
+#define POSIFLEX_VID 0x0d3a /* Vendor ID */
+#define POSIFLEX_PP7000_PID 0x0300 /* PP-7000II thermal printer */
+
+/*
+ * The following are the values for two KOBIL chipcard terminals.
+ */
+#define KOBIL_VID 0x0d46 /* KOBIL Vendor ID */
+#define KOBIL_CONV_B1_PID 0x2020 /* KOBIL Konverter for B1 */
+#define KOBIL_CONV_KAAN_PID 0x2021 /* KOBIL_Konverter for KAAN */
+
+#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */
+#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
+
+/*
+ * Falcom Wireless Communications GmbH
+ */
+#define FALCOM_VID 0x0F94 /* Vendor Id */
+#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
+#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
+
+/* Larsen and Brusgaard AltiTrack/USBtrack */
+#define LARSENBRUSGAARD_VID 0x0FD8
+#define LB_ALTITRACK_PID 0x0001
+
+/*
+ * TTi (Thurlby Thandar Instruments)
+ */
+#define TTI_VID 0x103E /* Vendor Id */
+#define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */
+
+/* Interbiometrics USB I/O Board */
+/* Developed for Interbiometrics by Rudolf Gugler */
+#define INTERBIOMETRICS_VID 0x1209
+#define INTERBIOMETRICS_IOBOARD_PID 0x1002
+#define INTERBIOMETRICS_MINI_IOBOARD_PID 0x1006
+
+/*
+ * Testo products (http://www.testo.com/)
+ * Submitted by Colin Leroy
+ */
+#define TESTO_VID 0x128D
+#define TESTO_USB_INTERFACE_PID 0x0001
+
+/*
+ * Mobility Electronics products.
+ */
+#define MOBILITY_VID 0x1342
+#define MOBILITY_USB_SERIAL_PID 0x0202 /* EasiDock USB 200 serial */
+
+/*
+ * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
+ * Submitted by Harald Welte <laforge@openmoko.org>
+ */
+#define FIC_VID 0x1457
+#define FIC_NEO1973_DEBUG_PID 0x5118
+
+/* Olimex */
+#define OLIMEX_VID 0x15BA
+#define OLIMEX_ARM_USB_OCD_PID 0x0003
+
+/*
+ * Telldus Technologies
+ */
+#define TELLDUS_VID 0x1781 /* Vendor ID */
+#define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
+
+/*
+ * Bayer Ascensia Contour blood glucose meter USB-converter cable.
+ * http://winglucofacts.com/cables/
+ */
+#define BAYER_VID 0x1A79
+#define BAYER_CONTOUR_CABLE_PID 0x6001
+
+/*
+ * The following are the values for the Matrix Orbital FTDI Range
+ * Anything in this range will use an FT232RL.
+ */
+#define MTXORB_VID 0x1B3D
+#define MTXORB_FTDI_RANGE_0100_PID 0x0100
+#define MTXORB_FTDI_RANGE_0101_PID 0x0101
+#define MTXORB_FTDI_RANGE_0102_PID 0x0102
+#define MTXORB_FTDI_RANGE_0103_PID 0x0103
+#define MTXORB_FTDI_RANGE_0104_PID 0x0104
+#define MTXORB_FTDI_RANGE_0105_PID 0x0105
+#define MTXORB_FTDI_RANGE_0106_PID 0x0106
+#define MTXORB_FTDI_RANGE_0107_PID 0x0107
+#define MTXORB_FTDI_RANGE_0108_PID 0x0108
+#define MTXORB_FTDI_RANGE_0109_PID 0x0109
+#define MTXORB_FTDI_RANGE_010A_PID 0x010A
+#define MTXORB_FTDI_RANGE_010B_PID 0x010B
+#define MTXORB_FTDI_RANGE_010C_PID 0x010C
+#define MTXORB_FTDI_RANGE_010D_PID 0x010D
+#define MTXORB_FTDI_RANGE_010E_PID 0x010E
+#define MTXORB_FTDI_RANGE_010F_PID 0x010F
+#define MTXORB_FTDI_RANGE_0110_PID 0x0110
+#define MTXORB_FTDI_RANGE_0111_PID 0x0111
+#define MTXORB_FTDI_RANGE_0112_PID 0x0112
+#define MTXORB_FTDI_RANGE_0113_PID 0x0113
+#define MTXORB_FTDI_RANGE_0114_PID 0x0114
+#define MTXORB_FTDI_RANGE_0115_PID 0x0115
+#define MTXORB_FTDI_RANGE_0116_PID 0x0116
+#define MTXORB_FTDI_RANGE_0117_PID 0x0117
+#define MTXORB_FTDI_RANGE_0118_PID 0x0118
+#define MTXORB_FTDI_RANGE_0119_PID 0x0119
+#define MTXORB_FTDI_RANGE_011A_PID 0x011A
+#define MTXORB_FTDI_RANGE_011B_PID 0x011B
+#define MTXORB_FTDI_RANGE_011C_PID 0x011C
+#define MTXORB_FTDI_RANGE_011D_PID 0x011D
+#define MTXORB_FTDI_RANGE_011E_PID 0x011E
+#define MTXORB_FTDI_RANGE_011F_PID 0x011F
+#define MTXORB_FTDI_RANGE_0120_PID 0x0120
+#define MTXORB_FTDI_RANGE_0121_PID 0x0121
+#define MTXORB_FTDI_RANGE_0122_PID 0x0122
+#define MTXORB_FTDI_RANGE_0123_PID 0x0123
+#define MTXORB_FTDI_RANGE_0124_PID 0x0124
+#define MTXORB_FTDI_RANGE_0125_PID 0x0125
+#define MTXORB_FTDI_RANGE_0126_PID 0x0126
+#define MTXORB_FTDI_RANGE_0127_PID 0x0127
+#define MTXORB_FTDI_RANGE_0128_PID 0x0128
+#define MTXORB_FTDI_RANGE_0129_PID 0x0129
+#define MTXORB_FTDI_RANGE_012A_PID 0x012A
+#define MTXORB_FTDI_RANGE_012B_PID 0x012B
+#define MTXORB_FTDI_RANGE_012C_PID 0x012C
+#define MTXORB_FTDI_RANGE_012D_PID 0x012D
+#define MTXORB_FTDI_RANGE_012E_PID 0x012E
+#define MTXORB_FTDI_RANGE_012F_PID 0x012F
+#define MTXORB_FTDI_RANGE_0130_PID 0x0130
+#define MTXORB_FTDI_RANGE_0131_PID 0x0131
+#define MTXORB_FTDI_RANGE_0132_PID 0x0132
+#define MTXORB_FTDI_RANGE_0133_PID 0x0133
+#define MTXORB_FTDI_RANGE_0134_PID 0x0134
+#define MTXORB_FTDI_RANGE_0135_PID 0x0135
+#define MTXORB_FTDI_RANGE_0136_PID 0x0136
+#define MTXORB_FTDI_RANGE_0137_PID 0x0137
+#define MTXORB_FTDI_RANGE_0138_PID 0x0138
+#define MTXORB_FTDI_RANGE_0139_PID 0x0139
+#define MTXORB_FTDI_RANGE_013A_PID 0x013A
+#define MTXORB_FTDI_RANGE_013B_PID 0x013B
+#define MTXORB_FTDI_RANGE_013C_PID 0x013C
+#define MTXORB_FTDI_RANGE_013D_PID 0x013D
+#define MTXORB_FTDI_RANGE_013E_PID 0x013E
+#define MTXORB_FTDI_RANGE_013F_PID 0x013F
+#define MTXORB_FTDI_RANGE_0140_PID 0x0140
+#define MTXORB_FTDI_RANGE_0141_PID 0x0141
+#define MTXORB_FTDI_RANGE_0142_PID 0x0142
+#define MTXORB_FTDI_RANGE_0143_PID 0x0143
+#define MTXORB_FTDI_RANGE_0144_PID 0x0144
+#define MTXORB_FTDI_RANGE_0145_PID 0x0145
+#define MTXORB_FTDI_RANGE_0146_PID 0x0146
+#define MTXORB_FTDI_RANGE_0147_PID 0x0147
+#define MTXORB_FTDI_RANGE_0148_PID 0x0148
+#define MTXORB_FTDI_RANGE_0149_PID 0x0149
+#define MTXORB_FTDI_RANGE_014A_PID 0x014A
+#define MTXORB_FTDI_RANGE_014B_PID 0x014B
+#define MTXORB_FTDI_RANGE_014C_PID 0x014C
+#define MTXORB_FTDI_RANGE_014D_PID 0x014D
+#define MTXORB_FTDI_RANGE_014E_PID 0x014E
+#define MTXORB_FTDI_RANGE_014F_PID 0x014F
+#define MTXORB_FTDI_RANGE_0150_PID 0x0150
+#define MTXORB_FTDI_RANGE_0151_PID 0x0151
+#define MTXORB_FTDI_RANGE_0152_PID 0x0152
+#define MTXORB_FTDI_RANGE_0153_PID 0x0153
+#define MTXORB_FTDI_RANGE_0154_PID 0x0154
+#define MTXORB_FTDI_RANGE_0155_PID 0x0155
+#define MTXORB_FTDI_RANGE_0156_PID 0x0156
+#define MTXORB_FTDI_RANGE_0157_PID 0x0157
+#define MTXORB_FTDI_RANGE_0158_PID 0x0158
+#define MTXORB_FTDI_RANGE_0159_PID 0x0159
+#define MTXORB_FTDI_RANGE_015A_PID 0x015A
+#define MTXORB_FTDI_RANGE_015B_PID 0x015B
+#define MTXORB_FTDI_RANGE_015C_PID 0x015C
+#define MTXORB_FTDI_RANGE_015D_PID 0x015D
+#define MTXORB_FTDI_RANGE_015E_PID 0x015E
+#define MTXORB_FTDI_RANGE_015F_PID 0x015F
+#define MTXORB_FTDI_RANGE_0160_PID 0x0160
+#define MTXORB_FTDI_RANGE_0161_PID 0x0161
+#define MTXORB_FTDI_RANGE_0162_PID 0x0162
+#define MTXORB_FTDI_RANGE_0163_PID 0x0163
+#define MTXORB_FTDI_RANGE_0164_PID 0x0164
+#define MTXORB_FTDI_RANGE_0165_PID 0x0165
+#define MTXORB_FTDI_RANGE_0166_PID 0x0166
+#define MTXORB_FTDI_RANGE_0167_PID 0x0167
+#define MTXORB_FTDI_RANGE_0168_PID 0x0168
+#define MTXORB_FTDI_RANGE_0169_PID 0x0169
+#define MTXORB_FTDI_RANGE_016A_PID 0x016A
+#define MTXORB_FTDI_RANGE_016B_PID 0x016B
+#define MTXORB_FTDI_RANGE_016C_PID 0x016C
+#define MTXORB_FTDI_RANGE_016D_PID 0x016D
+#define MTXORB_FTDI_RANGE_016E_PID 0x016E
+#define MTXORB_FTDI_RANGE_016F_PID 0x016F
+#define MTXORB_FTDI_RANGE_0170_PID 0x0170
+#define MTXORB_FTDI_RANGE_0171_PID 0x0171
+#define MTXORB_FTDI_RANGE_0172_PID 0x0172
+#define MTXORB_FTDI_RANGE_0173_PID 0x0173
+#define MTXORB_FTDI_RANGE_0174_PID 0x0174
+#define MTXORB_FTDI_RANGE_0175_PID 0x0175
+#define MTXORB_FTDI_RANGE_0176_PID 0x0176
+#define MTXORB_FTDI_RANGE_0177_PID 0x0177
+#define MTXORB_FTDI_RANGE_0178_PID 0x0178
+#define MTXORB_FTDI_RANGE_0179_PID 0x0179
+#define MTXORB_FTDI_RANGE_017A_PID 0x017A
+#define MTXORB_FTDI_RANGE_017B_PID 0x017B
+#define MTXORB_FTDI_RANGE_017C_PID 0x017C
+#define MTXORB_FTDI_RANGE_017D_PID 0x017D
+#define MTXORB_FTDI_RANGE_017E_PID 0x017E
+#define MTXORB_FTDI_RANGE_017F_PID 0x017F
+#define MTXORB_FTDI_RANGE_0180_PID 0x0180
+#define MTXORB_FTDI_RANGE_0181_PID 0x0181
+#define MTXORB_FTDI_RANGE_0182_PID 0x0182
+#define MTXORB_FTDI_RANGE_0183_PID 0x0183
+#define MTXORB_FTDI_RANGE_0184_PID 0x0184
+#define MTXORB_FTDI_RANGE_0185_PID 0x0185
+#define MTXORB_FTDI_RANGE_0186_PID 0x0186
+#define MTXORB_FTDI_RANGE_0187_PID 0x0187
+#define MTXORB_FTDI_RANGE_0188_PID 0x0188
+#define MTXORB_FTDI_RANGE_0189_PID 0x0189
+#define MTXORB_FTDI_RANGE_018A_PID 0x018A
+#define MTXORB_FTDI_RANGE_018B_PID 0x018B
+#define MTXORB_FTDI_RANGE_018C_PID 0x018C
+#define MTXORB_FTDI_RANGE_018D_PID 0x018D
+#define MTXORB_FTDI_RANGE_018E_PID 0x018E
+#define MTXORB_FTDI_RANGE_018F_PID 0x018F
+#define MTXORB_FTDI_RANGE_0190_PID 0x0190
+#define MTXORB_FTDI_RANGE_0191_PID 0x0191
+#define MTXORB_FTDI_RANGE_0192_PID 0x0192
+#define MTXORB_FTDI_RANGE_0193_PID 0x0193
+#define MTXORB_FTDI_RANGE_0194_PID 0x0194
+#define MTXORB_FTDI_RANGE_0195_PID 0x0195
+#define MTXORB_FTDI_RANGE_0196_PID 0x0196
+#define MTXORB_FTDI_RANGE_0197_PID 0x0197
+#define MTXORB_FTDI_RANGE_0198_PID 0x0198
+#define MTXORB_FTDI_RANGE_0199_PID 0x0199
+#define MTXORB_FTDI_RANGE_019A_PID 0x019A
+#define MTXORB_FTDI_RANGE_019B_PID 0x019B
+#define MTXORB_FTDI_RANGE_019C_PID 0x019C
+#define MTXORB_FTDI_RANGE_019D_PID 0x019D
+#define MTXORB_FTDI_RANGE_019E_PID 0x019E
+#define MTXORB_FTDI_RANGE_019F_PID 0x019F
+#define MTXORB_FTDI_RANGE_01A0_PID 0x01A0
+#define MTXORB_FTDI_RANGE_01A1_PID 0x01A1
+#define MTXORB_FTDI_RANGE_01A2_PID 0x01A2
+#define MTXORB_FTDI_RANGE_01A3_PID 0x01A3
+#define MTXORB_FTDI_RANGE_01A4_PID 0x01A4
+#define MTXORB_FTDI_RANGE_01A5_PID 0x01A5
+#define MTXORB_FTDI_RANGE_01A6_PID 0x01A6
+#define MTXORB_FTDI_RANGE_01A7_PID 0x01A7
+#define MTXORB_FTDI_RANGE_01A8_PID 0x01A8
+#define MTXORB_FTDI_RANGE_01A9_PID 0x01A9
+#define MTXORB_FTDI_RANGE_01AA_PID 0x01AA
+#define MTXORB_FTDI_RANGE_01AB_PID 0x01AB
+#define MTXORB_FTDI_RANGE_01AC_PID 0x01AC
+#define MTXORB_FTDI_RANGE_01AD_PID 0x01AD
+#define MTXORB_FTDI_RANGE_01AE_PID 0x01AE
+#define MTXORB_FTDI_RANGE_01AF_PID 0x01AF
+#define MTXORB_FTDI_RANGE_01B0_PID 0x01B0
+#define MTXORB_FTDI_RANGE_01B1_PID 0x01B1
+#define MTXORB_FTDI_RANGE_01B2_PID 0x01B2
+#define MTXORB_FTDI_RANGE_01B3_PID 0x01B3
+#define MTXORB_FTDI_RANGE_01B4_PID 0x01B4
+#define MTXORB_FTDI_RANGE_01B5_PID 0x01B5
+#define MTXORB_FTDI_RANGE_01B6_PID 0x01B6
+#define MTXORB_FTDI_RANGE_01B7_PID 0x01B7
+#define MTXORB_FTDI_RANGE_01B8_PID 0x01B8
+#define MTXORB_FTDI_RANGE_01B9_PID 0x01B9
+#define MTXORB_FTDI_RANGE_01BA_PID 0x01BA
+#define MTXORB_FTDI_RANGE_01BB_PID 0x01BB
+#define MTXORB_FTDI_RANGE_01BC_PID 0x01BC
+#define MTXORB_FTDI_RANGE_01BD_PID 0x01BD
+#define MTXORB_FTDI_RANGE_01BE_PID 0x01BE
+#define MTXORB_FTDI_RANGE_01BF_PID 0x01BF
+#define MTXORB_FTDI_RANGE_01C0_PID 0x01C0
+#define MTXORB_FTDI_RANGE_01C1_PID 0x01C1
+#define MTXORB_FTDI_RANGE_01C2_PID 0x01C2
+#define MTXORB_FTDI_RANGE_01C3_PID 0x01C3
+#define MTXORB_FTDI_RANGE_01C4_PID 0x01C4
+#define MTXORB_FTDI_RANGE_01C5_PID 0x01C5
+#define MTXORB_FTDI_RANGE_01C6_PID 0x01C6
+#define MTXORB_FTDI_RANGE_01C7_PID 0x01C7
+#define MTXORB_FTDI_RANGE_01C8_PID 0x01C8
+#define MTXORB_FTDI_RANGE_01C9_PID 0x01C9
+#define MTXORB_FTDI_RANGE_01CA_PID 0x01CA
+#define MTXORB_FTDI_RANGE_01CB_PID 0x01CB
+#define MTXORB_FTDI_RANGE_01CC_PID 0x01CC
+#define MTXORB_FTDI_RANGE_01CD_PID 0x01CD
+#define MTXORB_FTDI_RANGE_01CE_PID 0x01CE
+#define MTXORB_FTDI_RANGE_01CF_PID 0x01CF
+#define MTXORB_FTDI_RANGE_01D0_PID 0x01D0
+#define MTXORB_FTDI_RANGE_01D1_PID 0x01D1
+#define MTXORB_FTDI_RANGE_01D2_PID 0x01D2
+#define MTXORB_FTDI_RANGE_01D3_PID 0x01D3
+#define MTXORB_FTDI_RANGE_01D4_PID 0x01D4
+#define MTXORB_FTDI_RANGE_01D5_PID 0x01D5
+#define MTXORB_FTDI_RANGE_01D6_PID 0x01D6
+#define MTXORB_FTDI_RANGE_01D7_PID 0x01D7
+#define MTXORB_FTDI_RANGE_01D8_PID 0x01D8
+#define MTXORB_FTDI_RANGE_01D9_PID 0x01D9
+#define MTXORB_FTDI_RANGE_01DA_PID 0x01DA
+#define MTXORB_FTDI_RANGE_01DB_PID 0x01DB
+#define MTXORB_FTDI_RANGE_01DC_PID 0x01DC
+#define MTXORB_FTDI_RANGE_01DD_PID 0x01DD
+#define MTXORB_FTDI_RANGE_01DE_PID 0x01DE
+#define MTXORB_FTDI_RANGE_01DF_PID 0x01DF
+#define MTXORB_FTDI_RANGE_01E0_PID 0x01E0
+#define MTXORB_FTDI_RANGE_01E1_PID 0x01E1
+#define MTXORB_FTDI_RANGE_01E2_PID 0x01E2
+#define MTXORB_FTDI_RANGE_01E3_PID 0x01E3
+#define MTXORB_FTDI_RANGE_01E4_PID 0x01E4
+#define MTXORB_FTDI_RANGE_01E5_PID 0x01E5
+#define MTXORB_FTDI_RANGE_01E6_PID 0x01E6
+#define MTXORB_FTDI_RANGE_01E7_PID 0x01E7
+#define MTXORB_FTDI_RANGE_01E8_PID 0x01E8
+#define MTXORB_FTDI_RANGE_01E9_PID 0x01E9
+#define MTXORB_FTDI_RANGE_01EA_PID 0x01EA
+#define MTXORB_FTDI_RANGE_01EB_PID 0x01EB
+#define MTXORB_FTDI_RANGE_01EC_PID 0x01EC
+#define MTXORB_FTDI_RANGE_01ED_PID 0x01ED
+#define MTXORB_FTDI_RANGE_01EE_PID 0x01EE
+#define MTXORB_FTDI_RANGE_01EF_PID 0x01EF
+#define MTXORB_FTDI_RANGE_01F0_PID 0x01F0
+#define MTXORB_FTDI_RANGE_01F1_PID 0x01F1
+#define MTXORB_FTDI_RANGE_01F2_PID 0x01F2
+#define MTXORB_FTDI_RANGE_01F3_PID 0x01F3
+#define MTXORB_FTDI_RANGE_01F4_PID 0x01F4
+#define MTXORB_FTDI_RANGE_01F5_PID 0x01F5
+#define MTXORB_FTDI_RANGE_01F6_PID 0x01F6
+#define MTXORB_FTDI_RANGE_01F7_PID 0x01F7
+#define MTXORB_FTDI_RANGE_01F8_PID 0x01F8
+#define MTXORB_FTDI_RANGE_01F9_PID 0x01F9
+#define MTXORB_FTDI_RANGE_01FA_PID 0x01FA
+#define MTXORB_FTDI_RANGE_01FB_PID 0x01FB
+#define MTXORB_FTDI_RANGE_01FC_PID 0x01FC
+#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
+#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
+#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
+
+
+
+/*
+ * The Mobility Lab (TML)
+ * Submitted by Pierre Castella
+ */
+#define TML_VID 0x1B91 /* Vendor ID */
+#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
+
+/* Alti-2 products http://www.alti-2.com */
+#define ALTI2_VID 0x1BC9
+#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
+
+/*
+ * Dresden Elektronic Sensor Terminal Board
+ */
+#define DE_VID 0x1cf1 /* Vendor ID */
+#define STB_PID 0x0001 /* Sensor Terminal Board */
+#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
+
+/*
+ * Papouch products (http://www.papouch.com/)
+ * Submitted by Folkert van Heusden
+ */
+
+#define PAPOUCH_VID 0x5050 /* Vendor ID */
+#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
+#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
+#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */
+
+/*
+ * Marvell SheevaPlug
+ */
+#define MARVELL_VID 0x9e88
+#define MARVELL_SHEEVAPLUG_PID 0x9e8f
+
+/*
+ * Evolution Robotics products (http://www.evolution.com/).
+ * Submitted by Shawn M. Lavelle.
+ */
+#define EVOLUTION_VID 0xDEEE /* Vendor ID */
+#define EVOLUTION_ER1_PID 0x0300 /* ER1 Control Module */
+#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
+#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
+#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
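
The header above only defines the IDs; they take effect when the ftdi_sio driver lists them
in its usb_device_id match table. Below is a minimal sketch of that consumption pattern; the
table name is invented and only three illustrative entries are shown, while the real, much
longer table lives in drivers/usb/serial/ftdi_sio.c and is not part of this hunk.

/* Sketch only: how the PID macros above are typically consumed */
#include <linux/module.h>
#include <linux/usb.h>
#include "ftdi_sio_ids.h"

static struct usb_device_id ftdi_ids_sketch[] = {
	{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },	/* FTDI's own VID/PID */
	{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID) },	/* third party reusing FTDI_VID */
	{ USB_DEVICE(ATMEL_VID, STK541_PID) },		/* third-party VID/PID combo */
	{ }						/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, ftdi_ids_sketch);
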
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index bbe005c..83443d6 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -276,7 +276,7 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port)
if (port->write_urb_busy)
start_io = false;
else {
- start_io = (__kfifo_len(port->write_fifo) != 0);
+ start_io = (kfifo_len(&port->write_fifo) != 0);
port->write_urb_busy = start_io;
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -285,7 +285,7 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port)
return 0;
data = port->write_urb->transfer_buffer;
- count = kfifo_get(port->write_fifo, data, port->bulk_out_size);
+ count = kfifo_out_locked(&port->write_fifo, data, port->bulk_out_size, &port->lock);
usb_serial_debug_data(debug, &port->dev, __func__, count, data);
/* set up our urb */
@@ -345,7 +345,7 @@ int usb_serial_generic_write(struct tty_struct *tty,
return usb_serial_multi_urb_write(tty, port,
buf, count);
- count = kfifo_put(port->write_fifo, buf, count);
+ count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
result = usb_serial_generic_write_start(port);
if (result >= 0)
@@ -370,7 +370,7 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
(serial->type->max_in_flight_urbs -
port->urbs_in_flight);
} else if (serial->num_bulk_out)
- room = port->write_fifo->size - __kfifo_len(port->write_fifo);
+ room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -386,12 +386,12 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
dbg("%s - port %d", __func__, port->number);
- if (serial->type->max_in_flight_urbs) {
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
+ if (serial->type->max_in_flight_urbs)
chars = port->tx_bytes_flight;
- spin_unlock_irqrestore(&port->lock, flags);
- } else if (serial->num_bulk_out)
- chars = kfifo_len(port->write_fifo);
+ else if (serial->num_bulk_out)
+ chars = kfifo_len(&port->write_fifo);
+ spin_unlock_irqrestore(&port->lock, flags);
dbg("%s - returns %d", __func__, chars);
return chars;
@@ -489,6 +489,8 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
dbg("%s - port %d", __func__, port->number);
if (port->serial->type->max_in_flight_urbs) {
+ kfree(urb->transfer_buffer);
+
spin_lock_irqsave(&port->lock, flags);
--port->urbs_in_flight;
port->tx_bytes_flight -= urb->transfer_buffer_length;
@@ -507,7 +509,7 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
if (status) {
dbg("%s - nonzero multi-urb write bulk status "
"received: %d", __func__, status);
- kfifo_reset(port->write_fifo);
+ kfifo_reset_out(&port->write_fifo);
} else
usb_serial_generic_write_start(port);
}
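
This generic.c hunk is the usb-serial side of the 2.6.33 kfifo rework noted in the merge
description: port->write_fifo becomes an embedded struct kfifo rather than a pointer, and the
locked helpers take the spinlock explicitly. The sketch below shows only the new calling
convention; demo_port and the function names are invented stand-ins, not the real
usb_serial_port paths.

#include <linux/kfifo.h>
#include <linux/spinlock.h>

struct demo_port {				/* hypothetical stand-in for usb_serial_port */
	struct kfifo	write_fifo;		/* embedded object, not a pointer */
	spinlock_t	lock;			/* assumed spin_lock_init()'d at probe time */
};

/* queue outgoing bytes; returns how many actually fitted */
static unsigned int demo_write(struct demo_port *p, const unsigned char *buf,
			       unsigned int count)
{
	return kfifo_in_locked(&p->write_fifo, buf, count, &p->lock);
}

/* pull bytes into a URB transfer buffer; replaces the old kfifo_get() */
static unsigned int demo_fill_urb(struct demo_port *p, unsigned char *dst,
				  unsigned int size)
{
	return kfifo_out_locked(&p->write_fifo, dst, size, &p->lock);
}

/* write_room() / chars_in_buffer() style accounting */
static unsigned int demo_room(struct demo_port *p)
{
	return kfifo_avail(&p->write_fifo);	/* was size - __kfifo_len() */
}

static unsigned int demo_pending(struct demo_port *p)
{
	return kfifo_len(&p->write_fifo);
}

kfifo_reset_out(), used in the error path of the hunk above, discards the data still queued
for output while leaving the fifo itself allocated.
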
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 485fa9c..2cfe245 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -127,8 +127,9 @@
#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
@@ -191,6 +192,7 @@ static struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
@@ -207,6 +209,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9a2b903..6e94a67 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -340,6 +340,10 @@ static int option_resume(struct usb_serial *serial);
#define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
+/* Haier products */
+#define HAIER_VENDOR_ID 0x201e
+#define HAIER_PRODUCT_CE100 0x2009
+
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -641,6 +645,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
+ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ac1b644..3eb6143 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
{ }
};
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 4543f35..33c85f7 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -595,8 +595,7 @@ static void port_release(struct device *dev)
usb_free_urb(port->write_urb);
usb_free_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_out_urb);
- if (!IS_ERR(port->write_fifo) && port->write_fifo)
- kfifo_free(port->write_fifo);
+ kfifo_free(&port->write_fifo);
kfree(port->bulk_in_buffer);
kfree(port->bulk_out_buffer);
kfree(port->interrupt_in_buffer);
@@ -939,9 +938,7 @@ int usb_serial_probe(struct usb_interface *interface,
dev_err(&interface->dev, "No free urbs available\n");
goto probe_error;
}
- port->write_fifo = kfifo_alloc(PAGE_SIZE, GFP_KERNEL,
- &port->lock);
- if (IS_ERR(port->write_fifo))
+ if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
port->bulk_out_size = buffer_size;
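
The allocation change above follows the same 2.6.33 kfifo API: kfifo_alloc() now takes the
embedded fifo plus a size and returns 0 or a negative errno instead of an ERR_PTR-encoded
pointer, so the IS_ERR() checks in probe and in port_release() go away. A minimal, hedged
sketch of the new alloc/free pairing (function names invented):

#include <linux/kfifo.h>
#include <linux/gfp.h>

static int demo_setup_write_fifo(struct kfifo *fifo)
{
	/* 0 on success, negative errno on failure; the driver passes PAGE_SIZE here */
	return kfifo_alloc(fifo, 4096, GFP_KERNEL);
}

static void demo_release_write_fifo(struct kfifo *fifo)
{
	kfifo_free(fifo);	/* no IS_ERR()/NULL guard needed any more */
}
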
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 64a0a2c..49575fba 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -941,7 +941,7 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999,
UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
"Microtech",
"USB-SCSI-DB25",
- US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
@@ -1807,13 +1807,6 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_GO_SLOW ),
-/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
-UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
- "INTOVA",
- "Pixtreme",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
* Mio Moov 330
*/
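
For orientation, each UNUSUAL_DEV() row in this table packs: idVendor, idProduct, a bcdDevice
low/high range, vendor and product strings, SCSI subclass and transport overrides
(US_SC_DEVICE / US_PR_DEVICE mean "use whatever the descriptors say"), an optional init hook,
and the quirk flags. The entry below is purely illustrative with made-up IDs and is not
present in the file:

/* Hypothetical example entry -- not a real device */
UNUSUAL_DEV(  0x1234, 0x5678, 0x0000, 0x9999,
		"Example Vendor",
		"Example Flash Reader",
		US_SC_DEVICE, US_PR_DEVICE, NULL,
		US_FL_FIX_CAPACITY ),
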
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5a53d4f..e9f9954 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -434,7 +434,8 @@ static void adjust_quirks(struct us_data *us)
u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
unsigned f = 0;
- unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY |
+ unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
+ US_FL_FIX_CAPACITY |
US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 2051c9d..b7687c5 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2245,9 +2245,6 @@ static int ext_setcolreg(unsigned int regno, unsigned int red,
if (regno > 255)
return 1;
- if (regno > 255)
- return 1;
-
switch (external_card_type) {
case IS_VGA:
OUTB(0x3c8, regno);
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 4c10edecf..86d95c22 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -85,7 +85,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
return error ? data->current_brightness : reg_val;
}
-static struct backlight_ops adp5520_bl_ops = {
+static const struct backlight_ops adp5520_bl_ops = {
.update_status = adp5520_bl_update_status,
.get_brightness = adp5520_bl_get_brightness,
};
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index 2c3bdfc..d769b0b 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
return 1;
}
-static struct backlight_ops adx_backlight_ops = {
+static const struct backlight_ops adx_backlight_ops = {
.options = 0,
.update_status = adx_backlight_update_status,
.get_brightness = adx_backlight_get_brightness,
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index 2cf7ba5..f625ffc 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
return pwm_channel_enable(&pwmbl->pwmc);
}
-static struct backlight_ops atmel_pwm_bl_ops = {
+static const struct backlight_ops atmel_pwm_bl_ops = {
.get_brightness = atmel_pwm_bl_get_intensity,
.update_status = atmel_pwm_bl_set_intensity,
};
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 6615ac7..18829cf 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
* ERR_PTR() or a pointer to the newly allocated device.
*/
struct backlight_device *backlight_device_register(const char *name,
- struct device *parent, void *devdata, struct backlight_ops *ops)
+ struct device *parent, void *devdata, const struct backlight_ops *ops)
{
struct backlight_device *new_bd;
int rc;
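
With backlight_device_register() now taking a const ops pointer, the per-driver tables
constified throughout the rest of this series can live in read-only data. A small, hedged
sketch of the resulting driver-side pattern (all demo_* names are invented):

#include <linux/backlight.h>
#include <linux/fb.h>

static int demo_bl_update_status(struct backlight_device *bl)
{
	/* push bl->props.brightness out to the hardware here */
	return 0;
}

static int demo_bl_get_brightness(struct backlight_device *bl)
{
	return bl->props.brightness;
}

static const struct backlight_ops demo_bl_ops = {	/* const is now accepted */
	.update_status	= demo_bl_update_status,
	.get_brightness	= demo_bl_get_brightness,
};

/* typical registration from a probe() routine:
 *	bl = backlight_device_register("demo-bl", dev, priv, &demo_bl_ops);
 *	if (IS_ERR(bl))
 *		return PTR_ERR(bl);
 */
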
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 9677494..b4bcf80 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
}
EXPORT_SYMBOL(corgi_lcd_limit_intensity);
-static struct backlight_ops corgi_bl_ops = {
+static const struct backlight_ops corgi_bl_ops = {
.get_brightness = corgi_bl_get_intensity,
.update_status = corgi_bl_update_status,
};
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index b9fe62b..da86db4 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
return intensity;
}
-static struct backlight_ops cr_backlight_ops = {
+static const struct backlight_ops cr_backlight_ops = {
.get_brightness = cr_backlight_get_intensity,
.update_status = cr_backlight_set_intensity,
};
@@ -201,7 +201,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
if (IS_ERR(ldp)) {
backlight_device_unregister(bdp);
pci_dev_put(lpc_dev);
- return PTR_ERR(bdp);
+ return PTR_ERR(ldp);
}
pci_read_config_dword(lpc_dev, CRVML_REG_GPIOBAR,
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index f2d76da..74cdc64 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -95,7 +95,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
return data->current_brightness;
}
-static struct backlight_ops da903x_backlight_ops = {
+static const struct backlight_ops da903x_backlight_ops = {
.update_status = da903x_backlight_update_status,
.get_brightness = da903x_backlight_get_brightness,
};
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 6d27f62..e6d348e 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
}
EXPORT_SYMBOL(corgibl_limit_intensity);
-static struct backlight_ops genericbl_ops = {
+static const struct backlight_ops genericbl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = genericbl_get_intensity,
.update_status = genericbl_send_intensity,
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 7fb4eef..f7cc528 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
return current_intensity;
}
-static struct backlight_ops hp680bl_ops = {
+static const struct backlight_ops hp680bl_ops = {
.get_brightness = hp680bl_get_intensity,
.update_status = hp680bl_set_intensity,
};
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 7aed256..db9071f 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -93,7 +93,7 @@ out:
return ret;
}
-static struct backlight_ops jornada_bl_ops = {
+static const struct backlight_ops jornada_bl_ops = {
.get_brightness = jornada_bl_get_brightness,
.update_status = jornada_bl_update_status,
.options = BL_CORE_SUSPENDRESUME,
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index a38fda1..939e7b8 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
return kb3886bl_intensity;
}
-static struct backlight_ops kb3886bl_ops = {
+static const struct backlight_ops kb3886bl_ops = {
.get_brightness = kb3886bl_get_intensity,
.update_status = kb3886bl_send_intensity,
};
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 6b488b8..00a9591 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
return current_intensity;
}
-static struct backlight_ops locomobl_data = {
+static const struct backlight_ops locomobl_data = {
.get_brightness = locomolcd_get_intensity,
.update_status = locomolcd_set_intensity,
};
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 9edb8d7..2e78b07 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -33,7 +33,7 @@ struct dmi_match_data {
unsigned long iostart;
unsigned long iolen;
/* Backlight operations structure. */
- struct backlight_ops backlight_ops;
+ const struct backlight_ops backlight_ops;
};
/* Module parameters. */
@@ -220,6 +220,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
},
{
.callback = mbp_dmi_match,
+ .ident = "MacBookPro 5,3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3"),
+ },
+ .driver_data = (void *)&nvidia_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBookPro 5,4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4"),
+ },
+ .driver_data = (void *)&nvidia_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
.ident = "MacBookPro 5,5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 8693e5f..a3a7f89 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
return bl->current_intensity;
}
-static struct backlight_ops omapbl_ops = {
+static const struct backlight_ops omapbl_ops = {
.get_brightness = omapbl_get_intensity,
.update_status = omapbl_update_status,
};
@@ -139,8 +139,6 @@ static int omapbl_probe(struct platform_device *pdev)
if (!pdata)
return -ENXIO;
- omapbl_ops.check_fb = pdata->check_fb;
-
bl = kzalloc(sizeof(struct omap_backlight), GFP_KERNEL);
if (unlikely(!bl))
return -ENOMEM;
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 9edaf24f..075786e 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
return intensity - HW_LEVEL_MIN;
}
-static struct backlight_ops progearbl_ops = {
+static const struct backlight_ops progearbl_ops = {
.get_brightness = progearbl_get_intensity,
.update_status = progearbl_set_intensity,
};
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 8871662..9d2ec2a 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -22,8 +22,10 @@
struct pwm_bl_data {
struct pwm_device *pwm;
+ struct device *dev;
unsigned int period;
- int (*notify)(int brightness);
+ int (*notify)(struct device *,
+ int brightness);
};
static int pwm_backlight_update_status(struct backlight_device *bl)
@@ -39,7 +41,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
brightness = 0;
if (pb->notify)
- brightness = pb->notify(brightness);
+ brightness = pb->notify(pb->dev, brightness);
if (brightness == 0) {
pwm_config(pb->pwm, 0, pb->period);
@@ -56,7 +58,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
return bl->props.brightness;
}
-static struct backlight_ops pwm_backlight_ops = {
+static const struct backlight_ops pwm_backlight_ops = {
.update_status = pwm_backlight_update_status,
.get_brightness = pwm_backlight_get_brightness,
};
@@ -88,6 +90,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->period = data->pwm_period_ns;
pb->notify = data->notify;
+ pb->dev = &pdev->dev;
pb->pwm = pwm_request(data->pwm_id, "backlight");
if (IS_ERR(pb->pwm)) {
@@ -146,7 +149,7 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
if (pb->notify)
- pb->notify(0);
+ pb->notify(pb->dev, 0);
pwm_config(pb->pwm, 0, pb->period);
pwm_disable(pb->pwm);
return 0;
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 43edbad..e14ce4d 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
return props->brightness;
}
-static struct backlight_ops bl_ops = {
+static const struct backlight_ops bl_ops = {
.get_brightness = tosa_bl_get_brightness,
.update_status = tosa_bl_update_status,
};
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 467bdb7..e32add3 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
return data->current_brightness;
}
-static struct backlight_ops wm831x_backlight_ops = {
+static const struct backlight_ops wm831x_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = wm831x_backlight_update_status,
.get_brightness = wm831x_backlight_get_brightness,
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index da7c01b..3a561df2 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1573,15 +1573,15 @@ cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
return err;
- err = pci_request_regions(dev, name);
- if (err)
- return err;
-
err = -ENOMEM;
cfb = cyberpro_alloc_fb_info(id->driver_data, name);
if (!cfb)
goto failed_release;
+ err = pci_request_regions(dev, cfb->fb.fix.id);
+ if (err)
+ goto failed_regions;
+
cfb->dev = dev;
cfb->region = pci_ioremap_bar(dev, 0);
if (!cfb->region)
@@ -1633,10 +1633,10 @@ cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
failed:
iounmap(cfb->region);
failed_ioremap:
+ pci_release_regions(dev);
+failed_regions:
cyberpro_free_fb_info(cfb);
failed_release:
- pci_release_regions(dev);
-
return err;
}
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index eb12182..d25df51 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
+static void efifb_destroy(struct fb_info *info)
+{
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ release_mem_region(info->aperture_base, info->aperture_size);
+ framebuffer_release(info);
+}
+
static struct fb_ops efifb_ops = {
.owner = THIS_MODULE,
+ .fb_destroy = efifb_destroy,
.fb_setcolreg = efifb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
info->par = NULL;
info->aperture_base = efifb_fix.smem_start;
- info->aperture_size = size_total;
+ info->aperture_size = size_remap;
info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 66358fa..b4b6dece 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -593,7 +593,8 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
*/
static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
{
- struct imxfb_info *fbi = platform_get_drvdata(dev);
+ struct fb_info *info = platform_get_drvdata(dev);
+ struct imxfb_info *fbi = info->par;
pr_debug("%s\n", __func__);
@@ -603,7 +604,8 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
static int imxfb_resume(struct platform_device *dev)
{
- struct imxfb_info *fbi = platform_get_drvdata(dev);
+ struct fb_info *info = platform_get_drvdata(dev);
+ struct imxfb_info *fbi = info->par;
pr_debug("%s\n", __func__);
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 054ef29..772ba3f 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -324,8 +324,11 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
unsigned long flags;
dma_cookie_t cookie;
- dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
- to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
+ if (mx3_fbi->txd)
+ dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
+ to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
+ else
+ dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi);
/* This enables the channel */
if (mx3_fbi->cookie < 0) {
@@ -646,6 +649,7 @@ static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t a
static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
{
+ dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value);
/* This might be board-specific */
mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
return;
@@ -1486,12 +1490,12 @@ static int mx3fb_probe(struct platform_device *pdev)
goto ersdc0;
}
+ mx3fb->backlight_level = 255;
+
ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
if (ret < 0)
goto eisdc0;
- mx3fb->backlight_level = 255;
-
return 0;
eisdc0:
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index c7c6455..e192b05 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -189,11 +189,6 @@ static struct {
struct omapfb_color_key color_key;
} dispc;
-static struct platform_device omapdss_device = {
- .name = "omapdss",
- .id = -1,
-};
-
static void enable_lcd_clocks(int enable);
static void inline dispc_write_reg(int idx, u32 val)
@@ -920,20 +915,20 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
static int get_dss_clocks(void)
{
- dispc.dss_ick = clk_get(&omapdss_device.dev, "ick");
+ dispc.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick");
if (IS_ERR(dispc.dss_ick)) {
dev_err(dispc.fbdev->dev, "can't get ick\n");
return PTR_ERR(dispc.dss_ick);
}
- dispc.dss1_fck = clk_get(&omapdss_device.dev, "dss1_fck");
+ dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck");
if (IS_ERR(dispc.dss1_fck)) {
dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
clk_put(dispc.dss_ick);
return PTR_ERR(dispc.dss1_fck);
}
- dispc.dss_54m_fck = clk_get(&omapdss_device.dev, "tv_fck");
+ dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_fck");
if (IS_ERR(dispc.dss_54m_fck)) {
dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
clk_put(dispc.dss_ick);
@@ -1385,12 +1380,6 @@ static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
int skip_init = 0;
int i;
- r = platform_device_register(&omapdss_device);
- if (r) {
- dev_err(fbdev->dev, "can't register omapdss device\n");
- return r;
- }
-
memset(&dispc, 0, sizeof(dispc));
dispc.base = ioremap(DISPC_BASE, SZ_1K);
@@ -1534,7 +1523,6 @@ static void omap_dispc_cleanup(void)
free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
put_dss_clocks();
iounmap(dispc.base);
- platform_device_unregister(&omapdss_device);
}
const struct lcd_ctrl omap2_int_ctrl = {
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c
index a9007c5..4802419 100644
--- a/drivers/video/omap/lcd_htcherald.c
+++ b/drivers/video/omap/lcd_htcherald.c
@@ -115,12 +115,12 @@ struct platform_driver htcherald_panel_driver = {
},
};
-static int htcherald_panel_drv_init(void)
+static int __init htcherald_panel_drv_init(void)
{
return platform_driver_register(&htcherald_panel_driver);
}
-static void htcherald_panel_drv_cleanup(void)
+static void __exit htcherald_panel_drv_cleanup(void)
{
platform_driver_unregister(&htcherald_panel_driver);
}
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c
index 5bb7f6f..0f5952c 100644
--- a/drivers/video/omap/lcd_ldp.c
+++ b/drivers/video/omap/lcd_ldp.c
@@ -24,7 +24,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <mach/gpio.h>
#include <plat/mux.h>
@@ -59,7 +59,7 @@
#define TWL4030_VPLL2_DEV_GRP 0x33
#define TWL4030_VPLL2_DEDICATED 0x36
-#define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v)
+#define t2_out(c, r, v) twl_i2c_write_u8(c, r, v)
static int ldp_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c
index 006c2fe..7e7a65c 100644
--- a/drivers/video/omap/lcd_omap2evm.c
+++ b/drivers/video/omap/lcd_omap2evm.c
@@ -24,7 +24,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <plat/mux.h>
#include <asm/mach-types.h>
@@ -61,9 +61,9 @@ static int omap2evm_panel_init(struct lcd_panel *panel,
gpio_direction_output(LCD_PANEL_LR, 1);
gpio_direction_output(LCD_PANEL_UD, 1);
- twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
+ twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
bklight_level = 100;
return 0;
@@ -101,7 +101,7 @@ static int omap2evm_bklight_setlevel(struct lcd_panel *panel,
u8 c;
if ((level >= 0) && (level <= 100)) {
c = (125 * (100 - level)) / 100 + 2;
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
bklight_level = level;
}
return 0;
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c
index fc503d8..ca75cc2 100644
--- a/drivers/video/omap/lcd_omap3beagle.c
+++ b/drivers/video/omap/lcd_omap3beagle.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <plat/mux.h>
#include <plat/mux.h>
diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c
index ae2edc4..06840da 100644
--- a/drivers/video/omap/lcd_omap3evm.c
+++ b/drivers/video/omap/lcd_omap3evm.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <plat/mux.h>
#include <asm/mach-types.h>
@@ -63,9 +63,9 @@ static int omap3evm_panel_init(struct lcd_panel *panel,
gpio_direction_output(LCD_PANEL_LR, 1);
gpio_direction_output(LCD_PANEL_UD, 1);
- twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
+ twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
bklight_level = 100;
return 0;
@@ -102,7 +102,7 @@ static int omap3evm_bklight_setlevel(struct lcd_panel *panel,
u8 c;
if ((level >= 0) && (level <= 100)) {
c = (125 * (100 - level)) / 100 + 2;
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
bklight_level = level;
}
return 0;
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c
index 56ee192..564933f 100644
--- a/drivers/video/omap/lcd_overo.c
+++ b/drivers/video/omap/lcd_overo.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <mach/gpio.h>
#include <plat/mux.h>
diff --git a/drivers/video/omap/omapfb.h b/drivers/video/omap/omapfb.h
index 46e4714..af3c9e5 100644
--- a/drivers/video/omap/omapfb.h
+++ b/drivers/video/omap/omapfb.h
@@ -203,6 +203,8 @@ struct omapfb_device {
struct omapfb_mem_desc mem_desc;
struct fb_info *fb_info[OMAPFB_PLANE_NUM];
+
+ struct platform_device *dssdev; /* dummy dev for clocks */
};
#ifdef CONFIG_ARCH_OMAP1
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index c7f59a5..2c4f470 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -83,6 +83,19 @@ static struct caps_table_struct color_caps[] = {
{ 1 << OMAPFB_COLOR_YUY422, "YUY422", },
};
+static void omapdss_release(struct device *dev)
+{
+}
+
+/* dummy device for clocks */
+static struct platform_device omapdss_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .release = omapdss_release,
+ },
+};
+
/*
* ---------------------------------------------------------------------------
* LCD panel
@@ -1700,6 +1713,7 @@ static int omapfb_do_probe(struct platform_device *pdev,
fbdev->dev = &pdev->dev;
fbdev->panel = panel;
+ fbdev->dssdev = &omapdss_device;
platform_set_drvdata(pdev, fbdev);
mutex_init(&fbdev->rqueue_mutex);
@@ -1814,8 +1828,16 @@ cleanup:
static int omapfb_probe(struct platform_device *pdev)
{
+ int r;
+
BUG_ON(fbdev_pdev != NULL);
+ r = platform_device_register(&omapdss_device);
+ if (r) {
+ dev_err(&pdev->dev, "can't register omapdss device\n");
+ return r;
+ }
+
/* Delay actual initialization until the LCD is registered */
fbdev_pdev = pdev;
if (fbdev_panel != NULL)
@@ -1843,6 +1865,9 @@ static int omapfb_remove(struct platform_device *pdev)
fbdev->state = OMAPFB_DISABLED;
omapfb_free_resources(fbdev, saved_state);
+ platform_device_unregister(&omapdss_device);
+ fbdev->dssdev = NULL;
+
return 0;
}
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index fed7b1b..1162603 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -83,13 +83,13 @@ static inline u32 rfbi_read_reg(int idx)
static int rfbi_get_clocks(void)
{
- rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick");
+ rfbi.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick");
if (IS_ERR(rfbi.dss_ick)) {
dev_err(rfbi.fbdev->dev, "can't get ick\n");
return PTR_ERR(rfbi.dss_ick);
}
- rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck");
+ rfbi.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck");
if (IS_ERR(rfbi.dss1_fck)) {
dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
clk_put(rfbi.dss_ick);
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 71d8dec..c63ce76 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -25,6 +25,13 @@ config OMAP2_DSS_DEBUG_SUPPORT
This enables debug messages. You need to enable printing
with 'debug' module parameter.
+config OMAP2_DSS_COLLECT_IRQ_STATS
+ bool "Collect DSS IRQ statistics"
+ depends on OMAP2_DSS_DEBUG_SUPPORT
+ default n
+ help
+ Collect DSS IRQ statistics, printable via debugfs
+
config OMAP2_DSS_RFBI
bool "RFBI support"
default n
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 29497a0..82918ee 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -124,6 +124,7 @@ static void restore_all_ctx(void)
dss_clk_disable_all_no_ctx();
}
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
/* CLOCKS */
static void core_dump_clocks(struct seq_file *s)
{
@@ -149,6 +150,7 @@ static void core_dump_clocks(struct seq_file *s)
clocks[i]->usecount);
}
}
+#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
static int dss_get_clock(struct clk **clock, const char *clk_name)
{
@@ -395,6 +397,14 @@ static int dss_initialize_debugfs(void)
debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
&dss_debug_dump_clocks, &dss_debug_fops);
+ debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
+ &dispc_dump_irqs, &dss_debug_fops);
+
+#ifdef CONFIG_OMAP2_DSS_DSI
+ debugfs_create_file("dsi_irq", S_IRUGO, dss_debugfs_dir,
+ &dsi_dump_irqs, &dss_debug_fops);
+#endif
+
debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
&dss_dump_regs, &dss_debug_fops);
debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 6dabf4b..de8bfba 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -148,6 +148,12 @@ static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES,
DISPC_VID_ATTRIBUTES(0),
DISPC_VID_ATTRIBUTES(1) };
+struct dispc_irq_stats {
+ unsigned long last_reset;
+ unsigned irq_count;
+ unsigned irqs[32];
+};
+
static struct {
void __iomem *base;
@@ -160,6 +166,11 @@ static struct {
struct work_struct error_work;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spinlock_t irq_stats_lock;
+ struct dispc_irq_stats irq_stats;
+#endif
} dispc;
static void _omap_dispc_set_irqs(void);
@@ -1443,7 +1454,10 @@ static unsigned long calc_fclk_five_taps(u16 width, u16 height,
do_div(tmp, 2 * out_height * ppl);
fclk = tmp;
- if (height > 2 * out_height && ppl != out_width) {
+ if (height > 2 * out_height) {
+ if (ppl == out_width)
+ return 0;
+
tmp = pclk * (height - 2 * out_height) * out_width;
do_div(tmp, 2 * out_height * (ppl - out_width));
fclk = max(fclk, (u32) tmp);
@@ -1623,7 +1637,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
DSSDBG("required fclk rate = %lu Hz\n", fclk);
DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
- if (fclk > dispc_fclk_rate()) {
+ if (!fclk || fclk > dispc_fclk_rate()) {
DSSERR("failed to set up scaling, "
"required fclk rate = %lu Hz, "
"current fclk rate = %lu Hz\n",
@@ -2247,6 +2261,50 @@ void dispc_dump_clocks(struct seq_file *s)
enable_clocks(0);
}
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+void dispc_dump_irqs(struct seq_file *s)
+{
+ unsigned long flags;
+ struct dispc_irq_stats stats;
+
+ spin_lock_irqsave(&dispc.irq_stats_lock, flags);
+
+ stats = dispc.irq_stats;
+ memset(&dispc.irq_stats, 0, sizeof(dispc.irq_stats));
+ dispc.irq_stats.last_reset = jiffies;
+
+ spin_unlock_irqrestore(&dispc.irq_stats_lock, flags);
+
+ seq_printf(s, "period %u ms\n",
+ jiffies_to_msecs(jiffies - stats.last_reset));
+
+ seq_printf(s, "irqs %d\n", stats.irq_count);
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
+
+ PIS(FRAMEDONE);
+ PIS(VSYNC);
+ PIS(EVSYNC_EVEN);
+ PIS(EVSYNC_ODD);
+ PIS(ACBIAS_COUNT_STAT);
+ PIS(PROG_LINE_NUM);
+ PIS(GFX_FIFO_UNDERFLOW);
+ PIS(GFX_END_WIN);
+ PIS(PAL_GAMMA_MASK);
+ PIS(OCP_ERR);
+ PIS(VID1_FIFO_UNDERFLOW);
+ PIS(VID1_END_WIN);
+ PIS(VID2_FIFO_UNDERFLOW);
+ PIS(VID2_END_WIN);
+ PIS(SYNC_LOST);
+ PIS(SYNC_LOST_DIGIT);
+ PIS(WAKEUP);
+#undef PIS
+}
+#else
+void dispc_dump_irqs(struct seq_file *s) { }
+#endif
+
void dispc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
@@ -2665,6 +2723,13 @@ void dispc_irq_handler(void)
irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock(&dispc.irq_stats_lock);
+ dispc.irq_stats.irq_count++;
+ dss_collect_irq_stats(irqstatus, dispc.irq_stats.irqs);
+ spin_unlock(&dispc.irq_stats_lock);
+#endif
+
#ifdef DEBUG
if (dss_debug)
print_irq_status(irqstatus);
@@ -3012,6 +3077,11 @@ int dispc_init(void)
spin_lock_init(&dispc.irq_lock);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock_init(&dispc.irq_stats_lock);
+ dispc.irq_stats.last_reset = jiffies;
+#endif
+
INIT_WORK(&dispc.error_work, dispc_error_worker);
dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 5936487..6122178 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -204,6 +204,14 @@ struct dsi_update_region {
struct omap_dss_device *device;
};
+struct dsi_irq_stats {
+ unsigned long last_reset;
+ unsigned irq_count;
+ unsigned dsi_irqs[32];
+ unsigned vc_irqs[4][32];
+ unsigned cio_irqs[32];
+};
+
static struct
{
void __iomem *base;
@@ -258,6 +266,11 @@ static struct
#endif
int debug_read;
int debug_write;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spinlock_t irq_stats_lock;
+ struct dsi_irq_stats irq_stats;
+#endif
} dsi;
#ifdef DEBUG
@@ -528,6 +541,12 @@ void dsi_irq_handler(void)
irqstatus = dsi_read_reg(DSI_IRQSTATUS);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock(&dsi.irq_stats_lock);
+ dsi.irq_stats.irq_count++;
+ dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs);
+#endif
+
if (irqstatus & DSI_IRQ_ERROR_MASK) {
DSSERR("DSI error, irqstatus %x\n", irqstatus);
print_irq_status(irqstatus);
@@ -549,6 +568,10 @@ void dsi_irq_handler(void)
vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]);
+#endif
+
if (vcstatus & DSI_VC_IRQ_BTA)
complete(&dsi.bta_completion);
@@ -568,6 +591,10 @@ void dsi_irq_handler(void)
if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
+#endif
+
dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
/* flush posted write */
dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
@@ -579,6 +606,10 @@ void dsi_irq_handler(void)
dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
/* flush posted write */
dsi_read_reg(DSI_IRQSTATUS);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_unlock(&dsi.irq_stats_lock);
+#endif
}
@@ -797,12 +828,12 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
/* PLL_PWR_STATUS */
while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
- udelay(1);
- if (t++ > 1000) {
+ if (++t > 1000) {
DSSERR("Failed to set DSI PLL power mode to %d\n",
state);
return -ENODEV;
}
+ udelay(1);
}
return 0;
@@ -1226,6 +1257,95 @@ void dsi_dump_clocks(struct seq_file *s)
enable_clocks(0);
}
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+void dsi_dump_irqs(struct seq_file *s)
+{
+ unsigned long flags;
+ struct dsi_irq_stats stats;
+
+ spin_lock_irqsave(&dsi.irq_stats_lock, flags);
+
+ stats = dsi.irq_stats;
+ memset(&dsi.irq_stats, 0, sizeof(dsi.irq_stats));
+ dsi.irq_stats.last_reset = jiffies;
+
+ spin_unlock_irqrestore(&dsi.irq_stats_lock, flags);
+
+ seq_printf(s, "period %u ms\n",
+ jiffies_to_msecs(jiffies - stats.last_reset));
+
+ seq_printf(s, "irqs %d\n", stats.irq_count);
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+
+ seq_printf(s, "-- DSI interrupts --\n");
+ PIS(VC0);
+ PIS(VC1);
+ PIS(VC2);
+ PIS(VC3);
+ PIS(WAKEUP);
+ PIS(RESYNC);
+ PIS(PLL_LOCK);
+ PIS(PLL_UNLOCK);
+ PIS(PLL_RECALL);
+ PIS(COMPLEXIO_ERR);
+ PIS(HS_TX_TIMEOUT);
+ PIS(LP_RX_TIMEOUT);
+ PIS(TE_TRIGGER);
+ PIS(ACK_TRIGGER);
+ PIS(SYNC_LOST);
+ PIS(LDO_POWER_GOOD);
+ PIS(TA_TIMEOUT);
+#undef PIS
+
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
+ stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+ stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+ stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+ stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+
+ seq_printf(s, "-- VC interrupts --\n");
+ PIS(CS);
+ PIS(ECC_CORR);
+ PIS(PACKET_SENT);
+ PIS(FIFO_TX_OVF);
+ PIS(FIFO_RX_OVF);
+ PIS(BTA);
+ PIS(ECC_NO_CORR);
+ PIS(FIFO_TX_UDF);
+ PIS(PP_BUSY_CHANGE);
+#undef PIS
+
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d\n", #x, \
+ stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+
+ seq_printf(s, "-- CIO interrupts --\n");
+ PIS(ERRSYNCESC1);
+ PIS(ERRSYNCESC2);
+ PIS(ERRSYNCESC3);
+ PIS(ERRESC1);
+ PIS(ERRESC2);
+ PIS(ERRESC3);
+ PIS(ERRCONTROL1);
+ PIS(ERRCONTROL2);
+ PIS(ERRCONTROL3);
+ PIS(STATEULPS1);
+ PIS(STATEULPS2);
+ PIS(STATEULPS3);
+ PIS(ERRCONTENTIONLP0_1);
+ PIS(ERRCONTENTIONLP1_1);
+ PIS(ERRCONTENTIONLP0_2);
+ PIS(ERRCONTENTIONLP1_2);
+ PIS(ERRCONTENTIONLP0_3);
+ PIS(ERRCONTENTIONLP1_3);
+ PIS(ULPSACTIVENOT_ALL0);
+ PIS(ULPSACTIVENOT_ALL1);
+#undef PIS
+}
+#endif
+
void dsi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
@@ -1321,12 +1441,12 @@ static int dsi_complexio_power(enum dsi_complexio_power_state state)
/* PWR_STATUS */
while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
- udelay(1);
- if (t++ > 1000) {
+ if (++t > 1000) {
DSSERR("failed to set complexio power state to "
"%d\n", state);
return -ENODEV;
}
+ udelay(1);
}
return 0;
@@ -1526,10 +1646,10 @@ static void dsi_complexio_uninit(void)
static int _dsi_wait_reset(void)
{
- int i = 0;
+ int t = 0;
while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
- if (i++ > 5) {
+ if (++t > 5) {
DSSERR("soft reset failed\n");
return -ENODEV;
}
@@ -1999,7 +2119,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
return -EINVAL;
}
- data_id = data_type | channel << 6;
+ data_id = data_type | dsi.vc[channel].dest_per << 6;
r = (data_id << 0) | (data << 8) | (ecc << 24);
@@ -2011,7 +2131,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
int dsi_vc_send_null(int channel)
{
u8 nullpkg[] = {0, 0, 0, 0};
- return dsi_vc_send_long(0, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
+ return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
}
EXPORT_SYMBOL(dsi_vc_send_null);
@@ -2058,7 +2178,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
int r;
if (dsi.debug_read)
- DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %u)\n", channel, dcs_cmd);
+ DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
if (r)
@@ -2586,7 +2706,6 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
/* using fifo not empty */
/* TX_FIFO_NOT_EMPTY */
while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
- udelay(1);
fifo_stalls++;
if (fifo_stalls > 0xfffff) {
DSSERR("fifo stalls overflow, pixels left %d\n",
@@ -2594,6 +2713,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
dsi_if_enable(0);
return -EIO;
}
+ udelay(1);
}
#elif 1
/* using fifo emptiness */
@@ -2812,11 +2932,15 @@ static int dsi_set_update_mode(struct omap_dss_device *dssdev,
static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
{
- int r;
- r = dssdev->driver->enable_te(dssdev, enable);
- /* XXX for some reason, DSI TE breaks if we don't wait here.
- * Panel bug? Needs more studying */
- msleep(100);
+ int r = 0;
+
+ if (dssdev->driver->enable_te) {
+ r = dssdev->driver->enable_te(dssdev, enable);
+ /* XXX for some reason, DSI TE breaks if we don't wait here.
+ * Panel bug? Needs more studying */
+ msleep(100);
+ }
+
return r;
}
@@ -3637,6 +3761,11 @@ int dsi_init(struct platform_device *pdev)
spin_lock_init(&dsi.errors_lock);
dsi.errors = 0;
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock_init(&dsi.irq_stats_lock);
+ dsi.irq_stats.last_reset = jiffies;
+#endif
+
init_completion(&dsi.bta_completion);
init_completion(&dsi.update_completion);
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 9b05ee6..0a26b7d 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -467,14 +467,14 @@ static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
static int _omap_dss_wait_reset(void)
{
- unsigned timeout = 1000;
+ int t = 0;
while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
- udelay(1);
- if (!--timeout) {
+ if (++t > 1000) {
DSSERR("soft reset failed\n");
return -ENODEV;
}
+ udelay(1);
}
return 0;
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 8da5ac42..2bcb124 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -240,6 +240,7 @@ int dsi_init(struct platform_device *pdev);
void dsi_exit(void);
void dsi_dump_clocks(struct seq_file *s);
+void dsi_dump_irqs(struct seq_file *s);
void dsi_dump_regs(struct seq_file *s);
void dsi_save_context(void);
@@ -268,6 +269,7 @@ int dpi_init_display(struct omap_dss_device *dssdev);
int dispc_init(void);
void dispc_exit(void);
void dispc_dump_clocks(struct seq_file *s);
+void dispc_dump_irqs(struct seq_file *s);
void dispc_dump_regs(struct seq_file *s);
void dispc_irq_handler(void);
void dispc_fake_vsync_irq(void);
@@ -367,4 +369,16 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
unsigned long rfbi_get_max_tx_rate(void);
int rfbi_init_display(struct omap_dss_device *display);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
+{
+ int b;
+ for (b = 0; b < 32; ++b) {
+ if (irqstatus & (1 << b))
+ irq_arr[b]++;
+ }
+}
+#endif
+
#endif
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index d0b3006..b936495 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -120,7 +120,7 @@ static struct {
struct omap_dss_device *dssdev[2];
- struct kfifo *cmd_fifo;
+ struct kfifo cmd_fifo;
spinlock_t cmd_lock;
struct completion cmd_done;
atomic_t cmd_fifo_full;
@@ -1011,20 +1011,20 @@ static void process_cmd_fifo(void)
return;
while (true) {
- spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
+ spin_lock_irqsave(&rfbi.cmd_lock, flags);
- len = __kfifo_get(rfbi.cmd_fifo, (unsigned char *)&p,
+ len = kfifo_out(&rfbi.cmd_fifo, (unsigned char *)&p,
sizeof(struct update_param));
if (len == 0) {
DSSDBG("nothing more in fifo\n");
atomic_set(&rfbi.cmd_pending, 0);
- spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
break;
}
/* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/
- spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
BUG_ON(len != sizeof(struct update_param));
BUG_ON(p.rfbi_module > 1);
@@ -1052,25 +1052,25 @@ static void rfbi_push_cmd(struct update_param *p)
unsigned long flags;
int available;
- spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
+ spin_lock_irqsave(&rfbi.cmd_lock, flags);
available = RFBI_CMD_FIFO_LEN_BYTES -
- __kfifo_len(rfbi.cmd_fifo);
+ kfifo_len(&rfbi.cmd_fifo);
/* DSSDBG("%d bytes left in fifo\n", available); */
if (available < sizeof(struct update_param)) {
DSSDBG("Going to wait because FIFO FULL..\n");
- spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
atomic_inc(&rfbi.cmd_fifo_full);
wait_for_completion(&rfbi.cmd_done);
/*DSSDBG("Woke up because fifo not full anymore\n");*/
continue;
}
- ret = __kfifo_put(rfbi.cmd_fifo, (unsigned char *)p,
+ ret = kfifo_in(&rfbi.cmd_fifo, (unsigned char *)p,
sizeof(struct update_param));
/* DSSDBG("pushed %d bytes\n", ret);*/
- spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
BUG_ON(ret != sizeof(struct update_param));
@@ -1155,12 +1155,12 @@ int rfbi_init(void)
{
u32 rev;
u32 l;
+ int r;
spin_lock_init(&rfbi.cmd_lock);
- rfbi.cmd_fifo = kfifo_alloc(RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL,
- &rfbi.cmd_lock);
- if (IS_ERR(rfbi.cmd_fifo))
- return -ENOMEM;
+ r = kfifo_alloc(&rfbi.cmd_fifo, RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL);
+ if (r)
+ return r;
init_completion(&rfbi.cmd_done);
atomic_set(&rfbi.cmd_fifo_full, 0);
@@ -1196,7 +1196,7 @@ void rfbi_exit(void)
{
DSSDBG("rfbi_exit\n");
- kfifo_free(rfbi.cmd_fifo);
+ kfifo_free(&rfbi.cmd_fifo);
iounmap(rfbi.base);
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index ef29983..d17caef 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -1311,6 +1311,7 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
if (rg->vrfb.vaddr[0]) {
iounmap(rg->vrfb.vaddr[0]);
omap_vrfb_release_ctx(&rg->vrfb);
+ rg->vrfb.vaddr[0] = NULL;
}
}
@@ -2114,6 +2115,11 @@ static int omapfb_probe(struct platform_device *pdev)
dssdev = NULL;
for_each_dss_dev(dssdev) {
omap_dss_get_device(dssdev);
+ if (!dssdev->driver) {
+ dev_err(&pdev->dev, "no driver for display\n");
+ r = -EINVAL;
+ goto cleanup;
+ }
fbdev->displays[fbdev->num_displays++] = dssdev;
}
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 415858b..825b665 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1221,9 +1221,9 @@ static void setup_smart_timing(struct pxafb_info *fbi,
static int pxafb_smart_thread(void *arg)
{
struct pxafb_info *fbi = arg;
- struct pxafb_mach_info *inf;
+ struct pxafb_mach_info *inf = fbi->dev->platform_data;
- if (!fbi || !fbi->dev->platform_data->smart_update) {
+ if (!inf->smart_update) {
pr_err("%s: not properly initialized, thread terminated\n",
__func__);
return -EINVAL;
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index adf9632..53cb722 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -211,21 +211,23 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
/**
* s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock.
- * @id: window id.
* @sfb: The hardware state.
* @pixclock: The pixel clock wanted, in picoseconds.
*
* Given the specified pixel clock, work out the necessary divider to get
* close to the output frequency.
*/
-static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk)
+static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
{
- struct s3c_fb_pd_win *win = sfb->pdata->win[id];
unsigned long clk = clk_get_rate(sfb->bus_clk);
+ unsigned long long tmp;
unsigned int result;
- pixclk *= win->win_mode.refresh;
- result = clk / pixclk;
+ tmp = (unsigned long long)clk;
+ tmp *= pixclk;
+
+ do_div(tmp, 1000000000UL);
+ result = (unsigned int)tmp / 1000;
dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
pixclk, clk, result, clk / result);
@@ -301,7 +303,7 @@ static int s3c_fb_set_par(struct fb_info *info)
/* use window 0 as the basis for the lcd output timings */
if (win_no == 0) {
- clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock);
+ clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
data = sfb->pdata->vidcon0;
data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 8a65fb6..a69830d 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -281,6 +281,7 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct sh_mobile_lcdc_chan *ch = info->par;
+ struct sh_mobile_lcdc_board_cfg *bcfg = &ch->cfg.board_cfg;
/* enable clocks before accessing hardware */
sh_mobile_lcdc_clk_on(ch->lcdc);
@@ -305,10 +306,17 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
/* trigger panel update */
dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
+ if (bcfg->start_transfer)
+ bcfg->start_transfer(bcfg->board_data, ch,
+ &sh_mobile_lcdc_sys_bus_ops);
lcdc_write_chan(ch, LDSM2R, 1);
dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
- } else
+ } else {
+ if (bcfg->start_transfer)
+ bcfg->start_transfer(bcfg->board_data, ch,
+ &sh_mobile_lcdc_sys_bus_ops);
lcdc_write_chan(ch, LDSM2R, 1);
+ }
}
static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c
index 9d4f3a4..d5077df 100644
--- a/drivers/video/via/accel.c
+++ b/drivers/video/via/accel.c
@@ -137,7 +137,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
tmp, dst_pitch);
return -EINVAL;
}
- tmp = (tmp >> 3) | (dst_pitch << (16 - 3));
+ tmp = VIA_PITCH_ENABLE | (tmp >> 3) | (dst_pitch << (16 - 3));
writel(tmp, engine + 0x38);
if (op == VIA_BITBLT_FILL)
@@ -352,6 +352,9 @@ int viafb_init_engine(struct fb_info *info)
viapar->shared->vq_vram_addr = viapar->fbmem_free;
viapar->fbmem_used += VQ_SIZE;
+ /* Init 2D engine reg to reset 2D engine */
+ writel(0x0, engine + VIA_REG_KEYCONTROL);
+
/* Init AGP and VQ regs */
switch (chip_name) {
case UNICHROME_K8M890:
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 10d8c4b..3028e7d 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -177,16 +177,15 @@ static int viafb_set_par(struct fb_info *info)
}
if (vmode_index != VIA_RES_INVALID) {
- viafb_setmode(vmode_index, info->var.xres, info->var.yres,
- info->var.bits_per_pixel, vmode_index1,
- viafb_second_xres, viafb_second_yres, viafb_bpp1);
-
viafb_update_fix(info);
viafb_bpp = info->var.bits_per_pixel;
if (info->var.accel_flags & FB_ACCELF_TEXT)
info->flags &= ~FBINFO_HWACCEL_DISABLED;
else
info->flags |= FBINFO_HWACCEL_DISABLED;
+ viafb_setmode(vmode_index, info->var.xres, info->var.yres,
+ info->var.bits_per_pixel, vmode_index1,
+ viafb_second_xres, viafb_second_yres, viafb_bpp1);
}
return 0;
@@ -680,7 +679,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
if (!viafb_gamma_table)
return -ENOMEM;
if (copy_from_user(viafb_gamma_table, argp,
- sizeof(viafb_gamma_table))) {
+ 256 * sizeof(u32))) {
kfree(viafb_gamma_table);
return -EFAULT;
}
@@ -694,7 +693,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
return -ENOMEM;
viafb_get_gamma_table(viafb_gamma_table);
if (copy_to_user(argp, viafb_gamma_table,
- sizeof(viafb_gamma_table))) {
+ 256 * sizeof(u32))) {
kfree(viafb_gamma_table);
return -EFAULT;
}
@@ -872,7 +871,9 @@ static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo)
return -ENODEV;
- if (chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2)
+ /* LCD output does not support hw cursors (at least on VN896) */
+ if ((chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) ||
+ viafb_LCD_ON)
return -ENODEV;
viafb_show_hw_cursor(info, HW_Cursor_OFF);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9dd5880..505be88 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -266,7 +266,7 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
-static struct virtio_driver virtio_balloon = {
+static struct virtio_driver virtio_balloon_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
@@ -279,12 +279,12 @@ static struct virtio_driver virtio_balloon = {
static int __init init(void)
{
- return register_virtio_driver(&virtio_balloon);
+ return register_virtio_driver(&virtio_balloon_driver);
}
static void __exit fini(void)
{
- unregister_virtio_driver(&virtio_balloon);
+ unregister_virtio_driver(&virtio_balloon_driver);
}
module_init(init);
module_exit(fini);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index d958b76..050ee14 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -368,7 +368,7 @@ config ALIM7101_WDT
config GEODE_WDT
tristate "AMD Geode CS5535/CS5536 Watchdog"
- depends on MGEODE_LX
+ depends on CS5535_MFGPT
help
This driver enables a watchdog capability built into the
CS5535/CS5536 companion chips for the AMD Geode GX and LX
@@ -396,8 +396,8 @@ config SBC_FITPC2_WATCHDOG
tristate "Compulab SBC-FITPC2 watchdog"
depends on X86
---help---
- This is the driver for the built-in watchdog timer on the fit-PC2
- Single-board computer made by Compulab.
+ This is the driver for the built-in watchdog timer on the fit-PC2,
+ fit-PC2i, CM-iAM single-board computers made by Compulab.
It's possible to enable the watchdog timer either from the BIOS (F2) or from booted Linux.
When "Watchdog Timer Value" is enabled, one can set a 31-255 s operational range.
@@ -815,16 +815,6 @@ config PNX833X_WDT
timer has expired and no process has written to /dev/watchdog during
that time.
-config WDT_RM9K_GPI
- tristate "RM9000/GPI hardware watchdog"
- depends on CPU_RM9000
- help
- Watchdog implementation using the GPI hardware found on
- PMC-Sierra RM9xxx CPUs.
-
- To compile this driver as a module, choose M here: the
- module will be called rm9k_wdt.
-
config SIBYTE_WDOG
tristate "Sibyte SoC hardware watchdog"
depends on CPU_SB1
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 89c045d..475c611 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -109,7 +109,6 @@ obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
-obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
index 9c65944..9d7d155 100644
--- a/drivers/watchdog/adx_wdt.c
+++ b/drivers/watchdog/adx_wdt.c
@@ -242,14 +242,14 @@ static int __devinit adx_wdt_probe(struct platform_device *pdev)
}
res = devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, res->name);
+ resource_size(res), res->name);
if (!res) {
dev_err(&pdev->dev, "cannot request I/O memory region\n");
return -ENXIO;
}
wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!wdt->base) {
dev_err(&pdev->dev, "cannot remap I/O memory region\n");
return -ENXIO;
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index e8ae638..0378479 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -326,7 +326,7 @@ static int __init at32_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
}
- wdt->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ wdt->regs = ioremap(regs->start, resource_size(regs));
if (!wdt->regs) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "could not map I/O memory\n");
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index c7b3f9d..2159e66 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -1,9 +1,8 @@
/*
* Blackfin On-Chip Watchdog Driver
- * Supports BF53[123]/BF53[467]/BF54[2489]/BF561
*
* Originally based on softdog.c
- * Copyright 2006-2007 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
* Copyright 2006-2007 Michele d'Amico
* Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
*
@@ -137,13 +136,15 @@ static int bfin_wdt_running(void)
*/
static int bfin_wdt_set_timeout(unsigned long t)
{
- u32 cnt;
+ u32 cnt, max_t, sclk;
unsigned long flags;
- stampit();
+ sclk = get_sclk();
+ max_t = -1 / sclk;
+ cnt = t * sclk;
+ stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt);
- cnt = t * get_sclk();
- if (cnt < get_sclk()) {
+ if (t > max_t) {
printk(KERN_WARNING PFX "timeout value is too large\n");
return -EINVAL;
}
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 9d7520f..887136d 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -221,7 +221,7 @@ static int __devinit davinci_wdt_probe(struct platform_device *pdev)
return -ENOENT;
}
- size = res->end - res->start + 1;
+ size = resource_size(res);
wdt_mem = request_mem_region(res->start, size, pdev->name);
if (wdt_mem == NULL) {
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 9acf001..38252ff 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -1,6 +1,7 @@
-/* Watchdog timer for the Geode GX/LX with the CS5535/CS5536 companion chip
+/* Watchdog timer for machines with the CS5535/CS5536 companion chip
*
* Copyright (C) 2006-2007, Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -19,7 +20,7 @@
#include <linux/reboot.h>
#include <linux/uaccess.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#define GEODEWDT_HZ 500
#define GEODEWDT_SCALE 6
@@ -46,25 +47,25 @@ MODULE_PARM_DESC(nowayout,
static struct platform_device *geodewdt_platform_device;
static unsigned long wdt_flags;
-static int wdt_timer;
+static struct cs5535_mfgpt_timer *wdt_timer;
static int safe_close;
static void geodewdt_ping(void)
{
/* Stop the counter */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
/* Reset the counter */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
/* Enable the counter */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
}
static void geodewdt_disable(void)
{
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
- geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
}
static int geodewdt_set_heartbeat(int val)
@@ -72,10 +73,10 @@ static int geodewdt_set_heartbeat(int val)
if (val < 1 || val > GEODEWDT_MAX_SECONDS)
return -EINVAL;
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
- geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ);
- geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
timeout = val;
return 0;
@@ -215,28 +216,25 @@ static struct miscdevice geodewdt_miscdev = {
static int __devinit geodewdt_probe(struct platform_device *dev)
{
- int ret, timer;
-
- timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
+ int ret;
- if (timer == -1) {
+ wdt_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
+ if (!wdt_timer) {
printk(KERN_ERR "geodewdt: No timers were available\n");
return -ENODEV;
}
- wdt_timer = timer;
-
/* Set up the timer */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP,
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP,
GEODEWDT_SCALE | (3 << 8));
/* Set up comparator 2 to reset when the event fires */
- geode_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);
+ cs5535_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);
/* Set up the initial timeout */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2,
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2,
timeout * GEODEWDT_HZ);
ret = misc_register(&geodewdt_miscdev);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index e44fbb3..4bdb7f1 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -29,7 +29,9 @@
* document number 313056-003, 313057-017: 82801H (ICH8)
* document number 316972-004, 316973-012: 82801I (ICH9)
* document number 319973-002, 319974-002: 82801J (ICH10)
- * document number 322169-001, 322170-001: 5 Series, 3400 Series (PCH)
+ * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
+ * document number 320066-003, 320257-008: EP80579 (IICH)
+ * document number TBD : Cougar Point (CPT)
*/
/*
@@ -99,7 +101,22 @@ enum iTCO_chipsets {
TCO_ICH10DO, /* ICH10DO */
TCO_PCH, /* PCH Desktop Full Featured */
TCO_PCHM, /* PCH Mobile Full Featured */
+ TCO_P55, /* P55 */
+ TCO_PM55, /* PM55 */
+ TCO_H55, /* H55 */
+ TCO_QM57, /* QM57 */
+ TCO_H57, /* H57 */
+ TCO_HM55, /* HM55 */
+ TCO_Q57, /* Q57 */
+ TCO_HM57, /* HM57 */
TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */
+ TCO_QS57, /* QS57 */
+ TCO_3400, /* 3400 */
+ TCO_3420, /* 3420 */
+ TCO_3450, /* 3450 */
+ TCO_EP80579, /* EP80579 */
+ TCO_CPTD, /* CPT Desktop */
+ TCO_CPTM, /* CPT Mobile */
};
static struct {
@@ -142,7 +159,22 @@ static struct {
{"ICH10DO", 2},
{"PCH Desktop Full Featured", 2},
{"PCH Mobile Full Featured", 2},
+ {"P55", 2},
+ {"PM55", 2},
+ {"H55", 2},
+ {"QM57", 2},
+ {"H57", 2},
+ {"HM55", 2},
+ {"Q57", 2},
+ {"HM57", 2},
{"PCH Mobile SFF Full Featured", 2},
+ {"QS57", 2},
+ {"3400", 2},
+ {"3420", 2},
+ {"3450", 2},
+ {"EP80579", 2},
+ {"CPT Desktop", 2},
+ {"CPT Mobile", 2},
{NULL, 0}
};
@@ -213,7 +245,22 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
{ ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)},
{ ITCO_PCI_DEVICE(0x3b00, TCO_PCH)},
{ ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)},
+ { ITCO_PCI_DEVICE(0x3b02, TCO_P55)},
+ { ITCO_PCI_DEVICE(0x3b03, TCO_PM55)},
+ { ITCO_PCI_DEVICE(0x3b06, TCO_H55)},
+ { ITCO_PCI_DEVICE(0x3b07, TCO_QM57)},
+ { ITCO_PCI_DEVICE(0x3b08, TCO_H57)},
+ { ITCO_PCI_DEVICE(0x3b09, TCO_HM55)},
+ { ITCO_PCI_DEVICE(0x3b0a, TCO_Q57)},
+ { ITCO_PCI_DEVICE(0x3b0b, TCO_HM57)},
{ ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)},
+ { ITCO_PCI_DEVICE(0x3b0f, TCO_QS57)},
+ { ITCO_PCI_DEVICE(0x3b12, TCO_3400)},
+ { ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
+ { ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
+ { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
+ { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)},
+ { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c
index 4f4b35a..3c79dc5 100644
--- a/drivers/watchdog/ixp2000_wdt.c
+++ b/drivers/watchdog/ixp2000_wdt.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 83fa34b..a2dc07c 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -350,7 +350,7 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
ret = -ENXIO;
goto err_free;
}
- wdt->base = ioremap(res->start, res->end - res->start + 1);
+ wdt->base = ioremap(res->start, resource_size(res));
if (!wdt->base) {
ret = -ENOMEM;
goto err_free;
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
index acf589d..a51dbe4 100644
--- a/drivers/watchdog/mv64x60_wdt.c
+++ b/drivers/watchdog/mv64x60_wdt.c
@@ -275,7 +275,7 @@ static int __devinit mv64x60_wdt_probe(struct platform_device *dev)
if (!r)
return -ENODEV;
- mv64x60_wdt_regs = ioremap(r->start, r->end - r->start + 1);
+ mv64x60_wdt_regs = ioremap(r->start, resource_size(r));
if (mv64x60_wdt_regs == NULL)
return -ENOMEM;
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 429ea99..c6aaf28 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -277,8 +277,7 @@ static int __devinit omap_wdt_probe(struct platform_device *pdev)
goto err_busy;
}
- mem = request_mem_region(res->start, res->end - res->start + 1,
- pdev->name);
+ mem = request_mem_region(res->start, resource_size(res), pdev->name);
if (!mem) {
ret = -EBUSY;
goto err_busy;
@@ -306,7 +305,7 @@ static int __devinit omap_wdt_probe(struct platform_device *pdev)
goto err_clk;
}
- wdev->base = ioremap(res->start, res->end - res->start + 1);
+ wdev->base = ioremap(res->start, resource_size(res));
if (!wdev->base) {
ret = -ENOMEM;
goto err_ioremap;
@@ -358,7 +357,7 @@ err_clk:
kfree(wdev);
err_kzalloc:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
err_busy:
err_get_resource:
@@ -383,7 +382,7 @@ static int __devexit omap_wdt_remove(struct platform_device *pdev)
return -ENOENT;
misc_deregister(&(wdev->omap_wdt_miscdev));
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
clk_put(wdev->ick);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 4d227b15..430a584 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -264,7 +264,7 @@ static int __devinit pnx4008_wdt_probe(struct platform_device *pdev)
return -ENOENT;
}
- size = res->end - res->start + 1;
+ size = resource_size(res);
wdt_mem = request_mem_region(res->start, size, pdev->name);
if (wdt_mem == NULL) {
diff --git a/drivers/watchdog/rm9k_wdt.c b/drivers/watchdog/rm9k_wdt.c
deleted file mode 100644
index bb66958..0000000
--- a/drivers/watchdog/rm9k_wdt.c
+++ /dev/null
@@ -1,419 +0,0 @@
-/*
- * Watchdog implementation for GPI h/w found on PMC-Sierra RM9xxx
- * chips.
- *
- * Copyright (C) 2004 by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/reboot.h>
-#include <linux/notifier.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/rm9k-ocd.h>
-
-#include <rm9k_wdt.h>
-
-
-#define CLOCK 125000000
-#define MAX_TIMEOUT_SECONDS 32
-#define CPCCR 0x0080
-#define CPGIG1SR 0x0044
-#define CPGIG1ER 0x0054
-
-
-/* Function prototypes */
-static irqreturn_t wdt_gpi_irqhdl(int, void *);
-static void wdt_gpi_start(void);
-static void wdt_gpi_stop(void);
-static void wdt_gpi_set_timeout(unsigned int);
-static int wdt_gpi_open(struct inode *, struct file *);
-static int wdt_gpi_release(struct inode *, struct file *);
-static ssize_t wdt_gpi_write(struct file *, const char __user *, size_t,
- loff_t *);
-static long wdt_gpi_ioctl(struct file *, unsigned int, unsigned long);
-static int wdt_gpi_notify(struct notifier_block *, unsigned long, void *);
-static const struct resource *wdt_gpi_get_resource(struct platform_device *,
- const char *, unsigned int);
-static int __init wdt_gpi_probe(struct platform_device *);
-static int __exit wdt_gpi_remove(struct platform_device *);
-
-
-static const char wdt_gpi_name[] = "wdt_gpi";
-static atomic_t opencnt;
-static int expect_close;
-static int locked;
-
-
-/* These are set from device resources */
-static void __iomem *wd_regs;
-static unsigned int wd_irq, wd_ctr;
-
-
-/* Module arguments */
-static int timeout = MAX_TIMEOUT_SECONDS;
-module_param(timeout, int, 0444);
-MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
-
-static unsigned long resetaddr = 0xbffdc200;
-module_param(resetaddr, ulong, 0444);
-MODULE_PARM_DESC(resetaddr, "Address to write to to force a reset");
-
-static unsigned long flagaddr = 0xbffdc104;
-module_param(flagaddr, ulong, 0444);
-MODULE_PARM_DESC(flagaddr, "Address to write to boot flags to");
-
-static int powercycle;
-module_param(powercycle, bool, 0444);
-MODULE_PARM_DESC(powercycle, "Cycle power if watchdog expires");
-
-static int nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0444);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started");
-
-
-/* Kernel interfaces */
-static const struct file_operations fops = {
- .owner = THIS_MODULE,
- .open = wdt_gpi_open,
- .release = wdt_gpi_release,
- .write = wdt_gpi_write,
- .unlocked_ioctl = wdt_gpi_ioctl,
-};
-
-static struct miscdevice miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = wdt_gpi_name,
- .fops = &fops,
-};
-
-static struct notifier_block wdt_gpi_shutdown = {
- .notifier_call = wdt_gpi_notify,
-};
-
-
-/* Interrupt handler */
-static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt)
-{
- if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1))
- return IRQ_NONE;
- __raw_writel(0x1, wd_regs + 0x0008);
-
-
- printk(KERN_CRIT "%s: watchdog expired - resetting system\n",
- wdt_gpi_name);
-
- *(volatile char *) flagaddr |= 0x01;
- *(volatile char *) resetaddr = powercycle ? 0x01 : 0x2;
- iob();
- while (1)
- cpu_relax();
-}
-
-
-/* Watchdog functions */
-static void wdt_gpi_start(void)
-{
- u32 reg;
-
- lock_titan_regs();
- reg = titan_readl(CPGIG1ER);
- titan_writel(reg | (0x100 << wd_ctr), CPGIG1ER);
- iob();
- unlock_titan_regs();
-}
-
-static void wdt_gpi_stop(void)
-{
- u32 reg;
-
- lock_titan_regs();
- reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
- titan_writel(reg, CPCCR);
- reg = titan_readl(CPGIG1ER);
- titan_writel(reg & ~(0x100 << wd_ctr), CPGIG1ER);
- iob();
- unlock_titan_regs();
-}
-
-static void wdt_gpi_set_timeout(unsigned int to)
-{
- u32 reg;
- const u32 wdval = (to * CLOCK) & ~0x0000000f;
-
- lock_titan_regs();
- reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
- titan_writel(reg, CPCCR);
- wmb();
- __raw_writel(wdval, wd_regs + 0x0000);
- wmb();
- titan_writel(reg | (0x2 << (wd_ctr * 4)), CPCCR);
- wmb();
- titan_writel(reg | (0x5 << (wd_ctr * 4)), CPCCR);
- iob();
- unlock_titan_regs();
-}
-
-
-/* /dev/watchdog operations */
-static int wdt_gpi_open(struct inode *inode, struct file *file)
-{
- int res;
-
- if (unlikely(atomic_dec_if_positive(&opencnt) < 0))
- return -EBUSY;
-
- expect_close = 0;
- if (locked) {
- module_put(THIS_MODULE);
- free_irq(wd_irq, &miscdev);
- locked = 0;
- }
-
- res = request_irq(wd_irq, wdt_gpi_irqhdl, IRQF_SHARED | IRQF_DISABLED,
- wdt_gpi_name, &miscdev);
- if (unlikely(res))
- return res;
-
- wdt_gpi_set_timeout(timeout);
- wdt_gpi_start();
-
- printk(KERN_INFO "%s: watchdog started, timeout = %u seconds\n",
- wdt_gpi_name, timeout);
- return nonseekable_open(inode, file);
-}
-
-static int wdt_gpi_release(struct inode *inode, struct file *file)
-{
- if (nowayout) {
- printk(KERN_INFO "%s: no way out - watchdog left running\n",
- wdt_gpi_name);
- __module_get(THIS_MODULE);
- locked = 1;
- } else {
- if (expect_close) {
- wdt_gpi_stop();
- free_irq(wd_irq, &miscdev);
- printk(KERN_INFO "%s: watchdog stopped\n",
- wdt_gpi_name);
- } else {
- printk(KERN_CRIT "%s: unexpected close() -"
- " watchdog left running\n",
- wdt_gpi_name);
- wdt_gpi_set_timeout(timeout);
- __module_get(THIS_MODULE);
- locked = 1;
- }
- }
-
- atomic_inc(&opencnt);
- return 0;
-}
-
-static ssize_t wdt_gpi_write(struct file *f, const char __user *d, size_t s,
- loff_t *o)
-{
- char val;
-
- wdt_gpi_set_timeout(timeout);
- expect_close = (s > 0) && !get_user(val, d) && (val == 'V');
- return s ? 1 : 0;
-}
-
-static long wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- long res = -ENOTTY;
- const long size = _IOC_SIZE(cmd);
- int stat;
- void __user *argp = (void __user *)arg;
- static struct watchdog_info wdinfo = {
- .identity = "RM9xxx/GPI watchdog",
- .firmware_version = 0,
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING
- };
-
- if (unlikely(_IOC_TYPE(cmd) != WATCHDOG_IOCTL_BASE))
- return -ENOTTY;
-
- if ((_IOC_DIR(cmd) & _IOC_READ)
- && !access_ok(VERIFY_WRITE, arg, size))
- return -EFAULT;
-
- if ((_IOC_DIR(cmd) & _IOC_WRITE)
- && !access_ok(VERIFY_READ, arg, size))
- return -EFAULT;
-
- expect_close = 0;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- wdinfo.options = nowayout ?
- WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING :
- WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE;
- res = __copy_to_user(argp, &wdinfo, size) ? -EFAULT : size;
- break;
-
- case WDIOC_GETSTATUS:
- break;
-
- case WDIOC_GETBOOTSTATUS:
- stat = (*(volatile char *) flagaddr & 0x01)
- ? WDIOF_CARDRESET : 0;
- res = __copy_to_user(argp, &stat, size) ?
- -EFAULT : size;
- break;
-
- case WDIOC_SETOPTIONS:
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_gpi_set_timeout(timeout);
- res = size;
- break;
-
- case WDIOC_SETTIMEOUT:
- {
- int val;
- if (unlikely(__copy_from_user(&val, argp, size))) {
- res = -EFAULT;
- break;
- }
-
- if (val > MAX_TIMEOUT_SECONDS)
- val = MAX_TIMEOUT_SECONDS;
- timeout = val;
- wdt_gpi_set_timeout(val);
- res = size;
- printk(KERN_INFO "%s: timeout set to %u seconds\n",
- wdt_gpi_name, timeout);
- }
- break;
-
- case WDIOC_GETTIMEOUT:
- res = __copy_to_user(argp, &timeout, size) ?
- -EFAULT : size;
- break;
- }
-
- return res;
-}
-
-
-/* Shutdown notifier */
-static int wdt_gpi_notify(struct notifier_block *this, unsigned long code,
- void *unused)
-{
- if (code == SYS_DOWN || code == SYS_HALT)
- wdt_gpi_stop();
-
- return NOTIFY_DONE;
-}
-
-
-/* Init & exit procedures */
-static const struct resource *wdt_gpi_get_resource(struct platform_device *pdv,
- const char *name, unsigned int type)
-{
- char buf[80];
- if (snprintf(buf, sizeof(buf), "%s_0", name) >= sizeof(buf))
- return NULL;
- return platform_get_resource_byname(pdv, type, buf);
-}
-
-/* No hotplugging on the platform bus - use __devinit */
-static int __devinit wdt_gpi_probe(struct platform_device *pdv)
-{
- int res;
- const struct resource
- * const rr = wdt_gpi_get_resource(pdv, WDT_RESOURCE_REGS,
- IORESOURCE_MEM),
- * const ri = wdt_gpi_get_resource(pdv, WDT_RESOURCE_IRQ,
- IORESOURCE_IRQ),
- * const rc = wdt_gpi_get_resource(pdv, WDT_RESOURCE_COUNTER,
- 0);
-
- if (unlikely(!rr || !ri || !rc))
- return -ENXIO;
-
- wd_regs = ioremap_nocache(rr->start, rr->end + 1 - rr->start);
- if (unlikely(!wd_regs))
- return -ENOMEM;
- wd_irq = ri->start;
- wd_ctr = rc->start;
- res = misc_register(&miscdev);
- if (res)
- iounmap(wd_regs);
- else
- register_reboot_notifier(&wdt_gpi_shutdown);
- return res;
-}
-
-static int __devexit wdt_gpi_remove(struct platform_device *dev)
-{
- int res;
-
- unregister_reboot_notifier(&wdt_gpi_shutdown);
- res = misc_deregister(&miscdev);
- iounmap(wd_regs);
- wd_regs = NULL;
- return res;
-}
-
-
-/* Device driver init & exit */
-static struct platform_driver wgt_gpi_driver = {
- .driver = {
- .name = wdt_gpi_name,
- .owner = THIS_MODULE,
- },
- .probe = wdt_gpi_probe,
- .remove = __devexit_p(wdt_gpi_remove),
-};
-
-static int __init wdt_gpi_init_module(void)
-{
- atomic_set(&opencnt, 1);
- if (timeout > MAX_TIMEOUT_SECONDS)
- timeout = MAX_TIMEOUT_SECONDS;
- return platform_driver_register(&wdt_gpi_driver);
-}
-
-static void __exit wdt_gpi_cleanup_module(void)
-{
- platform_driver_unregister(&wdt_gpi_driver);
-}
-
-module_init(wdt_gpi_init_module);
-module_exit(wdt_gpi_cleanup_module);
-
-MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
-MODULE_DESCRIPTION("Basler eXcite watchdog driver for gpi devices");
-MODULE_VERSION("0.1");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 85b93e1..8760a26 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -421,7 +421,7 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
return -ENOENT;
}
- size = (res->end - res->start) + 1;
+ size = resource_size(res);
wdt_mem = request_mem_region(res->start, size, pdev->name);
if (wdt_mem == NULL) {
dev_err(dev, "failed to get memory region\n");
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 91430a8..e6763d2 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -46,9 +46,9 @@ static DEFINE_SPINLOCK(wdt_lock);
static void wdt_send_data(unsigned char command, unsigned char data)
{
outb(command, COMMAND_PORT);
- mdelay(100);
+ msleep(100);
outb(data, DATA_PORT);
- mdelay(200);
+ msleep(200);
}
static void wdt_enable(void)
@@ -202,11 +202,10 @@ static int __init fitpc2_wdt_init(void)
{
int err;
- if (strcmp("SBC-FITPC2", dmi_get_system_info(DMI_BOARD_NAME))) {
- pr_info("board name is: %s. Should be SBC-FITPC2\n",
- dmi_get_system_info(DMI_BOARD_NAME));
+ if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2"))
return -ENODEV;
- }
+
+ pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME));
if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) {
pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
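Two independent fixes in sbc_fitpc2_wdt: wdt_send_data() now uses msleep() instead of mdelay(), and the DMI board-name check is loosened to a substring match so OEM-customised names still probe, with the matched name logged. A minimal sketch of the delay difference (hypothetical helper, not taken from the driver):

#include <linux/types.h>
#include <linux/delay.h>

/* Sketch: both pause for ~100 ms, but mdelay() busy-waits and burns CPU,
 * while msleep() schedules away.  msleep() is only valid where the caller
 * may sleep (process context), which holds for these watchdog port writes. */
static void example_pause(bool may_sleep)
{
	if (may_sleep)
		msleep(100);	/* timer-based sleep, may overshoot slightly */
	else
		mdelay(100);	/* busy-wait, usable in atomic context */
}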
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 6adab77..d635566 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -214,12 +214,10 @@ static int __init txx9wdt_probe(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
goto exit_busy;
- if (!devm_request_mem_region(&dev->dev,
- res->start, res->end - res->start + 1,
+ if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
"txx9wdt"))
goto exit_busy;
- txx9wdt_reg = devm_ioremap(&dev->dev,
- res->start, res->end - res->start + 1);
+ txx9wdt_reg = devm_ioremap(&dev->dev, res->start, resource_size(res));
if (!txx9wdt_reg)
goto exit_busy;
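The txx9wdt hunk applies the same resource_size() conversion to the devm_-managed variants. A hedged sketch (hypothetical foo_probe(), not this driver) of why the devm_ calls need no matching release on the error paths:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Both allocations are tied to &pdev->dev and are released
	 * automatically if probe fails later or the device is unbound. */
	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), "foo"))
		return -EBUSY;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	return 0;
}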
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c499793..5d42d55 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -102,15 +102,15 @@ static void do_suspend(void)
goto out_thaw;
}
+ printk(KERN_DEBUG "suspending xenstore...\n");
+ xs_suspend();
+
err = dpm_suspend_noirq(PMSG_SUSPEND);
if (err) {
printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
goto out_resume;
}
- printk(KERN_DEBUG "suspending xenstore...\n");
- xs_suspend();
-
err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
dpm_resume_noirq(PMSG_RESUME);
@@ -120,13 +120,13 @@ static void do_suspend(void)
cancelled = 1;
}
+out_resume:
if (!cancelled) {
xen_arch_resume();
xs_resume();
} else
xs_suspend_cancel();
-out_resume:
dpm_resume_end(PMSG_RESUME);
/* Make sure timer events get retriggered on all CPUs */
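The net effect of the two xen/manage.c hunks: xenstore is now quiesced before the noirq device suspend, and the out_resume label now sits above the xs_resume()/xs_suspend_cancel() handling, so the error path taken after xs_suspend() also unwinds xenstore. A sketch of the resulting ordering inside do_suspend(), assembled only from the lines visible above (surrounding details elided):

/*
 * Sketch (assumed from the hunks above, not the full function):
 *
 *	xs_suspend();				suspend xenstore first
 *	err = dpm_suspend_noirq(PMSG_SUSPEND);	then quiesce devices
 *	if (err)
 *		goto out_resume;		xenstore is unwound too
 *	err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
 *	dpm_resume_noirq(PMSG_RESUME);
 * out_resume:
 *	if (!cancelled) { xen_arch_resume(); xs_resume(); }
 *	else xs_suspend_cancel();
 *	dpm_resume_end(PMSG_RESUME);
 */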