author    Timothy Pearson <tpearson@raptorengineering.com>   2017-08-23 14:45:25 -0500
committer Timothy Pearson <tpearson@raptorengineering.com>   2017-08-23 14:45:25 -0500
commit    fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204 (patch)
tree      22962a4387943edc841c72a4e636a068c66d58fd /drivers/ata
downloadast2050-linux-kernel-fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204.zip
ast2050-linux-kernel-fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204.tar.gz
Initial import of modified Linux 2.6.28 tree
Original upstream URL: git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git | branch linux-2.6.28.y
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig | 728
-rw-r--r--  drivers/ata/Makefile | 85
-rw-r--r--  drivers/ata/ahci.c | 2718
-rw-r--r--  drivers/ata/ata_generic.c | 214
-rw-r--r--  drivers/ata/ata_piix.c | 1556
-rw-r--r--  drivers/ata/libata-acpi.c | 1037
-rw-r--r--  drivers/ata/libata-core.c | 6636
-rw-r--r--  drivers/ata/libata-eh.c | 3423
-rw-r--r--  drivers/ata/libata-pmp.c | 1020
-rw-r--r--  drivers/ata/libata-scsi.c | 3716
-rw-r--r--  drivers/ata/libata-sff.c | 2876
-rw-r--r--  drivers/ata/libata.h | 210
-rw-r--r--  drivers/ata/pata_acpi.c | 305
-rw-r--r--  drivers/ata/pata_ali.c | 610
-rw-r--r--  drivers/ata/pata_amd.c | 609
-rw-r--r--  drivers/ata/pata_artop.c | 441
-rw-r--r--  drivers/ata/pata_at32.c | 409
-rw-r--r--  drivers/ata/pata_atiixp.c | 287
-rw-r--r--  drivers/ata/pata_bf54x.c | 1743
-rw-r--r--  drivers/ata/pata_cmd640.c | 277
-rw-r--r--  drivers/ata/pata_cmd64x.c | 440
-rw-r--r--  drivers/ata/pata_cs5520.c | 361
-rw-r--r--  drivers/ata/pata_cs5530.c | 383
-rw-r--r--  drivers/ata/pata_cs5535.c | 238
-rw-r--r--  drivers/ata/pata_cs5536.c | 301
-rw-r--r--  drivers/ata/pata_cypress.c | 178
-rw-r--r--  drivers/ata/pata_efar.c | 303
-rw-r--r--  drivers/ata/pata_hpt366.c | 453
-rw-r--r--  drivers/ata/pata_hpt37x.c | 1059
-rw-r--r--  drivers/ata/pata_hpt3x2n.c | 594
-rw-r--r--  drivers/ata/pata_hpt3x3.c | 260
-rw-r--r--  drivers/ata/pata_icside.c | 639
-rw-r--r--  drivers/ata/pata_isapnp.c | 139
-rw-r--r--  drivers/ata/pata_it8213.c | 314
-rw-r--r--  drivers/ata/pata_it821x.c | 985
-rw-r--r--  drivers/ata/pata_ixp4xx_cf.c | 227
-rw-r--r--  drivers/ata/pata_jmicron.c | 185
-rw-r--r--  drivers/ata/pata_legacy.c | 1296
-rw-r--r--  drivers/ata/pata_marvell.c | 196
-rw-r--r--  drivers/ata/pata_mpc52xx.c | 511
-rw-r--r--  drivers/ata/pata_mpiix.c | 251
-rw-r--r--  drivers/ata/pata_netcell.c | 112
-rw-r--r--  drivers/ata/pata_ninja32.c | 208
-rw-r--r--  drivers/ata/pata_ns87410.c | 188
-rw-r--r--  drivers/ata/pata_ns87415.c | 415
-rw-r--r--  drivers/ata/pata_of_platform.c | 114
-rw-r--r--  drivers/ata/pata_oldpiix.c | 290
-rw-r--r--  drivers/ata/pata_opti.c | 214
-rw-r--r--  drivers/ata/pata_optidma.c | 469
-rw-r--r--  drivers/ata/pata_pcmcia.c | 451
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 783
-rw-r--r--  drivers/ata/pata_pdc202xx_old.c | 368
-rw-r--r--  drivers/ata/pata_platform.c | 280
-rw-r--r--  drivers/ata/pata_qdi.c | 366
-rw-r--r--  drivers/ata/pata_radisys.c | 269
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 284
-rw-r--r--  drivers/ata/pata_rz1000.c | 156
-rw-r--r--  drivers/ata/pata_sc1200.c | 253
-rw-r--r--  drivers/ata/pata_scc.c | 1189
-rw-r--r--  drivers/ata/pata_sch.c | 206
-rw-r--r--  drivers/ata/pata_serverworks.c | 538
-rw-r--r--  drivers/ata/pata_sil680.c | 413
-rw-r--r--  drivers/ata/pata_sis.c | 864
-rw-r--r--  drivers/ata/pata_sl82c105.c | 352
-rw-r--r--  drivers/ata/pata_triflex.c | 241
-rw-r--r--  drivers/ata/pata_via.c | 636
-rw-r--r--  drivers/ata/pata_winbond.c | 282
-rw-r--r--  drivers/ata/pdc_adma.c | 695
-rw-r--r--  drivers/ata/sata_fsl.c | 1411
-rw-r--r--  drivers/ata/sata_inic162x.c | 935
-rw-r--r--  drivers/ata/sata_mv.c | 3489
-rw-r--r--  drivers/ata/sata_nv.c | 2529
-rw-r--r--  drivers/ata/sata_promise.c | 1151
-rw-r--r--  drivers/ata/sata_promise.h | 157
-rw-r--r--  drivers/ata/sata_qstor.c | 683
-rw-r--r--  drivers/ata/sata_sil.c | 701
-rw-r--r--  drivers/ata/sata_sil24.c | 1388
-rw-r--r--  drivers/ata/sata_sis.c | 351
-rw-r--r--  drivers/ata/sata_svw.c | 551
-rw-r--r--  drivers/ata/sata_sx4.c | 1454
-rw-r--r--  drivers/ata/sata_uli.c | 262
-rw-r--r--  drivers/ata/sata_via.c | 630
-rw-r--r--  drivers/ata/sata_vsc.c | 463
-rw-r--r--  drivers/ata/sis.h | 5
84 files changed, 65009 insertions, 0 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
new file mode 100644
index 0000000..421b7c7
--- /dev/null
+++ b/drivers/ata/Kconfig
@@ -0,0 +1,728 @@
+#
+# SATA/PATA driver configuration
+#
+
+menuconfig ATA
+ tristate "Serial ATA (prod) and Parallel ATA (experimental) drivers"
+ depends on HAS_IOMEM
+ depends on BLOCK
+ depends on !(M32R || M68K) || BROKEN
+ select SCSI
+ ---help---
+ If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
+ any other ATA device under Linux, say Y and make sure that you know
+ the name of your ATA host adapter (the card inside your computer
+ that "speaks" the ATA protocol, also called ATA controller),
+ because you will be asked for it.
+
+ NOTE: ATA enables basic SCSI support; *however*,
+ 'SCSI disk support', 'SCSI tape support', or
+ 'SCSI CDROM support' may also be needed,
+ depending on your hardware configuration.
+
+if ATA
+
+config ATA_NONSTANDARD
+ bool
+ default n
+
+config ATA_ACPI
+ bool "ATA ACPI Support"
+ depends on ACPI && PCI
+ select ACPI_DOCK
+ default y
+ help
+ This option adds support for ATA-related ACPI objects.
+ These ACPI objects add the ability to retrieve taskfiles
+ from the ACPI BIOS and write them to the disk controller.
+ These objects may be related to performance, security,
+ power management, or other areas.
+ You can disable this at kernel boot time by using the
+ option libata.noacpi=1
+
+config SATA_PMP
+ bool "SATA Port Multiplier support"
+ default y
+ help
+ This option adds support for SATA Port Multipliers
+ (the SATA version of an ethernet hub, or SAS expander).
+
+config SATA_AHCI
+ tristate "AHCI SATA support"
+ depends on PCI
+ help
+ This option enables support for AHCI Serial ATA.
+
+ If unsure, say N.
+
+config SATA_SIL24
+ tristate "Silicon Image 3124/3132 SATA support"
+ depends on PCI
+ help
+ This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+ If unsure, say N.
+
+config SATA_FSL
+ tristate "Freescale 3.0Gbps SATA support"
+ depends on FSL_SOC
+ help
+ This option enables support for Freescale 3.0Gbps SATA controller.
+ It can be found on MPC837x and MPC8315.
+
+ If unsure, say N.
+
+config ATA_SFF
+ bool "ATA SFF support"
+ default y
+ help
+ This option adds support for ATA controllers with SFF
+ compliant or similar programming interface.
+
+ SFF is the legacy IDE interface that has been around since
+ the dawn of time. Almost all PATA controllers have an
+ SFF interface. Many SATA controllers have an SFF interface
+ when configured into a legacy compatibility mode.
+
+ If you have only modern controllers such as AHCI,
+ Silicon Image 3124, or Marvell 6440, you may choose to
+ disable this unneeded SFF support.
+
+ If unsure, say Y.
+
+if ATA_SFF
+
+config SATA_SVW
+ tristate "ServerWorks Frodo / Apple K2 SATA support"
+ depends on PCI
+ help
+ This option enables support for the Broadcom/ServerWorks/Apple K2
+ SATA controllers.
+
+ If unsure, say N.
+
+config ATA_PIIX
+ tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
+ depends on PCI
+ help
+ This option enables support for ICH5/6/7/8 Serial ATA
+ and support for PATA on the Intel ESB/ICH/PIIX3/PIIX4 series
+ host controllers.
+
+ If unsure, say N.
+
+config SATA_MV
+ tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ This option enables support for the Marvell Serial ATA family.
+ Currently supports 88SX[56]0[48][01] chips.
+
+ If unsure, say N.
+
+config SATA_NV
+ tristate "NVIDIA SATA support"
+ depends on PCI
+ help
+ This option enables support for NVIDIA Serial ATA.
+
+ If unsure, say N.
+
+config PDC_ADMA
+ tristate "Pacific Digital ADMA support"
+ depends on PCI
+ help
+ This option enables support for Pacific Digital ADMA controllers.
+
+ If unsure, say N.
+
+config SATA_QSTOR
+ tristate "Pacific Digital SATA QStor support"
+ depends on PCI
+ help
+ This option enables support for Pacific Digital Serial ATA QStor.
+
+ If unsure, say N.
+
+config SATA_PROMISE
+ tristate "Promise SATA TX2/TX4 support"
+ depends on PCI
+ help
+ This option enables support for Promise Serial ATA TX2/TX4.
+
+ If unsure, say N.
+
+config SATA_SX4
+ tristate "Promise SATA SX4 support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for Promise Serial ATA SX4.
+
+ If unsure, say N.
+
+config SATA_SIL
+ tristate "Silicon Image SATA support"
+ depends on PCI
+ help
+ This option enables support for Silicon Image Serial ATA.
+
+ If unsure, say N.
+
+config SATA_SIS
+ tristate "SiS 964/965/966/180 SATA support"
+ depends on PCI
+ select PATA_SIS
+ help
+ This option enables support for SiS Serial ATA on
+ SiS 964/965/966/180 and Parallel ATA on SiS 180.
+ PATA support on the SiS 180 additionally requires the
+ PATA_SIS driver, which this option selects automatically.
+
+ If unsure, say N.
+
+config SATA_ULI
+ tristate "ULi Electronics SATA support"
+ depends on PCI
+ help
+ This option enables support for ULi Electronics SATA.
+
+ If unsure, say N.
+
+config SATA_VIA
+ tristate "VIA SATA support"
+ depends on PCI
+ help
+ This option enables support for VIA Serial ATA.
+
+ If unsure, say N.
+
+config SATA_VITESSE
+ tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
+ depends on PCI
+ help
+ This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
+
+ If unsure, say N.
+
+config SATA_INIC162X
+ tristate "Initio 162x SATA support"
+ depends on PCI
+ help
+ This option enables support for Initio 162x Serial ATA.
+
+config PATA_ACPI
+ tristate "ACPI firmware driver for PATA"
+ depends on ATA_ACPI
+ help
+ This option enables an ACPI method driver which drives
+ motherboard PATA controller interfaces through the ACPI
+ firmware in the BIOS. This driver can sometimes handle
+ otherwise unsupported hardware.
+
+config PATA_ALI
+ tristate "ALi PATA support"
+ depends on PCI
+ help
+ This option enables support for the ALi ATA interfaces
+ found on the many ALi chipsets.
+
+ If unsure, say N.
+
+config PATA_AMD
+ tristate "AMD/NVidia PATA support"
+ depends on PCI
+ help
+ This option enables support for the AMD and NVidia PATA
+ interfaces found on the chipsets for Athlon/Athlon64.
+
+ If unsure, say N.
+
+config PATA_ARTOP
+ tristate "ARTOP 6210/6260 PATA support"
+ depends on PCI
+ help
+ This option enables support for ARTOP PATA controllers.
+
+ If unsure, say N.
+
+config PATA_AT32
+ tristate "Atmel AVR32 PATA support (Experimental)"
+ depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
+ help
+ This option enables support for the IDE devices on the
+ Atmel AT32AP platform.
+
+ If unsure, say N.
+
+config PATA_ATIIXP
+ tristate "ATI PATA support"
+ depends on PCI
+ help
+ This option enables support for the ATI ATA interfaces
+ found on the many ATI chipsets.
+
+ If unsure, say N.
+
+config PATA_CMD640_PCI
+ tristate "CMD640 PCI PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the CMD640 PCI IDE
+ interface chip. Only the primary channel is currently
+ supported.
+
+ If unsure, say N.
+
+config PATA_CMD64X
+ tristate "CMD64x PATA support"
+ depends on PCI
+ help
+ This option enables support for the CMD64x series chips
+ except for the CMD640.
+
+ If unsure, say N.
+
+config PATA_CS5520
+ tristate "CS5510/5520 PATA support"
+ depends on PCI
+ help
+ This option enables support for the Cyrix 5510/5520
+ companion chip used with the MediaGX/Geode processor family.
+
+ If unsure, say N.
+
+config PATA_CS5530
+ tristate "CS5530 PATA support"
+ depends on PCI
+ help
+ This option enables support for the Cyrix/NatSemi/AMD CS5530
+ companion chip used with the MediaGX/Geode processor family.
+
+ If unsure, say N.
+
+config PATA_CS5535
+ tristate "CS5535 PATA support (Experimental)"
+ depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+ help
+ This option enables support for the NatSemi/AMD CS5535
+ companion chip used with the Geode processor family.
+
+ If unsure, say N.
+
+config PATA_CS5536
+ tristate "CS5536 PATA support"
+ depends on PCI && X86 && !X86_64
+ help
+ This option enables support for the AMD CS5536
+ companion chip used with the Geode LX processor family.
+
+ If unsure, say N.
+
+config PATA_CYPRESS
+ tristate "Cypress CY82C693 PATA support (Very Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the Cypress/Contaq CY82C693
+ chipset found in some Alpha systems.
+
+ If unsure, say N.
+
+config PATA_EFAR
+ tristate "EFAR SLC90E66 support"
+ depends on PCI
+ help
+ This option enables support for the EFAR SLC90E66
+ IDE controller found on some older machines.
+
+ If unsure, say N.
+
+config ATA_GENERIC
+ tristate "Generic ATA support"
+ depends on PCI
+ help
+ This option enables support for generic BIOS configured
+ ATA controllers via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_HPT366
+ tristate "HPT 366/368 PATA support"
+ depends on PCI
+ help
+ This option enables support for the HPT 366 and 368
+ PATA controllers via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_HPT37X
+ tristate "HPT 370/370A/371/372/374/302 PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the majority of the later HPT
+ PATA controllers via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_HPT3X2N
+ tristate "HPT 372N/302N PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the N variant HPT PATA
+ controllers via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_HPT3X3
+ tristate "HPT 343/363 PATA support"
+ depends on PCI
+ help
+ This option enables support for the HPT 343/363
+ PATA controllers via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_HPT3X3_DMA
+ bool "HPT 343/363 DMA support (Experimental)"
+ depends on PATA_HPT3X3
+ help
+ This option enables DMA support for the HPT343/363
+ controllers. Enable with care as there are still some
+ problems with DMA on this chipset.
+
+config PATA_ISAPNP
+ tristate "ISA Plug and Play PATA support"
+ depends on ISAPNP
+ help
+ This option enables support for ISA plug & play ATA
+ controllers such as those found on old soundcards.
+
+ If unsure, say N.
+
+config PATA_IT821X
+ tristate "IT8211/2 PATA support"
+ depends on PCI
+ help
+ This option enables support for the ITE 8211 and 8212
+ PATA controllers via the new ATA layer, including RAID
+ mode.
+
+ If unsure, say N.
+
+config PATA_IT8213
+ tristate "IT8213 PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the ITE 8213 PATA
+ controllers via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_JMICRON
+ tristate "JMicron PATA support"
+ depends on PCI
+ help
+ Enable support for the JMicron IDE controller, via the new
+ ATA layer.
+
+ If unsure, say N.
+
+config PATA_LEGACY
+ tristate "Legacy ISA PATA support (Experimental)"
+ depends on ISA && EXPERIMENTAL
+ help
+ This option enables support for ISA/VLB bus legacy PATA
+ ports and allows them to be accessed via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_TRIFLEX
+ tristate "Compaq Triflex PATA support"
+ depends on PCI
+ help
+ Enable support for the Compaq 'Triflex' IDE controller as found
+ on many Compaq Pentium-Pro systems, via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_MARVELL
+ tristate "Marvell PATA support via legacy mode"
+ depends on PCI
+ help
+ This option enables limited support for the Marvell 88SE61xx ATA
+ controllers. If you wish to use only the SATA ports then select
+ the AHCI driver alone. If you wish to use the PATA port, or
+ both SATA and PATA, include this driver.
+
+ If unsure, say N.
+
+config PATA_MPC52xx
+ tristate "Freescale MPC52xx SoC internal IDE"
+ depends on PPC_MPC52xx
+ help
+ This option enables support for integrated IDE controller
+ of the Freescale MPC52xx SoC.
+
+ If unsure, say N.
+
+config PATA_MPIIX
+ tristate "Intel PATA MPIIX support"
+ depends on PCI
+ help
+ This option enables support for the Intel MPIIX PATA controller.
+
+ If unsure, say N.
+
+config PATA_OLDPIIX
+ tristate "Intel PATA old PIIX support"
+ depends on PCI
+ help
+ This option enables support for the early PIIX PATA controllers.
+
+ If unsure, say N.
+
+config PATA_NETCELL
+ tristate "NETCELL Revolution RAID support"
+ depends on PCI
+ help
+ This option enables support for the Netcell Revolution RAID
+ PATA controller.
+
+ If unsure, say N.
+
+config PATA_NINJA32
+ tristate "Ninja32/Delkin Cardbus ATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the Ninja32, Delkin and
+ possibly other brands of Cardbus ATA adapter.
+
+ If unsure, say N.
+
+config PATA_NS87410
+ tristate "Nat Semi NS87410 PATA support"
+ depends on PCI
+ help
+ This option enables support for the National Semiconductor
+ NS87410 PCI-IDE controller.
+
+ If unsure, say N.
+
+config PATA_NS87415
+ tristate "Nat Semi NS87415 PATA support"
+ depends on PCI
+ help
+ This option enables support for the National Semiconductor
+ NS87415 PCI-IDE controller.
+
+ If unsure, say N.
+
+config PATA_OPTI
+ tristate "OPTI621/6215 PATA support (Very Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables full PIO support for the early Opti ATA
+ controllers found on some old motherboards.
+
+ If unsure, say N.
+
+config PATA_OPTIDMA
+ tristate "OPTI FireStar PATA support (Very Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables DMA/PIO support for the later OPTi
+ controllers found on some old motherboards and in some
+ laptops.
+
+ If unsure, say N.
+
+config PATA_PCMCIA
+ tristate "PCMCIA PATA support"
+ depends on PCMCIA
+ help
+ This option enables support for PCMCIA ATA interfaces, including
+ compact flash card adapters via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_PDC_OLD
+ tristate "Older Promise PATA controller support"
+ depends on PCI
+ help
+ This option enables support for the Promise 20246, 20262, 20263,
+ 20265 and 20267 adapters.
+
+ If unsure, say N.
+
+config PATA_QDI
+ tristate "QDI VLB PATA support"
+ depends on ISA
+ help
+ Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
+
+config PATA_RADISYS
+ tristate "RADISYS 82600 PATA support (Experimental)"
+ depends on PCI && EXPERIMENTAL
+ help
+ This option enables support for the RADISYS 82600
+ PATA controllers via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_RB532
+ tristate "RouterBoard 532 PATA CompactFlash support"
+ depends on MIKROTIK_RB532
+ help
+ This option enables support for the RouterBoard 532
+ PATA CompactFlash controller.
+
+ If unsure, say N.
+
+config PATA_RZ1000
+ tristate "PC Tech RZ1000 PATA support"
+ depends on PCI
+ help
+ This option enables basic support for the PC Tech RZ1000/1
+ PATA controllers via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_SC1200
+ tristate "SC1200 PATA support"
+ depends on PCI
+ help
+ This option enables support for the NatSemi/AMD SC1200 SoC
+ companion chip used with the Geode processor family.
+
+ If unsure, say N.
+
+config PATA_SERVERWORKS
+ tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support"
+ depends on PCI
+ help
+ This option enables support for the Serverworks OSB4/CSB5/CSB6 and
+ HT1000 PATA controllers, via the new ATA layer.
+
+ If unsure, say N.
+
+config PATA_PDC2027X
+ tristate "Promise PATA 2027x support"
+ depends on PCI
+ help
+ This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
+
+ If unsure, say N.
+
+config PATA_SIL680
+ tristate "CMD / Silicon Image 680 PATA support"
+ depends on PCI
+ help
+ This option enables support for CMD / Silicon Image 680 PATA.
+
+ If unsure, say N.
+
+config PATA_SIS
+ tristate "SiS PATA support"
+ depends on PCI
+ help
+ This option enables support for SiS PATA controllers.
+
+ If unsure, say N.
+
+config PATA_VIA
+ tristate "VIA PATA support"
+ depends on PCI
+ help
+ This option enables support for the VIA PATA interfaces
+ found on the many VIA chipsets.
+
+ If unsure, say N.
+
+config PATA_WINBOND
+ tristate "Winbond SL82C105 PATA support"
+ depends on PCI
+ help
+ This option enables support for SL82C105 PATA devices found in the
+ Netwinder and some other systems.
+
+ If unsure, say N.
+
+config PATA_WINBOND_VLB
+ tristate "Winbond W83759A VLB PATA support (Experimental)"
+ depends on ISA && EXPERIMENTAL
+ help
+ Support for the Winbond W83759A controller on VESA local bus
+ systems.
+
+config HAVE_PATA_PLATFORM
+ bool
+ help
+ This is an internal configuration node for any machine that
+ uses the pata_platform driver to enable the relevant driver in the
+ configuration structure without having to submit endless patches
+ to update the PATA_PLATFORM entry.
+
+config PATA_PLATFORM
+ tristate "Generic platform device PATA support"
+ depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM
+ help
+ This option enables support for generic directly connected ATA
+ devices commonly found on embedded systems.
+
+ If unsure, say N.
+
+config PATA_OF_PLATFORM
+ tristate "OpenFirmware platform device PATA support"
+ depends on PATA_PLATFORM && PPC_OF
+ help
+ This option enables support for generic directly connected ATA
+ devices commonly found on embedded systems with OpenFirmware
+ bindings.
+
+ If unsure, say N.
+
+config PATA_ICSIDE
+ tristate "Acorn ICS PATA support"
+ depends on ARM && ARCH_ACORN
+ help
+ On Acorn systems, say Y here if you wish to use the ICS PATA
+ interface card. This is not required for ICS partition support.
+ If you are unsure, say N to this.
+
+config PATA_IXP4XX_CF
+ tristate "IXP4XX Compact Flash support"
+ depends on ARCH_IXP4XX
+ help
+ This option enables support for a Compact Flash connected on
+ the ixp4xx expansion bus. This driver was written with
+ Loft/Avila boards in mind but can work with others.
+
+ If unsure, say N.
+
+config PATA_SCC
+ tristate "Toshiba's Cell Reference Set IDE support"
+ depends on PCI && PPC_CELLEB
+ help
+ This option enables support for the built-in IDE controller on
+ the Toshiba Cell Reference Board.
+
+ If unsure, say N.
+
+config PATA_SCH
+ tristate "Intel SCH PATA support"
+ depends on PCI
+ help
+ This option enables support for Intel SCH PATA on the Intel
+ SCH (US15W, US15L, UL11L) series host controllers.
+
+ If unsure, say N.
+
+config PATA_BF54X
+ tristate "Blackfin 54x ATAPI support"
+ depends on BF542 || BF548 || BF549
+ help
+ This option enables support for the built-in ATAPI controller on
+ Blackfin 54x family chips.
+
+ If unsure, say N.
+
+endif # ATA_SFF
+endif # ATA
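
Each tristate entry above can be set to y (built into the kernel), m (built
as a module) or n (disabled); the resulting CONFIG_* symbol then drives the
Makefile that follows. An illustrative .config fragment (values chosen
arbitrarily for this sketch):

    CONFIG_ATA=y
    CONFIG_SATA_AHCI=m
    CONFIG_ATA_SFF=y
    CONFIG_ATA_PIIX=y

With these settings libata is linked into the kernel, ahci is built as the
ahci.ko module, and ata_piix is built in.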
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
new file mode 100644
index 0000000..674965f
--- /dev/null
+++ b/drivers/ata/Makefile
@@ -0,0 +1,85 @@
+
+obj-$(CONFIG_ATA) += libata.o
+
+obj-$(CONFIG_SATA_AHCI) += ahci.o
+obj-$(CONFIG_SATA_SVW) += sata_svw.o
+obj-$(CONFIG_ATA_PIIX) += ata_piix.o
+obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
+obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
+obj-$(CONFIG_SATA_SIL) += sata_sil.o
+obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
+obj-$(CONFIG_SATA_VIA) += sata_via.o
+obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
+obj-$(CONFIG_SATA_SIS) += sata_sis.o
+obj-$(CONFIG_SATA_SX4) += sata_sx4.o
+obj-$(CONFIG_SATA_NV) += sata_nv.o
+obj-$(CONFIG_SATA_ULI) += sata_uli.o
+obj-$(CONFIG_SATA_MV) += sata_mv.o
+obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
+obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
+obj-$(CONFIG_SATA_FSL) += sata_fsl.o
+
+obj-$(CONFIG_PATA_ALI) += pata_ali.o
+obj-$(CONFIG_PATA_AMD) += pata_amd.o
+obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
+obj-$(CONFIG_PATA_AT32) += pata_at32.o
+obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
+obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
+obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
+obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
+obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
+obj-$(CONFIG_PATA_CS5535) += pata_cs5535.o
+obj-$(CONFIG_PATA_CS5536) += pata_cs5536.o
+obj-$(CONFIG_PATA_CYPRESS) += pata_cypress.o
+obj-$(CONFIG_PATA_EFAR) += pata_efar.o
+obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
+obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
+obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
+obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
+obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
+obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
+obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
+obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
+obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
+obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o
+obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
+obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
+obj-$(CONFIG_PATA_OPTI) += pata_opti.o
+obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
+obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
+obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
+obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
+obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
+obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
+obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
+obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
+obj-$(CONFIG_PATA_QDI) += pata_qdi.o
+obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
+obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
+obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
+obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
+obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
+obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
+obj-$(CONFIG_PATA_VIA) += pata_via.o
+obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
+obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
+obj-$(CONFIG_PATA_SIS) += pata_sis.o
+obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
+obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
+obj-$(CONFIG_PATA_SCC) += pata_scc.o
+obj-$(CONFIG_PATA_SCH) += pata_sch.o
+obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
+obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
+obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
+obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
+# Should be last but two libata driver
+obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
+# Should be last but one libata driver
+obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
+# Should be last libata driver
+obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o
+
+libata-objs := libata-core.o libata-scsi.o libata-eh.o
+libata-$(CONFIG_ATA_SFF) += libata-sff.o
+libata-$(CONFIG_SATA_PMP) += libata-pmp.o
+libata-$(CONFIG_ATA_ACPI) += libata-acpi.o
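
The Makefile uses the standard kbuild idiom: obj-$(CONFIG_FOO) expands to
obj-y or obj-m according to the .config value, so a single line per driver
covers both built-in and modular builds, e.g. for a hypothetical driver:

    obj-$(CONFIG_PATA_FOO) += pata_foo.o    # y -> built-in, m -> pata_foo.ko

libata itself is a composite object: libata-objs lists its mandatory parts,
and the conditional libata-$(CONFIG_...) lines append the optional ones. The
"Should be last" comments matter because built-in drivers probe in link
order; keeping pata_acpi, ata_generic and pata_legacy at the end lets the
specific drivers claim their hardware before the catch-all ones do.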
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
new file mode 100644
index 0000000..a67b8e7
--- /dev/null
+++ b/drivers/ata/ahci.c
@@ -0,0 +1,2718 @@
+/*
+ * ahci.c - AHCI SATA support
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "ahci"
+#define DRV_VERSION "3.0"
+
+/* Enclosure Management Control */
+#define EM_CTRL_MSG_TYPE 0x000f0000
+
+/* Enclosure Management LED Message Type */
+#define EM_MSG_LED_HBA_PORT 0x0000000f
+#define EM_MSG_LED_PMP_SLOT 0x0000ff00
+#define EM_MSG_LED_VALUE 0xffff0000
+#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
+#define EM_MSG_LED_VALUE_OFF 0xfff80000
+#define EM_MSG_LED_VALUE_ON 0x00010000
+
+static int ahci_skip_host_reset;
+module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
+MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
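+/*
+ * Note: the 0444 permissions make this parameter read-only via sysfs; it is
+ * set at load time, e.g. "ahci.skip_host_reset=1" on the kernel command line
+ * when built in, or "modprobe ahci skip_host_reset=1" when built as a module.
+ */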
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy);
+static void ahci_disable_alpm(struct ata_port *ap);
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size);
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size);
+#define MAX_SLOTS 8
+
+enum {
+ AHCI_PCI_BAR = 5,
+ AHCI_MAX_PORTS = 32,
+ AHCI_MAX_SG = 168, /* hardware max is 64K */
+ AHCI_DMA_BOUNDARY = 0xffffffff,
+ AHCI_MAX_CMDS = 32,
+ AHCI_CMD_SZ = 32,
+ AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
+ AHCI_RX_FIS_SZ = 256,
+ AHCI_CMD_TBL_CDB = 0x40,
+ AHCI_CMD_TBL_HDR_SZ = 0x80,
+ AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+ AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+ AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
+ AHCI_RX_FIS_SZ,
+ AHCI_IRQ_ON_SG = (1 << 31),
+ AHCI_CMD_ATAPI = (1 << 5),
+ AHCI_CMD_WRITE = (1 << 6),
+ AHCI_CMD_PREFETCH = (1 << 7),
+ AHCI_CMD_RESET = (1 << 8),
+ AHCI_CMD_CLR_BUSY = (1 << 10),
+
+ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
+ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
+
+ board_ahci = 0,
+ board_ahci_vt8251 = 1,
+ board_ahci_ign_iferr = 2,
+ board_ahci_sb600 = 3,
+ board_ahci_mv = 4,
+ board_ahci_sb700 = 5,
+ board_ahci_mcp65 = 6,
+ board_ahci_nopmp = 7,
+
+ /* global controller registers */
+ HOST_CAP = 0x00, /* host capabilities */
+ HOST_CTL = 0x04, /* global host control */
+ HOST_IRQ_STAT = 0x08, /* interrupt status */
+ HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
+ HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
+ HOST_EM_LOC = 0x1c, /* Enclosure Management location */
+ HOST_EM_CTL = 0x20, /* Enclosure Management Control */
+
+ /* HOST_CTL bits */
+ HOST_RESET = (1 << 0), /* reset controller; self-clear */
+ HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
+ HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
+
+ /* HOST_CAP bits */
+ HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
+ HOST_CAP_SSC = (1 << 14), /* Slumber capable */
+ HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
+ HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+ HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
+ HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
+ HOST_CAP_SNTF = (1 << 29), /* SNotification register */
+ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
+
+ /* registers for each SATA port */
+ PORT_LST_ADDR = 0x00, /* command list DMA addr */
+ PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
+ PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
+ PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
+ PORT_IRQ_STAT = 0x10, /* interrupt status */
+ PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
+ PORT_CMD = 0x18, /* port command */
+ PORT_TFDATA = 0x20, /* taskfile data */
+ PORT_SIG = 0x24, /* device TF signature */
+ PORT_CMD_ISSUE = 0x38, /* command issue */
+ PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
+ PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
+ PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
+ PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
+ PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
+
+ /* PORT_IRQ_{STAT,MASK} bits */
+ PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
+ PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
+ PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
+ PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
+ PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
+ PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
+ PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
+ PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
+
+ PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
+ PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
+ PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
+ PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
+ PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
+ PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
+ PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
+ PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
+ PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
+
+ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+ PORT_IRQ_IF_ERR |
+ PORT_IRQ_CONNECT |
+ PORT_IRQ_PHYRDY |
+ PORT_IRQ_UNK_FIS |
+ PORT_IRQ_BAD_PMP,
+ PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+ PORT_IRQ_TF_ERR |
+ PORT_IRQ_HBUS_DATA_ERR,
+ DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
+
+ /* PORT_CMD bits */
+ PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
+ PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
+ PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
+ PORT_CMD_PMP = (1 << 17), /* PMP attached */
+ PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
+ PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
+ PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
+ PORT_CMD_CLO = (1 << 3), /* Command list override */
+ PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
+ PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
+ PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
+
+ PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
+ PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
+ PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
+ PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+
+ /* hpriv->flags bits */
+ AHCI_HFLAG_NO_NCQ = (1 << 0),
+ AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
+ AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
+ AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
+ AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
+ AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
+ AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
+ AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
+ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
+ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+
+ /* ap->flags bits */
+
+ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
+ ATA_FLAG_IPM,
+
+ ICH_MAP = 0x90, /* ICH MAP register */
+
+ /* em_ctl bits */
+ EM_CTL_RST = (1 << 9), /* Reset */
+ EM_CTL_TM = (1 << 8), /* Transmit Message */
+ EM_CTL_ALHD = (1 << 26), /* Activity LED */
+};
+
+struct ahci_cmd_hdr {
+ __le32 opts;
+ __le32 status;
+ __le32 tbl_addr;
+ __le32 tbl_addr_hi;
+ __le32 reserved[4];
+};
+
+struct ahci_sg {
+ __le32 addr;
+ __le32 addr_hi;
+ __le32 reserved;
+ __le32 flags_size;
+};
+
+struct ahci_em_priv {
+ enum sw_activity blink_policy;
+ struct timer_list timer;
+ unsigned long saved_activity;
+ unsigned long activity;
+ unsigned long led_state;
+};
+
+struct ahci_host_priv {
+ unsigned int flags; /* AHCI_HFLAG_* */
+ u32 cap; /* cap to use */
+ u32 port_map; /* port map to use */
+ u32 saved_cap; /* saved initial cap */
+ u32 saved_port_map; /* saved initial port_map */
+ u32 em_loc; /* enclosure management location */
+};
+
+struct ahci_port_priv {
+ struct ata_link *active_link;
+ struct ahci_cmd_hdr *cmd_slot;
+ dma_addr_t cmd_slot_dma;
+ void *cmd_tbl;
+ dma_addr_t cmd_tbl_dma;
+ void *rx_fis;
+ dma_addr_t rx_fis_dma;
+ /* for NCQ spurious interrupt analysis */
+ unsigned int ncq_saw_d2h:1;
+ unsigned int ncq_saw_dmas:1;
+ unsigned int ncq_saw_sdb:1;
+ u32 intr_mask; /* interrupts to enable */
+ struct ahci_em_priv em_priv[MAX_SLOTS];/* enclosure management info
+ * per PM slot */
+};
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int ahci_port_start(struct ata_port *ap);
+static void ahci_port_stop(struct ata_port *ap);
+static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_pmp_attach(struct ata_port *ap);
+static void ahci_pmp_detach(struct ata_port *ap);
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void ahci_postreset(struct ata_link *link, unsigned int *class);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+static int ahci_port_resume(struct ata_port *ap);
+static void ahci_dev_config(struct ata_device *dev);
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts);
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
+static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+static int ahci_pci_device_resume(struct pci_dev *pdev);
+#endif
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
+static ssize_t ahci_activity_store(struct ata_device *dev,
+ enum sw_activity val);
+static void ahci_init_sw_activity(struct ata_link *link);
+
+static struct device_attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_policy,
+ &dev_attr_em_message_type,
+ &dev_attr_em_message,
+ NULL
+};
+
+static struct device_attribute *ahci_sdev_attrs[] = {
+ &dev_attr_sw_activity,
+ &dev_attr_unload_heads,
+ NULL
+};
+
+static struct scsi_host_template ahci_sht = {
+ ATA_NCQ_SHT(DRV_NAME),
+ .can_queue = AHCI_MAX_CMDS - 1,
+ .sg_tablesize = AHCI_MAX_SG,
+ .dma_boundary = AHCI_DMA_BOUNDARY,
+ .shost_attrs = ahci_shost_attrs,
+ .sdev_attrs = ahci_sdev_attrs,
+};
+
+static struct ata_port_operations ahci_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = sata_pmp_qc_defer_cmd_switch,
+ .qc_prep = ahci_qc_prep,
+ .qc_issue = ahci_qc_issue,
+ .qc_fill_rtf = ahci_qc_fill_rtf,
+
+ .freeze = ahci_freeze,
+ .thaw = ahci_thaw,
+ .softreset = ahci_softreset,
+ .hardreset = ahci_hardreset,
+ .postreset = ahci_postreset,
+ .pmp_softreset = ahci_softreset,
+ .error_handler = ahci_error_handler,
+ .post_internal_cmd = ahci_post_internal_cmd,
+ .dev_config = ahci_dev_config,
+
+ .scr_read = ahci_scr_read,
+ .scr_write = ahci_scr_write,
+ .pmp_attach = ahci_pmp_attach,
+ .pmp_detach = ahci_pmp_detach,
+
+ .enable_pm = ahci_enable_alpm,
+ .disable_pm = ahci_disable_alpm,
+ .em_show = ahci_led_show,
+ .em_store = ahci_led_store,
+ .sw_activity_show = ahci_activity_show,
+ .sw_activity_store = ahci_activity_store,
+#ifdef CONFIG_PM
+ .port_suspend = ahci_port_suspend,
+ .port_resume = ahci_port_resume,
+#endif
+ .port_start = ahci_port_start,
+ .port_stop = ahci_port_stop,
+};
+
+static struct ata_port_operations ahci_vt8251_ops = {
+ .inherits = &ahci_ops,
+ .hardreset = ahci_vt8251_hardreset,
+};
+
+static struct ata_port_operations ahci_p5wdh_ops = {
+ .inherits = &ahci_ops,
+ .hardreset = ahci_p5wdh_hardreset,
+};
+
+static struct ata_port_operations ahci_sb600_ops = {
+ .inherits = &ahci_ops,
+ .softreset = ahci_sb600_softreset,
+ .pmp_softreset = ahci_sb600_softreset,
+};
+
+#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
+
+static const struct ata_port_info ahci_port_info[] = {
+ /* board_ahci */
+ {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
+ /* board_ahci_vt8251 */
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_vt8251_ops,
+ },
+ /* board_ahci_ign_iferr */
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
+ /* board_ahci_sb600 */
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
+ AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
+ AHCI_HFLAG_SECT255),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_sb600_ops,
+ },
+ /* board_ahci_mv */
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
+ AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
+ /* board_ahci_sb700 */
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_sb600_ops,
+ },
+ /* board_ahci_mcp65 */
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
+ /* board_ahci_nopmp */
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
+};
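+/*
+ * The board_* enum values double as indices into this array: each entry in
+ * ahci_pci_tbl below carries one of them in its driver_data field, and the
+ * probe routine (further down in the file, not shown here) would pick the
+ * port info with something like:
+ *
+ *	struct ata_port_info pi = ahci_port_info[ent->driver_data];
+ */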
+
+static const struct pci_device_id ahci_pci_tbl[] = {
+ /* Intel */
+ { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
+ { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
+ { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
+ { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
+ { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
+ { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
+ { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
+ { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
+ { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
+ { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
+ { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
+ { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
+
+ /* ATI */
+ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
+ { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
+ { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
+ { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
+ { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
+ { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
+ { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
+
+ /* VIA */
+ { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
+ { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
+
+ /* NVIDIA */
+ { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
+ { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
+ { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
+ { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
+ { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
+ { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
+ { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
+ { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
+ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci }, /* MCP7B */
+ { PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */
+
+ /* SiS */
+ { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
+ { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
+ { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
+
+ /* Marvell */
+ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
+ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
+
+ /* Promise */
+ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+
+ /* Generic, PCI class code for AHCI */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
+
+ { } /* terminate list */
+};
+
+
+static struct pci_driver ahci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = ahci_pci_tbl,
+ .probe = ahci_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ahci_pci_device_suspend,
+ .resume = ahci_pci_device_resume,
+#endif
+};
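+/*
+ * As with any PCI driver, this structure is handed to the PCI core at module
+ * load; the (truncated) tail of this file would typically contain something
+ * like:
+ *
+ *	static int __init ahci_init(void)
+ *	{
+ *		return pci_register_driver(&ahci_pci_driver);
+ *	}
+ *	module_init(ahci_init);
+ */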
+
+static int ahci_em_messages = 1;
+module_param(ahci_em_messages, int, 0444);
+/* add other LED protocol types when they become supported */
+MODULE_PARM_DESC(ahci_em_messages,
+ "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
+
+#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
+static int marvell_enable;
+#else
+static int marvell_enable = 1;
+#endif
+module_param(marvell_enable, int, 0644);
+MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
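+/*
+ * Note: marvell_enable defaults to 0 whenever PATA_MARVELL is configured, so
+ * the two drivers do not contend for the combined PATA/SATA 6121/6145 parts.
+ * With 0644 permissions it can be set at boot ("ahci.marvell_enable=0", as
+ * suggested by ahci_save_initial_config() below) or read back later via
+ * /sys/module/ahci/parameters/marvell_enable.
+ */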
+
+
+static inline int ahci_nr_ports(u32 cap)
+{
+ return (cap & 0x1f) + 1;
+}
+
+static inline void __iomem *__ahci_port_base(struct ata_host *host,
+ unsigned int port_no)
+{
+ void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+
+ return mmio + 0x100 + (port_no * 0x80);
+}
+
+static inline void __iomem *ahci_port_base(struct ata_port *ap)
+{
+ return __ahci_port_base(ap->host, ap->port_no);
+}
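+/*
+ * Worked example: with the 0x80-byte per-port stride above, port 2's
+ * registers start at ABAR + 0x100 + 2 * 0x80 = ABAR + 0x200, so reading its
+ * PxCMD register (for ap->port_no == 2) is simply:
+ *
+ *	u32 cmd = readl(ahci_port_base(ap) + PORT_CMD);
+ */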
+
+static void ahci_enable_ahci(void __iomem *mmio)
+{
+ int i;
+ u32 tmp;
+
+ /* turn on AHCI_EN */
+ tmp = readl(mmio + HOST_CTL);
+ if (tmp & HOST_AHCI_EN)
+ return;
+
+ /* Some controllers need AHCI_EN to be written multiple times.
+ * Try a few times before giving up.
+ */
+ for (i = 0; i < 5; i++) {
+ tmp |= HOST_AHCI_EN;
+ writel(tmp, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
+ if (tmp & HOST_AHCI_EN)
+ return;
+ msleep(10);
+ }
+
+ WARN_ON(1);
+}
+
+/**
+ * ahci_save_initial_config - Save and fixup initial config values
+ * @pdev: target PCI device
+ * @hpriv: host private area to store config values
+ *
+ * Some registers containing configuration info might be setup by
+ * BIOS and might be cleared on reset. This function saves the
+ * initial values of those registers into @hpriv such that they
+ * can be restored after controller reset.
+ *
+ * If inconsistent, config values are fixed up by this function.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ahci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
+{
+ void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+ u32 cap, port_map;
+ int i;
+ int mv;
+
+ /* make sure AHCI mode is enabled before accessing CAP */
+ ahci_enable_ahci(mmio);
+
+ /* Values prefixed with saved_ are written back to host after
+ * reset. Values without are used for driver operation.
+ */
+ hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
+ hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
+
+ /* some chips have errata preventing 64bit use */
+ if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "controller can't do 64bit DMA, forcing 32bit\n");
+ cap &= ~HOST_CAP_64;
+ }
+
+ if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "controller can't do NCQ, turning off CAP_NCQ\n");
+ cap &= ~HOST_CAP_NCQ;
+ }
+
+ if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "controller can do NCQ, turning on CAP_NCQ\n");
+ cap |= HOST_CAP_NCQ;
+ }
+
+ if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "controller can't do PMP, turning off CAP_PMP\n");
+ cap &= ~HOST_CAP_PMP;
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
+ port_map != 1) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
+ port_map, 1);
+ port_map = 1;
+ }
+
+ /*
+ * Temporary Marvell 6145 hack: PATA port presence
+ * is asserted through the standard AHCI port
+ * presence register, as bit 4 (counting from 0)
+ */
+ if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
+ if (pdev->device == 0x6121)
+ mv = 0x3;
+ else
+ mv = 0xf;
+ dev_printk(KERN_ERR, &pdev->dev,
+ "MV_AHCI HACK: port_map %x -> %x\n",
+ port_map,
+ port_map & mv);
+ dev_printk(KERN_ERR, &pdev->dev,
+ "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
+
+ port_map &= mv;
+ }
+
+ /* cross check port_map and cap.n_ports */
+ if (port_map) {
+ int map_ports = 0;
+
+ for (i = 0; i < AHCI_MAX_PORTS; i++)
+ if (port_map & (1 << i))
+ map_ports++;
+
+ /* If PI has more ports than n_ports, whine, clear
+ * port_map and let it be generated from n_ports.
+ */
+ if (map_ports > ahci_nr_ports(cap)) {
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "implemented port map (0x%x) contains more "
+ "ports than nr_ports (%u), using nr_ports\n",
+ port_map, ahci_nr_ports(cap));
+ port_map = 0;
+ }
+ }
+
+ /* fabricate port_map from cap.nr_ports */
+ if (!port_map) {
+ port_map = (1 << ahci_nr_ports(cap)) - 1;
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "forcing PORTS_IMPL to 0x%x\n", port_map);
+
+ /* write the fixed up value to the PI register */
+ hpriv->saved_port_map = port_map;
+ }
+
+ /* record values to use during operation */
+ hpriv->cap = cap;
+ hpriv->port_map = port_map;
+}
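+/*
+ * Worked example of the fallback above: if PORTS_IMPL reads back 0 while CAP
+ * reports nr_ports = 4 (cap & 0x1f == 3), the fabricated map is
+ * (1 << 4) - 1 = 0xf, i.e. ports 0-3 implemented.
+ */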
+
+/**
+ * ahci_restore_initial_config - Restore initial config
+ * @host: target ATA host
+ *
+ * Restore initial config stored by ahci_save_initial_config().
+ *
+ * LOCKING:
+ * None.
+ */
+static void ahci_restore_initial_config(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+
+ writel(hpriv->saved_cap, mmio + HOST_CAP);
+ writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
+ (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
+}
+
+static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
+{
+ static const int offset[] = {
+ [SCR_STATUS] = PORT_SCR_STAT,
+ [SCR_CONTROL] = PORT_SCR_CTL,
+ [SCR_ERROR] = PORT_SCR_ERR,
+ [SCR_ACTIVE] = PORT_SCR_ACT,
+ [SCR_NOTIFICATION] = PORT_SCR_NTF,
+ };
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ if (sc_reg < ARRAY_SIZE(offset) &&
+ (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
+ return offset[sc_reg];
+ return 0;
+}
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ *val = readl(port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ writel(val, port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void ahci_start_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* start DMA */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+}
+
+static int ahci_stop_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ tmp = readl(port_mmio + PORT_CMD);
+
+ /* check if the HBA is idle */
+ if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
+ return 0;
+
+ /* setting HBA to idle */
+ tmp &= ~PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for engine to stop. This could be as long as 500 msec */
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+ if (tmp & PORT_CMD_LIST_ON)
+ return -EIO;
+
+ return 0;
+}
+
+static void ahci_start_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 tmp;
+
+ /* set FIS registers */
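+ /* the double 16-bit shift below yields the high dword of the DMA
+ * address without invoking undefined behaviour when dma_addr_t is
+ * only 32 bits wide (a single ">> 32" would be UB in that case) */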
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->cmd_slot_dma >> 16) >> 16,
+ port_mmio + PORT_LST_ADDR_HI);
+ writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
+
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->rx_fis_dma >> 16) >> 16,
+ port_mmio + PORT_FIS_ADDR_HI);
+ writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
+
+ /* enable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* flush */
+ readl(port_mmio + PORT_CMD);
+}
+
+static int ahci_stop_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* disable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp &= ~PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for completion, spec says 500ms, give it 1000 */
+ tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
+ PORT_CMD_FIS_ON, 10, 1000);
+ if (tmp & PORT_CMD_FIS_ON)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void ahci_power_up(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+
+ /* spin up device */
+ if (hpriv->cap & HOST_CAP_SSS) {
+ cmd |= PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+ }
+
+ /* wake up link */
+ writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
+}
+
+static void ahci_disable_alpm(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* IPM bits should be disabled by libata-core */
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* disable ALPM and ASP */
+ cmd &= ~PORT_CMD_ASP;
+ cmd &= ~PORT_CMD_ALPE;
+
+ /* force the interface back to active */
+ cmd |= PORT_CMD_ICC_ACTIVE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* wait 10ms to be sure we've come out of any low power state */
+ msleep(10);
+
+ /* clear out any PhyRdy stuff from interrupt status */
+ writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
+
+ /* go ahead and clean out PhyRdy Change from Serror too */
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+
+ /*
+ * Clear flag to indicate that we should ignore all PhyRdy
+ * state changes
+ */
+ hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
+
+ /*
+ * Enable interrupts on Phy Ready.
+ */
+ pp->intr_mask |= PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * don't change the link pm policy - we can be called
+ * just to turn off link pm temporarily
+ */
+}
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 asp;
+
+ /* Make sure the host is capable of link power management */
+ if (!(hpriv->cap & HOST_CAP_ALPM))
+ return -EINVAL;
+
+ switch (policy) {
+ case MAX_PERFORMANCE:
+ case NOT_AVAILABLE:
+ /*
+ * if we came here with NOT_AVAILABLE,
+ * it just means this is the first time we
+ * have tried to enable - default to max performance,
+ * and let the user go to lower power modes on request.
+ */
+ ahci_disable_alpm(ap);
+ return 0;
+ case MIN_POWER:
+ /* configure HBA to enter SLUMBER */
+ asp = PORT_CMD_ASP;
+ break;
+ case MEDIUM_POWER:
+ /* configure HBA to enter PARTIAL */
+ asp = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Disable interrupts on Phy Ready. This keeps us from
+ * getting woken up due to spurious phy ready interrupts.
+ * TBD - Hot plug should be done via polling now, is
+ * that even supported?
+ */
+ pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * Set a flag to indicate that we should ignore all PhyRdy
+ * state changes since these can happen now whenever we
+ * change link state
+ */
+ hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
+
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /*
+ * Set ASP based on Policy
+ */
+ cmd |= asp;
+
+ /*
+ * Setting this bit will instruct the HBA to aggressively
+ * enter a lower power link state when it's appropriate and
+ * based on the value set above for ASP
+ */
+ cmd |= PORT_CMD_ALPE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* IPM bits should be set by libata-core */
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void ahci_power_down(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd, scontrol;
+
+ if (!(hpriv->cap & HOST_CAP_SSS))
+ return;
+
+ /* put device into listen mode, first set PxSCTL.DET to 0 */
+ scontrol = readl(port_mmio + PORT_SCR_CTL);
+ scontrol &= ~0xf;
+ writel(scontrol, port_mmio + PORT_SCR_CTL);
+
+ /* then set PxCMD.SUD to 0 */
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+ cmd &= ~PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+}
+#endif
+
+static void ahci_start_port(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+
+ /* enable FIS reception */
+ ahci_start_fis_rx(ap);
+
+ /* enable DMA */
+ ahci_start_engine(ap);
+
+ /* turn on LEDs */
+ if (ap->flags & ATA_FLAG_EM) {
+ ata_port_for_each_link(link, ap) {
+ emp = &pp->em_priv[link->pmp];
+ ahci_transmit_led_message(ap, emp->led_state, 4);
+ }
+ }
+
+ if (ap->flags & ATA_FLAG_SW_ACTIVITY)
+ ata_port_for_each_link(link, ap)
+ ahci_init_sw_activity(link);
+
+}
+
+static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
+{
+ int rc;
+
+ /* disable DMA */
+ rc = ahci_stop_engine(ap);
+ if (rc) {
+ *emsg = "failed to stop engine";
+ return rc;
+ }
+
+ /* disable FIS reception */
+ rc = ahci_stop_fis_rx(ap);
+ if (rc) {
+ *emsg = "failed to stop FIS RX";
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ahci_reset_controller(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ u32 tmp;
+
+ /* we must be in AHCI mode, before using anything
+ * AHCI-specific, such as HOST_RESET.
+ */
+ ahci_enable_ahci(mmio);
+
+ /* global controller reset */
+ if (!ahci_skip_host_reset) {
+ tmp = readl(mmio + HOST_CTL);
+ if ((tmp & HOST_RESET) == 0) {
+ writel(tmp | HOST_RESET, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ }
+
+ /*
+ * To perform a host reset, the OS sets HOST_RESET
+ * and polls until the bit reads back as "0".
+ * The reset must complete within 1 second, or
+ * the hardware should be considered fried.
+ */
+ tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
+ HOST_RESET, 10, 1000);
+
+ if (tmp & HOST_RESET) {
+ dev_printk(KERN_ERR, host->dev,
+ "controller reset failed (0x%x)\n", tmp);
+ return -EIO;
+ }
+
+ /* turn on AHCI mode */
+ ahci_enable_ahci(mmio);
+
+ /* Some registers might be cleared on reset. Restore
+ * initial values.
+ */
+ ahci_restore_initial_config(host);
+ } else
+ dev_printk(KERN_INFO, host->dev,
+ "skipping global host reset\n");
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ u16 tmp16;
+
+ /* configure PCS: make sure the per-port enable bits
+ * cover every implemented port */
+ pci_read_config_word(pdev, 0x92, &tmp16);
+ if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
+ tmp16 |= hpriv->port_map;
+ pci_write_config_word(pdev, 0x92, tmp16);
+ }
+ }
+
+ return 0;
+}
+
+static void ahci_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
+ return;
+
+ emp->activity++;
+ if (!timer_pending(&emp->timer))
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
+}
+
+static void ahci_sw_activity_blink(unsigned long arg)
+{
+ struct ata_link *link = (struct ata_link *)arg;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ unsigned long led_message = emp->led_state;
+ u32 activity_led_state;
+ unsigned long flags;
+
+ led_message &= EM_MSG_LED_VALUE;
+ led_message |= ap->port_no | (link->pmp << 8);
+
+ /* check to see if we've had activity. If so,
+ * toggle state of LED and reset timer. If not,
+ * turn LED to desired idle state.
+ */
+ spin_lock_irqsave(ap->lock, flags);
+ if (emp->saved_activity != emp->activity) {
+ emp->saved_activity = emp->activity;
+ /* get the current LED state */
+ activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
+
+ if (activity_led_state)
+ activity_led_state = 0;
+ else
+ activity_led_state = 1;
+
+ /* clear old state */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ /* toggle state */
+ led_message |= (activity_led_state << 16);
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
+ } else {
+ /* switch to idle */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+ if (emp->blink_policy == BLINK_OFF)
+ led_message |= (1 << 16);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+ ahci_transmit_led_message(ap, led_message, 4);
+}
+
+static void ahci_init_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* init activity stats, setup timer */
+ emp->saved_activity = emp->activity = 0;
+ setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
+
+ /* check our blink policy and set flag for link if it's enabled */
+ if (emp->blink_policy)
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+}
+
+static int ahci_reset_em(struct ata_host *host)
+{
+ void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ u32 em_ctl;
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
+ return -EINVAL;
+
+ writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
+ return 0;
+}
+
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
+ u32 em_ctl;
+ u32 message[] = {0, 0};
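+ /* message[0]: EM message header, message[1]: LED state payload */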
+ unsigned long flags;
+ int pmp;
+ struct ahci_em_priv *emp;
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+ * if we are still busy transmitting a previous message,
+ * do not allow a new transmission
+ */
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (em_ctl & EM_CTL_TM) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EINVAL;
+ }
+
+ /*
+ * create message header - this is all zero except for
+ * the message size, which is 4 bytes.
+ */
+ message[0] |= (4 << 8);
+
+ /* ignore bits 0:4 of byte zero, fill in the port info ourselves */
+ message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
+
+ /* write message to EM_LOC */
+ writel(message[0], mmio + hpriv->em_loc);
+ writel(message[1], mmio + hpriv->em_loc+4);
+
+ /* save off new led state for port/slot */
+ emp->led_state = message[1];
+
+ /*
+ * tell hardware to transmit the message
+ */
+ writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ return size;
+}
+
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+ int rc = 0;
+
+ ata_port_for_each_link(link, ap) {
+ emp = &pp->em_priv[link->pmp];
+ rc += sprintf(buf + rc, "%lx\n", emp->led_state);
+ }
+ return rc;
+}
+
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size)
+{
+ int state;
+ int pmp;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp;
+
+ state = simple_strtoul(buf, NULL, 0);
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ /* mask off the activity bits if we are in sw_activity
+ * mode, user should turn off sw_activity before setting
+ * activity led through em_message
+ */
+ if (emp->blink_policy)
+ state &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ return ahci_transmit_led_message(ap, state, size);
+}
+
+static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ u32 port_led_state = emp->led_state;
+
+ /* save the desired Activity LED behavior */
+ if (val == OFF) {
+ /* clear LFLAG */
+ link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
+
+ /* set the LED to OFF */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ } else {
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+ if (val == BLINK_OFF) {
+ /* set LED to ON for idle */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ }
+ }
+ emp->blink_policy = val;
+ return 0;
+}
+
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* display the saved value of activity behavior for this
+ * disk.
+ */
+ return sprintf(buf, "%d\n", emp->blink_policy);
+}
+
+static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
+ int port_no, void __iomem *mmio,
+ void __iomem *port_mmio)
+{
+ const char *emsg = NULL;
+ int rc;
+ u32 tmp;
+
+ /* make sure port is not active */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "%s (%d)\n", emsg, rc);
+
+ /* clear SError */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+ writel(1 << port_no, mmio + HOST_IRQ_STAT);
+}
+
+static void ahci_init_controller(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ int i;
+ void __iomem *port_mmio;
+ u32 tmp;
+ int mv;
+
+ if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
+ if (pdev->device == 0x6121)
+ mv = 2;
+ else
+ mv = 4;
+ port_mmio = __ahci_port_base(host, mv);
+
+ writel(0, port_mmio + PORT_IRQ_MASK);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+ }
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ port_mmio = ahci_port_base(ap);
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ ahci_port_init(pdev, ap, i, mmio, port_mmio);
+ }
+
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+ writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+}
+
+static void ahci_dev_config(struct ata_device *dev)
+{
+ struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+
+ if (hpriv->flags & AHCI_HFLAG_SECT255) {
+ dev->max_sectors = 255;
+ ata_dev_printk(dev, KERN_INFO,
+ "SB600 AHCI: limiting to 255 sectors per cmd\n");
+ }
+}
+
+static unsigned int ahci_dev_classify(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_taskfile tf;
+ u32 tmp;
+
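+ /* PxSIG holds the contents of the initial D2H signature FIS */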
+ tmp = readl(port_mmio + PORT_SIG);
+ tf.lbah = (tmp >> 24) & 0xff;
+ tf.lbam = (tmp >> 16) & 0xff;
+ tf.lbal = (tmp >> 8) & 0xff;
+ tf.nsect = (tmp) & 0xff;
+
+ return ata_dev_classify(&tf);
+}
+
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts)
+{
+ dma_addr_t cmd_tbl_dma;
+
+ cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+ pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+ pp->cmd_slot[tag].status = 0;
+ pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+ pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
+}
+
+static int ahci_kick_engine(struct ata_port *ap, int force_restart)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+ u32 tmp;
+ int busy, rc;
+
+ /* do we need to kick the port? */
+ busy = status & (ATA_BUSY | ATA_DRQ);
+ if (!busy && !force_restart)
+ return 0;
+
+ /* stop engine */
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ goto out_restart;
+
+ /* need to do CLO? */
+ if (!busy) {
+ rc = 0;
+ goto out_restart;
+ }
+
+ if (!(hpriv->cap & HOST_CAP_CLO)) {
+ rc = -EOPNOTSUPP;
+ goto out_restart;
+ }
+
+ /* perform CLO */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_CLO;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ rc = 0;
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+ if (tmp & PORT_CMD_CLO)
+ rc = -EIO;
+
+ /* restart engine */
+ out_restart:
+ ahci_start_engine(ap);
+ return rc;
+}
+
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+ struct ata_taskfile *tf, int is_cmd, u16 flags,
+ unsigned long timeout_msec)
+{
+ const u32 cmd_fis_len = 5; /* five dwords */
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u8 *fis = pp->cmd_tbl;
+ u32 tmp;
+
+ /* prep the command */
+ ata_tf_to_fis(tf, pmp, is_cmd, fis);
+ ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+ /* issue & wait */
+ writel(1, port_mmio + PORT_CMD_ISSUE);
+
+ if (timeout_msec) {
+ tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
+ 1, timeout_msec);
+ if (tmp & 0x1) {
+ ahci_kick_engine(ap, 1);
+ return -EBUSY;
+ }
+ } else
+ readl(port_mmio + PORT_CMD_ISSUE); /* flush */
+
+ return 0;
+}
+
+static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ int pmp, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link))
+{
+ struct ata_port *ap = link->ap;
+ const char *reason = NULL;
+ unsigned long now, msecs;
+ struct ata_taskfile tf;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ /* prepare for SRST (AHCI-1.1 10.4.1) */
+ rc = ahci_kick_engine(ap, 1);
+ if (rc && rc != -EOPNOTSUPP)
+ ata_link_printk(link, KERN_WARNING,
+ "failed to reset engine (errno=%d)\n", rc);
+
+ ata_tf_init(link->device, &tf);
+
+ /* issue the first D2H Register FIS */
+ msecs = 0;
+ now = jiffies;
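+ /* give the polled command whatever time remains until the deadline */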
+ if (time_after(deadline, now))
+ msecs = jiffies_to_msecs(deadline - now);
+
+ tf.ctl |= ATA_SRST;
+ if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+ AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
+ rc = -EIO;
+ reason = "1st FIS failed";
+ goto fail;
+ }
+
+ /* spec says at least 5us, but be generous and sleep for 1ms */
+ msleep(1);
+
+ /* issue the second D2H Register FIS */
+ tf.ctl &= ~ATA_SRST;
+ ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
+
+ /* wait for link to become ready */
+ rc = ata_wait_after_reset(link, deadline, check_ready);
+ /* link occupied, -ENODEV too is an error */
+ if (rc) {
+ reason = "device not ready";
+ goto fail;
+ }
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+
+ fail:
+ ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ return rc;
+}
+
+static int ahci_check_ready(struct ata_link *link)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
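+ /* the low byte of PxTFD mirrors the taskfile status register */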
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+ return ata_check_ready(status);
+}
+
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int pmp = sata_srst_pmp(link);
+
+ DPRINTK("ENTER\n");
+
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+}
+
+static int ahci_sb600_check_ready(struct ata_link *link)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+ u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
+
+ /*
+ * There is no need to check TFDATA if BAD PMP is found (set
+ * spuriously by a HW bug); failing fast saves the timeout delay.
+ */
+ if (irq_status & PORT_IRQ_BAD_PMP)
+ return -EIO;
+
+ return ata_check_ready(status);
+}
+
+static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ int pmp = sata_srst_pmp(link);
+ int rc;
+ u32 irq_sts;
+
+ DPRINTK("ENTER\n");
+
+ rc = ahci_do_softreset(link, class, pmp, deadline,
+ ahci_sb600_check_ready);
+
+ /*
+ * Soft reset fails on some ATI chips with IPMS set when PMP
+ * is enabled but a SATA HDD/ODD is connected to the SATA port;
+ * retry the soft reset with pmp == 0.
+ */
+ if (rc == -EIO) {
+ irq_sts = readl(port_mmio + PORT_IRQ_STAT);
+ if (irq_sts & PORT_IRQ_BAD_PMP) {
+ ata_link_printk(link, KERN_WARNING,
+ "failed due to HW bug, retry pmp=0\n");
+ rc = ahci_do_softreset(link, class, 0, deadline,
+ ahci_check_ready);
+ }
+ }
+
+ return rc;
+}
+
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ ahci_stop_engine(ap);
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = 0x80;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+ ahci_start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+ return rc;
+}
+
+static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ bool online;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ ahci_stop_engine(ap);
+
+ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+ deadline, &online, NULL);
+
+ ahci_start_engine(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+
+ /* vt8251 doesn't clear BSY on signature FIS reception,
+ * request follow-up softreset.
+ */
+ return online ? -EAGAIN : rc;
+}
+
+static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+ int rc;
+
+ ahci_stop_engine(ap);
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = 0x80;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+ deadline, &online, NULL);
+
+ ahci_start_engine(ap);
+
+ /* The pseudo configuration device on SIMG4726 attached to
+ * ASUS P5W-DH Deluxe doesn't send signature FIS after
+ * hardreset if no device is attached to the first downstream
+ * port, and the pseudo device locks up on SRST w/ PMP==0. To
+ * work around this, wait for !BSY only briefly. If BSY isn't
+ * cleared, perform CLO and proceed to IDENTIFY (achieved by
+ * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
+ *
+ * Wait for two seconds. Devices attached to downstream port
+ * which can't process the following IDENTIFY after this will
+ * have to be reset again. For most cases, this should
+ * suffice while keeping probing snappy enough.
+ */
+ if (online) {
+ rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
+ ahci_check_ready);
+ if (rc)
+ ahci_kick_engine(ap, 0);
+ }
+ return rc;
+}
+
+static void ahci_postreset(struct ata_link *link, unsigned int *class)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 new_tmp, tmp;
+
+ ata_std_postreset(link, class);
+
+ /* Make sure port's ATAPI bit is set appropriately */
+ new_tmp = tmp = readl(port_mmio + PORT_CMD);
+ if (*class == ATA_DEV_ATAPI)
+ new_tmp |= PORT_CMD_ATAPI;
+ else
+ new_tmp &= ~PORT_CMD_ATAPI;
+ if (new_tmp != tmp) {
+ writel(new_tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+ }
+}
+
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+ struct scatterlist *sg;
+ struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+ unsigned int si;
+
+ VPRINTK("ENTER\n");
+
+ /*
+ * Next, the S/G list.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 sg_len = sg_dma_len(sg);
+
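+ /* the PRD byte-count field is zero-based, hence sg_len - 1 below */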
+ ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+ ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+ ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
+ }
+
+ return si;
+}
+
+static void ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ int is_atapi = ata_is_atapi(qc->tf.protocol);
+ void *cmd_tbl;
+ u32 opts;
+ const u32 cmd_fis_len = 5; /* five dwords */
+ unsigned int n_elem;
+
+ /*
+ * Fill in command table information. First, the header,
+ * a SATA Register - Host to Device command FIS.
+ */
+ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+ if (is_atapi) {
+ memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+ memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+ }
+
+ n_elem = 0;
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ n_elem = ahci_fill_sg(qc, cmd_tbl);
+
+ /*
+ * Fill in command slot information.
+ */
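+ /* command header DW0: FIS length (dwords) in bits 4:0, ATAPI and
+ * Write flags in bits 5-6, PMP port in 15:12, PRDT length in 31:16 */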
+ opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ opts |= AHCI_CMD_WRITE;
+ if (is_atapi)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->tag, opts);
+}
+
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_eh_info *host_ehi = &ap->link.eh_info;
+ struct ata_link *link = NULL;
+ struct ata_queued_cmd *active_qc;
+ struct ata_eh_info *active_ehi;
+ u32 serror;
+
+ /* determine active link */
+ ata_port_for_each_link(link, ap)
+ if (ata_link_active(link))
+ break;
+ if (!link)
+ link = &ap->link;
+
+ active_qc = ata_qc_from_tag(ap, link->active_tag);
+ active_ehi = &link->eh_info;
+
+ /* record irq stat */
+ ata_ehi_clear_desc(host_ehi);
+ ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
+
+ /* AHCI needs SError cleared; otherwise, it might lock up */
+ ahci_scr_read(&ap->link, SCR_ERROR, &serror);
+ ahci_scr_write(&ap->link, SCR_ERROR, serror);
+ host_ehi->serror |= serror;
+
+ /* some controllers set IRQ_IF_ERR on device errors, ignore it */
+ if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
+ irq_stat &= ~PORT_IRQ_IF_ERR;
+
+ if (irq_stat & PORT_IRQ_TF_ERR) {
+ /* If qc is active, charge it; otherwise, the active
+ * link. There's no active qc on NCQ errors. It will
+ * be determined by EH by reading log page 10h.
+ */
+ if (active_qc)
+ active_qc->err_mask |= AC_ERR_DEV;
+ else
+ active_ehi->err_mask |= AC_ERR_DEV;
+
+ if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
+ host_ehi->serror &= ~SERR_INTERNAL;
+ }
+
+ if (irq_stat & PORT_IRQ_UNK_FIS) {
+ u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi,
+ "unknown FIS %08x %08x %08x %08x" ,
+ unk[0], unk[1], unk[2], unk[3]);
+ }
+
+ if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi, "incorrect PMP");
+ }
+
+ if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+ host_ehi->err_mask |= AC_ERR_HOST_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(host_ehi, "host bus error");
+ }
+
+ if (irq_stat & PORT_IRQ_IF_ERR) {
+ host_ehi->err_mask |= AC_ERR_ATA_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(host_ehi, "interface fatal error");
+ }
+
+ if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+ ata_ehi_hotplugged(host_ehi);
+ ata_ehi_push_desc(host_ehi, "%s",
+ irq_stat & PORT_IRQ_CONNECT ?
+ "connection status changed" : "PHY RDY changed");
+ }
+
+ /* okay, let's hand over to EH */
+
+ if (irq_stat & PORT_IRQ_FREEZE)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+}
+
+static void ahci_port_intr(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
+ u32 status, qc_active;
+ int rc;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ /* ignore BAD_PMP while resetting */
+ if (unlikely(resetting))
+ status &= ~PORT_IRQ_BAD_PMP;
+
+ /* If we are getting PhyRdy, this is
+ * just a power state change; clear it
+ * out, plus the PhyRdy/Comm Wake bits,
+ * from SError
+ */
+ if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
+ (status & PORT_IRQ_PHYRDY)) {
+ status &= ~PORT_IRQ_PHYRDY;
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ }
+
+ if (unlikely(status & PORT_IRQ_ERROR)) {
+ ahci_error_intr(ap, status);
+ return;
+ }
+
+ if (status & PORT_IRQ_SDB_FIS) {
+ /* If SNotification is available, leave notification
+ * handling to sata_async_notification(). If not,
+ * emulate it by snooping SDB FIS RX area.
+ *
+ * Snooping the FIS RX area is probably cheaper than
+ * poking SNotification, but some controllers which
+ * implement SNotification, ICH9 for example, don't
+ * store the AN SDB FIS in the receive area.
+ */
+ if (hpriv->cap & HOST_CAP_SNTF)
+ sata_async_notification(ap);
+ else {
+ /* If the 'N' bit in word 0 of the FIS is set,
+ * we just received asynchronous notification.
+ * Tell libata about it.
+ */
+ const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+ u32 f0 = le32_to_cpu(f[0]);
+
+ if (f0 & (1 << 15))
+ sata_async_notification(ap);
+ }
+ }
+
+ /* pp->active_link is valid iff any command is in flight */
+ if (ap->qc_active && pp->active_link->sactive)
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ else
+ qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+
+ rc = ata_qc_complete_multiple(ap, qc_active);
+
+ /* while resetting, invalid completions are expected */
+ if (unlikely(rc < 0 && !resetting)) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ }
+}
+
+static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ahci_host_priv *hpriv;
+ unsigned int i, handled = 0;
+ void __iomem *mmio;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ hpriv = host->private_data;
+ mmio = host->iomap[AHCI_PCI_BAR];
+
+ /* sigh. 0xffffffff is a valid return from h/w */
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ if (!irq_stat)
+ return IRQ_NONE;
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ spin_lock(&host->lock);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ if (!(irq_masked & (1 << i)))
+ continue;
+
+ ap = host->ports[i];
+ if (ap) {
+ ahci_port_intr(ap);
+ VPRINTK("port %u\n", i);
+ } else {
+ VPRINTK("port %u (no irq)\n", i);
+ if (ata_ratelimit())
+ dev_printk(KERN_WARNING, host->dev,
+ "interrupt on disabled port %u\n", i);
+ }
+
+ handled = 1;
+ }
+
+ /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
+ * it should be cleared only after all the port events are cleared;
+ * otherwise, it will raise a spurious interrupt after each
+ * valid one. Please read section 10.6.2 of AHCI 1.1 for more
+ * information.
+ *
+ * Also, use the unmasked value to clear interrupt as spurious
+ * pending event on a dummy port might cause screaming IRQ.
+ */
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* Keep track of the currently active link. It will be used
+ * in completion path to determine whether NCQ phase is in
+ * progress.
+ */
+ pp->active_link = qc->dev->link;
+
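+ /* for NCQ, the tag bit must be set in PxSACT before writing PxCI */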
+ if (qc->tf.protocol == ATA_PROT_NCQ)
+ writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+ writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
+
+ ahci_sw_activity(qc->dev->link);
+
+ return 0;
+}
+
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct ahci_port_priv *pp = qc->ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+
+ ata_tf_from_fis(d2h_fis, &qc->result_tf);
+ return true;
+}
+
+static void ahci_freeze(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ /* turn IRQ off */
+ writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* clear IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+ writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
+
+ /* turn IRQ back on */
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+ if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+ /* restart engine */
+ ahci_stop_engine(ap);
+ ahci_start_engine(ap);
+ }
+
+ sata_pmp_error_handler(ap);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* make DMA engine forget about the failed command */
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ ahci_kick_engine(ap, 1);
+}
+
+static void ahci_pmp_attach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd |= PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ pp->intr_mask |= PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_pmp_detach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd &= ~PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static int ahci_port_resume(struct ata_port *ap)
+{
+ ahci_power_up(ap);
+ ahci_start_port(ap);
+
+ if (sata_pmp_attached(ap))
+ ahci_pmp_attach(ap);
+ else
+ ahci_pmp_detach(ap);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc == 0)
+ ahci_power_down(ap);
+ else {
+ ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
+ ahci_start_port(ap);
+ }
+
+ return rc;
+}
+
+static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ u32 ctl;
+
+ if (mesg.event & PM_EVENT_SLEEP) {
+ /* AHCI spec rev1.1 section 8.3.3:
+ * Software must disable interrupts prior to requesting a
+ * transition of the HBA to D3 state.
+ */
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ }
+
+ return ata_pci_device_suspend(pdev, mesg);
+}
+
+static int ahci_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+#endif
+
+static int ahci_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct ahci_port_priv *pp;
+ void *mem;
+ dma_addr_t mem_dma;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
+ GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
+
+ /*
+ * First item in chunk of DMA memory: 32-slot command table,
+ * 32 bytes each in size
+ */
+ pp->cmd_slot = mem;
+ pp->cmd_slot_dma = mem_dma;
+
+ mem += AHCI_CMD_SLOT_SZ;
+ mem_dma += AHCI_CMD_SLOT_SZ;
+
+ /*
+ * Second item: Received-FIS area
+ */
+ pp->rx_fis = mem;
+ pp->rx_fis_dma = mem_dma;
+
+ mem += AHCI_RX_FIS_SZ;
+ mem_dma += AHCI_RX_FIS_SZ;
+
+ /*
+ * Third item: data area for storing a single command
+ * and its scatter-gather table
+ */
+ pp->cmd_tbl = mem;
+ pp->cmd_tbl_dma = mem_dma;
+
+ /*
+ * Save off initial list of interrupts to be enabled.
+ * This could be changed later
+ */
+ pp->intr_mask = DEF_PORT_IRQ;
+
+ ap->private_data = pp;
+
+ /* engage engines, captain */
+ return ahci_port_resume(ap);
+}
+
+static void ahci_port_stop(struct ata_port *ap)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ /* de-initialize port */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
+}
+
+static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+{
+ int rc;
+
+ if (using_dac &&
+ !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "64-bit DMA enable failed\n");
+ return rc;
+ }
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static void ahci_print_info(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ u32 vers, cap, impl, speed;
+ const char *speed_s;
+ u16 cc;
+ const char *scc_s;
+
+ vers = readl(mmio + HOST_VERSION);
+ cap = hpriv->cap;
+ impl = hpriv->port_map;
+
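+ /* CAP.ISS (bits 23:20): 1 = 1.5 Gbps, 2 = 3 Gbps */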
+ speed = (cap >> 20) & 0xf;
+ if (speed == 1)
+ speed_s = "1.5";
+ else if (speed == 2)
+ speed_s = "3";
+ else
+ speed_s = "?";
+
+ pci_read_config_word(pdev, 0x0a, &cc);
+ if (cc == PCI_CLASS_STORAGE_IDE)
+ scc_s = "IDE";
+ else if (cc == PCI_CLASS_STORAGE_SATA)
+ scc_s = "SATA";
+ else if (cc == PCI_CLASS_STORAGE_RAID)
+ scc_s = "RAID";
+ else
+ scc_s = "unknown";
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "AHCI %02x%02x.%02x%02x "
+ "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
+ ,
+
+ (vers >> 24) & 0xff,
+ (vers >> 16) & 0xff,
+ (vers >> 8) & 0xff,
+ vers & 0xff,
+
+ ((cap >> 8) & 0x1f) + 1,
+ (cap & 0x1f) + 1,
+ speed_s,
+ impl,
+ scc_s);
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "flags: "
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s%s"
+ "%s\n"
+ ,
+
+ cap & (1 << 31) ? "64bit " : "",
+ cap & (1 << 30) ? "ncq " : "",
+ cap & (1 << 29) ? "sntf " : "",
+ cap & (1 << 28) ? "ilck " : "",
+ cap & (1 << 27) ? "stag " : "",
+ cap & (1 << 26) ? "pm " : "",
+ cap & (1 << 25) ? "led " : "",
+
+ cap & (1 << 24) ? "clo " : "",
+ cap & (1 << 19) ? "nz " : "",
+ cap & (1 << 18) ? "only " : "",
+ cap & (1 << 17) ? "pmp " : "",
+ cap & (1 << 15) ? "pio " : "",
+ cap & (1 << 14) ? "slum " : "",
+ cap & (1 << 13) ? "part " : "",
+ cap & (1 << 6) ? "ems ": ""
+ );
+}
+
+/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
+ * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
+ * support PMP, and the 4726 either directly exports the device
+ * attached to the first downstream port or acts as a hardware storage
+ * controller and emulates a single ATA device (can be RAID 0/1 or some
+ * other configuration).
+ *
+ * When there's no device attached to the first downstream port of the
+ * 4726, "Config Disk" appears, which is a pseudo ATA device to
+ * configure the 4726. However, ATA emulation of the device is very
+ * lame. It doesn't send a signature D2H Reg FIS after the initial
+ * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
+ *
+ * The following function works around the problem by always using
+ * hardreset on the port and not depending on receiving signature FIS
+ * afterward. If signature FIS isn't received soon, ATA class is
+ * assumed without follow-up softreset.
+ */
+static void ahci_p5wdh_workaround(struct ata_host *host)
+{
+ static struct dmi_system_id sysids[] = {
+ {
+ .ident = "P5W DH Deluxe",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "ASUSTEK COMPUTER INC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
+ },
+ },
+ { }
+ };
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+
+ if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
+ dmi_check_system(sysids)) {
+ struct ata_port *ap = host->ports[1];
+
+ dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
+ "Deluxe on-board SIMG4726 workaround\n");
+
+ ap->ops = &ahci_p5wdh_ops;
+ ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
+ }
+}
+
+static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ unsigned int board_id = ent->driver_data;
+ struct ata_port_info pi = ahci_port_info[board_id];
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct device *dev = &pdev->dev;
+ struct ahci_host_priv *hpriv;
+ struct ata_host *host;
+ int n_ports, i, rc;
+
+ VPRINTK("ENTER\n");
+
+ WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* The AHCI driver can only drive the SATA ports; the PATA driver
+ can drive them all, so if both drivers are selected make sure
+ AHCI stays out of the way */
+ if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
+ return -ENODEV;
+
+ /* acquire resources */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* AHCI controllers often implement an SFF-compatible interface.
+ * Grab all PCI BARs just in case.
+ */
+ rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == 0x2652 || pdev->device == 0x2653)) {
+ u8 map;
+
+ /* ICH6s share the same PCI ID for both piix and ahci
+ * modes. Enabling ahci mode while MAP indicates
+ * combined mode is a bad idea. Yield to ata_piix.
+ */
+ pci_read_config_byte(pdev, ICH_MAP, &map);
+ if (map & 0x3) {
+ dev_printk(KERN_INFO, &pdev->dev, "controller is in "
+ "combined mode, can't enable AHCI mode\n");
+ return -ENODEV;
+ }
+ }
+
+ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+ hpriv->flags |= (unsigned long)pi.private_data;
+
+ /* MCP65 revision A1 and A2 can't do MSI */
+ if (board_id == board_ahci_mcp65 &&
+ (pdev->revision == 0xa1 || pdev->revision == 0xa2))
+ hpriv->flags |= AHCI_HFLAG_NO_MSI;
+
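+ /* fall back to legacy INTx when MSI is unavailable or disabled */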
+ if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
+ pci_intx(pdev, 1);
+
+ /* save initial config */
+ ahci_save_initial_config(pdev, hpriv);
+
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ)
+ pi.flags |= ATA_FLAG_NCQ;
+
+ if (hpriv->cap & HOST_CAP_PMP)
+ pi.flags |= ATA_FLAG_PMP;
+
+ if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
+ u8 messages;
+ void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+ u32 em_loc = readl(mmio + HOST_EM_LOC);
+ u32 em_ctl = readl(mmio + HOST_EM_CTL);
+
+ messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
+
+ /* we only support LED message type right now */
+ if ((messages & 0x01) && (ahci_em_messages == 1)) {
+ /* store em_loc: bits 31:16 of EM_LOC give the message buffer
+ * offset in dwords from ABAR, so convert to a byte offset */
+ hpriv->em_loc = ((em_loc >> 16) * 4);
+ pi.flags |= ATA_FLAG_EM;
+ if (!(em_ctl & EM_CTL_ALHD))
+ pi.flags |= ATA_FLAG_SW_ACTIVITY;
+ }
+ }
+
+ /* CAP.NP sometimes indicates the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ if (!host)
+ return -ENOMEM;
+ host->iomap = pcim_iomap_table(pdev);
+ host->private_data = hpriv;
+
+ if (pi.flags & ATA_FLAG_EM)
+ ahci_reset_em(host);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
+ ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+ 0x100 + ap->port_no * 0x80, "port");
+
+ /* set initial link pm policy */
+ ap->pm_policy = NOT_AVAILABLE;
+
+ /* set enclosure management message type */
+ if (ap->flags & ATA_FLAG_EM)
+ ap->em_message_type = ahci_em_messages;
+
+
+ /* disabled/not-implemented port */
+ if (!(hpriv->port_map & (1 << i)))
+ ap->ops = &ata_dummy_port_ops;
+ }
+
+ /* apply workaround for ASUS P5W DH Deluxe mainboard */
+ ahci_p5wdh_workaround(host);
+
+ /* initialize adapter */
+ rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+ if (rc)
+ return rc;
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ ahci_print_info(host);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
+ &ahci_sht);
+}
+
+static int __init ahci_init(void)
+{
+ return pci_register_driver(&ahci_pci_driver);
+}
+
+static void __exit ahci_exit(void)
+{
+ pci_unregister_driver(&ahci_pci_driver);
+}
+
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("AHCI SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ahci_init);
+module_exit(ahci_exit);
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
new file mode 100644
index 0000000..5c33767
--- /dev/null
+++ b/drivers/ata/ata_generic.c
@@ -0,0 +1,214 @@
+/*
+ * ata_generic.c - Generic PATA/SATA controller driver.
+ * Copyright 2005 Red Hat Inc, all rights reserved.
+ *
+ * Elements from ide/pci/generic.c
+ * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
+ * Portions (C) Copyright 2002 Red Hat Inc <alan@redhat.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * Driver for PCI IDE interfaces implementing the standard bus mastering
+ * interface functionality. This assumes the BIOS did the drive set up and
+ * tuning for us. By default we do not grab all IDE class devices as they
+ * may have other drivers or need fixups to avoid problems. Instead we keep
+ * a default list of stuff without documentation/driver that appears to
+ * work.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "ata_generic"
+#define DRV_VERSION "0.2.15"
+
+/*
+ * A generic parallel ATA driver using libata
+ */
+
+/**
+ * generic_set_mode - mode setting
+ * @link: link to set up
+ * @unused: returned device on error
+ *
+ * Use a non-standard set_mode function. We don't want to be tuned.
+ * The BIOS configured everything. Our job is not to fiddle. We
+ * read the dma enabled bits from the PCI configuration of the device
+ * and respect them.
+ */
+
+static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+ struct ata_port *ap = link->ap;
+ int dma_enabled = 0;
+ struct ata_device *dev;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ /* Bits 5 and 6 indicate if DMA is active on master/slave */
+ if (ap->ioaddr.bmdma_addr)
+ dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+
+ if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
+ dma_enabled = 0xFF;
+
+ ata_link_for_each_dev(dev, link) {
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ /* We don't really care */
+ dev->pio_mode = XFER_PIO_0;
+ dev->dma_mode = XFER_MW_DMA_0;
+ /* We do need the right mode information for DMA or PIO
+ and this comes from the current configuration flags */
+ if (dma_enabled & (1 << (5 + dev->devno))) {
+ unsigned int xfer_mask = ata_id_xfermask(dev->id);
+ const char *name;
+
+ if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
+ name = ata_mode_string(xfer_mask);
+ else {
+ /* SWDMA perhaps? */
+ name = "DMA";
+ xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0);
+ }
+
+ ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
+ name);
+
+ dev->xfer_mode = ata_xfer_mask2mode(xfer_mask);
+ dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode);
+ dev->flags &= ~ATA_DFLAG_PIO;
+ } else {
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ dev->xfer_mode = XFER_PIO_0;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ dev->flags |= ATA_DFLAG_PIO;
+ }
+ }
+ return 0;
+}
+
+static struct scsi_host_template generic_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations generic_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_mode = generic_set_mode,
+};
+
+static int all_generic_ide; /* Set to claim all devices */
+
+/**
+ * ata_generic_init - attach generic IDE
+ * @dev: PCI device found
+ * @id: match entry
+ *
+ * Called each time a matching IDE interface is found. We check if the
+ * interface is one we wish to claim and, if so, perform any chip-
+ * specific hacks, then let the ATA layer do the heavy lifting.
+ */
+
+static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ u16 command;
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &generic_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+
+ /* Don't use the generic entry unless instructed to do so */
+ if (id->driver_data == 1 && all_generic_ide == 0)
+ return -ENODEV;
+
+ /* Devices that need care */
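+ /* (UM8886A and 82C558: only the odd PCI function is the IDE one,
+ * so skip the even function) */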
+ if (dev->vendor == PCI_VENDOR_ID_UMC &&
+ dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
+ (!(PCI_FUNC(dev->devfn) & 1)))
+ return -ENODEV;
+
+ if (dev->vendor == PCI_VENDOR_ID_OPTI &&
+ dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
+ (!(PCI_FUNC(dev->devfn) & 1)))
+ return -ENODEV;
+
+ /* Don't re-enable devices in generic mode or we will break some
+ motherboards with disabled and unused IDE controllers */
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ if (!(command & PCI_COMMAND_IO))
+ return -ENODEV;
+
+ if (dev->vendor == PCI_VENDOR_ID_AL)
+ ata_pci_bmdma_clear_simplex(dev);
+
+ if (dev->vendor == PCI_VENDOR_ID_ATI) {
+ int rc = pcim_enable_device(dev);
+ if (rc < 0)
+ return rc;
+ pcim_pin_device(dev);
+ }
+ return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL);
+}
+
+static struct pci_device_id ata_generic[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
+ { PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
+ { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
+ { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886A), },
+ { PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF), },
+ { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
+ { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
+ { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), },
+ { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
+ { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
+ { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
+ /* Must come last. If you add entries adjust this table appropriately */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
+ { 0, },
+};
+
+static struct pci_driver ata_generic_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = ata_generic,
+ .probe = ata_generic_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init ata_generic_init(void)
+{
+ return pci_register_driver(&ata_generic_pci_driver);
+}
+
+
+static void __exit ata_generic_exit(void)
+{
+ pci_unregister_driver(&ata_generic_pci_driver);
+}
+
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for generic ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ata_generic);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ata_generic_init);
+module_exit(ata_generic_exit);
+
+module_param(all_generic_ide, int, 0);
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
new file mode 100644
index 0000000..458532d
--- /dev/null
+++ b/drivers/ata/ata_piix.c
@@ -0,0 +1,1556 @@
+/*
+ * ata_piix.c - Intel PATA/SATA controllers
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ *
+ * Copyright 2003-2005 Red Hat Inc
+ * Copyright 2003-2005 Jeff Garzik
+ *
+ *
+ * Copyright header from piix.c:
+ *
+ * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat Inc
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available at http://developer.intel.com/
+ *
+ * Documentation
+ * Publicly available from the Intel web site. Errata documentation
+ * is also publicly available. As an aid to anyone hacking on this
+ * driver, the list of errata that are relevant is below, going back to
+ * PIIX4. Older device documentation is now a bit tricky to find.
+ *
+ * The chipsets all follow very much the same design. The original Triton
+ * series chipsets do _not_ support independent device timings, but this
+ * is fixed in Triton II. With the odd mobile exception the chips then
+ * change little except in gaining more modes until SATA arrives. This
+ * driver supports only the chips with independent timing (that is those
+ * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
+ * for the early chip drivers.
+ *
+ * Errata of note:
+ *
+ * Unfixable
+ * PIIX4 errata #9 - Only on ultra obscure hw
+ * ICH3 errata #13 - Not observed to affect real hw
+ * by Intel
+ *
+ * Things we must deal with
+ * PIIX4 errata #10 - BM IDE hang with non UDMA
+ * (must stop/start dma to recover)
+ * 440MX errata #15 - As PIIX4 errata #10
+ * PIIX4 errata #15 - Must not read control registers
+ * during a PIO transfer
+ * 440MX errata #13 - As PIIX4 errata #15
+ * ICH2 errata #21 - DMA mode 0 doesn't work right
+ * ICH0/1 errata #55 - As ICH2 errata #21
+ * ICH2 spec c #9 - Extra operations needed to handle
+ * drive hotswap [NOT YET SUPPORTED]
+ * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
+ * and must be dword aligned
+ * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
+ *
+ * Should have been BIOS fixed:
+ * 450NX: errata #19 - DMA hangs on old 450NX
+ * 450NX: errata #20 - DMA hangs on old 450NX
+ * 450NX: errata #25 - Corruption with DMA on old 450NX
+ * ICH3 errata #15 - IDE deadlock under high load
+ * (BIOS must set dev 31 fn 0 bit 23)
+ * ICH3 errata #18 - Don't use native mode
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "ata_piix"
+#define DRV_VERSION "2.12"
+
+enum {
+ PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
+ ICH5_PMR = 0x90, /* port mapping register */
+ ICH5_PCS = 0x92, /* port control and status */
+ PIIX_SIDPR_BAR = 5,
+ PIIX_SIDPR_LEN = 16,
+ PIIX_SIDPR_IDX = 0,
+ PIIX_SIDPR_DATA = 4,
+
+ PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
+ PIIX_FLAG_SIDPR = (1 << 29), /* SATA idx/data pair regs */
+
+ PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS,
+ PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
+
+ PIIX_80C_PRI = (1 << 5) | (1 << 4),
+ PIIX_80C_SEC = (1 << 7) | (1 << 6),
+
+ /* constants for mapping table */
+ P0 = 0, /* port 0 */
+ P1 = 1, /* port 1 */
+ P2 = 2, /* port 2 */
+ P3 = 3, /* port 3 */
+ IDE = -1, /* IDE */
+ NA = -2, /* not available */
+ RV = -3, /* reserved */
+
+ PIIX_AHCI_DEVICE = 6,
+
+ /* host->flags bits */
+ PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
+};
+
+enum piix_controller_ids {
+ /* controller IDs */
+ piix_pata_mwdma, /* PIIX3 MWDMA only */
+ piix_pata_33, /* PIIX4 at 33MHz */
+ ich_pata_33, /* ICH up to UDMA 33 only */
+ ich_pata_66, /* ICH up to 66MHz */
+ ich_pata_100, /* ICH up to UDMA 100 */
+ ich5_sata,
+ ich6_sata,
+ ich6m_sata,
+ ich8_sata,
+ ich8_2port_sata,
+ ich8m_apple_sata, /* locks up on second port enable */
+ tolapai_sata,
+ piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
+};
+
+struct piix_map_db {
+ const u32 mask;
+ const u16 port_enable;
+ const int map[][4];
+};
+
+struct piix_host_priv {
+ const int *map;
+ void __iomem *sidpr;
+};
+
+static int piix_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
+static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+static int ich_pata_cable_detect(struct ata_port *ap);
+static u8 piix_vmw_bmdma_status(struct ata_port *ap);
+static int piix_sidpr_scr_read(struct ata_link *link,
+ unsigned int reg, u32 *val);
+static int piix_sidpr_scr_write(struct ata_link *link,
+ unsigned int reg, u32 val);
+#ifdef CONFIG_PM
+static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+static int piix_pci_device_resume(struct pci_dev *pdev);
+#endif
+
+static unsigned int in_module_init = 1;
+
+static const struct pci_device_id piix_pci_tbl[] = {
+ /* Intel PIIX3 for the 430HX etc */
+ { 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma },
+ /* VMware ICH4 */
+ { 0x8086, 0x7111, 0x15ad, 0x1976, 0, 0, piix_pata_vmw },
+ /* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
+ /* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
+ { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+ /* Intel PIIX4 */
+ { 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+ /* Intel PIIX4 */
+ { 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+ /* Intel PIIX */
+ { 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
+ /* Intel ICH (i810, i815, i840) UDMA 66 */
+ { 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
+ /* Intel ICH0: UDMA 33 */
+ { 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
+ /* Intel ICH2M */
+ { 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
+ { 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* Intel ICH3M */
+ { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* Intel ICH3 (E7500/1) UDMA 100 */
+ { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
+ { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* Intel ICH5 */
+ { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* C-ICH (i810E2) */
+ { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* ESB (855GME/875P + 6300ESB) UDMA 100 */
+ { 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* ICH6 (and 6) (i915) UDMA 100 */
+ { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* ICH7/7-R (i945, i975) UDMA 100 */
+ { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+ /* ICH8 Mobile PATA Controller */
+ { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
+
+ /* NOTE: The following PCI ids must be kept in sync with the
+ * list in drivers/pci/quirks.c.
+ */
+
+ /* 82801EB (ICH5) */
+ { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+ /* 82801EB (ICH5) */
+ { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+ /* 6300ESB (ICH5 variant with broken PCS present bits) */
+ { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+ /* 6300ESB pretending RAID */
+ { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
+ /* 82801FB/FW (ICH6/ICH6W) */
+ { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+ /* 82801FR/FRW (ICH6R/ICH6RW) */
+ { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+ /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented).
+ * Attach iff the controller is in IDE mode. */
+ { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
+ /* 82801GB/GR/GH (ICH7, identical to ICH6) */
+ { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+ /* 82801GBM/GHM (ICH7M, identical to ICH6M) */
+ { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
+ /* Enterprise Southbridge 2 (631xESB/632xESB) */
+ { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
+ /* SATA Controller 1 IDE (ICH8) */
+ { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller 2 IDE (ICH8) */
+ { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* Mobile SATA Controller IDE (ICH8M), Apple */
+ { 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
+ { 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata },
+ { 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata },
+ /* Mobile SATA Controller IDE (ICH8M) */
+ { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (ICH9) */
+ { 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (ICH9) */
+ { 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (ICH9) */
+ { 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (ICH9M) */
+ { 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (ICH9M) */
+ { 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (ICH9M) */
+ { 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (Tolapai) */
+ { 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata },
+ /* SATA Controller IDE (ICH10) */
+ { 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (ICH10) */
+ { 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (ICH10) */
+ { 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (ICH10) */
+ { 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ { } /* terminate list */
+};
+
+static struct pci_driver piix_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = piix_pci_tbl,
+ .probe = piix_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = piix_pci_device_suspend,
+ .resume = piix_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template piix_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations piix_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = piix_set_piomode,
+ .set_dmamode = piix_set_dmamode,
+ .prereset = piix_pata_prereset,
+};
+
+static struct ata_port_operations piix_vmw_ops = {
+ .inherits = &piix_pata_ops,
+ .bmdma_status = piix_vmw_bmdma_status,
+};
+
+static struct ata_port_operations ich_pata_ops = {
+ .inherits = &piix_pata_ops,
+ .cable_detect = ich_pata_cable_detect,
+ .set_dmamode = ich_set_dmamode,
+};
+
+static struct ata_port_operations piix_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+};
+
+static struct ata_port_operations piix_sidpr_sata_ops = {
+ .inherits = &piix_sata_ops,
+ .hardreset = sata_std_hardreset,
+ .scr_read = piix_sidpr_scr_read,
+ .scr_write = piix_sidpr_scr_write,
+};
+
+static const struct piix_map_db ich5_map_db = {
+ .mask = 0x7,
+ .port_enable = 0x3,
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, NA, P1, NA }, /* 000b */
+ { P1, NA, P0, NA }, /* 001b */
+ { RV, RV, RV, RV },
+ { RV, RV, RV, RV },
+ { P0, P1, IDE, IDE }, /* 100b */
+ { P1, P0, IDE, IDE }, /* 101b */
+ { IDE, IDE, P0, P1 }, /* 110b */
+ { IDE, IDE, P1, P0 }, /* 111b */
+ },
+};
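+
+/* Reading the map tables: the four columns are primary master/slave and
+ * secondary master/slave. For example, ICH5 MAP value 101b selects the
+ * row { P1, P0, IDE, IDE }: primary master on SATA port 1, primary slave
+ * on SATA port 0, and the whole secondary channel left as PATA.
+ */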
+
+static const struct piix_map_db ich6_map_db = {
+ .mask = 0x3,
+ .port_enable = 0xf,
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, P2, P1, P3 }, /* 00b */
+ { IDE, IDE, P1, P3 }, /* 01b */
+ { P0, P2, IDE, IDE }, /* 10b */
+ { RV, RV, RV, RV },
+ },
+};
+
+static const struct piix_map_db ich6m_map_db = {
+ .mask = 0x3,
+ .port_enable = 0x5,
+
+ /* Map 01b isn't specified in the doc but some notebooks use
+ * it anyway. MAP 01b has been spotted on both ICH6M and
+ * ICH7M.
+ */
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, P2, NA, NA }, /* 00b */
+ { IDE, IDE, P1, P3 }, /* 01b */
+ { P0, P2, IDE, IDE }, /* 10b */
+ { RV, RV, RV, RV },
+ },
+};
+
+static const struct piix_map_db ich8_map_db = {
+ .mask = 0x3,
+ .port_enable = 0xf,
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */
+ { RV, RV, RV, RV },
+ { P0, P2, IDE, IDE }, /* 10b (IDE mode) */
+ { RV, RV, RV, RV },
+ },
+};
+
+static const struct piix_map_db ich8_2port_map_db = {
+ .mask = 0x3,
+ .port_enable = 0x3,
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, NA, P1, NA }, /* 00b */
+ { RV, RV, RV, RV }, /* 01b */
+ { RV, RV, RV, RV }, /* 10b */
+ { RV, RV, RV, RV },
+ },
+};
+
+static const struct piix_map_db ich8m_apple_map_db = {
+ .mask = 0x3,
+ .port_enable = 0x1,
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, NA, NA, NA }, /* 00b */
+ { RV, RV, RV, RV },
+ { P0, P2, IDE, IDE }, /* 10b */
+ { RV, RV, RV, RV },
+ },
+};
+
+static const struct piix_map_db tolapai_map_db = {
+ .mask = 0x3,
+ .port_enable = 0x3,
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, NA, P1, NA }, /* 00b */
+ { RV, RV, RV, RV }, /* 01b */
+ { RV, RV, RV, RV }, /* 10b */
+ { RV, RV, RV, RV },
+ },
+};
+
+static const struct piix_map_db *piix_map_db_table[] = {
+ [ich5_sata] = &ich5_map_db,
+ [ich6_sata] = &ich6_map_db,
+ [ich6m_sata] = &ich6m_map_db,
+ [ich8_sata] = &ich8_map_db,
+ [ich8_2port_sata] = &ich8_2port_map_db,
+ [ich8m_apple_sata] = &ich8m_apple_map_db,
+ [tolapai_sata] = &tolapai_map_db,
+};
+
+static struct ata_port_info piix_port_info[] = {
+ [piix_pata_mwdma] = /* PIIX3 MWDMA only */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .port_ops = &piix_pata_ops,
+ },
+
+ [piix_pata_33] = /* PIIX4 at 33MHz */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .udma_mask = ATA_UDMA_MASK_40C,
+ .port_ops = &piix_pata_ops,
+ },
+
+ [ich_pata_33] = /* ICH0 - ICH at 33MHz */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = 0x1f, /* pio 0-4 */
+ .mwdma_mask = 0x06, /* Check: maybe 0x07 */
+ .udma_mask = ATA_UDMA2, /* UDMA33 */
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_66] = /* ICH controllers up to 66MHz */
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = 0x1f, /* pio 0-4 */
+ .mwdma_mask = 0x06, /* MWDMA0 is broken on chip */
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich_pata_100] =
+ {
+ .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x06, /* mwdma1-2 */
+ .udma_mask = ATA_UDMA5, /* udma0-5 */
+ .port_ops = &ich_pata_ops,
+ },
+
+ [ich5_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich6_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich6m_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8_2port_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [tolapai_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8m_apple_sata] =
+ {
+ .flags = PIIX_SATA_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [piix_pata_vmw] =
+ {
+ .flags = PIIX_PATA_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
+ .udma_mask = ATA_UDMA_MASK_40C,
+ .port_ops = &piix_vmw_ops,
+ },
+
+};
+
+static struct pci_bits piix_enable_bits[] = {
+ { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
+ { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
+};
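+
+/* These test bit 7 of PCI config registers 0x41/0x43, i.e. bit 15 of the
+ * IDETIM words, which per the PIIX datasheets is the IDE decode enable
+ * bit for each channel; piix_pata_prereset() below returns -ENOENT for a
+ * channel the firmware left disabled.
+ */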
+
+MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
+MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+struct ich_laptop {
+ u16 device;
+ u16 subvendor;
+ u16 subdevice;
+};
+
+/*
+ * List of laptops that use short 40-wire cables rather than 80-wire ones
+ * (a short cable is still electrically good for the higher UDMA modes)
+ */
+
+static const struct ich_laptop ich_laptop[] = {
+ /* devid, subvendor, subdev */
+ { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
+ { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
+ { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
+ { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
+ { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
+ { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
+ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
+ { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
+ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
+ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
+ /* end marker */
+ { 0, }
+};
+
+/**
+ * ich_pata_cable_detect - Probe host controller cable detect info
+ * @ap: Port for which cable detect info is desired
+ *
+ * Read 80c cable indicator from ATA PCI device's PCI config
+ * register. This register is normally set by firmware (BIOS).
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static int ich_pata_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ const struct ich_laptop *lap = &ich_laptop[0];
+ u8 tmp, mask;
+
+ /* Check for specials: laptops from the short-cable list above */
+ while (lap->device) {
+ if (lap->device == pdev->device &&
+ lap->subvendor == pdev->subsystem_vendor &&
+ lap->subdevice == pdev->subsystem_device)
+ return ATA_CBL_PATA40_SHORT;
+
+ lap++;
+ }
+
+ /* check BIOS cable detect results */
+ mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
+ pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
+ if ((tmp & mask) == 0)
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * piix_pata_prereset - prereset for PATA host controller
+ * @link: Target link
+ * @deadline: deadline jiffies for the operation
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
+ return -ENOENT;
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * piix_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned int is_slave = (adev->devno != 0);
+ unsigned int master_port = ap->port_no ? 0x42 : 0x40;
+ unsigned int slave_port = 0x44;
+ u16 master_data;
+ u8 slave_data;
+ u8 udma_enable;
+ int control = 0;
+
+ /*
+ * See Intel Document 298600-004 for the timing programming rules
+ * for ICH controllers.
+ */
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
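+
+ /* The two columns are the ISP (IORDY sample point) and RCT
+ * (recovery time) fields; the << 12 and << 8 shifts below place
+ * them in bits 13:12 and 9:8 of the master timing word, so e.g.
+ * PIO4 programs ISP=2, RCT=3. */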
+
+ if (pio >= 2)
+ control |= 1; /* TIME1 enable */
+ if (ata_pio_need_iordy(adev))
+ control |= 2; /* IE enable */
+
+ /* Intel specifies that the PPE functionality is for disk only */
+ if (adev->class == ATA_DEV_ATA)
+ control |= 4; /* PPE enable */
+
+ /* PIO configuration clears DTE unconditionally. It will be
+ * programmed in set_dmamode which is guaranteed to be called
+ * after set_piomode if any DMA mode is available.
+ */
+ pci_read_config_word(dev, master_port, &master_data);
+ if (is_slave) {
+ /* clear TIME1|IE1|PPE1|DTE1 */
+ master_data &= 0xff0f;
+ /* Enable SITRE (separate slave timing register) */
+ master_data |= 0x4000;
+ /* enable PPE1, IE1 and TIME1 as needed */
+ master_data |= (control << 4);
+ pci_read_config_byte(dev, slave_port, &slave_data);
+ slave_data &= (ap->port_no ? 0x0f : 0xf0);
+ /* Load the timing nibble for this slave */
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
+ << (ap->port_no ? 4 : 0);
+ } else {
+ /* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
+ master_data &= 0xccf0;
+ /* Enable PPE, IE and TIME as appropriate */
+ master_data |= control;
+ /* load ISP and RCT */
+ master_data |=
+ (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ }
+ pci_write_config_word(dev, master_port, master_data);
+ if (is_slave)
+ pci_write_config_byte(dev, slave_port, slave_data);
+
+ /* Ensure the UDMA bit is off - it will be turned back on if
+ UDMA is selected */
+
+ if (ap->udma_mask) {
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+ udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
+ pci_write_config_byte(dev, 0x48, udma_enable);
+ }
+}
+
+/**
+ * do_pata_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ * @isich: set if the chip is an ICH device
+ *
+ * Set UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
+{
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ u8 master_port = ap->port_no ? 0x42 : 0x40;
+ u16 master_data;
+ u8 speed = adev->dma_mode;
+ int devid = adev->devno + 2 * ap->port_no;
+ u8 udma_enable = 0;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ pci_read_config_word(dev, master_port, &master_data);
+ if (ap->udma_mask)
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+
+ if (speed >= XFER_UDMA_0) {
+ unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+ u16 udma_timing;
+ u16 ideconf;
+ int u_clock, u_speed;
+
+ /*
+ * UDMA is handled by a combination of clock switching and
+ * selection of dividers
+ *
+ * Handy rule: Odd modes are UDMATIMx 01, even are 02
+ * except UDMA0 which is 00
+ */
+ u_speed = min(2 - (udma & 1), udma);
+ if (udma == 5)
+ u_clock = 0x1000; /* 100MHz */
+ else if (udma > 2)
+ u_clock = 1; /* 66MHz */
+ else
+ u_clock = 0; /* 33MHz */
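+
+ /* Worked example of the rule above: u_speed = min(2 - (udma & 1),
+ * udma) yields 0 for UDMA0, 1 for UDMA1/3/5 and 2 for UDMA2/4,
+ * while modes above UDMA2 also switch the base clock (66MHz, or
+ * 100MHz for UDMA5) via the IDECONFIG write below. */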
+
+ udma_enable |= (1 << devid);
+
+ /* Load the CT/RP selection */
+ pci_read_config_word(dev, 0x4A, &udma_timing);
+ udma_timing &= ~(3 << (4 * devid));
+ udma_timing |= u_speed << (4 * devid);
+ pci_write_config_word(dev, 0x4A, udma_timing);
+
+ if (isich) {
+ /* Select a 33/66/100MHz clock */
+ pci_read_config_word(dev, 0x54, &ideconf);
+ ideconf &= ~(0x1001 << devid);
+ ideconf |= u_clock << devid;
+ /* For ICH or later we should set bit 10 for better
+ performance (WR_PingPong_En) */
+ pci_write_config_word(dev, 0x54, ideconf);
+ }
+ } else {
+ /*
+ * MWDMA is driven by the PIO timings. We must also enable
+ * IORDY unconditionally along with TIME1. PPE has already
+ * been set when the PIO timing was set.
+ */
+ unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
+ unsigned int control;
+ u8 slave_data;
+ const unsigned int needed_pio[3] = {
+ XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+ };
+ int pio = needed_pio[mwdma] - XFER_PIO_0;
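+
+ /* needed_pio[] pairs each MWDMA mode with the PIO mode whose
+ * ISP/RCT timings can clock it (MWDMA2 shares PIO4's 120ns
+ * cycle; the slower pairings are approximate). */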
+
+ control = 3; /* IORDY|TIME1 */
+
+ /* If the drive's MWDMA mode is faster than its best PIO mode
+ then we must force the PIO cycles down to PIO0 */
+
+ if (adev->pio_mode < needed_pio[mwdma])
+ /* Enable DMA timing only */
+ control |= 8; /* PIO cycles in PIO0 */
+
+ if (adev->devno) { /* Slave */
+ master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
+ master_data |= control << 4;
+ pci_read_config_byte(dev, 0x44, &slave_data);
+ slave_data &= (ap->port_no ? 0x0f : 0xf0);
+ /* Load the matching timing */
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+ pci_write_config_byte(dev, 0x44, slave_data);
+ } else { /* Master */
+ master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
+ and master timing bits */
+ master_data |= control;
+ master_data |=
+ (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ }
+
+ if (ap->udma_mask) {
+ udma_enable &= ~(1 << devid);
+ pci_write_config_word(dev, master_port, master_data);
+ }
+ }
+ /* Don't scribble on 0x48 if the controller does not support UDMA */
+ if (ap->udma_mask)
+ pci_write_config_byte(dev, 0x48, udma_enable);
+}
+
+/**
+ * piix_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ do_pata_set_dmamode(ap, adev, 0);
+}
+
+/**
+ * ich_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ do_pata_set_dmamode(ap, adev, 1);
+}
+
+/*
+ * Serial ATA Index/Data Pair Superset Registers access
+ *
+ * Beginning from ICH8, there's a sane way to access SCRs using index
+ * and data register pair located at BAR5 which means that we have
+ * separate SCRs for master and slave. This is handled using libata
+ * slave_link facility.
+ */
+static const int piix_sidx_map[] = {
+ [SCR_STATUS] = 0,
+ [SCR_ERROR] = 2,
+ [SCR_CONTROL] = 1,
+};
+
+static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
+{
+ struct ata_port *ap = link->ap;
+ struct piix_host_priv *hpriv = ap->host->private_data;
+
+ iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
+ hpriv->sidpr + PIIX_SIDPR_IDX);
+}
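+
+/* Example of the encoding above: selecting SCR_ERROR on port 0's master
+ * link writes 0x0002 to the index register; the same register on port 1's
+ * master link writes 0x0202 ((1 * 2 + 0) << 8 | 2).
+ */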
+
+static int piix_sidpr_scr_read(struct ata_link *link,
+ unsigned int reg, u32 *val)
+{
+ struct piix_host_priv *hpriv = link->ap->host->private_data;
+
+ if (reg >= ARRAY_SIZE(piix_sidx_map))
+ return -EINVAL;
+
+ piix_sidpr_sel(link, reg);
+ *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
+ return 0;
+}
+
+static int piix_sidpr_scr_write(struct ata_link *link,
+ unsigned int reg, u32 val)
+{
+ struct piix_host_priv *hpriv = link->ap->host->private_data;
+
+ if (reg >= ARRAY_SIZE(piix_sidx_map))
+ return -EINVAL;
+
+ piix_sidpr_sel(link, reg);
+ iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int piix_broken_suspend(void)
+{
+ static const struct dmi_system_id sysids[] = {
+ {
+ .ident = "TECRA M3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
+ },
+ },
+ {
+ .ident = "TECRA M3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"),
+ },
+ },
+ {
+ .ident = "TECRA M4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"),
+ },
+ },
+ {
+ .ident = "TECRA M4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"),
+ },
+ },
+ {
+ .ident = "TECRA M5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
+ },
+ },
+ {
+ .ident = "TECRA M6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
+ },
+ },
+ {
+ .ident = "TECRA M7",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
+ },
+ },
+ {
+ .ident = "TECRA A8",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"),
+ },
+ },
+ {
+ .ident = "Satellite R20",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
+ },
+ },
+ {
+ .ident = "Satellite R25",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"),
+ },
+ },
+ {
+ .ident = "Satellite U200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
+ },
+ },
+ {
+ .ident = "Satellite U200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"),
+ },
+ },
+ {
+ .ident = "Satellite Pro U200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE PRO U200"),
+ },
+ },
+ {
+ .ident = "Satellite U205",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
+ },
+ },
+ {
+ .ident = "SATELLITE U205",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
+ },
+ },
+ {
+ .ident = "Portege M500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
+ },
+ },
+
+ { } /* terminate list */
+ };
+ static const char *oemstrs[] = {
+ "Tecra M3,",
+ };
+ int i;
+
+ if (dmi_check_system(sysids))
+ return 1;
+
+ for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
+ if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
+ return 1;
+
+ /* TECRA M4 sometimes forgets its identity and reports bogus
+ * DMI information. As the bogus information is a bit
+ * generic, match as many entries as possible. This manual
+ * matching is necessary because dmi_system_id.matches is
+ * limited to four entries.
+ */
+ if (dmi_get_system_info(DMI_SYS_VENDOR) &&
+ dmi_get_system_info(DMI_PRODUCT_NAME) &&
+ dmi_get_system_info(DMI_PRODUCT_VERSION) &&
+ dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
+ dmi_get_system_info(DMI_BOARD_VENDOR) &&
+ dmi_get_system_info(DMI_BOARD_NAME) &&
+ dmi_get_system_info(DMI_BOARD_VERSION) &&
+ !strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
+ !strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
+ !strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
+ !strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&
+ !strcmp(dmi_get_system_info(DMI_BOARD_VENDOR), "TOSHIBA") &&
+ !strcmp(dmi_get_system_info(DMI_BOARD_NAME), "Portable PC") &&
+ !strcmp(dmi_get_system_info(DMI_BOARD_VERSION), "Version A0"))
+ return 1;
+
+ return 0;
+}
+
+static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ unsigned long flags;
+ int rc = 0;
+
+ rc = ata_host_suspend(host, mesg);
+ if (rc)
+ return rc;
+
+ /* Some braindamaged ACPI suspend implementations expect the
+ * controller to be awake on entry; otherwise, it burns cpu
+ * cycles and power trying to do something to the sleeping
+ * beauty.
+ */
+ if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
+ pci_save_state(pdev);
+
+ /* mark its power state as "unknown", since we don't
+ * know if e.g. the BIOS will change its device state
+ * when we suspend.
+ */
+ if (pdev->current_state == PCI_D0)
+ pdev->current_state = PCI_UNKNOWN;
+
+ /* tell resume that it's waking up from broken suspend */
+ spin_lock_irqsave(&host->lock, flags);
+ host->flags |= PIIX_HOST_BROKEN_SUSPEND;
+ spin_unlock_irqrestore(&host->lock, flags);
+ } else
+ ata_pci_device_do_suspend(pdev, mesg);
+
+ return 0;
+}
+
+static int piix_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ unsigned long flags;
+ int rc;
+
+ if (host->flags & PIIX_HOST_BROKEN_SUSPEND) {
+ spin_lock_irqsave(&host->lock, flags);
+ host->flags &= ~PIIX_HOST_BROKEN_SUSPEND;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ /* PCI device wasn't disabled during suspend. Use
+ * pci_reenable_device() to avoid affecting the enable
+ * count.
+ */
+ rc = pci_reenable_device(pdev);
+ if (rc)
+ dev_printk(KERN_ERR, &pdev->dev, "failed to enable "
+ "device after resume (%d)\n", rc);
+ } else
+ rc = ata_pci_device_do_resume(pdev);
+
+ if (rc == 0)
+ ata_host_resume(host);
+
+ return rc;
+}
+#endif
+
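+/* VMware's emulated PIIX4 raises spurious DMA_ERR (see piix_pata_vmw
+ * above), so mask it out of the BMDMA status.
+ */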
+static u8 piix_vmw_bmdma_status(struct ata_port *ap)
+{
+ return ata_bmdma_status(ap) & ~ATA_DMA_ERR;
+}
+
+#define AHCI_PCI_BAR 5
+#define AHCI_GLOBAL_CTL 0x04
+#define AHCI_ENABLE (1 << 31)
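+/* AHCI_GLOBAL_CTL is the GHC register at offset 0x04 of the AHCI MMIO
+ * region; bit 31 is the AHCI Enable (GHC.AE) bit, so clearing it drops
+ * the controller back to legacy IDE register behaviour.
+ */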
+static int piix_disable_ahci(struct pci_dev *pdev)
+{
+ void __iomem *mmio;
+ u32 tmp;
+ int rc = 0;
+
+ /* BUG: pci_enable_device has not yet been called. This
+ * works because this device is usually set up by BIOS.
+ */
+
+ if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
+ !pci_resource_len(pdev, AHCI_PCI_BAR))
+ return 0;
+
+ mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
+ if (!mmio)
+ return -ENOMEM;
+
+ tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
+ if (tmp & AHCI_ENABLE) {
+ tmp &= ~AHCI_ENABLE;
+ iowrite32(tmp, mmio + AHCI_GLOBAL_CTL);
+
+ tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
+ if (tmp & AHCI_ENABLE)
+ rc = -EIO;
+ }
+
+ pci_iounmap(pdev, mmio);
+ return rc;
+}
+
+/**
+ * piix_check_450nx_errata - Check for problem 450NX setup
+ * @ata_dev: the PCI device to check
+ *
+ * Check for the presence of 450NX errata #19 and errata #25. If
+ * they are found, return a non-zero value so the caller can turn off DMA.
+ */
+
+static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
+{
+ struct pci_dev *pdev = NULL;
+ u16 cfg;
+ int no_piix_dma = 0;
+
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
+ /* Look for 450NX PXB. Check for problem configurations
+ A PCI quirk checks bit 6 already */
+ pci_read_config_word(pdev, 0x41, &cfg);
+ /* Only on the original revision: IDE DMA can hang */
+ if (pdev->revision == 0x00)
+ no_piix_dma = 1;
+ /* On all revisions below 5 PXB bus lock must be disabled for IDE */
+ else if (cfg & (1<<14) && pdev->revision < 5)
+ no_piix_dma = 2;
+ }
+ if (no_piix_dma)
+ dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
+ if (no_piix_dma == 2)
+ dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
+ return no_piix_dma;
+}
+
+static void __devinit piix_init_pcs(struct ata_host *host,
+ const struct piix_map_db *map_db)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ u16 pcs, new_pcs;
+
+ pci_read_config_word(pdev, ICH5_PCS, &pcs);
+
+ new_pcs = pcs | map_db->port_enable;
+
+ if (new_pcs != pcs) {
+ DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
+ pci_write_config_word(pdev, ICH5_PCS, new_pcs);
+ msleep(150);
+ }
+}
+
+static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
+ struct ata_port_info *pinfo,
+ const struct piix_map_db *map_db)
+{
+ const int *map;
+ int i, invalid_map = 0;
+ u8 map_value;
+
+ pci_read_config_byte(pdev, ICH5_PMR, &map_value);
+
+ map = map_db->map[map_value & map_db->mask];
+
+ dev_printk(KERN_INFO, &pdev->dev, "MAP [");
+ for (i = 0; i < 4; i++) {
+ switch (map[i]) {
+ case RV:
+ invalid_map = 1;
+ printk(" XX");
+ break;
+
+ case NA:
+ printk(" --");
+ break;
+
+ case IDE:
+ WARN_ON((i & 1) || map[i + 1] != IDE);
+ pinfo[i / 2] = piix_port_info[ich_pata_100];
+ i++;
+ printk(" IDE IDE");
+ break;
+
+ default:
+ printk(" P%d", map[i]);
+ if (i & 1)
+ pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
+ break;
+ }
+ }
+ printk(" ]\n");
+
+ if (invalid_map)
+ dev_printk(KERN_ERR, &pdev->dev,
+ "invalid MAP value %u\n", map_value);
+
+ return map;
+}
+
+static bool piix_no_sidpr(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+
+ /*
+ * Samsung DB-P70 only has three ATA ports exposed and,
+ * curiously, the unconnected first port reports link online
+ * while not responding to the SRST protocol, causing excessive
+ * detection delay.
+ *
+ * Unfortunately, the system doesn't carry enough DMI
+ * information to identify the machine but does have subsystem
+ * vendor and device set. As it's unclear whether the
+ * subsystem vendor/device is used only for this specific
+ * board, the port can't be disabled solely with the
+ * information; however, turning off SIDPR access works around
+ * the problem. Turn it off.
+ *
+ * This problem is reported in bnc#441240.
+ *
+ * https://bugzilla.novell.com/show_bug.cgi?id=441420
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+ pdev->subsystem_device == 0xb049) {
+ dev_printk(KERN_WARNING, host->dev,
+ "Samsung DB-P70 detected, disabling SIDPR\n");
+ return true;
+ }
+
+ return false;
+}
+
+static int __devinit piix_init_sidpr(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ struct piix_host_priv *hpriv = host->private_data;
+ struct ata_link *link0 = &host->ports[0]->link;
+ u32 scontrol;
+ int i, rc;
+
+ /* check for availability */
+ for (i = 0; i < 4; i++)
+ if (hpriv->map[i] == IDE)
+ return 0;
+
+ /* is it blacklisted? */
+ if (piix_no_sidpr(host))
+ return 0;
+
+ if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
+ return 0;
+
+ if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
+ pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
+ return 0;
+
+ if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
+ return 0;
+
+ hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
+
+ /* SCR access via SIDPR doesn't work on some configurations.
+ * Give it a test drive by inhibiting power save modes which
+ * we'll do anyway.
+ */
+ piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
+
+ /* if IPM is already 3, SCR access is probably working. Don't
+ * un-inhibit power save modes as BIOS might have inhibited
+ * them for a reason.
+ */
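+ /* (SControl bits 11:8 form the IPM field; 0x300 asks the PHY not
+ * to enter the Partial or Slumber power states.) */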
+ if ((scontrol & 0xf00) != 0x300) {
+ scontrol |= 0x300;
+ piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol);
+ piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
+
+ if ((scontrol & 0xf00) != 0x300) {
+ dev_printk(KERN_INFO, host->dev, "SCR access via "
+ "SIDPR is available but doesn't work\n");
+ return 0;
+ }
+ }
+
+ /* okay, SCRs available, set ops and ask libata for slave_link */
+ for (i = 0; i < 2; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ap->ops = &piix_sidpr_sata_ops;
+
+ if (ap->flags & ATA_FLAG_SLAVE_POSS) {
+ rc = ata_slave_link_init(ap);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ {
+ /* Clevo M570U sets IOCFG bit 18 if the cdrom
+ * isn't used to boot the system which
+ * disables the channel.
+ */
+ .ident = "M570U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
+ },
+ },
+
+ { } /* terminate list */
+ };
+ u32 iocfg;
+
+ if (!dmi_check_system(sysids))
+ return;
+
+ /* The datasheet says that bit 18 is NOOP but certain systems
+ * seem to use it to disable a channel. Clear the bit on the
+ * affected systems.
+ */
+ pci_read_config_dword(pdev, PIIX_IOCFG, &iocfg);
+ if (iocfg & (1 << 18)) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "applying IOCFG bit18 quirk\n");
+ iocfg &= ~(1 << 18);
+ pci_write_config_dword(pdev, PIIX_IOCFG, iocfg);
+ }
+}
+
+/**
+ * piix_init_one - Register PIIX ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in piix_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer. We probe for combined mode (sigh),
+ * and then hand over control to libata, for it to do the rest.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int __devinit piix_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ struct device *dev = &pdev->dev;
+ struct ata_port_info port_info[2];
+ const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
+ unsigned long port_flags;
+ struct ata_host *host;
+ struct piix_host_priv *hpriv;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ /* no hotplugging support (FIXME) */
+ if (!in_module_init)
+ return -ENODEV;
+
+ port_info[0] = piix_port_info[ent->driver_data];
+ port_info[1] = piix_port_info[ent->driver_data];
+
+ port_flags = port_info[0].flags;
+
+ /* enable device and prepare host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* ICH6R may be driven by either ata_piix or ahci driver
+ * regardless of BIOS configuration. Make sure AHCI mode is
+ * off.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) {
+ rc = piix_disable_ahci(pdev);
+ if (rc)
+ return rc;
+ }
+
+ /* SATA map init can change port_info, do it before prepping host */
+ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+
+ if (port_flags & ATA_FLAG_SATA)
+ hpriv->map = piix_init_sata_map(pdev, port_info,
+ piix_map_db_table[ent->driver_data]);
+
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ return rc;
+ host->private_data = hpriv;
+
+ /* initialize controller */
+ if (port_flags & ATA_FLAG_SATA) {
+ piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
+ rc = piix_init_sidpr(host);
+ if (rc)
+ return rc;
+ }
+
+ /* apply IOCFG bit18 quirk */
+ piix_iocfg_bit18_quirk(pdev);
+
+ /* On ICH5, some BIOSen disable the interrupt using the
+ * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
+ * On ICH6, this bit has the same effect, but only when
+ * MSI is disabled (and it is disabled, as we don't use
+ * message-signalled interrupts currently).
+ */
+ if (port_flags & PIIX_FLAG_CHECKINTR)
+ pci_intx(pdev, 1);
+
+ if (piix_check_450nx_errata(pdev)) {
+ /* This writes into the master table but it does not
+ really matter for this errata as we will apply it to
+ all the PIIX devices on the board */
+ host->ports[0]->mwdma_mask = 0;
+ host->ports[0]->udma_mask = 0;
+ host->ports[1]->mwdma_mask = 0;
+ host->ports[1]->udma_mask = 0;
+ }
+
+ pci_set_master(pdev);
+ return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht);
+}
+
+static int __init piix_init(void)
+{
+ int rc;
+
+ DPRINTK("pci_register_driver\n");
+ rc = pci_register_driver(&piix_pci_driver);
+ if (rc)
+ return rc;
+
+ in_module_init = 0;
+
+ DPRINTK("done\n");
+ return 0;
+}
+
+static void __exit piix_exit(void)
+{
+ pci_unregister_driver(&piix_pci_driver);
+}
+
+module_init(piix_init);
+module_exit(piix_exit);
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
new file mode 100644
index 0000000..c012307
--- /dev/null
+++ b/drivers/ata/libata-acpi.c
@@ -0,0 +1,1037 @@
+/*
+ * libata-acpi.c
+ * Provides ACPI support for PATA/SATA.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Copyright (C) 2006 Randy Dunlap
+ */
+
+#include <linux/module.h>
+#include <linux/ata.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/libata.h>
+#include <linux/pci.h>
+#include <scsi/scsi_device.h>
+#include "libata.h"
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acnames.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acparser.h>
+#include <acpi/acexcep.h>
+#include <acpi/acmacros.h>
+#include <acpi/actypes.h>
+
+enum {
+ ATA_ACPI_FILTER_SETXFER = 1 << 0,
+ ATA_ACPI_FILTER_LOCK = 1 << 1,
+ ATA_ACPI_FILTER_DIPM = 1 << 2,
+
+ ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER |
+ ATA_ACPI_FILTER_LOCK |
+ ATA_ACPI_FILTER_DIPM,
+};
+
+static unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT;
+module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644);
+MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM)");
+
+#define NO_PORT_MULT 0xffff
+#define SATA_ADR(root, pmp) (((root) << 16) | (pmp))
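+
+/* Per the ACPI spec, a SATA device's _ADR carries the root port in the
+ * high word and the port-multiplier port in the low word, with 0xffff
+ * meaning directly attached; e.g. SATA_ADR(1, NO_PORT_MULT) is 0x1ffff.
+ */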
+
+#define REGS_PER_GTF 7
+struct ata_acpi_gtf {
+ u8 tf[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */
+} __packed;
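+
+/* A valid _GTF return buffer is a multiple of REGS_PER_GTF bytes; e.g. a
+ * 14-byte buffer carries two taskfiles, which is why ata_dev_get_GTF()
+ * below computes buffer.length / REGS_PER_GTF.
+ */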
+
+/*
+ * Helper - belongs in the PCI layer somewhere eventually
+ */
+static int is_pci_dev(struct device *dev)
+{
+ return (dev->bus == &pci_bus_type);
+}
+
+static void ata_acpi_clear_gtf(struct ata_device *dev)
+{
+ kfree(dev->gtf_cache);
+ dev->gtf_cache = NULL;
+}
+
+/**
+ * ata_acpi_associate_sata_port - associate SATA port with ACPI objects
+ * @ap: target SATA port
+ *
+ * Look up ACPI objects associated with @ap and initialize the
+ * acpi_handle fields of its devices accordingly.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_associate_sata_port(struct ata_port *ap)
+{
+ WARN_ON(!(ap->flags & ATA_FLAG_ACPI_SATA));
+
+ if (!sata_pmp_attached(ap)) {
+ acpi_integer adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
+
+ ap->link.device->acpi_handle =
+ acpi_get_child(ap->host->acpi_handle, adr);
+ } else {
+ struct ata_link *link;
+
+ ap->link.device->acpi_handle = NULL;
+
+ ata_port_for_each_link(link, ap) {
+ acpi_integer adr = SATA_ADR(ap->port_no, link->pmp);
+
+ link->device->acpi_handle =
+ acpi_get_child(ap->host->acpi_handle, adr);
+ }
+ }
+}
+
+static void ata_acpi_associate_ide_port(struct ata_port *ap)
+{
+ int max_devices, i;
+
+ ap->acpi_handle = acpi_get_child(ap->host->acpi_handle, ap->port_no);
+ if (!ap->acpi_handle)
+ return;
+
+ max_devices = 1;
+ if (ap->flags & ATA_FLAG_SLAVE_POSS)
+ max_devices++;
+
+ for (i = 0; i < max_devices; i++) {
+ struct ata_device *dev = &ap->link.device[i];
+
+ dev->acpi_handle = acpi_get_child(ap->acpi_handle, i);
+ }
+
+ if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
+ ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
+}
+
+/* @ap and @dev are the same as ata_acpi_handle_hotplug() */
+static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
+{
+ if (dev)
+ dev->flags |= ATA_DFLAG_DETACH;
+ else {
+ struct ata_link *tlink;
+ struct ata_device *tdev;
+
+ ata_port_for_each_link(tlink, ap)
+ ata_link_for_each_dev(tdev, tlink)
+ tdev->flags |= ATA_DFLAG_DETACH;
+ }
+
+ ata_port_schedule_eh(ap);
+}
+
+/**
+ * ata_acpi_handle_hotplug - ACPI event handler backend
+ * @ap: ATA port ACPI event occurred
+ * @dev: ATA device ACPI event occurred (can be NULL)
+ * @event: ACPI event which occurred
+ *
+ * All ACPI bay / device related events end up in this function. If
+ * the event is port-wide @dev is NULL. If the event is specific to a
+ * device, @dev points to it.
+ *
+ * Hotplug (as opposed to unplug) notification is always handled as
+ * port-wide while unplug only kills the target device on device-wide
+ * event.
+ *
+ * LOCKING:
+ * ACPI notify handler context. May sleep.
+ */
+static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
+ u32 event)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ int wait = 0;
+ unsigned long flags;
+ acpi_handle handle;
+
+ if (dev)
+ handle = dev->acpi_handle;
+ else
+ handle = ap->acpi_handle;
+
+ spin_lock_irqsave(ap->lock, flags);
+ /*
+ * When dock driver calls into the routine, it will always use
+ * ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and
+ * ACPI_NOTIFY_EJECT_REQUEST for remove
+ */
+ switch (event) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ ata_ehi_push_desc(ehi, "ACPI event");
+
+ ata_ehi_hotplugged(ehi);
+ ata_port_freeze(ap);
+ break;
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ ata_ehi_push_desc(ehi, "ACPI event");
+
+ ata_acpi_detach_device(ap, dev);
+ wait = 1;
+ break;
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ if (wait)
+ ata_port_wait_eh(ap);
+}
+
+static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
+{
+ struct ata_device *dev = data;
+
+ ata_acpi_handle_hotplug(dev->link->ap, dev, event);
+}
+
+static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data)
+{
+ struct ata_port *ap = data;
+
+ ata_acpi_handle_hotplug(ap, NULL, event);
+}
+
+static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
+ u32 event)
+{
+ struct kobject *kobj = NULL;
+ char event_string[20];
+ char *envp[] = { event_string, NULL };
+
+ if (dev) {
+ if (dev->sdev)
+ kobj = &dev->sdev->sdev_gendev.kobj;
+ } else
+ kobj = &ap->dev->kobj;
+
+ if (kobj) {
+ snprintf(event_string, 20, "BAY_EVENT=%d", event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
+ }
+}
+
+static void ata_acpi_ap_uevent(acpi_handle handle, u32 event, void *data)
+{
+ ata_acpi_uevent(data, NULL, event);
+}
+
+static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
+{
+ struct ata_device *dev = data;
+ ata_acpi_uevent(dev->link->ap, dev, event);
+}
+
+static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
+ .handler = ata_acpi_dev_notify_dock,
+ .uevent = ata_acpi_dev_uevent,
+};
+
+static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
+ .handler = ata_acpi_ap_notify_dock,
+ .uevent = ata_acpi_ap_uevent,
+};
+
+/**
+ * ata_acpi_associate - associate ATA host with ACPI objects
+ * @host: target ATA host
+ *
+ * Look up ACPI objects associated with @host and initialize
+ * acpi_handle fields of @host, its ports and devices accordingly.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_associate(struct ata_host *host)
+{
+ int i, j;
+
+ if (!is_pci_dev(host->dev) || libata_noacpi)
+ return;
+
+ host->acpi_handle = DEVICE_ACPI_HANDLE(host->dev);
+ if (!host->acpi_handle)
+ return;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (host->ports[0]->flags & ATA_FLAG_ACPI_SATA)
+ ata_acpi_associate_sata_port(ap);
+ else
+ ata_acpi_associate_ide_port(ap);
+
+ if (ap->acpi_handle) {
+ /* we might be on a docking station */
+ register_hotplug_dock_device(ap->acpi_handle,
+ &ata_acpi_ap_dock_ops, ap);
+ }
+
+ for (j = 0; j < ata_link_max_devices(&ap->link); j++) {
+ struct ata_device *dev = &ap->link.device[j];
+
+ if (dev->acpi_handle) {
+ /* we might be on a docking station */
+ register_hotplug_dock_device(dev->acpi_handle,
+ &ata_acpi_dev_dock_ops, dev);
+ }
+ }
+ }
+}
+
+/**
+ * ata_acpi_dissociate - dissociate ATA host from ACPI objects
+ * @host: target ATA host
+ *
+ * This function is called during driver detach after the whole host
+ * is shut down.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_dissociate(struct ata_host *host)
+{
+ int i;
+
+ /* Restore initial _GTM values so that driver which attaches
+ * afterward can use them too.
+ */
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+
+ if (ap->acpi_handle && gtm)
+ ata_acpi_stm(ap, gtm);
+ }
+}
+
+/**
+ * ata_acpi_gtm - execute _GTM
+ * @ap: target ATA port
+ * @gtm: out parameter for _GTM result
+ *
+ * Evaluate _GTM and store the result in @gtm.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
+ */
+int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
+{
+ struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER };
+ union acpi_object *out_obj;
+ acpi_status status;
+ int rc = 0;
+
+ status = acpi_evaluate_object(ap->acpi_handle, "_GTM", NULL, &output);
+
+ rc = -ENOENT;
+ if (status == AE_NOT_FOUND)
+ goto out_free;
+
+ rc = -EINVAL;
+ if (ACPI_FAILURE(status)) {
+ ata_port_printk(ap, KERN_ERR,
+ "ACPI get timing mode failed (AE 0x%x)\n",
+ status);
+ goto out_free;
+ }
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ata_port_printk(ap, KERN_WARNING,
+ "_GTM returned unexpected object type 0x%x\n",
+ out_obj->type);
+
+ goto out_free;
+ }
+
+ if (out_obj->buffer.length != sizeof(struct ata_acpi_gtm)) {
+ ata_port_printk(ap, KERN_ERR,
+ "_GTM returned invalid length %d\n",
+ out_obj->buffer.length);
+ goto out_free;
+ }
+
+ memcpy(gtm, out_obj->buffer.pointer, sizeof(struct ata_acpi_gtm));
+ rc = 0;
+ out_free:
+ kfree(output.pointer);
+ return rc;
+}
+
+EXPORT_SYMBOL_GPL(ata_acpi_gtm);
+
+/**
+ * ata_acpi_stm - execute _STM
+ * @ap: target ATA port
+ * @stm: timing parameter to _STM
+ *
+ * Evaluate _STM with timing parameter @stm.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _STM doesn't exist, -errno on failure.
+ */
+int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm)
+{
+ acpi_status status;
+ struct ata_acpi_gtm stm_buf = *stm;
+ struct acpi_object_list input;
+ union acpi_object in_params[3];
+
+ in_params[0].type = ACPI_TYPE_BUFFER;
+ in_params[0].buffer.length = sizeof(struct ata_acpi_gtm);
+ in_params[0].buffer.pointer = (u8 *)&stm_buf;
+ /* Buffers for id may need byteswapping ? */
+ in_params[1].type = ACPI_TYPE_BUFFER;
+ in_params[1].buffer.length = 512;
+ in_params[1].buffer.pointer = (u8 *)ap->link.device[0].id;
+ in_params[2].type = ACPI_TYPE_BUFFER;
+ in_params[2].buffer.length = 512;
+ in_params[2].buffer.pointer = (u8 *)ap->link.device[1].id;
+
+ input.count = 3;
+ input.pointer = in_params;
+
+ status = acpi_evaluate_object(ap->acpi_handle, "_STM", &input, NULL);
+
+ if (status == AE_NOT_FOUND)
+ return -ENOENT;
+ if (ACPI_FAILURE(status)) {
+ ata_port_printk(ap, KERN_ERR,
+ "ACPI set timing mode failed (status=0x%x)\n", status);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ata_acpi_stm);
+
+/**
+ * ata_dev_get_GTF - get the drive bootup default taskfile settings
+ * @dev: target ATA device
+ * @gtf: output parameter for buffer containing _GTF taskfile arrays
+ *
+ * This applies to both PATA and SATA drives.
+ *
+ * The _GTF method has no input parameters.
+ * It returns a variable number of register set values (registers
+ * hex 1F1..1F7, taskfiles).
+ * The <variable number> is not known in advance, so have ACPI-CA
+ * allocate the buffer as needed and return it, then free it later.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Number of taskfiles on success, 0 if _GTF doesn't exist. -EINVAL
+ * if _GTF is invalid.
+ */
+static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
+{
+ struct ata_port *ap = dev->link->ap;
+ acpi_status status;
+ struct acpi_buffer output;
+ union acpi_object *out_obj;
+ int rc = 0;
+
+ /* if _GTF is cached, use the cached value */
+ if (dev->gtf_cache) {
+ out_obj = dev->gtf_cache;
+ goto done;
+ }
+
+ /* set up output buffer */
+ output.length = ACPI_ALLOCATE_BUFFER;
+ output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
+
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
+ __func__, ap->port_no);
+
+ /* _GTF has no input parameters */
+ status = acpi_evaluate_object(dev->acpi_handle, "_GTF", NULL, &output);
+ out_obj = dev->gtf_cache = output.pointer;
+
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ ata_dev_printk(dev, KERN_WARNING,
+ "_GTF evaluation failed (AE 0x%x)\n",
+ status);
+ rc = -EINVAL;
+ }
+ goto out_free;
+ }
+
+ if (!output.length || !output.pointer) {
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: "
+ "length or ptr is NULL (0x%llx, 0x%p)\n",
+ __func__,
+ (unsigned long long)output.length,
+ output.pointer);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ata_dev_printk(dev, KERN_WARNING,
+ "_GTF unexpected object type 0x%x\n",
+ out_obj->type);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ if (out_obj->buffer.length % REGS_PER_GTF) {
+ ata_dev_printk(dev, KERN_WARNING,
+ "unexpected _GTF length (%d)\n",
+ out_obj->buffer.length);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ done:
+ rc = out_obj->buffer.length / REGS_PER_GTF;
+ if (gtf) {
+ *gtf = (void *)out_obj->buffer.pointer;
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG,
+ "%s: returning gtf=%p, gtf_count=%d\n",
+ __func__, *gtf, rc);
+ }
+ return rc;
+
+ out_free:
+ ata_acpi_clear_gtf(dev);
+ return rc;
+}
+
+/**
+ * ata_acpi_gtm_xfermask - determine xfermask from GTM parameter
+ * @dev: target device
+ * @gtm: GTM parameter to use
+ *
+ * Determine xfermask for @dev from @gtm.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Determined xfermask.
+ */
+unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm)
+{
+ unsigned long xfer_mask = 0;
+ unsigned int type;
+ int unit;
+ u8 mode;
+
+ /* we always use the 0 slot for crap hardware */
+ unit = dev->devno;
+ if (!(gtm->flags & 0x10))
+ unit = 0;
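+
+ /* (gtm->flags bit 4 is the _GTM "independent timings" flag; when it
+ * is clear both devices share slot 0's timing data.) */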
+
+ /* PIO */
+ mode = ata_timing_cycle2mode(ATA_SHIFT_PIO, gtm->drive[unit].pio);
+ xfer_mask |= ata_xfer_mode2mask(mode);
+
+ /* See if we have MWDMA or UDMA data. We don't bother with
+ * MWDMA if UDMA is available, as that means the BIOS set UDMA
+ * and our error step-down path goes from UDMA straight to PIO anyway.
+ */
+ if (!(gtm->flags & (1 << (2 * unit))))
+ type = ATA_SHIFT_MWDMA;
+ else
+ type = ATA_SHIFT_UDMA;
+
+ mode = ata_timing_cycle2mode(type, gtm->drive[unit].dma);
+ xfer_mask |= ata_xfer_mode2mask(mode);
+
+ return xfer_mask;
+}
+EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
+
+/**
+ * ata_acpi_cbl_80wire - Check for 80 wire cable
+ * @ap: Port to check
+ * @gtm: GTM data to use
+ *
+ * Return 1 if the @gtm indicates the BIOS selected an 80-wire mode.
+ */
+int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
+{
+ struct ata_device *dev;
+
+ ata_link_for_each_dev(dev, &ap->link) {
+ unsigned long xfer_mask, udma_mask;
+
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
+ ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
+
+ if (udma_mask & ~ATA_UDMA_MASK_40C)
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
+
+static void ata_acpi_gtf_to_tf(struct ata_device *dev,
+ const struct ata_acpi_gtf *gtf,
+ struct ata_taskfile *tf)
+{
+ ata_tf_init(dev, tf);
+
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf->protocol = ATA_PROT_NODATA;
+ tf->feature = gtf->tf[0]; /* 0x1f1 */
+ tf->nsect = gtf->tf[1]; /* 0x1f2 */
+ tf->lbal = gtf->tf[2]; /* 0x1f3 */
+ tf->lbam = gtf->tf[3]; /* 0x1f4 */
+ tf->lbah = gtf->tf[4]; /* 0x1f5 */
+ tf->device = gtf->tf[5]; /* 0x1f6 */
+ tf->command = gtf->tf[6]; /* 0x1f7 */
+}
+
+static int ata_acpi_filter_tf(const struct ata_taskfile *tf,
+ const struct ata_taskfile *ptf)
+{
+ if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_SETXFER) {
+ /* libata doesn't use ACPI to configure transfer mode.
+ * It will only confuse device configuration. Skip.
+ */
+ if (tf->command == ATA_CMD_SET_FEATURES &&
+ tf->feature == SETFEATURES_XFER)
+ return 1;
+ }
+
+ if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_LOCK) {
+ /* BIOS writers, sorry but we don't wanna lock
+ * features unless the user explicitly said so.
+ */
+
+ /* DEVICE CONFIGURATION FREEZE LOCK */
+ if (tf->command == ATA_CMD_CONF_OVERLAY &&
+ tf->feature == ATA_DCO_FREEZE_LOCK)
+ return 1;
+
+ /* SECURITY FREEZE LOCK */
+ if (tf->command == ATA_CMD_SEC_FREEZE_LOCK)
+ return 1;
+
+ /* SET MAX LOCK and SET MAX FREEZE LOCK */
+ if ((!ptf || ptf->command != ATA_CMD_READ_NATIVE_MAX) &&
+ tf->command == ATA_CMD_SET_MAX &&
+ (tf->feature == ATA_SET_MAX_LOCK ||
+ tf->feature == ATA_SET_MAX_FREEZE_LOCK))
+ return 1;
+ }
+
+ if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_DIPM) {
+ /* inhibit enabling DIPM */
+ if (tf->command == ATA_CMD_SET_FEATURES &&
+ tf->feature == SETFEATURES_SATA_ENABLE &&
+ tf->nsect == SATA_DIPM)
+ return 1;
+ }
+
+ return 0;
+}
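+
+/*
+ * For example, booting with libata.acpi_gtf_filter=0 lets every
+ * BIOS-provided _GTF command through, while each bit set in the mask
+ * suppresses the corresponding class of commands checked above.
+ */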
+
+/**
+ * ata_acpi_run_tf - execute one _GTF taskfile on a device
+ * @dev: target ATA device
+ * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
+ * @prev_gtf: previous taskfile, consulted by the filter (may be NULL)
+ *
+ * Convert @gtf to a taskfile and, unless ata_acpi_filter_tf()
+ * rejects it, issue it to @dev as an internal NODATA command.  An
+ * all-zero taskfile is treated as a stop entry and ignored.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 1 if command is executed successfully.  0 if ignored, rejected or
+ * filtered out, -errno on other errors.
+ */
+static int ata_acpi_run_tf(struct ata_device *dev,
+ const struct ata_acpi_gtf *gtf,
+ const struct ata_acpi_gtf *prev_gtf)
+{
+ struct ata_taskfile *pptf = NULL;
+ struct ata_taskfile tf, ptf, rtf;
+ unsigned int err_mask;
+ const char *level;
+ char msg[60];
+ int rc;
+
+ if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0)
+ && (gtf->tf[3] == 0) && (gtf->tf[4] == 0) && (gtf->tf[5] == 0)
+ && (gtf->tf[6] == 0))
+ return 0;
+
+ ata_acpi_gtf_to_tf(dev, gtf, &tf);
+ if (prev_gtf) {
+ ata_acpi_gtf_to_tf(dev, prev_gtf, &ptf);
+ pptf = &ptf;
+ }
+
+ if (!ata_acpi_filter_tf(&tf, pptf)) {
+ rtf = tf;
+ err_mask = ata_exec_internal(dev, &rtf, NULL,
+ DMA_NONE, NULL, 0, 0);
+
+ switch (err_mask) {
+ case 0:
+ level = KERN_DEBUG;
+ snprintf(msg, sizeof(msg), "succeeded");
+ rc = 1;
+ break;
+
+ case AC_ERR_DEV:
+ level = KERN_INFO;
+ snprintf(msg, sizeof(msg),
+ "rejected by device (Stat=0x%02x Err=0x%02x)",
+ rtf.command, rtf.feature);
+ rc = 0;
+ break;
+
+ default:
+ level = KERN_ERR;
+ snprintf(msg, sizeof(msg),
+ "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
+ err_mask, rtf.command, rtf.feature);
+ rc = -EIO;
+ break;
+ }
+ } else {
+ level = KERN_INFO;
+ snprintf(msg, sizeof(msg), "filtered out");
+ rc = 0;
+ }
+
+ ata_dev_printk(dev, level,
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x %s\n",
+ tf.command, tf.feature, tf.nsect, tf.lbal,
+ tf.lbam, tf.lbah, tf.device, msg);
+
+ return rc;
+}
+
+/**
+ * ata_acpi_exec_tfs - get then write drive taskfile settings
+ * @dev: target ATA device
+ * @nr_executed: out parameter for the number of executed commands
+ *
+ * Evaluate _GTF and execute the returned taskfiles.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success with the number of executed taskfiles stored in
+ * @nr_executed (which stays 0 if _GTF doesn't exist), -errno on
+ * failure.
+ */
+static int ata_acpi_exec_tfs(struct ata_device *dev, int *nr_executed)
+{
+ struct ata_acpi_gtf *gtf = NULL, *pgtf = NULL;
+ int gtf_count, i, rc;
+
+ /* get taskfiles */
+ rc = ata_dev_get_GTF(dev, &gtf);
+ if (rc < 0)
+ return rc;
+ gtf_count = rc;
+
+ /* execute them */
+ for (i = 0; i < gtf_count; i++, gtf++) {
+ rc = ata_acpi_run_tf(dev, gtf, pgtf);
+ if (rc < 0)
+ break;
+ if (rc) {
+ (*nr_executed)++;
+ pgtf = gtf;
+ }
+ }
+
+ ata_acpi_clear_gtf(dev);
+
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+/**
+ * ata_acpi_push_id - send Identify data to drive
+ * @dev: target ATA device
+ *
+ * _SDD ACPI object: for SATA mode only.
+ * Must be evaluated after IDENTIFY (PACKET) DEVICE, as it uses that
+ * data.  _SDD is an optional method; if it fails for whatever
+ * reason, we log a warning and the caller should just keep going.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int ata_acpi_push_id(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ int err;
+ acpi_status status;
+ struct acpi_object_list input;
+ union acpi_object in_params[1];
+
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ix = %d, port#: %d\n",
+ __func__, dev->devno, ap->port_no);
+
+ /* Give the drive Identify data to the drive via the _SDD method */
+ /* _SDD: set up input parameters */
+ input.count = 1;
+ input.pointer = in_params;
+ in_params[0].type = ACPI_TYPE_BUFFER;
+ in_params[0].buffer.length = sizeof(dev->id[0]) * ATA_ID_WORDS;
+ in_params[0].buffer.pointer = (u8 *)dev->id;
+ /* Output buffer: _SDD has no output */
+
+ /* It's OK for _SDD to be missing too. */
+ swap_buf_le16(dev->id, ATA_ID_WORDS);
+ status = acpi_evaluate_object(dev->acpi_handle, "_SDD", &input, NULL);
+ swap_buf_le16(dev->id, ATA_ID_WORDS);
+
+ err = ACPI_FAILURE(status) ? -EIO : 0;
+ if (err < 0)
+ ata_dev_printk(dev, KERN_WARNING,
+ "ACPI _SDD failed (AE 0x%x)\n", status);
+
+ return err;
+}
+
+/**
+ * ata_acpi_on_suspend - ATA ACPI hook called on suspend
+ * @ap: target ATA port
+ *
+ * This function is called when @ap is about to be suspended. All
+ * devices are already put to sleep but the port_suspend() callback
+ * hasn't been executed yet. Error return from this function aborts
+ * suspend.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int ata_acpi_on_suspend(struct ata_port *ap)
+{
+ /* nada */
+ return 0;
+}
+
+/**
+ * ata_acpi_on_resume - ATA ACPI hook called on resume
+ * @ap: target ATA port
+ *
+ * This function is called when @ap is resumed - right after port
+ * itself is resumed but before any EH action is taken.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_on_resume(struct ata_port *ap)
+{
+ const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+ struct ata_device *dev;
+
+ if (ap->acpi_handle && gtm) {
+ /* _GTM valid */
+
+ /* restore timing parameters */
+ ata_acpi_stm(ap, gtm);
+
+ /* _GTF should immediately follow _STM so that it can
+ * use values set by _STM. Cache _GTF result and
+ * schedule _GTF.
+ */
+ ata_link_for_each_dev(dev, &ap->link) {
+ ata_acpi_clear_gtf(dev);
+ if (ata_dev_enabled(dev) &&
+ ata_dev_get_GTF(dev, NULL) >= 0)
+ dev->flags |= ATA_DFLAG_ACPI_PENDING;
+ }
+ } else {
+	/* SATA _GTF needs to be evaluated after _SDD and
+ * there's no reason to evaluate IDE _GTF early
+ * without _STM. Clear cache and schedule _GTF.
+ */
+ ata_link_for_each_dev(dev, &ap->link) {
+ ata_acpi_clear_gtf(dev);
+ if (ata_dev_enabled(dev))
+ dev->flags |= ATA_DFLAG_ACPI_PENDING;
+ }
+ }
+}
+
+/**
+ * ata_acpi_set_state - set the port power state
+ * @ap: target ATA port
+ * @state: state, on/off
+ *
+ * This function executes the _PS0/_PS3 ACPI methods to set the power
+ * state.  The ACPI spec requires _PS0 on IDE power-on and _PS3 on
+ * power-off.
+ */
+void ata_acpi_set_state(struct ata_port *ap, pm_message_t state)
+{
+ struct ata_device *dev;
+
+ if (!ap->acpi_handle || (ap->flags & ATA_FLAG_ACPI_SATA))
+ return;
+
+	/* power on the channel first and then the drives;
+	   vice versa for power off */
+ if (state.event == PM_EVENT_ON)
+ acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D0);
+
+ ata_link_for_each_dev(dev, &ap->link) {
+ if (dev->acpi_handle && ata_dev_enabled(dev))
+ acpi_bus_set_power(dev->acpi_handle,
+ state.event == PM_EVENT_ON ?
+ ACPI_STATE_D0 : ACPI_STATE_D3);
+ }
+ if (state.event != PM_EVENT_ON)
+ acpi_bus_set_power(ap->acpi_handle, ACPI_STATE_D3);
+}
+
+/**
+ * ata_acpi_on_devcfg - ATA ACPI hook called on device configuration
+ * @dev: target ATA device
+ *
+ * This function is called when @dev is about to be configured.
+ * IDENTIFY data might have been modified after this hook is run.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Positive number if IDENTIFY data needs to be refreshed, 0 if not,
+ * -errno on failure.
+ */
+int ata_acpi_on_devcfg(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
+ int acpi_sata = ap->flags & ATA_FLAG_ACPI_SATA;
+ int nr_executed = 0;
+ int rc;
+
+ if (!dev->acpi_handle)
+ return 0;
+
+ /* do we need to do _GTF? */
+ if (!(dev->flags & ATA_DFLAG_ACPI_PENDING) &&
+ !(acpi_sata && (ehc->i.flags & ATA_EHI_DID_HARDRESET)))
+ return 0;
+
+ /* do _SDD if SATA */
+ if (acpi_sata) {
+ rc = ata_acpi_push_id(dev);
+ if (rc)
+ goto acpi_err;
+ }
+
+ /* do _GTF */
+ rc = ata_acpi_exec_tfs(dev, &nr_executed);
+ if (rc)
+ goto acpi_err;
+
+ dev->flags &= ~ATA_DFLAG_ACPI_PENDING;
+
+ /* refresh IDENTIFY page if any _GTF command has been executed */
+ if (nr_executed) {
+ rc = ata_dev_reread_id(dev, 0);
+ if (rc < 0) {
+ ata_dev_printk(dev, KERN_ERR, "failed to IDENTIFY "
+ "after ACPI commands\n");
+ return rc;
+ }
+ }
+
+ return 0;
+
+ acpi_err:
+ /* ignore evaluation failure if we can continue safely */
+ if (rc == -EINVAL && !nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN))
+ return 0;
+
+ /* fail and let EH retry once more for unknown IO errors */
+ if (!(dev->flags & ATA_DFLAG_ACPI_FAILED)) {
+ dev->flags |= ATA_DFLAG_ACPI_FAILED;
+ return rc;
+ }
+
+ ata_dev_printk(dev, KERN_WARNING,
+ "ACPI: failed the second time, disabled\n");
+ dev->acpi_handle = NULL;
+
+ /* We can safely continue if no _GTF command has been executed
+ * and port is not frozen.
+ */
+ if (!nr_executed && !(ap->pflags & ATA_PFLAG_FROZEN))
+ return 0;
+
+ return rc;
+}
+
+/**
+ * ata_acpi_on_disable - ATA ACPI hook called when a device is disabled
+ * @dev: target ATA device
+ *
+ * This function is called when @dev is about to be disabled.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_acpi_on_disable(struct ata_device *dev)
+{
+ ata_acpi_clear_gtf(dev);
+}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
new file mode 100644
index 0000000..b07b2ff
--- /dev/null
+++ b/drivers/ata/libata-core.c
@@ -0,0 +1,6636 @@
+/*
+ * libata-core.c - helper library for ATA
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from http://www.t13.org/ and
+ * http://www.sata-io.org/
+ *
+ * Standards documents from:
+ * http://www.t13.org (ATA standards, PCI DMA IDE spec)
+ * http://www.t10.org (SCSI MMC - for ATAPI MMC)
+ * http://www.sata-io.org (SATA)
+ * http://www.compactflash.org (CF)
+ * http://www.qic.org (QIC157 - Tape and DSC)
+ * http://www.ce-ata.org (CE-ATA: not supported)
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <asm/byteorder.h>
+#include <linux/cdrom.h>
+
+#include "libata.h"
+
+
+/* debounce timing parameters in msecs { interval, duration, timeout } */
+const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
+const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
+const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
+
+const struct ata_port_operations ata_base_port_ops = {
+ .prereset = ata_std_prereset,
+ .postreset = ata_std_postreset,
+ .error_handler = ata_std_error_handler,
+};
+
+const struct ata_port_operations sata_port_ops = {
+ .inherits = &ata_base_port_ops,
+
+ .qc_defer = ata_std_qc_defer,
+ .hardreset = sata_std_hardreset,
+};
+
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+ u16 heads, u16 sectors);
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
+static unsigned int ata_dev_set_feature(struct ata_device *dev,
+ u8 enable, u8 feature);
+static void ata_dev_xfermask(struct ata_device *dev);
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
+
+unsigned int ata_print_id = 1;
+static struct workqueue_struct *ata_wq;
+
+struct workqueue_struct *ata_aux_wq;
+
+struct ata_force_param {
+ const char *name;
+ unsigned int cbl;
+ int spd_limit;
+ unsigned long xfer_mask;
+ unsigned int horkage_on;
+ unsigned int horkage_off;
+ unsigned int lflags;
+};
+
+struct ata_force_ent {
+ int port;
+ int device;
+ struct ata_force_param param;
+};
+
+static struct ata_force_ent *ata_force_tbl;
+static int ata_force_tbl_size;
+
+static char ata_force_param_buf[PAGE_SIZE] __initdata;
+/* param_buf is thrown away after initialization, disallow read */
+module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
+MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
+
+static int atapi_enabled = 1;
+module_param(atapi_enabled, int, 0444);
+MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
+
+static int atapi_dmadir = 0;
+module_param(atapi_dmadir, int, 0444);
+MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
+
+int atapi_passthru16 = 1;
+module_param(atapi_passthru16, int, 0444);
+MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
+
+int libata_fua = 0;
+module_param_named(fua, libata_fua, int, 0444);
+MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
+
+static int ata_ignore_hpa;
+module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
+MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
+
+static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
+module_param_named(dma, libata_dma_mask, int, 0444);
+MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
+
+static int ata_probe_timeout;
+module_param(ata_probe_timeout, int, 0444);
+MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
+
+int libata_noacpi = 0;
+module_param_named(noacpi, libata_noacpi, int, 0444);
+MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
+
+int libata_allow_tpm = 0;
+module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Library module for ATA devices");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+
+/*
+ * Iterator helpers. Don't use directly.
+ *
+ * LOCKING:
+ * Host lock or EH context.
+ */
+struct ata_link *__ata_port_next_link(struct ata_port *ap,
+ struct ata_link *link, bool dev_only)
+{
+ /* NULL link indicates start of iteration */
+ if (!link) {
+ if (dev_only && sata_pmp_attached(ap))
+ return ap->pmp_link;
+ return &ap->link;
+ }
+
+ /* we just iterated over the host master link, what's next? */
+ if (link == &ap->link) {
+ if (!sata_pmp_attached(ap)) {
+ if (unlikely(ap->slave_link) && !dev_only)
+ return ap->slave_link;
+ return NULL;
+ }
+ return ap->pmp_link;
+ }
+
+ /* slave_link excludes PMP */
+ if (unlikely(link == ap->slave_link))
+ return NULL;
+
+ /* iterate to the next PMP link */
+ if (++link < ap->pmp_link + ap->nr_pmp_links)
+ return link;
+ return NULL;
+}
+
+/**
+ * ata_dev_phys_link - find physical link for a device
+ * @dev: ATA device to look up physical link for
+ *
+ * Look up physical link which @dev is attached to. Note that
+ * this is different from @dev->link only when @dev is on slave
+ * link. For all other cases, it's the same as @dev->link.
+ *
+ * LOCKING:
+ * Don't care.
+ *
+ * RETURNS:
+ * Pointer to the found physical link.
+ */
+struct ata_link *ata_dev_phys_link(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+
+ if (!ap->slave_link)
+ return dev->link;
+ if (!dev->devno)
+ return &ap->link;
+ return ap->slave_link;
+}
+
+/**
+ * ata_force_cbl - force cable type according to libata.force
+ * @ap: ATA port of interest
+ *
+ * Force cable type according to libata.force and whine about it.
+ * The last entry which has matching port number is used, so it
+ * can be specified as part of device force parameters. For
+ * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
+ * same effect.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_force_cbl(struct ata_port *ap)
+{
+ int i;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != ap->print_id)
+ continue;
+
+ if (fe->param.cbl == ATA_CBL_NONE)
+ continue;
+
+ ap->cbl = fe->param.cbl;
+ ata_port_printk(ap, KERN_NOTICE,
+ "FORCE: cable set to %s\n", fe->param.name);
+ return;
+ }
+}
+
+/**
+ * ata_force_link_limits - force link limits according to libata.force
+ * @link: ATA link of interest
+ *
+ * Force link flags and SATA spd limit according to libata.force
+ * and whine about it. When only the port part is specified
+ * (e.g. 1:), the limit applies to all links connected to both
+ * the host link and all fan-out ports connected via PMP. If the
+ * device part is specified as 0 (e.g. 1.00:), it specifies the
+ * first fan-out link, not the host link.  Device number 15 always
+ * points to the host link whether PMP is attached or not. If the
+ * controller has slave link, device number 16 points to it.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_link_limits(struct ata_link *link)
+{
+ bool did_spd = false;
+ int linkno = link->pmp;
+ int i;
+
+ if (ata_is_host_link(link))
+ linkno += 15;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != link->ap->print_id)
+ continue;
+
+ if (fe->device != -1 && fe->device != linkno)
+ continue;
+
+ /* only honor the first spd limit */
+ if (!did_spd && fe->param.spd_limit) {
+ link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
+ ata_link_printk(link, KERN_NOTICE,
+ "FORCE: PHY spd limit set to %s\n",
+ fe->param.name);
+ did_spd = true;
+ }
+
+ /* let lflags stack */
+ if (fe->param.lflags) {
+ link->flags |= fe->param.lflags;
+ ata_link_printk(link, KERN_NOTICE,
+ "FORCE: link flag 0x%x forced -> 0x%x\n",
+ fe->param.lflags, link->flags);
+ }
+ }
+}
+
+/**
+ * ata_force_xfermask - force xfermask according to libata.force
+ * @dev: ATA device of interest
+ *
+ * Force xfer_mask according to libata.force and whine about it.
+ * For consistency with link selection, device number 15 selects
+ * the first device connected to the host link.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_xfermask(struct ata_device *dev)
+{
+ int devno = dev->link->pmp + dev->devno;
+ int alt_devno = devno;
+ int i;
+
+ /* allow n.15/16 for devices attached to host port */
+ if (ata_is_host_link(dev->link))
+ alt_devno += 15;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+ unsigned long pio_mask, mwdma_mask, udma_mask;
+
+ if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+ continue;
+
+ if (fe->device != -1 && fe->device != devno &&
+ fe->device != alt_devno)
+ continue;
+
+ if (!fe->param.xfer_mask)
+ continue;
+
+ ata_unpack_xfermask(fe->param.xfer_mask,
+ &pio_mask, &mwdma_mask, &udma_mask);
+ if (udma_mask)
+ dev->udma_mask = udma_mask;
+ else if (mwdma_mask) {
+ dev->udma_mask = 0;
+ dev->mwdma_mask = mwdma_mask;
+ } else {
+ dev->udma_mask = 0;
+ dev->mwdma_mask = 0;
+ dev->pio_mask = pio_mask;
+ }
+
+ ata_dev_printk(dev, KERN_NOTICE,
+ "FORCE: xfer_mask set to %s\n", fe->param.name);
+ return;
+ }
+}
+
+/**
+ * ata_force_horkage - force horkage according to libata.force
+ * @dev: ATA device of interest
+ *
+ * Force horkage according to libata.force and whine about it.
+ * For consistency with link selection, device number 15 selects
+ * the first device connected to the host link.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_horkage(struct ata_device *dev)
+{
+ int devno = dev->link->pmp + dev->devno;
+ int alt_devno = devno;
+ int i;
+
+ /* allow n.15/16 for devices attached to host port */
+ if (ata_is_host_link(dev->link))
+ alt_devno += 15;
+
+ for (i = 0; i < ata_force_tbl_size; i++) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+ continue;
+
+ if (fe->device != -1 && fe->device != devno &&
+ fe->device != alt_devno)
+ continue;
+
+ if (!(~dev->horkage & fe->param.horkage_on) &&
+ !(dev->horkage & fe->param.horkage_off))
+ continue;
+
+ dev->horkage |= fe->param.horkage_on;
+ dev->horkage &= ~fe->param.horkage_off;
+
+ ata_dev_printk(dev, KERN_NOTICE,
+ "FORCE: horkage modified (%s)\n", fe->param.name);
+ }
+}
+
+/**
+ * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
+ * @opcode: SCSI opcode
+ *
+ * Determine ATAPI command type from @opcode.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
+ */
+int atapi_cmd_type(u8 opcode)
+{
+ switch (opcode) {
+ case GPCMD_READ_10:
+ case GPCMD_READ_12:
+ return ATAPI_READ;
+
+ case GPCMD_WRITE_10:
+ case GPCMD_WRITE_12:
+ case GPCMD_WRITE_AND_VERIFY_10:
+ return ATAPI_WRITE;
+
+ case GPCMD_READ_CD:
+ case GPCMD_READ_CD_MSF:
+ return ATAPI_READ_CD;
+
+ case ATA_16:
+ case ATA_12:
+ if (atapi_passthru16)
+ return ATAPI_PASS_THRU;
+ /* fall thru */
+ default:
+ return ATAPI_MISC;
+ }
+}
+
+/**
+ * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
+ * @tf: Taskfile to convert
+ * @pmp: Port multiplier port
+ * @is_cmd: This FIS is for command
+ * @fis: Buffer into which data will be output
+ *
+ * Converts a standard ATA taskfile to a Serial ATA
+ * FIS structure (Register - Host to Device).
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
+{
+ fis[0] = 0x27; /* Register - Host to Device FIS */
+ fis[1] = pmp & 0xf; /* Port multiplier number*/
+ if (is_cmd)
+ fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
+
+ fis[2] = tf->command;
+ fis[3] = tf->feature;
+
+ fis[4] = tf->lbal;
+ fis[5] = tf->lbam;
+ fis[6] = tf->lbah;
+ fis[7] = tf->device;
+
+ fis[8] = tf->hob_lbal;
+ fis[9] = tf->hob_lbam;
+ fis[10] = tf->hob_lbah;
+ fis[11] = tf->hob_feature;
+
+ fis[12] = tf->nsect;
+ fis[13] = tf->hob_nsect;
+ fis[14] = 0;
+ fis[15] = tf->ctl;
+
+ fis[16] = 0;
+ fis[17] = 0;
+ fis[18] = 0;
+ fis[19] = 0;
+}
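+
+/*
+ * Byte-level example: a SET FEATURES command FIS aimed at PMP port 2
+ * starts with 0x27 (Register - Host to Device), 0x82 (command bit |
+ * port 2) and 0xef (ATA_CMD_SET_FEATURES), with the remaining bytes
+ * mirroring the taskfile fields laid out above.
+ */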
+
+/**
+ * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
+ * @fis: Buffer from which data will be input
+ * @tf: Taskfile to output
+ *
+ * Converts a serial ATA FIS structure to a standard ATA taskfile.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
+{
+ tf->command = fis[2]; /* status */
+ tf->feature = fis[3]; /* error */
+
+ tf->lbal = fis[4];
+ tf->lbam = fis[5];
+ tf->lbah = fis[6];
+ tf->device = fis[7];
+
+ tf->hob_lbal = fis[8];
+ tf->hob_lbam = fis[9];
+ tf->hob_lbah = fis[10];
+
+ tf->nsect = fis[12];
+ tf->hob_nsect = fis[13];
+}
+
+static const u8 ata_rw_cmds[] = {
+ /* pio multi */
+ ATA_CMD_READ_MULTI,
+ ATA_CMD_WRITE_MULTI,
+ ATA_CMD_READ_MULTI_EXT,
+ ATA_CMD_WRITE_MULTI_EXT,
+ 0,
+ 0,
+ 0,
+ ATA_CMD_WRITE_MULTI_FUA_EXT,
+ /* pio */
+ ATA_CMD_PIO_READ,
+ ATA_CMD_PIO_WRITE,
+ ATA_CMD_PIO_READ_EXT,
+ ATA_CMD_PIO_WRITE_EXT,
+ 0,
+ 0,
+ 0,
+ 0,
+ /* dma */
+ ATA_CMD_READ,
+ ATA_CMD_WRITE,
+ ATA_CMD_READ_EXT,
+ ATA_CMD_WRITE_EXT,
+ 0,
+ 0,
+ 0,
+ ATA_CMD_WRITE_FUA_EXT
+};
+
+/**
+ * ata_rwcmd_protocol - set taskfile r/w commands and protocol
+ * @tf: command to examine and configure
+ * @dev: device tf belongs to
+ *
+ * Examine the device configuration and tf->flags to calculate
+ * the proper read/write commands and protocol to use.
+ *
+ * LOCKING:
+ * caller.
+ */
+static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
+{
+ u8 cmd;
+
+ int index, fua, lba48, write;
+
+ fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
+ lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
+ write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
+
+ if (dev->flags & ATA_DFLAG_PIO) {
+ tf->protocol = ATA_PROT_PIO;
+ index = dev->multi_count ? 0 : 8;
+ } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
+ /* Unable to use DMA due to host limitation */
+ tf->protocol = ATA_PROT_PIO;
+ index = dev->multi_count ? 0 : 8;
+ } else {
+ tf->protocol = ATA_PROT_DMA;
+ index = 16;
+ }
+
+ cmd = ata_rw_cmds[index + fua + lba48 + write];
+ if (cmd) {
+ tf->command = cmd;
+ return 0;
+ }
+ return -1;
+}
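+
+/*
+ * Worked example for the table lookup above: a DMA write with FUA on
+ * an LBA48-capable device indexes ata_rw_cmds[16 + 4 + 2 + 1], i.e.
+ * ATA_CMD_WRITE_FUA_EXT, while a plain PIO read without multi-count
+ * uses ata_rw_cmds[8 + 0 + 0 + 0], i.e. ATA_CMD_PIO_READ.
+ */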
+
+/**
+ * ata_tf_read_block - Read block address from ATA taskfile
+ * @tf: ATA taskfile of interest
+ * @dev: ATA device @tf belongs to
+ *
+ * LOCKING:
+ * None.
+ *
+ * Read block address from @tf. This function can handle all
+ * three address formats - LBA, LBA48 and CHS. tf->protocol and
+ * flags select the address format to use.
+ *
+ * RETURNS:
+ * Block address read from @tf.
+ */
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+{
+ u64 block = 0;
+
+ if (tf->flags & ATA_TFLAG_LBA) {
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ block |= (u64)tf->hob_lbah << 40;
+ block |= (u64)tf->hob_lbam << 32;
+ block |= (u64)tf->hob_lbal << 24;
+ } else
+ block |= (tf->device & 0xf) << 24;
+
+ block |= tf->lbah << 16;
+ block |= tf->lbam << 8;
+ block |= tf->lbal;
+ } else {
+ u32 cyl, head, sect;
+
+ cyl = tf->lbam | (tf->lbah << 8);
+ head = tf->device & 0xf;
+ sect = tf->lbal;
+
+ block = (cyl * dev->heads + head) * dev->sectors + sect;
+ }
+
+ return block;
+}
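+
+/*
+ * CHS worked example: with dev->heads == 16 and dev->sectors == 63, a
+ * taskfile addressing cylinder 2, head 3, sector 4 decodes, as
+ * computed above, to (2 * 16 + 3) * 63 + 4 == 2209.
+ */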
+
+/**
+ * ata_build_rw_tf - Build ATA taskfile for given read/write request
+ * @tf: Target ATA taskfile
+ * @dev: ATA device @tf belongs to
+ * @block: Block address
+ * @n_block: Number of blocks
+ * @tf_flags: RW/FUA etc...
+ * @tag: tag
+ *
+ * LOCKING:
+ * None.
+ *
+ * Build ATA taskfile @tf for read/write request described by
+ * @block, @n_block, @tf_flags and @tag on @dev.
+ *
+ * RETURNS:
+ * 0 on success, -ERANGE if the request is too large for @dev,
+ * -EINVAL if the request is invalid.
+ */
+int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+ u64 block, u32 n_block, unsigned int tf_flags,
+ unsigned int tag)
+{
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf->flags |= tf_flags;
+
+ if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
+ /* yay, NCQ */
+ if (!lba_48_ok(block, n_block))
+ return -ERANGE;
+
+ tf->protocol = ATA_PROT_NCQ;
+ tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+
+ if (tf->flags & ATA_TFLAG_WRITE)
+ tf->command = ATA_CMD_FPDMA_WRITE;
+ else
+ tf->command = ATA_CMD_FPDMA_READ;
+
+ tf->nsect = tag << 3;
+ tf->hob_feature = (n_block >> 8) & 0xff;
+ tf->feature = n_block & 0xff;
+
+ tf->hob_lbah = (block >> 40) & 0xff;
+ tf->hob_lbam = (block >> 32) & 0xff;
+ tf->hob_lbal = (block >> 24) & 0xff;
+ tf->lbah = (block >> 16) & 0xff;
+ tf->lbam = (block >> 8) & 0xff;
+ tf->lbal = block & 0xff;
+
+ tf->device = 1 << 6;
+ if (tf->flags & ATA_TFLAG_FUA)
+ tf->device |= 1 << 7;
+ } else if (dev->flags & ATA_DFLAG_LBA) {
+ tf->flags |= ATA_TFLAG_LBA;
+
+ if (lba_28_ok(block, n_block)) {
+ /* use LBA28 */
+ tf->device |= (block >> 24) & 0xf;
+ } else if (lba_48_ok(block, n_block)) {
+ if (!(dev->flags & ATA_DFLAG_LBA48))
+ return -ERANGE;
+
+ /* use LBA48 */
+ tf->flags |= ATA_TFLAG_LBA48;
+
+ tf->hob_nsect = (n_block >> 8) & 0xff;
+
+ tf->hob_lbah = (block >> 40) & 0xff;
+ tf->hob_lbam = (block >> 32) & 0xff;
+ tf->hob_lbal = (block >> 24) & 0xff;
+ } else
+ /* request too large even for LBA48 */
+ return -ERANGE;
+
+ if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+ return -EINVAL;
+
+ tf->nsect = n_block & 0xff;
+
+ tf->lbah = (block >> 16) & 0xff;
+ tf->lbam = (block >> 8) & 0xff;
+ tf->lbal = block & 0xff;
+
+ tf->device |= ATA_LBA;
+ } else {
+ /* CHS */
+ u32 sect, head, cyl, track;
+
+ /* The request -may- be too large for CHS addressing. */
+ if (!lba_28_ok(block, n_block))
+ return -ERANGE;
+
+ if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+ return -EINVAL;
+
+ /* Convert LBA to CHS */
+ track = (u32)block / dev->sectors;
+ cyl = track / dev->heads;
+ head = track % dev->heads;
+ sect = (u32)block % dev->sectors + 1;
+
+ DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+ (u32)block, track, cyl, head, sect);
+
+ /* Check whether the converted CHS can fit.
+ Cylinder: 0-65535
+ Head: 0-15
+ Sector: 1-255*/
+ if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+ return -ERANGE;
+
+ tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+ tf->lbal = sect;
+ tf->lbam = cyl;
+ tf->lbah = cyl >> 8;
+ tf->device |= head;
+ }
+
+ return 0;
+}
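+
+/*
+ * Illustrative call (values hypothetical): building a plain 16-sector
+ * write at LBA 1000.  Passing ATA_TAG_INTERNAL skips the NCQ branch;
+ * a nonzero return means the request is out of range or invalid for
+ * this device.
+ *
+ *	struct ata_taskfile tf;
+ *	int rc;
+ *
+ *	ata_tf_init(dev, &tf);
+ *	rc = ata_build_rw_tf(&tf, dev, 1000, 16, ATA_TFLAG_WRITE,
+ *			     ATA_TAG_INTERNAL);
+ */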
+
+/**
+ * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
+ * @pio_mask: pio_mask
+ * @mwdma_mask: mwdma_mask
+ * @udma_mask: udma_mask
+ *
+ * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
+ * unsigned int xfer_mask.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Packed xfer_mask.
+ */
+unsigned long ata_pack_xfermask(unsigned long pio_mask,
+ unsigned long mwdma_mask,
+ unsigned long udma_mask)
+{
+ return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
+ ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
+ ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
+}
+
+/**
+ * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
+ * @xfer_mask: xfer_mask to unpack
+ * @pio_mask: resulting pio_mask
+ * @mwdma_mask: resulting mwdma_mask
+ * @udma_mask: resulting udma_mask
+ *
+ * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
+ * Any NULL destination masks will be ignored.
+ */
+void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
+ unsigned long *mwdma_mask, unsigned long *udma_mask)
+{
+ if (pio_mask)
+ *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
+ if (mwdma_mask)
+ *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
+ if (udma_mask)
+ *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
+}
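+
+/*
+ * Minimal usage sketch (variable names illustrative): packing and
+ * unpacking are inverses as long as each input mask fits its field:
+ *
+ *	unsigned long xfer_mask, pio, mwdma, udma;
+ *
+ *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
+ *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
+ *
+ * afterwards pio == 0x1f, mwdma == 0x07 and udma == 0x3f again.
+ */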
+
+static const struct ata_xfer_ent {
+ int shift, bits;
+ u8 base;
+} ata_xfer_tbl[] = {
+ { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
+ { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
+ { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
+ { -1, },
+};
+
+/**
+ * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
+ * @xfer_mask: xfer_mask of interest
+ *
+ * Return matching XFER_* value for @xfer_mask. Only the highest
+ * bit of @xfer_mask is considered.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Matching XFER_* value, 0xff if no match found.
+ */
+u8 ata_xfer_mask2mode(unsigned long xfer_mask)
+{
+ int highbit = fls(xfer_mask) - 1;
+ const struct ata_xfer_ent *ent;
+
+ for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+ if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
+ return ent->base + highbit - ent->shift;
+ return 0xff;
+}
+
+/**
+ * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
+ * @xfer_mode: XFER_* of interest
+ *
+ * Return matching xfer_mask for @xfer_mode.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Matching xfer_mask, 0 if no match found.
+ */
+unsigned long ata_xfer_mode2mask(u8 xfer_mode)
+{
+ const struct ata_xfer_ent *ent;
+
+ for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+ if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
+ return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
+ & ~((1 << ent->shift) - 1);
+ return 0;
+}
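+
+/*
+ * Worked example: an xfer_mask whose highest set bit is the UDMA/66
+ * bit maps to XFER_UDMA_4 via ata_xfer_mask2mode(); feeding
+ * XFER_UDMA_4 into ata_xfer_mode2mask() yields a mask with every UDMA
+ * bit up to and including UDMA/66 set and no PIO/MWDMA bits.
+ */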
+
+/**
+ * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
+ * @xfer_mode: XFER_* of interest
+ *
+ * Return matching xfer_shift for @xfer_mode.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Matching xfer_shift, -1 if no match found.
+ */
+int ata_xfer_mode2shift(unsigned long xfer_mode)
+{
+ const struct ata_xfer_ent *ent;
+
+ for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+ if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
+ return ent->shift;
+ return -1;
+}
+
+/**
+ * ata_mode_string - convert xfer_mask to string
+ * @xfer_mask: mask of bits supported; only highest bit counts.
+ *
+ * Determine string which represents the highest speed
+ * (highest bit in @xfer_mask).
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Constant C string representing highest speed listed in
+ * @xfer_mask, or the constant C string "<n/a>".
+ */
+const char *ata_mode_string(unsigned long xfer_mask)
+{
+ static const char * const xfer_mode_str[] = {
+ "PIO0",
+ "PIO1",
+ "PIO2",
+ "PIO3",
+ "PIO4",
+ "PIO5",
+ "PIO6",
+ "MWDMA0",
+ "MWDMA1",
+ "MWDMA2",
+ "MWDMA3",
+ "MWDMA4",
+ "UDMA/16",
+ "UDMA/25",
+ "UDMA/33",
+ "UDMA/44",
+ "UDMA/66",
+ "UDMA/100",
+ "UDMA/133",
+ "UDMA7",
+ };
+ int highbit;
+
+ highbit = fls(xfer_mask) - 1;
+ if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
+ return xfer_mode_str[highbit];
+ return "<n/a>";
+}
+
+static const char *sata_spd_string(unsigned int spd)
+{
+ static const char * const spd_str[] = {
+ "1.5 Gbps",
+ "3.0 Gbps",
+ };
+
+ if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
+ return "<unknown>";
+ return spd_str[spd - 1];
+}
+
+void ata_dev_disable(struct ata_device *dev)
+{
+ if (ata_dev_enabled(dev)) {
+ if (ata_msg_drv(dev->link->ap))
+ ata_dev_printk(dev, KERN_WARNING, "disabled\n");
+ ata_acpi_on_disable(dev);
+ ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
+ ATA_DNXFER_QUIET);
+ dev->class++;
+ }
+}
+
+static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ u32 scontrol;
+ unsigned int err_mask;
+ int rc;
+
+ /*
+	 * Disallow DIPM for drivers which haven't set
+	 * ATA_FLAG_IPM.  When DIPM is enabled, PHY-ready gets set in
+	 * the interrupt status on state changes, which leads some
+	 * drivers to think there are errors; additionally, such
+	 * drivers would need to disable hot plug.
+ */
+ if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
+ ap->pm_policy = NOT_AVAILABLE;
+ return -EINVAL;
+ }
+
+ /*
+ * For DIPM, we will only enable it for the
+ * min_power setting.
+ *
+	 * Why? Because disks are too stupid to know that if
+	 * the host rejects a request to go to SLUMBER they
+	 * should retry at PARTIAL; instead they just give up.
+	 * So, for medium_power to work at all, we need to only
+	 * allow HIPM.
+ */
+ rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
+ if (rc)
+ return rc;
+
+ switch (policy) {
+ case MIN_POWER:
+ /* no restrictions on IPM transitions */
+ scontrol &= ~(0x3 << 8);
+ rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+ if (rc)
+ return rc;
+
+ /* enable DIPM */
+ if (dev->flags & ATA_DFLAG_DIPM)
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_ENABLE, SATA_DIPM);
+ break;
+ case MEDIUM_POWER:
+ /* allow IPM to PARTIAL */
+ scontrol &= ~(0x1 << 8);
+ scontrol |= (0x2 << 8);
+ rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+ if (rc)
+ return rc;
+
+ /*
+		 * We don't have to disable DIPM since the IPM flags
+		 * disallow transitions to SLUMBER, which effectively
+		 * disables DIPM if the device does not support PARTIAL.
+ */
+ break;
+ case NOT_AVAILABLE:
+ case MAX_PERFORMANCE:
+ /* disable all IPM transitions */
+ scontrol |= (0x3 << 8);
+ rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+ if (rc)
+ return rc;
+
+ /*
+		 * We don't have to disable DIPM since the IPM flags
+		 * disallow all transitions, which effectively
+		 * disables DIPM anyway.
+ */
+ break;
+ }
+
+ /* FIXME: handle SET FEATURES failure */
+ (void) err_mask;
+
+ return 0;
+}
+
+/**
+ * ata_dev_enable_pm - enable SATA interface power management
+ * @dev: device to enable power management
+ * @policy: the link power management policy
+ *
+ * Enable SATA Interface power management. This will enable
+ * Device Interface Power Management (DIPM) for min_power
+ * policy, and then call driver specific callbacks for
+ * enabling Host Initiated Power management.
+ *
+ * Locking: Caller.
+ * Returns: void; on failure @ap->pm_policy falls back to
+ * MAX_PERFORMANCE.
+ */
+void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
+{
+ int rc = 0;
+ struct ata_port *ap = dev->link->ap;
+
+ /* set HIPM first, then DIPM */
+ if (ap->ops->enable_pm)
+ rc = ap->ops->enable_pm(ap, policy);
+ if (rc)
+ goto enable_pm_out;
+ rc = ata_dev_set_dipm(dev, policy);
+
+enable_pm_out:
+ if (rc)
+ ap->pm_policy = MAX_PERFORMANCE;
+ else
+ ap->pm_policy = policy;
+ return /* rc */; /* hopefully we can use 'rc' eventually */
+}
+
+#ifdef CONFIG_PM
+/**
+ * ata_dev_disable_pm - disable SATA interface power management
+ * @dev: device to disable power management
+ *
+ * Disable SATA Interface power management. This will disable
+ * Device Interface Power Management (DIPM) without changing
+ * policy, call driver specific callbacks for disabling Host
+ * Initiated Power management.
+ *
+ * Locking: Caller.
+ * Returns: void
+ */
+static void ata_dev_disable_pm(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+
+ ata_dev_set_dipm(dev, MAX_PERFORMANCE);
+ if (ap->ops->disable_pm)
+ ap->ops->disable_pm(ap);
+}
+#endif /* CONFIG_PM */
+
+void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
+{
+ ap->pm_policy = policy;
+ ap->link.eh_info.action |= ATA_EH_LPM;
+ ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
+ ata_port_schedule_eh(ap);
+}
+
+#ifdef CONFIG_PM
+static void ata_lpm_enable(struct ata_host *host)
+{
+ struct ata_link *link;
+ struct ata_port *ap;
+ struct ata_device *dev;
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ ap = host->ports[i];
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link)
+ ata_dev_disable_pm(dev);
+ }
+ }
+}
+
+static void ata_lpm_disable(struct ata_host *host)
+{
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ ata_lpm_schedule(ap, ap->pm_policy);
+ }
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ata_dev_classify - determine device type based on ATA-spec signature
+ * @tf: ATA taskfile register set for device to be identified
+ *
+ * Determine from taskfile register contents whether a device is
+ * ATA or ATAPI, as per "Signature and persistence" section
+ * of ATA/PI spec (volume 1, sect 5.14).
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
+ * %ATA_DEV_UNKNOWN in the event of failure.
+ */
+unsigned int ata_dev_classify(const struct ata_taskfile *tf)
+{
+	/* Apple's open source Darwin code hints that some devices only
+	 * put a proper signature into the LBA mid/high registers, so
+	 * we only check those; it's sufficient for uniqueness.
+	 *
+	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
+	 * signatures for ATA and ATAPI devices attached on SerialATA,
+	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
+	 * spec never mentioned using different signatures for ATA/ATAPI
+	 * devices.  Then, the Serial ATA II: Port Multiplier
+	 * specification began to use 0x69/0x96 to identify port
+	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
+	 * ATA/ATAPI-7 shortly thereafter dropped the descriptions of
+	 * 0x3c/0xc3 and 0x69/0x96 and listed them as reserved for
+	 * SerialATA.
+	 *
+	 * We follow the current spec and consider that 0x69/0x96
+	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
+	 */
+ if ((tf->lbam == 0) && (tf->lbah == 0)) {
+ DPRINTK("found ATA device by sig\n");
+ return ATA_DEV_ATA;
+ }
+
+ if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
+ DPRINTK("found ATAPI device by sig\n");
+ return ATA_DEV_ATAPI;
+ }
+
+ if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
+ DPRINTK("found PMP device by sig\n");
+ return ATA_DEV_PMP;
+ }
+
+ if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
+ printk(KERN_INFO "ata: SEMB device ignored\n");
+ return ATA_DEV_SEMB_UNSUP; /* not yet */
+ }
+
+ DPRINTK("unknown device\n");
+ return ATA_DEV_UNKNOWN;
+}
+
+/**
+ * ata_id_string - Convert IDENTIFY DEVICE page into string
+ * @id: IDENTIFY DEVICE results we will examine
+ * @s: string into which data is output
+ * @ofs: offset into identify device page
+ * @len: length of string to return. must be an even number.
+ *
+ * The strings in the IDENTIFY DEVICE page are broken up into
+ * 16-bit chunks. Run through the string, and output each
+ * 8-bit chunk linearly, regardless of platform.
+ *
+ * LOCKING:
+ * caller.
+ */
+
+void ata_id_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len)
+{
+ unsigned int c;
+
+ BUG_ON(len & 1);
+
+ while (len > 0) {
+ c = id[ofs] >> 8;
+ *s = c;
+ s++;
+
+ c = id[ofs] & 0xff;
+ *s = c;
+ s++;
+
+ ofs++;
+ len -= 2;
+ }
+}
+
+/**
+ * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
+ * @id: IDENTIFY DEVICE results we will examine
+ * @s: string into which data is output
+ * @ofs: offset into identify device page
+ * @len: length of string to return. must be an odd number.
+ *
+ * This function is identical to ata_id_string except that it
+ * trims trailing spaces and terminates the resulting string with
+ * null. @len must be actual maximum length (even number) + 1.
+ *
+ * LOCKING:
+ * caller.
+ */
+void ata_id_c_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len)
+{
+ unsigned char *p;
+
+ ata_id_string(id, s, ofs, len - 1);
+
+ p = s + strnlen(s, len - 1);
+ while (p > s && p[-1] == ' ')
+ p--;
+ *p = '\0';
+}
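+
+/*
+ * Typical use (sketch; buffer name illustrative, size per the
+ * IDENTIFY layout):
+ *
+ *	unsigned char model[ATA_ID_PROD_LEN + 1];
+ *
+ *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
+ *
+ * which yields the trimmed, NUL-terminated model string from the
+ * IDENTIFY page.
+ */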
+
+static u64 ata_id_n_sectors(const u16 *id)
+{
+ if (ata_id_has_lba(id)) {
+ if (ata_id_has_lba48(id))
+ return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
+ else
+ return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
+ } else {
+ if (ata_id_current_chs_valid(id))
+ return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
+ id[ATA_ID_CUR_SECTORS];
+ else
+ return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
+ id[ATA_ID_SECTORS];
+ }
+}
+
+u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
+{
+ u64 sectors = 0;
+
+ sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
+ sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
+ sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
+ sectors |= (tf->lbah & 0xff) << 16;
+ sectors |= (tf->lbam & 0xff) << 8;
+ sectors |= (tf->lbal & 0xff);
+
+ return sectors;
+}
+
+u64 ata_tf_to_lba(const struct ata_taskfile *tf)
+{
+ u64 sectors = 0;
+
+ sectors |= (tf->device & 0x0f) << 24;
+ sectors |= (tf->lbah & 0xff) << 16;
+ sectors |= (tf->lbam & 0xff) << 8;
+ sectors |= (tf->lbal & 0xff);
+
+ return sectors;
+}
+
+/**
+ * ata_read_native_max_address - Read native max address
+ * @dev: target device
+ * @max_sectors: out parameter for the result native max address
+ *
+ * Perform an LBA48 or LBA28 native size query upon the device in
+ * question.
+ *
+ * RETURNS:
+ * 0 on success, -EACCES if command is aborted by the drive.
+ * -EIO on other errors.
+ */
+static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
+{
+ unsigned int err_mask;
+ struct ata_taskfile tf;
+ int lba48 = ata_id_has_lba48(dev->id);
+
+ ata_tf_init(dev, &tf);
+
+ /* always clear all address registers */
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+
+ if (lba48) {
+ tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
+ tf.flags |= ATA_TFLAG_LBA48;
+ } else
+ tf.command = ATA_CMD_READ_NATIVE_MAX;
+
+ tf.protocol |= ATA_PROT_NODATA;
+ tf.device |= ATA_LBA;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_WARNING, "failed to read native "
+ "max address (err_mask=0x%x)\n", err_mask);
+ if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+ return -EACCES;
+ return -EIO;
+ }
+
+ if (lba48)
+ *max_sectors = ata_tf_to_lba48(&tf) + 1;
+ else
+ *max_sectors = ata_tf_to_lba(&tf) + 1;
+ if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
+ (*max_sectors)--;
+ return 0;
+}
+
+/**
+ * ata_set_max_sectors - Set max sectors
+ * @dev: target device
+ * @new_sectors: new max sectors value to set for the device
+ *
+ * Set max sectors of @dev to @new_sectors.
+ *
+ * RETURNS:
+ * 0 on success, -EACCES if command is aborted or denied (due to
+ * previous non-volatile SET_MAX) by the drive. -EIO on other
+ * errors.
+ */
+static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
+{
+ unsigned int err_mask;
+ struct ata_taskfile tf;
+ int lba48 = ata_id_has_lba48(dev->id);
+
+ new_sectors--;
+
+ ata_tf_init(dev, &tf);
+
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+
+ if (lba48) {
+ tf.command = ATA_CMD_SET_MAX_EXT;
+ tf.flags |= ATA_TFLAG_LBA48;
+
+ tf.hob_lbal = (new_sectors >> 24) & 0xff;
+ tf.hob_lbam = (new_sectors >> 32) & 0xff;
+ tf.hob_lbah = (new_sectors >> 40) & 0xff;
+ } else {
+ tf.command = ATA_CMD_SET_MAX;
+
+ tf.device |= (new_sectors >> 24) & 0xf;
+ }
+
+ tf.protocol |= ATA_PROT_NODATA;
+ tf.device |= ATA_LBA;
+
+ tf.lbal = (new_sectors >> 0) & 0xff;
+ tf.lbam = (new_sectors >> 8) & 0xff;
+ tf.lbah = (new_sectors >> 16) & 0xff;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_WARNING, "failed to set "
+ "max address (err_mask=0x%x)\n", err_mask);
+ if (err_mask == AC_ERR_DEV &&
+ (tf.feature & (ATA_ABORTED | ATA_IDNF)))
+ return -EACCES;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ata_hpa_resize - Resize a device with an HPA set
+ * @dev: Device to resize
+ *
+ * Read the size of an LBA28 or LBA48 disk with HPA features and resize
+ * it if required to the full size of the media. The caller must check
+ * the drive has the HPA feature set enabled.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int ata_hpa_resize(struct ata_device *dev)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+ u64 sectors = ata_id_n_sectors(dev->id);
+ u64 native_sectors;
+ int rc;
+
+ /* do we need to do it? */
+ if (dev->class != ATA_DEV_ATA ||
+ !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
+ (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
+ return 0;
+
+ /* read native max address */
+ rc = ata_read_native_max_address(dev, &native_sectors);
+ if (rc) {
+ /* If device aborted the command or HPA isn't going to
+ * be unlocked, skip HPA resizing.
+ */
+ if (rc == -EACCES || !ata_ignore_hpa) {
+ ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
+ "broken, skipping HPA handling\n");
+ dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+
+ /* we can continue if device aborted the command */
+ if (rc == -EACCES)
+ rc = 0;
+ }
+
+ return rc;
+ }
+
+ /* nothing to do? */
+ if (native_sectors <= sectors || !ata_ignore_hpa) {
+ if (!print_info || native_sectors == sectors)
+ return 0;
+
+ if (native_sectors > sectors)
+ ata_dev_printk(dev, KERN_INFO,
+ "HPA detected: current %llu, native %llu\n",
+ (unsigned long long)sectors,
+ (unsigned long long)native_sectors);
+ else if (native_sectors < sectors)
+ ata_dev_printk(dev, KERN_WARNING,
+ "native sectors (%llu) is smaller than "
+ "sectors (%llu)\n",
+ (unsigned long long)native_sectors,
+ (unsigned long long)sectors);
+ return 0;
+ }
+
+ /* let's unlock HPA */
+ rc = ata_set_max_sectors(dev, native_sectors);
+ if (rc == -EACCES) {
+ /* if device aborted the command, skip HPA resizing */
+ ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
+ "(%llu -> %llu), skipping HPA handling\n",
+ (unsigned long long)sectors,
+ (unsigned long long)native_sectors);
+ dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+ return 0;
+ } else if (rc)
+ return rc;
+
+ /* re-read IDENTIFY data */
+ rc = ata_dev_reread_id(dev, 0);
+ if (rc) {
+ ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
+ "data after HPA resizing\n");
+ return rc;
+ }
+
+ if (print_info) {
+ u64 new_sectors = ata_id_n_sectors(dev->id);
+ ata_dev_printk(dev, KERN_INFO,
+ "HPA unlocked: %llu -> %llu, native %llu\n",
+ (unsigned long long)sectors,
+ (unsigned long long)new_sectors,
+ (unsigned long long)native_sectors);
+ }
+
+ return 0;
+}
+
+/**
+ * ata_dump_id - IDENTIFY DEVICE info debugging output
+ * @id: IDENTIFY DEVICE page to dump
+ *
+ * Dump selected 16-bit words from the given IDENTIFY DEVICE
+ * page.
+ *
+ * LOCKING:
+ * caller.
+ */
+
+static inline void ata_dump_id(const u16 *id)
+{
+ DPRINTK("49==0x%04x "
+ "53==0x%04x "
+ "63==0x%04x "
+ "64==0x%04x "
+ "75==0x%04x \n",
+ id[49],
+ id[53],
+ id[63],
+ id[64],
+ id[75]);
+ DPRINTK("80==0x%04x "
+ "81==0x%04x "
+ "82==0x%04x "
+ "83==0x%04x "
+ "84==0x%04x \n",
+ id[80],
+ id[81],
+ id[82],
+ id[83],
+ id[84]);
+ DPRINTK("88==0x%04x "
+ "93==0x%04x\n",
+ id[88],
+ id[93]);
+}
+
+/**
+ * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
+ * @id: IDENTIFY data to compute xfer mask from
+ *
+ * Compute the xfermask for this device. This is not as trivial
+ * as it seems if we must consider early devices correctly.
+ *
+ * FIXME: pre-IDE drive timing (do we care?).
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Computed xfermask
+ */
+unsigned long ata_id_xfermask(const u16 *id)
+{
+ unsigned long pio_mask, mwdma_mask, udma_mask;
+
+ /* Usual case. Word 53 indicates word 64 is valid */
+ if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
+ pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
+ pio_mask <<= 3;
+ pio_mask |= 0x7;
+ } else {
+ /* If word 64 isn't valid then Word 51 high byte holds
+ * the PIO timing number for the maximum. Turn it into
+ * a mask.
+ */
+ u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
+ if (mode < 5) /* Valid PIO range */
+ pio_mask = (2 << mode) - 1;
+ else
+ pio_mask = 1;
+
+		/* But wait.. there's more.  Design your standards by
+		 * committee and you too can get a free iordy field to
+		 * process.  However, it's the speeds, not the modes,
+		 * that are supported...  Note drivers using the timing
+		 * API will get this right anyway.
+		 */
+ }
+
+ mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
+
+ if (ata_id_is_cfa(id)) {
+ /*
+ * Process compact flash extended modes
+ */
+ int pio = id[163] & 0x7;
+ int dma = (id[163] >> 3) & 7;
+
+ if (pio)
+ pio_mask |= (1 << 5);
+ if (pio > 1)
+ pio_mask |= (1 << 6);
+ if (dma)
+ mwdma_mask |= (1 << 3);
+ if (dma > 1)
+ mwdma_mask |= (1 << 4);
+ }
+
+ udma_mask = 0;
+ if (id[ATA_ID_FIELD_VALID] & (1 << 2))
+ udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
+
+ return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
+}
+
+/**
+ * ata_pio_queue_task - Queue port_task
+ * @ap: The ata_port to queue port_task for
+ * @data: data for the port_task function to use
+ * @delay: delay time in msecs for workqueue function
+ *
+ * Schedule the port task for execution with @data after @delay
+ * msecs.  There is one port_task per port and it's the
+ * user(low level driver)'s responsibility to make sure that only
+ * one task is active at any given time.
+ *
+ * libata core layer takes care of synchronization between
+ * port_task and EH. ata_pio_queue_task() may be ignored for EH
+ * synchronization.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
+{
+ ap->port_task_data = data;
+
+ /* may fail if ata_port_flush_task() in progress */
+ queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
+}
+
+/**
+ * ata_port_flush_task - Flush port_task
+ * @ap: The ata_port to flush port_task for
+ *
+ * After this function completes, port_task is guaranteed not to
+ * be running or scheduled.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_port_flush_task(struct ata_port *ap)
+{
+ DPRINTK("ENTER\n");
+
+ cancel_rearming_delayed_work(&ap->port_task);
+
+ if (ata_msg_ctl(ap))
+ ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
+}
+
+static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
+{
+ struct completion *waiting = qc->private_data;
+
+ complete(waiting);
+}
+
+/**
+ * ata_exec_internal_sg - execute libata internal command
+ * @dev: Device to which the command is sent
+ * @tf: Taskfile registers for the command and the result
+ * @cdb: CDB for packet command
+ * @dma_dir: Data transfer direction of the command
+ * @sgl: sg list for the data buffer of the command
+ * @n_elem: Number of sg entries
+ * @timeout: Timeout in msecs (0 for default)
+ *
+ * Executes libata internal command with timeout. @tf contains
+ * command on entry and result on return. Timeout and error
+ * conditions are reported via return value. No recovery action
+ * is taken after a command times out. It's caller's duty to
+ * clean up after timeout.
+ *
+ * LOCKING:
+ * None. Should be called with kernel context, might sleep.
+ *
+ * RETURNS:
+ * Zero on success, AC_ERR_* mask on failure
+ */
+unsigned ata_exec_internal_sg(struct ata_device *dev,
+ struct ata_taskfile *tf, const u8 *cdb,
+ int dma_dir, struct scatterlist *sgl,
+ unsigned int n_elem, unsigned long timeout)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ u8 command = tf->command;
+ int auto_timeout = 0;
+ struct ata_queued_cmd *qc;
+ unsigned int tag, preempted_tag;
+ u32 preempted_sactive, preempted_qc_active;
+ int preempted_nr_active_links;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ unsigned long flags;
+ unsigned int err_mask;
+ int rc;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* no internal command while frozen */
+ if (ap->pflags & ATA_PFLAG_FROZEN) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return AC_ERR_SYSTEM;
+ }
+
+ /* initialize internal qc */
+
+ /* XXX: Tag 0 is used for drivers with legacy EH as some
+ * drivers choke if any other tag is given. This breaks
+ * ata_tag_internal() test for those drivers. Don't use new
+ * EH stuff without converting to it.
+ */
+ if (ap->ops->error_handler)
+ tag = ATA_TAG_INTERNAL;
+ else
+ tag = 0;
+
+ if (test_and_set_bit(tag, &ap->qc_allocated))
+ BUG();
+ qc = __ata_qc_from_tag(ap, tag);
+
+ qc->tag = tag;
+ qc->scsicmd = NULL;
+ qc->ap = ap;
+ qc->dev = dev;
+ ata_qc_reinit(qc);
+
+ preempted_tag = link->active_tag;
+ preempted_sactive = link->sactive;
+ preempted_qc_active = ap->qc_active;
+ preempted_nr_active_links = ap->nr_active_links;
+ link->active_tag = ATA_TAG_POISON;
+ link->sactive = 0;
+ ap->qc_active = 0;
+ ap->nr_active_links = 0;
+
+ /* prepare & issue qc */
+ qc->tf = *tf;
+ if (cdb)
+ memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
+ qc->flags |= ATA_QCFLAG_RESULT_TF;
+ qc->dma_dir = dma_dir;
+ if (dma_dir != DMA_NONE) {
+ unsigned int i, buflen = 0;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, n_elem, i)
+ buflen += sg->length;
+
+ ata_sg_init(qc, sgl, n_elem);
+ qc->nbytes = buflen;
+ }
+
+ qc->private_data = &wait;
+ qc->complete_fn = ata_qc_complete_internal;
+
+ ata_qc_issue(qc);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ if (!timeout) {
+ if (ata_probe_timeout)
+ timeout = ata_probe_timeout * 1000;
+ else {
+ timeout = ata_internal_cmd_timeout(dev, command);
+ auto_timeout = 1;
+ }
+ }
+
+ rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
+
+ ata_port_flush_task(ap);
+
+ if (!rc) {
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* We're racing with irq here. If we lose, the
+ * following test prevents us from completing the qc
+ * twice. If we win, the port is frozen and will be
+ * cleaned up by ->post_internal_cmd().
+ */
+ if (qc->flags & ATA_QCFLAG_ACTIVE) {
+ qc->err_mask |= AC_ERR_TIMEOUT;
+
+ if (ap->ops->error_handler)
+ ata_port_freeze(ap);
+ else
+ ata_qc_complete(qc);
+
+ if (ata_msg_warn(ap))
+ ata_dev_printk(dev, KERN_WARNING,
+ "qc timeout (cmd 0x%x)\n", command);
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ }
+
+ /* do post_internal_cmd */
+ if (ap->ops->post_internal_cmd)
+ ap->ops->post_internal_cmd(qc);
+
+ /* perform minimal error analysis */
+ if (qc->flags & ATA_QCFLAG_FAILED) {
+ if (qc->result_tf.command & (ATA_ERR | ATA_DF))
+ qc->err_mask |= AC_ERR_DEV;
+
+ if (!qc->err_mask)
+ qc->err_mask |= AC_ERR_OTHER;
+
+ if (qc->err_mask & ~AC_ERR_OTHER)
+ qc->err_mask &= ~AC_ERR_OTHER;
+ }
+
+ /* finish up */
+ spin_lock_irqsave(ap->lock, flags);
+
+ *tf = qc->result_tf;
+ err_mask = qc->err_mask;
+
+ ata_qc_free(qc);
+ link->active_tag = preempted_tag;
+ link->sactive = preempted_sactive;
+ ap->qc_active = preempted_qc_active;
+ ap->nr_active_links = preempted_nr_active_links;
+
+ /* XXX - Some LLDDs (sata_mv) disable port on command failure.
+ * Until those drivers are fixed, we detect the condition
+ * here, fail the command with AC_ERR_SYSTEM and reenable the
+ * port.
+ *
+ * Note that this doesn't change any behavior as internal
+ * command failure results in disabling the device in the
+ * higher layer for LLDDs without new reset/EH callbacks.
+ *
+ * Kill the following code as soon as those drivers are fixed.
+ */
+ if (ap->flags & ATA_FLAG_DISABLED) {
+ err_mask |= AC_ERR_SYSTEM;
+ ata_port_probe(ap);
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
+ ata_internal_cmd_timed_out(dev, command);
+
+ return err_mask;
+}
+
+/**
+ * ata_exec_internal - execute libata internal command
+ * @dev: Device to which the command is sent
+ * @tf: Taskfile registers for the command and the result
+ * @cdb: CDB for packet command
+ * @dma_dir: Data transfer direction of the command
+ * @buf: Data buffer of the command
+ * @buflen: Length of data buffer
+ * @timeout: Timeout in msecs (0 for default)
+ *
+ * Wrapper around ata_exec_internal_sg() which takes simple
+ * buffer instead of sg list.
+ *
+ * LOCKING:
+ * None. Should be called with kernel context, might sleep.
+ *
+ * RETURNS:
+ * Zero on success, AC_ERR_* mask on failure
+ */
+unsigned ata_exec_internal(struct ata_device *dev,
+ struct ata_taskfile *tf, const u8 *cdb,
+ int dma_dir, void *buf, unsigned int buflen,
+ unsigned long timeout)
+{
+ struct scatterlist *psg = NULL, sg;
+ unsigned int n_elem = 0;
+
+ if (dma_dir != DMA_NONE) {
+ WARN_ON(!buf);
+ sg_init_one(&sg, buf, buflen);
+ psg = &sg;
+ n_elem++;
+ }
+
+ return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
+ timeout);
+}
+
+/**
+ * ata_do_simple_cmd - execute simple internal command
+ * @dev: Device to which the command is sent
+ * @cmd: Opcode to execute
+ *
+ * Execute a 'simple' command that consists only of the opcode
+ * @cmd itself, without filling any other taskfile registers.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, AC_ERR_* mask on failure
+ */
+unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
+{
+ struct ata_taskfile tf;
+
+ ata_tf_init(dev, &tf);
+
+ tf.command = cmd;
+ tf.flags |= ATA_TFLAG_DEVICE;
+ tf.protocol = ATA_PROT_NODATA;
+
+ return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+}
+
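+/* Usage sketch (illustrative only, not part of the original file): spinning
+ * a drive down with ata_do_simple_cmd(). ATA_CMD_STANDBYNOW1 is a real
+ * opcode from <linux/ata.h>; how the error is handled is up to the caller.
+ *
+ *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
+ *
+ *	if (err_mask)
+ *		ata_dev_printk(dev, KERN_ERR,
+ *			       "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
+ *			       err_mask);
+ */
+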
+/**
+ * ata_pio_need_iordy - check if iordy needed
+ * @adev: ATA device
+ *
+ * Check if the current speed of the device requires IORDY. Used
+ * by various controllers for chip configuration.
+ */
+
+unsigned int ata_pio_need_iordy(const struct ata_device *adev)
+{
+ /* Controller doesn't support IORDY. Probably a pointless check
+ as the caller should know this */
+ if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
+ return 0;
+ /* PIO3 and higher it is mandatory */
+ if (adev->pio_mode > XFER_PIO_2)
+ return 1;
+ /* We turn it on when possible */
+ if (ata_id_has_iordy(adev->id))
+ return 1;
+ return 0;
+}
+
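+/* Usage sketch (hypothetical driver, not from this file): PATA LLDs
+ * typically consult ata_pio_need_iordy() from their ->set_piomode() hook
+ * when programming chip timings. FOO_IORDY_EN, foo_compute_pio_timing()
+ * and foo_write_timing() are made-up stand-ins for controller details.
+ *
+ *	static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ *	{
+ *		u32 reg = foo_compute_pio_timing(ap, adev->pio_mode);
+ *
+ *		if (ata_pio_need_iordy(adev))
+ *			reg |= FOO_IORDY_EN;
+ *		foo_write_timing(ap, adev->devno, reg);
+ *	}
+ */
+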
+/**
+ * ata_pio_mask_no_iordy - Return the non IORDY mask
+ * @adev: ATA device
+ *
+ * Compute the mask of PIO modes usable without IORDY for @adev.
+ * The result is a PIO xfer mask, not a single mode number.
+ */
+
+static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
+{
+ /* If we have no drive specific rule, then PIO 2 is non IORDY */
+ if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
+ u16 pio = adev->id[ATA_ID_EIDE_PIO];
+ /* Is the speed faster than the drive allows non IORDY ? */
+ if (pio) {
+ /* This is cycle times not frequency - watch the logic! */
+ if (pio > 240) /* PIO2 is 240 ns per cycle */
+ return 3 << ATA_SHIFT_PIO;
+ return 7 << ATA_SHIFT_PIO;
+ }
+ }
+ return 3 << ATA_SHIFT_PIO;
+}
+
+/**
+ * ata_do_dev_read_id - default ID read method
+ * @dev: device
+ * @tf: proposed taskfile
+ * @id: data buffer
+ *
+ * Issue the identify taskfile and hand back the buffer containing
+ * identify data. For some RAID controllers and for pre-ATA devices
+ * this function is wrapped or replaced by the driver.
+ */
+unsigned int ata_do_dev_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
+ id, sizeof(id[0]) * ATA_ID_WORDS, 0);
+}
+
+/**
+ * ata_dev_read_id - Read ID data from the specified device
+ * @dev: target device
+ * @p_class: pointer to class of the target device (may be changed)
+ * @flags: ATA_READID_* flags
+ * @id: buffer to read IDENTIFY data into
+ *
+ * Read ID data from the specified device. ATA_CMD_ID_ATA is
+ * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
+ * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
+ * for pre-ATA4 drives.
+ *
+ * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
+ * now we abort if we hit that case.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
+ unsigned int flags, u16 *id)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int class = *p_class;
+ struct ata_taskfile tf;
+ unsigned int err_mask = 0;
+ const char *reason;
+ int may_fallback = 1, tried_spinup = 0;
+ int rc;
+
+ if (ata_msg_ctl(ap))
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
+
+retry:
+ ata_tf_init(dev, &tf);
+
+ switch (class) {
+ case ATA_DEV_ATA:
+ tf.command = ATA_CMD_ID_ATA;
+ break;
+ case ATA_DEV_ATAPI:
+ tf.command = ATA_CMD_ID_ATAPI;
+ break;
+ default:
+ rc = -ENODEV;
+ reason = "unsupported class";
+ goto err_out;
+ }
+
+ tf.protocol = ATA_PROT_PIO;
+
+ /* Some devices choke if TF registers contain garbage. Make
+ * sure those are properly initialized.
+ */
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+
+ /* Device presence detection is unreliable on some
+ * controllers. Always poll IDENTIFY if available.
+ */
+ tf.flags |= ATA_TFLAG_POLLING;
+
+ if (ap->ops->read_id)
+ err_mask = ap->ops->read_id(dev, &tf, id);
+ else
+ err_mask = ata_do_dev_read_id(dev, &tf, id);
+
+ if (err_mask) {
+ if (err_mask & AC_ERR_NODEV_HINT) {
+ ata_dev_printk(dev, KERN_DEBUG,
+ "NODEV after polling detection\n");
+ return -ENOENT;
+ }
+
+ if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
+ /* Device or controller might have reported
+ * the wrong device class. Give a shot at the
+ * other IDENTIFY if the current one is
+ * aborted by the device.
+ */
+ if (may_fallback) {
+ may_fallback = 0;
+
+ if (class == ATA_DEV_ATA)
+ class = ATA_DEV_ATAPI;
+ else
+ class = ATA_DEV_ATA;
+ goto retry;
+ }
+
+ /* Control reaches here iff the device aborted
+ * both flavors of IDENTIFYs which happens
+ * sometimes with phantom devices.
+ */
+ ata_dev_printk(dev, KERN_DEBUG,
+ "both IDENTIFYs aborted, assuming NODEV\n");
+ return -ENOENT;
+ }
+
+ rc = -EIO;
+ reason = "I/O error";
+ goto err_out;
+ }
+
+ /* Falling back doesn't make sense if ID data was read
+ * successfully at least once.
+ */
+ may_fallback = 0;
+
+ swap_buf_le16(id, ATA_ID_WORDS);
+
+ /* sanity check */
+ rc = -EINVAL;
+ reason = "device reports invalid type";
+
+ if (class == ATA_DEV_ATA) {
+ if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
+ goto err_out;
+ } else {
+ if (ata_id_is_ata(id))
+ goto err_out;
+ }
+
+ if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
+ tried_spinup = 1;
+ /*
+ * Drive powered-up in standby mode, and requires a specific
+ * SET_FEATURES spin-up subcommand before it will accept
+ * anything other than the original IDENTIFY command.
+ */
+ err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
+ if (err_mask && id[2] != 0x738c) {
+ rc = -EIO;
+ reason = "SPINUP failed";
+ goto err_out;
+ }
+ /*
+ * If the drive initially returned incomplete IDENTIFY info,
+ * we now must reissue the IDENTIFY command.
+ */
+ if (id[2] == 0x37c8)
+ goto retry;
+ }
+
+ if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
+ /*
+ * The exact sequence expected by certain pre-ATA4 drives is:
+ * SRST RESET
+ * IDENTIFY (optional in early ATA)
+ * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
+ * anything else..
+ * Some drives were very specific about that exact sequence.
+ *
+ * Note that ATA4 says LBA is mandatory so the second check
+ * should never trigger.
+ */
+ if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
+ err_mask = ata_dev_init_params(dev, id[3], id[6]);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "INIT_DEV_PARAMS failed";
+ goto err_out;
+ }
+
+ /* current CHS translation info (id[53-58]) might be
+ * changed. reread the identify device info.
+ */
+ flags &= ~ATA_READID_POSTRESET;
+ goto retry;
+ }
+ }
+
+ *p_class = class;
+
+ return 0;
+
+ err_out:
+ if (ata_msg_warn(ap))
+ ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
+ "(%s, err_mask=0x%x)\n", reason, err_mask);
+ return rc;
+}
+
+static inline u8 ata_dev_knobble(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+
+ if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
+ return 0;
+
+ return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
+}
+
+static void ata_dev_config_ncq(struct ata_device *dev,
+ char *desc, size_t desc_sz)
+{
+ struct ata_port *ap = dev->link->ap;
+ int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
+
+ if (!ata_id_has_ncq(dev->id)) {
+ desc[0] = '\0';
+ return;
+ }
+ if (dev->horkage & ATA_HORKAGE_NONCQ) {
+ snprintf(desc, desc_sz, "NCQ (not used)");
+ return;
+ }
+ if (ap->flags & ATA_FLAG_NCQ) {
+ hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
+ dev->flags |= ATA_DFLAG_NCQ;
+ }
+
+ if (hdepth >= ddepth)
+ snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
+ else
+ snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
+}
+
+/**
+ * ata_dev_configure - Configure the specified ATA/ATAPI device
+ * @dev: Target device to configure
+ *
+ * Configure @dev according to @dev->id. Generic and low-level
+ * driver specific fixups are also applied.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise
+ */
+int ata_dev_configure(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+ const u16 *id = dev->id;
+ unsigned long xfer_mask;
+ char revbuf[7]; /* XYZ-99\0 */
+ char fwrevbuf[ATA_ID_FW_REV_LEN+1];
+ char modelbuf[ATA_ID_PROD_LEN+1];
+ int rc;
+
+ if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
+ ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
+ __func__);
+ return 0;
+ }
+
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
+
+ /* set horkage */
+ dev->horkage |= ata_dev_blacklisted(dev);
+ ata_force_horkage(dev);
+
+ if (dev->horkage & ATA_HORKAGE_DISABLE) {
+ ata_dev_printk(dev, KERN_INFO,
+ "unsupported device, disabling\n");
+ ata_dev_disable(dev);
+ return 0;
+ }
+
+ if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
+ dev->class == ATA_DEV_ATAPI) {
+ ata_dev_printk(dev, KERN_WARNING,
+ "WARNING: ATAPI is %s, device ignored.\n",
+ atapi_enabled ? "not supported with this driver"
+ : "disabled");
+ ata_dev_disable(dev);
+ return 0;
+ }
+
+ /* let ACPI work its magic */
+ rc = ata_acpi_on_devcfg(dev);
+ if (rc)
+ return rc;
+
+ /* massage HPA, do it early as it might change IDENTIFY data */
+ rc = ata_hpa_resize(dev);
+ if (rc)
+ return rc;
+
+ /* print device capabilities */
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG,
+ "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
+ "85:%04x 86:%04x 87:%04x 88:%04x\n",
+ __func__,
+ id[49], id[82], id[83], id[84],
+ id[85], id[86], id[87], id[88]);
+
+ /* initialize to-be-configured parameters */
+ dev->flags &= ~ATA_DFLAG_CFG_MASK;
+ dev->max_sectors = 0;
+ dev->cdb_len = 0;
+ dev->n_sectors = 0;
+ dev->cylinders = 0;
+ dev->heads = 0;
+ dev->sectors = 0;
+
+ /*
+ * common ATA, ATAPI feature tests
+ */
+
+ /* find max transfer mode; for printk only */
+ xfer_mask = ata_id_xfermask(id);
+
+ if (ata_msg_probe(ap))
+ ata_dump_id(id);
+
+ /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
+ ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
+ sizeof(fwrevbuf));
+
+ ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
+ sizeof(modelbuf));
+
+ /* ATA-specific feature tests */
+ if (dev->class == ATA_DEV_ATA) {
+ if (ata_id_is_cfa(id)) {
+ if (id[162] & 1) /* CPRM may make this media unusable */
+ ata_dev_printk(dev, KERN_WARNING,
+ "supports DRM functions and may "
+ "not be fully accessible.\n");
+ snprintf(revbuf, 7, "CFA");
+ } else {
+ snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
+ /* Warn the user if the device has TPM extensions */
+ if (ata_id_has_tpm(id))
+ ata_dev_printk(dev, KERN_WARNING,
+ "supports DRM functions and may "
+ "not be fully accessible.\n");
+ }
+
+ dev->n_sectors = ata_id_n_sectors(id);
+
+ if (dev->id[59] & 0x100)
+ dev->multi_count = dev->id[59] & 0xff;
+
+ if (ata_id_has_lba(id)) {
+ const char *lba_desc;
+ char ncq_desc[20];
+
+ lba_desc = "LBA";
+ dev->flags |= ATA_DFLAG_LBA;
+ if (ata_id_has_lba48(id)) {
+ dev->flags |= ATA_DFLAG_LBA48;
+ lba_desc = "LBA48";
+
+ if (dev->n_sectors >= (1UL << 28) &&
+ ata_id_has_flush_ext(id))
+ dev->flags |= ATA_DFLAG_FLUSH_EXT;
+ }
+
+ /* config NCQ */
+ ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+
+ /* print device info to dmesg */
+ if (ata_msg_drv(ap) && print_info) {
+ ata_dev_printk(dev, KERN_INFO,
+ "%s: %s, %s, max %s\n",
+ revbuf, modelbuf, fwrevbuf,
+ ata_mode_string(xfer_mask));
+ ata_dev_printk(dev, KERN_INFO,
+ "%Lu sectors, multi %u: %s %s\n",
+ (unsigned long long)dev->n_sectors,
+ dev->multi_count, lba_desc, ncq_desc);
+ }
+ } else {
+ /* CHS */
+
+ /* Default translation */
+ dev->cylinders = id[1];
+ dev->heads = id[3];
+ dev->sectors = id[6];
+
+ if (ata_id_current_chs_valid(id)) {
+ /* Current CHS translation is valid. */
+ dev->cylinders = id[54];
+ dev->heads = id[55];
+ dev->sectors = id[56];
+ }
+
+ /* print device info to dmesg */
+ if (ata_msg_drv(ap) && print_info) {
+ ata_dev_printk(dev, KERN_INFO,
+ "%s: %s, %s, max %s\n",
+ revbuf, modelbuf, fwrevbuf,
+ ata_mode_string(xfer_mask));
+ ata_dev_printk(dev, KERN_INFO,
+ "%Lu sectors, multi %u, CHS %u/%u/%u\n",
+ (unsigned long long)dev->n_sectors,
+ dev->multi_count, dev->cylinders,
+ dev->heads, dev->sectors);
+ }
+ }
+
+ dev->cdb_len = 16;
+ }
+
+ /* ATAPI-specific feature tests */
+ else if (dev->class == ATA_DEV_ATAPI) {
+ const char *cdb_intr_string = "";
+ const char *atapi_an_string = "";
+ const char *dma_dir_string = "";
+ u32 sntf;
+
+ rc = atapi_cdb_len(id);
+ if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
+ if (ata_msg_warn(ap))
+ ata_dev_printk(dev, KERN_WARNING,
+ "unsupported CDB len\n");
+ rc = -EINVAL;
+ goto err_out_nosup;
+ }
+ dev->cdb_len = (unsigned int) rc;
+
+ /* Enable ATAPI AN if both the host and device have
+ * the support. If PMP is attached, SNTF is required
+ * to enable ATAPI AN to discern between PHY status
+ * changed notifications and ATAPI ANs.
+ */
+ if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+ (!sata_pmp_attached(ap) ||
+ sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
+ unsigned int err_mask;
+
+ /* issue SET feature command to turn this on */
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_ENABLE, SATA_AN);
+ if (err_mask)
+ ata_dev_printk(dev, KERN_ERR,
+ "failed to enable ATAPI AN "
+ "(err_mask=0x%x)\n", err_mask);
+ else {
+ dev->flags |= ATA_DFLAG_AN;
+ atapi_an_string = ", ATAPI AN";
+ }
+ }
+
+ if (ata_id_cdb_intr(dev->id)) {
+ dev->flags |= ATA_DFLAG_CDB_INTR;
+ cdb_intr_string = ", CDB intr";
+ }
+
+ if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
+ dev->flags |= ATA_DFLAG_DMADIR;
+ dma_dir_string = ", DMADIR";
+ }
+
+ /* print device info to dmesg */
+ if (ata_msg_drv(ap) && print_info)
+ ata_dev_printk(dev, KERN_INFO,
+ "ATAPI: %s, %s, max %s%s%s%s\n",
+ modelbuf, fwrevbuf,
+ ata_mode_string(xfer_mask),
+ cdb_intr_string, atapi_an_string,
+ dma_dir_string);
+ }
+
+ /* determine max_sectors */
+ dev->max_sectors = ATA_MAX_SECTORS;
+ if (dev->flags & ATA_DFLAG_LBA48)
+ dev->max_sectors = ATA_MAX_SECTORS_LBA48;
+
+ if (!(dev->horkage & ATA_HORKAGE_IPM)) {
+ if (ata_id_has_hipm(dev->id))
+ dev->flags |= ATA_DFLAG_HIPM;
+ if (ata_id_has_dipm(dev->id))
+ dev->flags |= ATA_DFLAG_DIPM;
+ }
+
+ /* Limit PATA drive on SATA cable bridge transfers to udma5,
+ 200 sectors */
+ if (ata_dev_knobble(dev)) {
+ if (ata_msg_drv(ap) && print_info)
+ ata_dev_printk(dev, KERN_INFO,
+ "applying bridge limits\n");
+ dev->udma_mask &= ATA_UDMA5;
+ dev->max_sectors = ATA_MAX_SECTORS;
+ }
+
+ if ((dev->class == ATA_DEV_ATAPI) &&
+ (atapi_command_packet_set(id) == TYPE_TAPE)) {
+ dev->max_sectors = ATA_MAX_SECTORS_TAPE;
+ dev->horkage |= ATA_HORKAGE_STUCK_ERR;
+ }
+
+ if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
+ dev->max_sectors);
+
+ if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
+ dev->horkage |= ATA_HORKAGE_IPM;
+
+ /* reset link pm_policy for this port to no pm */
+ ap->pm_policy = MAX_PERFORMANCE;
+ }
+
+ if (ap->ops->dev_config)
+ ap->ops->dev_config(dev);
+
+ if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+ /* Let the user know. We don't want to disallow opens for
+ rescue purposes, or in case the vendor is just a blithering
+ idiot. Do this after the dev_config call as some controllers
+ with buggy firmware may want to avoid reporting false device
+ bugs */
+
+ if (print_info) {
+ ata_dev_printk(dev, KERN_WARNING,
+"Drive reports diagnostics failure. This may indicate a drive\n");
+ ata_dev_printk(dev, KERN_WARNING,
+"fault or invalid emulation. Contact drive vendor for information.\n");
+ }
+ }
+
+ if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
+ ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
+ "firmware update to be fully functional.\n");
+ ata_dev_printk(dev, KERN_WARNING, " contact the vendor "
+ "or visit http://ata.wiki.kernel.org.\n");
+ }
+
+ return 0;
+
+err_out_nosup:
+ if (ata_msg_probe(ap))
+ ata_dev_printk(dev, KERN_DEBUG,
+ "%s: EXIT, err\n", __func__);
+ return rc;
+}
+
+/**
+ * ata_cable_40wire - return 40 wire cable type
+ * @ap: port
+ *
+ * Helper method for drivers which want to hardwire 40 wire cable
+ * detection.
+ */
+
+int ata_cable_40wire(struct ata_port *ap)
+{
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * ata_cable_80wire - return 80 wire cable type
+ * @ap: port
+ *
+ * Helper method for drivers which want to hardwire 80 wire cable
+ * detection.
+ */
+
+int ata_cable_80wire(struct ata_port *ap)
+{
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * ata_cable_unknown - return unknown PATA cable.
+ * @ap: port
+ *
+ * Helper method for drivers which have no PATA cable detection.
+ */
+
+int ata_cable_unknown(struct ata_port *ap)
+{
+ return ATA_CBL_PATA_UNK;
+}
+
+/**
+ * ata_cable_ignore - return ignored PATA cable.
+ * @ap: port
+ *
+ * Helper method for drivers which don't use cable type to limit
+ * transfer mode.
+ */
+int ata_cable_ignore(struct ata_port *ap)
+{
+ return ATA_CBL_PATA_IGN;
+}
+
+/**
+ * ata_cable_sata - return SATA cable type
+ * @ap: port
+ *
+ * Helper method for drivers which have SATA cables
+ */
+
+int ata_cable_sata(struct ata_port *ap)
+{
+ return ATA_CBL_SATA;
+}
+
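+/* Usage sketch (hypothetical driver): the ata_cable_*() helpers above are
+ * meant to be plugged straight into struct ata_port_operations as the
+ * ->cable_detect() hook; foo_port_ops and its other members are
+ * illustrative.
+ *
+ *	static struct ata_port_operations foo_port_ops = {
+ *		.inherits	= &ata_bmdma_port_ops,
+ *		.cable_detect	= ata_cable_40wire,
+ *		.set_piomode	= foo_set_piomode,
+ *	};
+ */
+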
+/**
+ * ata_bus_probe - Reset and probe ATA bus
+ * @ap: Bus to probe
+ *
+ * Master ATA bus probing function. Initiates a hardware-dependent
+ * bus reset, then attempts to identify any devices found on
+ * the bus.
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * Zero on success, negative errno otherwise.
+ */
+
+int ata_bus_probe(struct ata_port *ap)
+{
+ unsigned int classes[ATA_MAX_DEVICES];
+ int tries[ATA_MAX_DEVICES];
+ int rc;
+ struct ata_device *dev;
+
+ ata_port_probe(ap);
+
+ ata_link_for_each_dev(dev, &ap->link)
+ tries[dev->devno] = ATA_PROBE_MAX_TRIES;
+
+ retry:
+ ata_link_for_each_dev(dev, &ap->link) {
+ /* If we issue an SRST then an ATA drive (not ATAPI)
+ * may change configuration and be in PIO0 timing. If
+ * we do a hard reset (or are coming from power on)
+ * this is true for ATA or ATAPI. Until we've set a
+ * suitable controller mode we should not touch the
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+ * touch the DMA setup as that will be dealt with when
+ * configuring devices.
+ */
+ if (ap->ops->set_piomode)
+ ap->ops->set_piomode(ap, dev);
+ }
+
+ /* reset and determine device classes */
+ ap->ops->phy_reset(ap);
+
+ ata_link_for_each_dev(dev, &ap->link) {
+ if (!(ap->flags & ATA_FLAG_DISABLED) &&
+ dev->class != ATA_DEV_UNKNOWN)
+ classes[dev->devno] = dev->class;
+ else
+ classes[dev->devno] = ATA_DEV_NONE;
+
+ dev->class = ATA_DEV_UNKNOWN;
+ }
+
+ ata_port_probe(ap);
+
+ /* read IDENTIFY page and configure devices. We have to do the identify
+ specific sequence bass-ackwards so that PDIAG- is released by
+ the slave device */
+
+ ata_link_for_each_dev_reverse(dev, &ap->link) {
+ if (tries[dev->devno])
+ dev->class = classes[dev->devno];
+
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
+ dev->id);
+ if (rc)
+ goto fail;
+ }
+
+ /* Now ask for the cable type as PDIAG- should have been released */
+ if (ap->ops->cable_detect)
+ ap->cbl = ap->ops->cable_detect(ap);
+
+ /* We may have SATA bridge glue hiding here irrespective of the
+ reported cable types and sensed types */
+ ata_link_for_each_dev(dev, &ap->link) {
+ if (!ata_dev_enabled(dev))
+ continue;
+ /* SATA drives indicate we have a bridge. We don't know which
+ end of the link the bridge is on, which is a problem */
+ if (ata_id_is_sata(dev->id))
+ ap->cbl = ATA_CBL_SATA;
+ }
+
+ /* After the identify sequence we can now set up the devices. We do
+ this in the normal order so that the user doesn't get confused */
+
+ ata_link_for_each_dev(dev, &ap->link) {
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
+ rc = ata_dev_configure(dev);
+ ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
+ if (rc)
+ goto fail;
+ }
+
+ /* configure transfer mode */
+ rc = ata_set_mode(&ap->link, &dev);
+ if (rc)
+ goto fail;
+
+ ata_link_for_each_dev(dev, &ap->link)
+ if (ata_dev_enabled(dev))
+ return 0;
+
+ /* no device present, disable port */
+ ata_port_disable(ap);
+ return -ENODEV;
+
+ fail:
+ tries[dev->devno]--;
+
+ switch (rc) {
+ case -EINVAL:
+ /* eeek, something went very wrong, give up */
+ tries[dev->devno] = 0;
+ break;
+
+ case -ENODEV:
+ /* give it just one more chance */
+ tries[dev->devno] = min(tries[dev->devno], 1);
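+ /* fall through */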
+ case -EIO:
+ if (tries[dev->devno] == 1) {
+ /* This is the last chance, better to slow
+ * down than lose it.
+ */
+ sata_down_spd_limit(&ap->link);
+ ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+ }
+ }
+
+ if (!tries[dev->devno])
+ ata_dev_disable(dev);
+
+ goto retry;
+}
+
+/**
+ * ata_port_probe - Mark port as enabled
+ * @ap: Port for which we indicate enablement
+ *
+ * Modify @ap data structure such that the system
+ * thinks that the entire port is enabled.
+ *
+ * LOCKING: host lock, or some other form of
+ * serialization.
+ */
+
+void ata_port_probe(struct ata_port *ap)
+{
+ ap->flags &= ~ATA_FLAG_DISABLED;
+}
+
+/**
+ * sata_print_link_status - Print SATA link status
+ * @link: SATA link to printk link status about
+ *
+ * This function prints link speed and status of a SATA link.
+ *
+ * LOCKING:
+ * None.
+ */
+static void sata_print_link_status(struct ata_link *link)
+{
+ u32 sstatus, scontrol, tmp;
+
+ if (sata_scr_read(link, SCR_STATUS, &sstatus))
+ return;
+ sata_scr_read(link, SCR_CONTROL, &scontrol);
+
+ if (ata_phys_link_online(link)) {
+ tmp = (sstatus >> 4) & 0xf;
+ ata_link_printk(link, KERN_INFO,
+ "SATA link up %s (SStatus %X SControl %X)\n",
+ sata_spd_string(tmp), sstatus, scontrol);
+ } else {
+ ata_link_printk(link, KERN_INFO,
+ "SATA link down (SStatus %X SControl %X)\n",
+ sstatus, scontrol);
+ }
+}
+
+/**
+ * ata_dev_pair - return other device on cable
+ * @adev: device
+ *
+ * Obtain the other device on the same cable; if none is
+ * present, NULL is returned.
+ */
+
+struct ata_device *ata_dev_pair(struct ata_device *adev)
+{
+ struct ata_link *link = adev->link;
+ struct ata_device *pair = &link->device[1 - adev->devno];
+ if (!ata_dev_enabled(pair))
+ return NULL;
+ return pair;
+}
+
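+/* Usage sketch (hypothetical driver): ata_dev_pair() helps when master and
+ * slave share one timing register and the LLD must honour both devices
+ * before reprogramming it.
+ *
+ *	u8 mode = adev->pio_mode;
+ *	struct ata_device *pair = ata_dev_pair(adev);
+ *
+ *	if (pair)
+ *		mode = min_t(u8, mode, pair->pio_mode);
+ */
+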
+/**
+ * ata_port_disable - Disable port.
+ * @ap: Port to be disabled.
+ *
+ * Modify @ap data structure such that the system
+ * thinks that the entire port is disabled, and should
+ * never attempt to probe or communicate with devices
+ * on this port.
+ *
+ * LOCKING: host lock, or some other form of
+ * serialization.
+ */
+
+void ata_port_disable(struct ata_port *ap)
+{
+ ap->link.device[0].class = ATA_DEV_NONE;
+ ap->link.device[1].class = ATA_DEV_NONE;
+ ap->flags |= ATA_FLAG_DISABLED;
+}
+
+/**
+ * sata_down_spd_limit - adjust SATA spd limit downward
+ * @link: Link to adjust SATA spd limit for
+ *
+ * Adjust SATA spd limit of @link downward. Note that this
+ * function only adjusts the limit. The change must be applied
+ * using sata_set_spd().
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure
+ */
+int sata_down_spd_limit(struct ata_link *link)
+{
+ u32 sstatus, spd, mask;
+ int rc, highbit;
+
+ if (!sata_scr_valid(link))
+ return -EOPNOTSUPP;
+
+ /* If SCR can be read, use it to determine the current SPD.
+ * If not, use cached value in link->sata_spd.
+ */
+ rc = sata_scr_read(link, SCR_STATUS, &sstatus);
+ if (rc == 0)
+ spd = (sstatus >> 4) & 0xf;
+ else
+ spd = link->sata_spd;
+
+ mask = link->sata_spd_limit;
+ if (mask <= 1)
+ return -EINVAL;
+
+ /* unconditionally mask off the highest bit */
+ highbit = fls(mask) - 1;
+ mask &= ~(1 << highbit);
+
+ /* Mask off all speeds higher than or equal to the current
+ * one. Force 1.5Gbps if current SPD is not available.
+ */
+ if (spd > 1)
+ mask &= (1 << (spd - 1)) - 1;
+ else
+ mask &= 1;
+
+ /* were we already at the bottom? */
+ if (!mask)
+ return -EINVAL;
+
+ link->sata_spd_limit = mask;
+
+ ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
+ sata_spd_string(fls(mask)));
+
+ return 0;
+}
+
+static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
+{
+ struct ata_link *host_link = &link->ap->link;
+ u32 limit, target, spd;
+
+ limit = link->sata_spd_limit;
+
+ /* Don't configure downstream link faster than upstream link.
+ * It doesn't speed up anything and some PMPs choke on such
+ * configuration.
+ */
+ if (!ata_is_host_link(link) && host_link->sata_spd)
+ limit &= (1 << host_link->sata_spd) - 1;
+
+ if (limit == UINT_MAX)
+ target = 0;
+ else
+ target = fls(limit);
+
+ spd = (*scontrol >> 4) & 0xf;
+ *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
+
+ return spd != target;
+}
+
+/**
+ * sata_set_spd_needed - is SATA spd configuration needed
+ * @link: Link in question
+ *
+ * Test whether the spd limit in SControl matches
+ * @link->sata_spd_limit. This function is used to determine
+ * whether hardreset is necessary to apply SATA spd
+ * configuration.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 1 if SATA spd configuration is needed, 0 otherwise.
+ */
+static int sata_set_spd_needed(struct ata_link *link)
+{
+ u32 scontrol;
+
+ if (sata_scr_read(link, SCR_CONTROL, &scontrol))
+ return 1;
+
+ return __sata_set_spd_needed(link, &scontrol);
+}
+
+/**
+ * sata_set_spd - set SATA spd according to spd limit
+ * @link: Link to set SATA spd for
+ *
+ * Set SATA spd of @link according to sata_spd_limit.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 if spd doesn't need to be changed, 1 if spd has been
+ * changed. Negative errno if SCR registers are inaccessible.
+ */
+int sata_set_spd(struct ata_link *link)
+{
+ u32 scontrol;
+ int rc;
+
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ return rc;
+
+ if (!__sata_set_spd_needed(link, &scontrol))
+ return 0;
+
+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+ return rc;
+
+ return 1;
+}
+
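+/* Usage sketch (an assumption about a typical EH flow, not code from this
+ * file): a lowered limit only takes hold once SControl is rewritten and
+ * the PHY renegotiates, so callers commonly pair the two helpers; ehc is
+ * assumed to be the link's eh_context.
+ *
+ *	sata_down_spd_limit(link);
+ *	rc = sata_set_spd(link);
+ *	if (rc == 1)
+ *		ehc->i.action |= ATA_EH_HARDRESET;
+ */
+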
+/*
+ * This mode timing computation functionality is ported over from
+ * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
+ */
+/*
+ * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
+ * These were taken from ATA/ATAPI-6 standard, rev 0a, except
+ * for UDMA6, which is currently supported only by Maxtor drives.
+ *
+ * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
+ */
+
+static const struct ata_timing ata_timing[] = {
+/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
+ { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
+ { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
+ { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
+ { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
+ { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
+ { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
+ { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
+
+ { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
+ { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
+ { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
+
+ { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
+ { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
+ { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
+ { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
+ { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
+
+/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
+ { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
+ { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
+ { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
+ { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
+ { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
+ { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
+ { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
+
+ { 0xFF }
+};
+
+#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
+#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
+
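+/* Worked example (illustrative): ENOUGH() is round-up division and EZ()
+ * keeps zero timings at zero. With a 30000 ps (~33 MHz) bus clock, a
+ * 290 ns timing quantizes to EZ(290 * 1000, 30000) =
+ * ENOUGH(290000, 30000) = 10 clock cycles.
+ */
+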
+static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
+{
+ q->setup = EZ(t->setup * 1000, T);
+ q->act8b = EZ(t->act8b * 1000, T);
+ q->rec8b = EZ(t->rec8b * 1000, T);
+ q->cyc8b = EZ(t->cyc8b * 1000, T);
+ q->active = EZ(t->active * 1000, T);
+ q->recover = EZ(t->recover * 1000, T);
+ q->cycle = EZ(t->cycle * 1000, T);
+ q->udma = EZ(t->udma * 1000, UT);
+}
+
+void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
+ struct ata_timing *m, unsigned int what)
+{
+ if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
+ if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
+ if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
+ if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
+ if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
+ if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
+ if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
+ if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
+}
+
+const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
+{
+ const struct ata_timing *t = ata_timing;
+
+ while (xfer_mode > t->mode)
+ t++;
+
+ if (xfer_mode == t->mode)
+ return t;
+ return NULL;
+}
+
+int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+ struct ata_timing *t, int T, int UT)
+{
+ const struct ata_timing *s;
+ struct ata_timing p;
+
+ /*
+ * Find the mode.
+ */
+
+ if (!(s = ata_timing_find_mode(speed)))
+ return -EINVAL;
+
+ memcpy(t, s, sizeof(*s));
+
+ /*
+ * If the drive is an EIDE drive, it can tell us it needs extended
+ * PIO/MW_DMA cycle timing.
+ */
+
+ if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
+ memset(&p, 0, sizeof(p));
+ if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
+ if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
+ else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
+ } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
+ p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
+ }
+ ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
+ }
+
+ /*
+ * Convert the timing to bus clock counts.
+ */
+
+ ata_timing_quantize(t, t, T, UT);
+
+ /*
+ * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
+ * S.M.A.R.T and some other commands. We have to ensure that the
+ * DMA cycle timing is no faster than the fastest PIO timing.
+ */
+
+ if (speed > XFER_PIO_6) {
+ ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
+ ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
+ }
+
+ /*
+ * Lengthen active & recovery time so that cycle time is correct.
+ */
+
+ if (t->act8b + t->rec8b < t->cyc8b) {
+ t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
+ t->rec8b = t->cyc8b - t->act8b;
+ }
+
+ if (t->active + t->recover < t->cycle) {
+ t->active += (t->cycle - (t->active + t->recover)) / 2;
+ t->recover = t->cycle - t->active;
+ }
+
+ /* In a few cases quantisation may produce enough rounding error
+ to leave t->cycle lower than the sum of active and recovery;
+ if so, correct it here. */
+ if (t->active + t->recover > t->cycle)
+ t->cycle = t->active + t->recover;
+
+ return 0;
+}
+
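+/* Usage sketch (hypothetical driver): T and UT are the bus clock periods
+ * in picoseconds; 30303 ps assumes a 33 MHz PIO clock and the UT value is
+ * likewise an assumption. foo_program_timing() is made up.
+ *
+ *	struct ata_timing t;
+ *
+ *	if (!ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000))
+ *		foo_program_timing(ap, adev->devno, t.active, t.recover);
+ */
+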
+/**
+ * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
+ * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
+ * @cycle: cycle duration in ns
+ *
+ * Return matching xfer mode for @cycle. The returned mode is of
+ * the transfer type specified by @xfer_shift. If @cycle is too
+ * slow for @xfer_shift, 0xff is returned. If @cycle is faster
+ * than the fastest known mode, the fastest mode is returned.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Matching xfer_mode, 0xff if no match found.
+ */
+u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
+{
+ u8 base_mode = 0xff, last_mode = 0xff;
+ const struct ata_xfer_ent *ent;
+ const struct ata_timing *t;
+
+ for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+ if (ent->shift == xfer_shift)
+ base_mode = ent->base;
+
+ for (t = ata_timing_find_mode(base_mode);
+ t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
+ unsigned short this_cycle;
+
+ switch (xfer_shift) {
+ case ATA_SHIFT_PIO:
+ case ATA_SHIFT_MWDMA:
+ this_cycle = t->cycle;
+ break;
+ case ATA_SHIFT_UDMA:
+ this_cycle = t->udma;
+ break;
+ default:
+ return 0xff;
+ }
+
+ if (cycle > this_cycle)
+ break;
+
+ last_mode = t->mode;
+ }
+
+ return last_mode;
+}
+
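+/* Worked example (from the table above): a 60 ns cycle fits UDMA0 (120),
+ * UDMA1 (80) and UDMA2 (60) but not UDMA3 (45), so
+ * ata_timing_cycle2mode(ATA_SHIFT_UDMA, 60) returns XFER_UDMA_2.
+ */
+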
+/**
+ * ata_down_xfermask_limit - adjust dev xfer masks downward
+ * @dev: Device to adjust xfer masks
+ * @sel: ATA_DNXFER_* selector
+ *
+ * Adjust xfer masks of @dev downward. Note that this function
+ * does not apply the change. Invoking ata_set_mode() afterwards
+ * will apply the limit.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure
+ */
+int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
+{
+ char buf[32];
+ unsigned long orig_mask, xfer_mask;
+ unsigned long pio_mask, mwdma_mask, udma_mask;
+ int quiet, highbit;
+
+ quiet = !!(sel & ATA_DNXFER_QUIET);
+ sel &= ~ATA_DNXFER_QUIET;
+
+ xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
+ dev->mwdma_mask,
+ dev->udma_mask);
+ ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
+
+ switch (sel) {
+ case ATA_DNXFER_PIO:
+ highbit = fls(pio_mask) - 1;
+ pio_mask &= ~(1 << highbit);
+ break;
+
+ case ATA_DNXFER_DMA:
+ if (udma_mask) {
+ highbit = fls(udma_mask) - 1;
+ udma_mask &= ~(1 << highbit);
+ if (!udma_mask)
+ return -ENOENT;
+ } else if (mwdma_mask) {
+ highbit = fls(mwdma_mask) - 1;
+ mwdma_mask &= ~(1 << highbit);
+ if (!mwdma_mask)
+ return -ENOENT;
+ }
+ break;
+
+ case ATA_DNXFER_40C:
+ udma_mask &= ATA_UDMA_MASK_40C;
+ break;
+
+ case ATA_DNXFER_FORCE_PIO0:
+ pio_mask &= 1;
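+ /* fall through */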
+ case ATA_DNXFER_FORCE_PIO:
+ mwdma_mask = 0;
+ udma_mask = 0;
+ break;
+
+ default:
+ BUG();
+ }
+
+ xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
+
+ if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
+ return -ENOENT;
+
+ if (!quiet) {
+ if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
+ snprintf(buf, sizeof(buf), "%s:%s",
+ ata_mode_string(xfer_mask),
+ ata_mode_string(xfer_mask & ATA_MASK_PIO));
+ else
+ snprintf(buf, sizeof(buf), "%s",
+ ata_mode_string(xfer_mask));
+
+ ata_dev_printk(dev, KERN_WARNING,
+ "limiting speed to %s\n", buf);
+ }
+
+ ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
+ &dev->udma_mask);
+
+ return 0;
+}
+
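+/* Usage note: ata_bus_probe()'s failure path above is a real caller -- on
+ * the last retry it slows things down before the next attempt, roughly:
+ *
+ *	sata_down_spd_limit(&ap->link);
+ *	ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+ *
+ * and the subsequent ata_set_mode() applies the lowered limit.
+ */
+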
+static int ata_dev_set_mode(struct ata_device *dev)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ const char *dev_err_whine = "";
+ int ign_dev_err = 0;
+ unsigned int err_mask;
+ int rc;
+
+ dev->flags &= ~ATA_DFLAG_PIO;
+ if (dev->xfer_shift == ATA_SHIFT_PIO)
+ dev->flags |= ATA_DFLAG_PIO;
+
+ err_mask = ata_dev_set_xfermode(dev);
+
+ if (err_mask & ~AC_ERR_DEV)
+ goto fail;
+
+ /* revalidate */
+ ehc->i.flags |= ATA_EHI_POST_SETMODE;
+ rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
+ ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
+ if (rc)
+ return rc;
+
+ if (dev->xfer_shift == ATA_SHIFT_PIO) {
+ /* Old CFA may refuse this command, which is just fine */
+ if (ata_id_is_cfa(dev->id))
+ ign_dev_err = 1;
+ /* Catch several broken garbage emulations plus some
+ pre-ATA devices */
+ if (ata_id_major_version(dev->id) == 0 &&
+ dev->pio_mode <= XFER_PIO_2)
+ ign_dev_err = 1;
+ /* Some very old devices and some bad newer ones fail
+ any kind of SET_XFERMODE request but support PIO0-2
+ timings and no IORDY */
+ if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
+ ign_dev_err = 1;
+ }
+ /* Early MWDMA devices do DMA but don't allow DMA mode setting.
+ Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
+ if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
+ dev->dma_mode == XFER_MW_DMA_0 &&
+ (dev->id[63] >> 8) & 1)
+ ign_dev_err = 1;
+
+ /* if the device is actually configured correctly, ignore dev err */
+ if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
+ ign_dev_err = 1;
+
+ if (err_mask & AC_ERR_DEV) {
+ if (!ign_dev_err)
+ goto fail;
+ else
+ dev_err_whine = " (device error ignored)";
+ }
+
+ DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
+ dev->xfer_shift, (int)dev->xfer_mode);
+
+ ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
+ ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
+ dev_err_whine);
+
+ return 0;
+
+ fail:
+ ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
+ "(err_mask=0x%x)\n", err_mask);
+ return -EIO;
+}
+
+/**
+ * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
+ * @link: link on which timings will be programmed
+ * @r_failed_dev: out parameter for failed device
+ *
+ * Standard implementation of the function used to tune and set
+ * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
+ * ata_dev_set_mode() fails, pointer to the failing device is
+ * returned in @r_failed_dev.
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * 0 on success, negative errno otherwise
+ */
+
+int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_device *dev;
+ int rc = 0, used_dma = 0, found = 0;
+
+ /* step 1: calculate xfer_mask */
+ ata_link_for_each_dev(dev, link) {
+ unsigned long pio_mask, dma_mask;
+ unsigned int mode_mask;
+
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ mode_mask = ATA_DMA_MASK_ATA;
+ if (dev->class == ATA_DEV_ATAPI)
+ mode_mask = ATA_DMA_MASK_ATAPI;
+ else if (ata_id_is_cfa(dev->id))
+ mode_mask = ATA_DMA_MASK_CFA;
+
+ ata_dev_xfermask(dev);
+ ata_force_xfermask(dev);
+
+ pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
+ dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+
+ if (libata_dma_mask & mode_mask)
+ dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+ else
+ dma_mask = 0;
+
+ dev->pio_mode = ata_xfer_mask2mode(pio_mask);
+ dev->dma_mode = ata_xfer_mask2mode(dma_mask);
+
+ found = 1;
+ if (ata_dma_enabled(dev))
+ used_dma = 1;
+ }
+ if (!found)
+ goto out;
+
+ /* step 2: always set host PIO timings */
+ ata_link_for_each_dev(dev, link) {
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ if (dev->pio_mode == 0xff) {
+ ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dev->xfer_mode = dev->pio_mode;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ if (ap->ops->set_piomode)
+ ap->ops->set_piomode(ap, dev);
+ }
+
+ /* step 3: set host DMA timings */
+ ata_link_for_each_dev(dev, link) {
+ if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev))
+ continue;
+
+ dev->xfer_mode = dev->dma_mode;
+ dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
+ if (ap->ops->set_dmamode)
+ ap->ops->set_dmamode(ap, dev);
+ }
+
+ /* step 4: update devices' xfer mode */
+ ata_link_for_each_dev(dev, link) {
+ /* don't update suspended devices' xfer mode */
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ rc = ata_dev_set_mode(dev);
+ if (rc)
+ goto out;
+ }
+
+ /* Record simplex status. If we selected DMA then the other
+ * host channels are not permitted to do so.
+ */
+ if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
+ ap->host->simplex_claimed = ap;
+
+ out:
+ if (rc)
+ *r_failed_dev = dev;
+ return rc;
+}
+
+/**
+ * ata_wait_ready - wait for link to become ready
+ * @link: link to be waited on
+ * @deadline: deadline jiffies for the operation
+ * @check_ready: callback to check link readiness
+ *
+ * Wait for @link to become ready. @check_ready should return
+ * positive number if @link is ready, 0 if it isn't, -ENODEV if
+ * link doesn't seem to be occupied, other errno for other error
+ * conditions.
+ *
+ * Transient -ENODEV conditions are allowed for
+ * ATA_TMOUT_FF_WAIT.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 if @link is ready before @deadline; otherwise, -errno.
+ */
+int ata_wait_ready(struct ata_link *link, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link))
+{
+ unsigned long start = jiffies;
+ unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+ int warned = 0;
+
+ /* Slave readiness can't be tested separately from master. On
+ * M/S emulation configuration, this function should be called
+ * only on the master and it will handle both master and slave.
+ */
+ WARN_ON(link == link->ap->slave_link);
+
+ if (time_after(nodev_deadline, deadline))
+ nodev_deadline = deadline;
+
+ while (1) {
+ unsigned long now = jiffies;
+ int ready, tmp;
+
+ ready = tmp = check_ready(link);
+ if (ready > 0)
+ return 0;
+
+ /* -ENODEV could be transient. Ignore -ENODEV if link
+ * is online. Also, some SATA devices take a long
+ * time to clear 0xff after reset. For example,
+ * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
+ * GoVault needs even more than that. Wait for
+ * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
+ *
+ * Note that some PATA controllers (pata_ali) explode
+ * if status register is read more than once when
+ * there's no device attached.
+ */
+ if (ready == -ENODEV) {
+ if (ata_link_online(link))
+ ready = 0;
+ else if ((link->ap->flags & ATA_FLAG_SATA) &&
+ !ata_link_offline(link) &&
+ time_before(now, nodev_deadline))
+ ready = 0;
+ }
+
+ if (ready)
+ return ready;
+ if (time_after(now, deadline))
+ return -EBUSY;
+
+ if (!warned && time_after(now, start + 5 * HZ) &&
+ (deadline - now > 3 * HZ)) {
+ ata_link_printk(link, KERN_WARNING,
+ "link is slow to respond, please be patient "
+ "(ready=%d)\n", tmp);
+ warned = 1;
+ }
+
+ msleep(50);
+ }
+}
+
+/**
+ * ata_wait_after_reset - wait for link to become ready after reset
+ * @link: link to be waited on
+ * @deadline: deadline jiffies for the operation
+ * @check_ready: callback to check link readiness
+ *
+ * Wait for @link to become ready after reset.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 if @link is ready before @deadline; otherwise, -errno.
+ */
+int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link))
+{
+ msleep(ATA_WAIT_AFTER_RESET);
+
+ return ata_wait_ready(link, deadline, check_ready);
+}
+
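+/* Usage sketch (hypothetical LLD): reset methods commonly finish with
+ * ata_wait_after_reset(), passing a controller-specific readiness test.
+ * foo_toggle_srst() and foo_check_ready() are made-up stand-ins.
+ *
+ *	static int foo_softreset(struct ata_link *link, unsigned int *class,
+ *				 unsigned long deadline)
+ *	{
+ *		foo_toggle_srst(link->ap);
+ *		return ata_wait_after_reset(link, deadline, foo_check_ready);
+ *	}
+ */
+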
+/**
+ * sata_link_debounce - debounce SATA phy status
+ * @link: ATA link to debounce SATA phy status for
+ * @params: timing parameters { interval, duration, timeout } in msec
+ * @deadline: deadline jiffies for the operation
+ *
+ * Make sure SStatus of @link reaches a stable state, determined by
+ * holding the same value where DET is not 1 for @duration polled
+ * every @interval, before @timeout. @timeout constrains the
+ * beginning of the stable state. Because DET gets stuck at 1 on
+ * some controllers after hot unplugging, this function waits
+ * until the timeout and then returns 0 if DET is stable at 1.
+ *
+ * @timeout is further limited by @deadline. The sooner of the
+ * two is used.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int sata_link_debounce(struct ata_link *link, const unsigned long *params,
+ unsigned long deadline)
+{
+ unsigned long interval = params[0];
+ unsigned long duration = params[1];
+ unsigned long last_jiffies, t;
+ u32 last, cur;
+ int rc;
+
+ t = ata_deadline(jiffies, params[2]);
+ if (time_before(t, deadline))
+ deadline = t;
+
+ if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
+ return rc;
+ cur &= 0xf;
+
+ last = cur;
+ last_jiffies = jiffies;
+
+ while (1) {
+ msleep(interval);
+ if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
+ return rc;
+ cur &= 0xf;
+
+ /* DET stable? */
+ if (cur == last) {
+ if (cur == 1 && time_before(jiffies, deadline))
+ continue;
+ if (time_after(jiffies,
+ ata_deadline(last_jiffies, duration)))
+ return 0;
+ continue;
+ }
+
+ /* unstable, start over */
+ last = cur;
+ last_jiffies = jiffies;
+
+ /* Check deadline. If debouncing failed, return
+ * -EPIPE to tell upper layer to lower link speed.
+ */
+ if (time_after(jiffies, deadline))
+ return -EPIPE;
+ }
+}
+
+/**
+ * sata_link_resume - resume SATA link
+ * @link: ATA link to resume SATA
+ * @params: timing parameters { interval, duration, timeout } in msec
+ * @deadline: deadline jiffies for the operation
+ *
+ * Resume SATA phy @link and debounce it.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int sata_link_resume(struct ata_link *link, const unsigned long *params,
+ unsigned long deadline)
+{
+ u32 scontrol, serror;
+ int rc;
+
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ return rc;
+
+ scontrol = (scontrol & 0x0f0) | 0x300;
+
+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+ return rc;
+
+ /* Some PHYs react badly if SStatus is pounded immediately
+ * after resuming. Delay 200ms before debouncing.
+ */
+ msleep(200);
+
+ if ((rc = sata_link_debounce(link, params, deadline)))
+ return rc;
+
+ /* clear SError, some PHYs require this even for SRST to work */
+ if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
+ rc = sata_scr_write(link, SCR_ERROR, serror);
+
+ return rc != -EINVAL ? rc : 0;
+}
+
+/**
+ * ata_std_prereset - prepare for reset
+ * @link: ATA link to be reset
+ * @deadline: deadline jiffies for the operation
+ *
+ * @link is about to be reset. Initialize it. Failure from
+ * prereset makes libata abort whole reset sequence and give up
+ * that port, so prereset should be best-effort. It does its
+ * best to prepare for reset sequence but if things go wrong, it
+ * should just whine, not fail.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_std_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &link->eh_context;
+ const unsigned long *timing = sata_ehc_deb_timing(ehc);
+ int rc;
+
+ /* if we're about to do hardreset, nothing more to do */
+ if (ehc->i.action & ATA_EH_HARDRESET)
+ return 0;
+
+ /* if SATA, resume link */
+ if (ap->flags & ATA_FLAG_SATA) {
+ rc = sata_link_resume(link, timing, deadline);
+ /* whine about phy resume failure but proceed */
+ if (rc && rc != -EOPNOTSUPP)
+ ata_link_printk(link, KERN_WARNING, "failed to resume "
+ "link for reset (errno=%d)\n", rc);
+ }
+
+ /* no point in trying softreset on offline link */
+ if (ata_phys_link_offline(link))
+ ehc->i.action &= ~ATA_EH_SOFTRESET;
+
+ return 0;
+}
+
+/**
+ * sata_link_hardreset - reset link via SATA phy reset
+ * @link: link to reset
+ * @timing: timing parameters { interval, duration, timeout } in msec
+ * @deadline: deadline jiffies for the operation
+ * @online: optional out parameter indicating link onlineness
+ * @check_ready: optional callback to check link readiness
+ *
+ * SATA phy-reset @link using DET bits of SControl register.
+ * After hardreset, link readiness is waited upon using
+ * ata_wait_ready() if @check_ready is specified. LLDs may omit
+ * @check_ready and do the waiting themselves after this
+ * function returns. Device classification is the LLD's
+ * responsibility.
+ *
+ * *@online is set to true iff reset succeeded and @link is online
+ * after reset.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
+ unsigned long deadline,
+ bool *online, int (*check_ready)(struct ata_link *))
+{
+ u32 scontrol;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ if (online)
+ *online = false;
+
+ if (sata_set_spd_needed(link)) {
+ /* SATA spec says nothing about how to reconfigure
+ * spd. To be on the safe side, turn off phy during
+ * reconfiguration. This works for at least ICH7 AHCI
+ * and Sil3124.
+ */
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ goto out;
+
+ scontrol = (scontrol & 0x0f0) | 0x304;
+
+ if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
+ goto out;
+
+ sata_set_spd(link);
+ }
+
+ /* issue phy wake/reset */
+ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
+ goto out;
+
+ scontrol = (scontrol & 0x0f0) | 0x301;
+
+ if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
+ goto out;
+
+ /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
+ * 10.4.2 says at least 1 ms.
+ */
+ msleep(1);
+
+ /* bring link back */
+ rc = sata_link_resume(link, timing, deadline);
+ if (rc)
+ goto out;
+ /* if link is offline nothing more to do */
+ if (ata_phys_link_offline(link))
+ goto out;
+
+ /* Link is online. From this point, -ENODEV too is an error. */
+ if (online)
+ *online = true;
+
+ if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
+ /* If PMP is supported, we have to do follow-up SRST.
+ * Some PMPs don't send D2H Reg FIS after hardreset if
+ * the first port is empty. Wait only for
+ * ATA_TMOUT_PMP_SRST_WAIT.
+ */
+ if (check_ready) {
+ unsigned long pmp_deadline;
+
+ pmp_deadline = ata_deadline(jiffies,
+ ATA_TMOUT_PMP_SRST_WAIT);
+ if (time_after(pmp_deadline, deadline))
+ pmp_deadline = deadline;
+ ata_wait_ready(link, pmp_deadline, check_ready);
+ }
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ if (check_ready)
+ rc = ata_wait_ready(link, deadline, check_ready);
+ out:
+ if (rc && rc != -EAGAIN) {
+ /* online is set iff link is online && reset succeeded */
+ if (online)
+ *online = false;
+ ata_link_printk(link, KERN_ERR,
+ "COMRESET failed (errno=%d)\n", rc);
+ }
+ DPRINTK("EXIT, rc=%d\n", rc);
+ return rc;
+}
+
+/**
+ * sata_std_hardreset - COMRESET w/o waiting or classification
+ * @link: link to reset
+ * @class: resulting class of attached device
+ * @deadline: deadline jiffies for the operation
+ *
+ * Standard SATA COMRESET w/o waiting or classification.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 if link offline, -EAGAIN if link online, -errno on errors.
+ */
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ bool online;
+ int rc;
+
+ /* do hardreset */
+ rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+ return online ? -EAGAIN : rc;
+}
+
+/**
+ * ata_std_postreset - standard postreset callback
+ * @link: the target ata_link
+ * @classes: classes of attached devices
+ *
+ * This function is invoked after a successful reset. Note that
+ * the device might have been reset more than once using
+ * different reset methods before postreset is invoked.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_std_postreset(struct ata_link *link, unsigned int *classes)
+{
+ u32 serror;
+
+ DPRINTK("ENTER\n");
+
+ /* reset complete, clear SError */
+ if (!sata_scr_read(link, SCR_ERROR, &serror))
+ sata_scr_write(link, SCR_ERROR, serror);
+
+ /* print link status */
+ sata_print_link_status(link);
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_dev_same_device - Determine whether new ID matches configured device
+ * @dev: device to compare against
+ * @new_class: class of the new device
+ * @new_id: IDENTIFY page of the new device
+ *
+ * Compare @new_class and @new_id against @dev and determine
+ * whether @dev is the device indicated by @new_class and
+ * @new_id.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * 1 if @dev matches @new_class and @new_id, 0 otherwise.
+ */
+static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
+ const u16 *new_id)
+{
+ const u16 *old_id = dev->id;
+ unsigned char model[2][ATA_ID_PROD_LEN + 1];
+ unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
+
+ if (dev->class != new_class) {
+ ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
+ dev->class, new_class);
+ return 0;
+ }
+
+ ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
+ ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
+ ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
+ ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
+
+ if (strcmp(model[0], model[1])) {
+ ata_dev_printk(dev, KERN_INFO, "model number mismatch "
+ "'%s' != '%s'\n", model[0], model[1]);
+ return 0;
+ }
+
+ if (strcmp(serial[0], serial[1])) {
+ ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
+ "'%s' != '%s'\n", serial[0], serial[1]);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * ata_dev_reread_id - Re-read IDENTIFY data
+ * @dev: target ATA device
+ * @readid_flags: read ID flags
+ *
+ * Re-read IDENTIFY page and make sure @dev is still attached to
+ * the port.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, negative errno otherwise
+ */
+int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
+{
+ unsigned int class = dev->class;
+ u16 *id = (void *)dev->link->ap->sector_buf;
+ int rc;
+
+ /* read ID data */
+ rc = ata_dev_read_id(dev, &class, readid_flags, id);
+ if (rc)
+ return rc;
+
+ /* is the device still there? */
+ if (!ata_dev_same_device(dev, class, id))
+ return -ENODEV;
+
+ memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
+ return 0;
+}
+
+/**
+ * ata_dev_revalidate - Revalidate ATA device
+ * @dev: device to revalidate
+ * @new_class: new class code
+ * @readid_flags: read ID flags
+ *
+ * Re-read IDENTIFY page, make sure @dev is still attached to the
+ * port and reconfigure it according to the new IDENTIFY page.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, negative errno otherwise
+ */
+int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
+ unsigned int readid_flags)
+{
+ u64 n_sectors = dev->n_sectors;
+ int rc;
+
+ if (!ata_dev_enabled(dev))
+ return -ENODEV;
+
+ /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
+ if (ata_class_enabled(new_class) &&
+ new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
+ ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
+ dev->class, new_class);
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ /* re-read ID */
+ rc = ata_dev_reread_id(dev, readid_flags);
+ if (rc)
+ goto fail;
+
+ /* configure device according to the new ID */
+ rc = ata_dev_configure(dev);
+ if (rc)
+ goto fail;
+
+ /* verify n_sectors hasn't changed */
+ if (dev->class == ATA_DEV_ATA && n_sectors &&
+ dev->n_sectors != n_sectors) {
+ ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
+ "%llu != %llu\n",
+ (unsigned long long)n_sectors,
+ (unsigned long long)dev->n_sectors);
+
+ /* restore original n_sectors */
+ dev->n_sectors = n_sectors;
+
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ return 0;
+
+ fail:
+ ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
+ return rc;
+}
+
+struct ata_blacklist_entry {
+ const char *model_num;
+ const char *model_rev;
+ unsigned long horkage;
+};
+
+static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ /* Devices with DMA related problems under Linux */
+ { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
+ { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
+ { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
+ { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
+ { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
+ { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
+ { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
+ { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
+ { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
+ { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
+ { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
+ { "CRD-84", NULL, ATA_HORKAGE_NODMA },
+ { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
+ { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
+ { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
+ { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
+ { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
+ { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
+ { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
+ { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
+ { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
+ { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
+ { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
+ { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
+ { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
+ { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
+ { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
+ { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
+ { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
+ { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
+ /* Odd clown on sil3726/4726 PMPs */
+ { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
+
+ /* Weird ATAPI devices */
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
+ { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
+
+ /* Devices we expect to fail diagnostics */
+
+ /* Devices where NCQ should be avoided */
+ /* NCQ is slow */
+ { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
+ { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
+ /* http://thread.gmane.org/gmane.linux.ide/14907 */
+ { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
+ /* NCQ is broken */
+ { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
+ { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
+ { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
+ { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
+
+ /* Seagate NCQ + FLUSH CACHE firmware bug */
+ { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+ { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
+ /* Blacklist entries taken from Silicon Image 3124/3132
+ Windows driver .inf file - also several Linux problem reports */
+ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
+ { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
+
+ /* devices which puke on READ_NATIVE_MAX */
+ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
+ { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
+ { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
+ { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
+
+ /* Devices which report 1 sector over size HPA */
+ { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
+ { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
+ { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
+
+ /* Devices which get the IVB wrong */
+ { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
+ /* Maybe we should just blacklist TSSTcorp... */
+ { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
+
+ /* Devices that do not need bridging limits applied */
+ { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
+
+ /* End Marker */
+ { }
+};
+
+static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
+{
+ const char *p;
+ int len;
+
+ /*
+ * check for trailing wildcard: *\0
+ */
+ p = strchr(patt, wildchar);
+ if (p && ((*(p + 1)) == 0))
+ len = p - patt;
+ else {
+ len = strlen(name);
+ if (!len) {
+ if (!*patt)
+ return 0;
+ return -1;
+ }
+ }
+
+ return strncmp(patt, name, len);
+}
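+
+/*
+ * Illustrative examples of strn_pattern_cmp() semantics (annotation,
+ * not part of the original file): a trailing wildcard limits the
+ * comparison to the prefix before it; otherwise the whole @name is
+ * compared.
+ *
+ * strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0 (match)
+ * strn_pattern_cmp("ST340823A", "ST340823A", '*') == 0 (match)
+ * strn_pattern_cmp("ST340823A", "ST320413A", '*') != 0
+ */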
+
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
+{
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+ unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
+ const struct ata_blacklist_entry *ad = ata_device_blacklist;
+
+ ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+ ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
+
+ while (ad->model_num) {
+ if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
+ if (ad->model_rev == NULL)
+ return ad->horkage;
+ if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
+ return ad->horkage;
+ }
+ ad++;
+ }
+ return 0;
+}
+
+static int ata_dma_blacklisted(const struct ata_device *dev)
+{
+ /* We don't support polling DMA. Blacklist DMA for those ATAPI
+ * devices with CDB-intr (and use PIO) if the LLDD handles
+ * interrupts only in the HSM_ST_LAST state.
+ */
+ if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
+ (dev->flags & ATA_DFLAG_CDB_INTR))
+ return 1;
+ return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
+}
+
+/**
+ * ata_is_40wire - check drive side detection
+ * @dev: device
+ *
+ * Perform drive side detection decoding, allowing for device vendors
+ * who can't follow the documentation.
+ */
+
+static int ata_is_40wire(struct ata_device *dev)
+{
+ if (dev->horkage & ATA_HORKAGE_IVB)
+ return ata_drive_40wire_relaxed(dev->id);
+ return ata_drive_40wire(dev->id);
+}
+
+/**
+ * cable_is_40wire - 40/80/SATA decider
+ * @ap: port to consider
+ *
+ * This function encapsulates the policy for speed management
+ * in one place. At the moment we don't cache the result but
+ * there is a good case for setting ap->cbl to the result when
+ * we are called with unknown cables (and figuring out if it
+ * impacts hotplug at all).
+ *
+ * Return 1 if the cable appears to be 40 wire.
+ */
+
+static int cable_is_40wire(struct ata_port *ap)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+
+ /* If the controller thinks we are 40 wire, we are. */
+ if (ap->cbl == ATA_CBL_PATA40)
+ return 1;
+
+ /* If the controller thinks we are 80 wire, we are. */
+ if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
+ return 0;
+
+ /* If the system is known to be 40 wire short cable (e.g. a
+ * laptop), then we allow 80 wire modes even if the drive
+ * isn't sure.
+ */
+ if (ap->cbl == ATA_CBL_PATA40_SHORT)
+ return 0;
+
+ /* If the controller doesn't know, we scan.
+ *
+ * Note: We look for all 40 wire detects at this point. Any
+ * 80 wire detect is taken to be 80 wire cable because
+ * - in many setups only the one drive (slave if present) will
+ * give a valid detect
+ * - if you have a non detect capable drive you don't want it
+ * to colour the choice
+ */
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ if (ata_dev_enabled(dev) && !ata_is_40wire(dev))
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/**
+ * ata_dev_xfermask - Compute supported xfermask of the given device
+ * @dev: Device to compute xfermask for
+ *
+ * Compute supported xfermask of @dev and store it in
+ * dev->*_mask. This function is responsible for applying all
+ * known limits including host controller limits, device
+ * blacklist, etc...
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_dev_xfermask(struct ata_device *dev)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ata_host *host = ap->host;
+ unsigned long xfer_mask;
+
+ /* controller modes available */
+ xfer_mask = ata_pack_xfermask(ap->pio_mask,
+ ap->mwdma_mask, ap->udma_mask);
+
+ /* drive modes available */
+ xfer_mask &= ata_pack_xfermask(dev->pio_mask,
+ dev->mwdma_mask, dev->udma_mask);
+ xfer_mask &= ata_id_xfermask(dev->id);
+
+ /*
+ * CFA Advanced TrueIDE timings are not allowed on a shared
+ * cable
+ */
+ if (ata_dev_pair(dev)) {
+ /* No PIO5 or PIO6 */
+ xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
+ /* No MWDMA3 or MWDMA 4 */
+ xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
+ }
+
+ if (ata_dma_blacklisted(dev)) {
+ xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+ ata_dev_printk(dev, KERN_WARNING,
+ "device is on DMA blacklist, disabling DMA\n");
+ }
+
+ if ((host->flags & ATA_HOST_SIMPLEX) &&
+ host->simplex_claimed && host->simplex_claimed != ap) {
+ xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+ ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
+ "other device, disabling DMA\n");
+ }
+
+ if (ap->flags & ATA_FLAG_NO_IORDY)
+ xfer_mask &= ata_pio_mask_no_iordy(dev);
+
+ if (ap->ops->mode_filter)
+ xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
+
+ /* Apply cable rule here. Don't apply it early because when
+ * we handle hot plug the cable type can itself change.
+ * Check this last so that we know if the transfer rate was
+ * solely limited by the cable.
+ * Unknown or 80 wire cables reported host side are checked
+ * drive side as well. Cases where we know a 40wire cable
+ * is used safely for 80 are not checked here.
+ */
+ if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
+ /* UDMA/44 or higher would be available */
+ if (cable_is_40wire(ap)) {
+ ata_dev_printk(dev, KERN_WARNING,
+ "limited to UDMA/33 due to 40-wire cable\n");
+ xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+ }
+
+ ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
+ &dev->mwdma_mask, &dev->udma_mask);
+}
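+
+/*
+ * A minimal sketch of the packed xfermask layout used above
+ * (example_xfermask_layout is hypothetical, not part of the original
+ * file): PIO, MWDMA and UDMA mode bits occupy disjoint ranges starting
+ * at ATA_SHIFT_PIO, ATA_SHIFT_MWDMA and ATA_SHIFT_UDMA, which is why
+ * 0xF8 << ATA_SHIFT_UDMA selects UDMA3 (UDMA/44) and above.
+ */
+#if 0 /* illustrative only, not compiled */
+static void example_xfermask_layout(void)
+{
+ /* pack PIO0-4, MWDMA0-2 and UDMA0-5 into one mask */
+ unsigned long xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
+ unsigned long pio, mwdma, udma;
+
+ /* strip UDMA3 and above, as the 40-wire check above does */
+ xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+
+ ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
+ /* udma is now 0x07: UDMA0-2, i.e. capped at UDMA/33 */
+}
+#endif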
+
+/**
+ * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
+ * @dev: Device to which command will be sent
+ *
+ * Issue SET FEATURES - XFER MODE command to device @dev
+ * on port @ap.
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask otherwise.
+ */
+
+static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /* set up set-features taskfile */
+ DPRINTK("set features - xfer mode\n");
+
+ /* Some controllers and ATAPI devices show flaky interrupt
+ * behavior after setting xfer mode. Use polling instead.
+ */
+ ata_tf_init(dev, &tf);
+ tf.command = ATA_CMD_SET_FEATURES;
+ tf.feature = SETFEATURES_XFER;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
+ tf.protocol = ATA_PROT_NODATA;
+ /* If we are using IORDY we must send the mode setting command */
+ if (ata_pio_need_iordy(dev))
+ tf.nsect = dev->xfer_mode;
+ /* If the device has IORDY and the controller does not - turn it off */
+ else if (ata_id_has_iordy(dev->id))
+ tf.nsect = 0x01;
+ else /* In the ancient relic department - skip all of this */
+ return 0;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+
+ DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ return err_mask;
+}
+/**
+ * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
+ * @dev: Device to which command will be sent
+ * @enable: Whether to enable or disable the feature
+ * @feature: Feature to set, passed in the sector count field
+ *
+ * Issue SET FEATURES - SATA FEATURES command to device @dev
+ * on port @ap with the sector count set to @feature.
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask otherwise.
+ */
+static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
+ u8 feature)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /* set up set-features taskfile */
+ DPRINTK("set features - SATA features\n");
+
+ ata_tf_init(dev, &tf);
+ tf.command = ATA_CMD_SET_FEATURES;
+ tf.feature = enable;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.nsect = feature;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+
+ DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ return err_mask;
+}
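+
+/*
+ * Example (annotation, not in the original): elsewhere in libata the
+ * SATA Asynchronous Notification feature is enabled with
+ *
+ *	ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
+ *
+ * and DIPM link power management is toggled the same way with
+ * SATA_DIPM.
+ */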
+
+/**
+ * ata_dev_init_params - Issue INIT DEV PARAMS command
+ * @dev: Device to which command will be sent
+ * @heads: Number of heads (taskfile parameter)
+ * @sectors: Number of sectors (taskfile parameter)
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask otherwise.
+ */
+static unsigned int ata_dev_init_params(struct ata_device *dev,
+ u16 heads, u16 sectors)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /* Number of sectors per track 1-255. Number of heads 1-16 */
+ if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
+ return AC_ERR_INVALID;
+
+ /* set up init dev params taskfile */
+ DPRINTK("init dev params \n");
+
+ ata_tf_init(dev, &tf);
+ tf.command = ATA_CMD_INIT_DEV_PARAMS;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.nsect = sectors;
+ tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ /* A clean abort indicates an original or just-out-of-spec drive,
+ and we should continue, as we issue the setup based on the
+ drive's reported working geometry */
+ if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
+ err_mask = 0;
+
+ DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ return err_mask;
+}
+
+/**
+ * ata_sg_clean - Unmap DMA memory associated with command
+ * @qc: Command containing DMA memory to be released
+ *
+ * Unmap all mapped DMA memory associated with this command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_sg_clean(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scatterlist *sg = qc->sg;
+ int dir = qc->dma_dir;
+
+ WARN_ON(sg == NULL);
+
+ VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+
+ if (qc->n_elem)
+ dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
+
+ qc->flags &= ~ATA_QCFLAG_DMAMAP;
+ qc->sg = NULL;
+}
+
+/**
+ * atapi_check_dma - Check whether ATAPI DMA can be supported
+ * @qc: Metadata associated with taskfile to check
+ *
+ * Allow low-level driver to filter ATA PACKET commands, returning
+ * a status indicating whether or not it is OK to use DMA for the
+ * supplied PACKET command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS: 0 when ATAPI DMA can be used
+ * nonzero otherwise
+ */
+int atapi_check_dma(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
+ * few ATAPI devices choke on such DMA requests.
+ */
+ if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
+ unlikely(qc->nbytes & 15))
+ return 1;
+
+ if (ap->ops->check_atapi_dma)
+ return ap->ops->check_atapi_dma(qc);
+
+ return 0;
+}
+
+/**
+ * ata_std_qc_defer - Check whether a qc needs to be deferred
+ * @qc: ATA command in question
+ *
+ * Non-NCQ commands cannot run with any other command, NCQ or
+ * not. As the upper layer only knows the queue depth, we are
+ * responsible for maintaining exclusion. This function checks
+ * whether a new command @qc can be issued.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * ATA_DEFER_* if deferring is needed, 0 otherwise.
+ */
+int ata_std_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_link *link = qc->dev->link;
+
+ if (qc->tf.protocol == ATA_PROT_NCQ) {
+ if (!ata_tag_valid(link->active_tag))
+ return 0;
+ } else {
+ if (!ata_tag_valid(link->active_tag) && !link->sactive)
+ return 0;
+ }
+
+ return ATA_DEFER_LINK;
+}
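+
+/*
+ * The exclusion rule above, summarized (annotation, not in the
+ * original). link->active_tag tracks the active non-NCQ command and
+ * link->sactive the active NCQ tags:
+ *
+ *	new qc		link state		result
+ *	NCQ		no non-NCQ active	issue
+ *	NCQ		non-NCQ active		ATA_DEFER_LINK
+ *	non-NCQ		nothing active		issue
+ *	non-NCQ		anything active		ATA_DEFER_LINK
+ */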
+
+void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+
+/**
+ * ata_sg_init - Associate command with scatter-gather table.
+ * @qc: Command to be associated
+ * @sg: Scatter-gather table.
+ * @n_elem: Number of elements in s/g table.
+ *
+ * Initialize the data-related elements of queued_cmd @qc
+ * to point to a scatter-gather table @sg, containing @n_elem
+ * elements.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
+ unsigned int n_elem)
+{
+ qc->sg = sg;
+ qc->n_elem = n_elem;
+ qc->cursg = qc->sg;
+}
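+
+/*
+ * A minimal usage sketch (example_sg_init and its arguments are
+ * hypothetical, not part of the original file): attach a
+ * single-element scatterlist to a qc; ata_sg_setup() later DMA-maps it.
+ */
+#if 0 /* illustrative only, not compiled */
+static void example_sg_init(struct ata_queued_cmd *qc,
+ struct scatterlist *sg, void *buf, u32 len)
+{
+ sg_init_one(sg, buf, len); /* one segment covering buf */
+ ata_sg_init(qc, sg, 1);
+ qc->nbytes = len; /* total payload length */
+ qc->dma_dir = DMA_FROM_DEVICE; /* e.g. a read from the device */
+}
+#endif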
+
+/**
+ * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
+ * @qc: Command with scatter-gather table to be mapped.
+ *
+ * DMA-map the scatter-gather table associated with queued_cmd @qc.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Zero on success, negative on error.
+ *
+ */
+static int ata_sg_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int n_elem;
+
+ VPRINTK("ENTER, ata%u\n", ap->print_id);
+
+ n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
+ if (n_elem < 1)
+ return -1;
+
+ DPRINTK("%d sg elements mapped\n", n_elem);
+
+ qc->n_elem = n_elem;
+ qc->flags |= ATA_QCFLAG_DMAMAP;
+
+ return 0;
+}
+
+/**
+ * swap_buf_le16 - swap halves of 16-bit words in place
+ * @buf: Buffer to swap
+ * @buf_words: Number of 16-bit words in buffer.
+ *
+ * Swap halves of 16-bit words if needed to convert from
+ * little-endian byte order to native cpu byte order, or
+ * vice-versa.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void swap_buf_le16(u16 *buf, unsigned int buf_words)
+{
+#ifdef __BIG_ENDIAN
+ unsigned int i;
+
+ for (i = 0; i < buf_words; i++)
+ buf[i] = le16_to_cpu(buf[i]);
+#endif /* __BIG_ENDIAN */
+}
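+
+/*
+ * Typical use (annotation, not in the original): IDENTIFY data arrives
+ * as 256 little-endian words, so PIO paths call
+ *
+ *	swap_buf_le16(id, ATA_ID_WORDS);
+ *
+ * to convert it to CPU byte order; on little-endian machines this
+ * compiles to a no-op.
+ */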
+
+/**
+ * ata_qc_new - Request an available ATA command, for queueing
+ * @ap: target port
+ *
+ * LOCKING:
+ * None.
+ */
+
+static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc = NULL;
+ unsigned int i;
+
+ /* no command while frozen */
+ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+ return NULL;
+
+ /* the last tag is reserved for the internal command. */
+ for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
+ if (!test_and_set_bit(i, &ap->qc_allocated)) {
+ qc = __ata_qc_from_tag(ap, i);
+ break;
+ }
+
+ if (qc)
+ qc->tag = i;
+
+ return qc;
+}
+
+/**
+ * ata_qc_new_init - Request an available ATA command, and initialize it
+ * @dev: Device from whom we request an available command structure
+ *
+ * LOCKING:
+ * None.
+ */
+
+struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_new(ap);
+ if (qc) {
+ qc->scsicmd = NULL;
+ qc->ap = ap;
+ qc->dev = dev;
+
+ ata_qc_reinit(qc);
+ }
+
+ return qc;
+}
+
+/**
+ * ata_qc_free - free unused ata_queued_cmd
+ * @qc: Command to complete
+ *
+ * Designed to free unused ata_queued_cmd object
+ * in case something prevents using it.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_qc_free(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int tag;
+
+ WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+
+ qc->flags = 0;
+ tag = qc->tag;
+ if (likely(ata_tag_valid(tag))) {
+ qc->tag = ATA_TAG_POISON;
+ clear_bit(tag, &ap->qc_allocated);
+ }
+}
+
+void __ata_qc_complete(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_link *link = qc->dev->link;
+
+ WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+ WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+
+ if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
+ ata_sg_clean(qc);
+
+ /* command should be marked inactive atomically with qc completion */
+ if (qc->tf.protocol == ATA_PROT_NCQ) {
+ link->sactive &= ~(1 << qc->tag);
+ if (!link->sactive)
+ ap->nr_active_links--;
+ } else {
+ link->active_tag = ATA_TAG_POISON;
+ ap->nr_active_links--;
+ }
+
+ /* clear exclusive status */
+ if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
+ ap->excl_link == link))
+ ap->excl_link = NULL;
+
+ /* atapi: mark qc as inactive to prevent the interrupt handler
+ * from completing the command twice later, before the error handler
+ * is called. (when rc != 0 and atapi request sense is needed)
+ */
+ qc->flags &= ~ATA_QCFLAG_ACTIVE;
+ ap->qc_active &= ~(1 << qc->tag);
+
+ /* call completion callback */
+ qc->complete_fn(qc);
+}
+
+static void fill_result_tf(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ qc->result_tf.flags = qc->tf.flags;
+ ap->ops->qc_fill_rtf(qc);
+}
+
+static void ata_verify_xfer(struct ata_queued_cmd *qc)
+{
+ struct ata_device *dev = qc->dev;
+
+ if (ata_tag_internal(qc->tag))
+ return;
+
+ if (ata_is_nodata(qc->tf.protocol))
+ return;
+
+ if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
+ return;
+
+ dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
+}
+
+/**
+ * ata_qc_complete - Complete an active ATA command
+ * @qc: Command to complete
+ *
+ * Indicate to the mid and upper layers that an ATA
+ * command has completed, with either an ok or not-ok status.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_qc_complete(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* XXX: New EH and old EH use different mechanisms to
+ * synchronize EH with regular execution path.
+ *
+ * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
+ * Normal execution path is responsible for not accessing a
+ * failed qc. libata core enforces the rule by returning NULL
+ * from ata_qc_from_tag() for failed qcs.
+ *
+ * Old EH depends on ata_qc_complete() nullifying completion
+ * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
+ * not synchronize with interrupt handler. Only PIO task is
+ * taken care of.
+ */
+ if (ap->ops->error_handler) {
+ struct ata_device *dev = qc->dev;
+ struct ata_eh_info *ehi = &dev->link->eh_info;
+
+ WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
+
+ if (unlikely(qc->err_mask))
+ qc->flags |= ATA_QCFLAG_FAILED;
+
+ if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
+ if (!ata_tag_internal(qc->tag)) {
+ /* always fill result TF for failed qc */
+ fill_result_tf(qc);
+ ata_qc_schedule_eh(qc);
+ return;
+ }
+ }
+
+ /* read result TF if requested */
+ if (qc->flags & ATA_QCFLAG_RESULT_TF)
+ fill_result_tf(qc);
+
+ /* Some commands need post-processing after successful
+ * completion.
+ */
+ switch (qc->tf.command) {
+ case ATA_CMD_SET_FEATURES:
+ if (qc->tf.feature != SETFEATURES_WC_ON &&
+ qc->tf.feature != SETFEATURES_WC_OFF)
+ break;
+ /* fall through */
+ case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
+ case ATA_CMD_SET_MULTI: /* multi_count changed */
+ /* revalidate device */
+ ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
+ ata_port_schedule_eh(ap);
+ break;
+
+ case ATA_CMD_SLEEP:
+ dev->flags |= ATA_DFLAG_SLEEPING;
+ break;
+ }
+
+ if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
+ ata_verify_xfer(qc);
+
+ __ata_qc_complete(qc);
+ } else {
+ if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
+ return;
+
+ /* read result TF if failed or requested */
+ if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
+ fill_result_tf(qc);
+
+ __ata_qc_complete(qc);
+ }
+}
+
+/**
+ * ata_qc_complete_multiple - Complete multiple qcs successfully
+ * @ap: port in question
+ * @qc_active: new qc_active mask
+ *
+ * Complete in-flight commands. This function is meant to be
+ * called from the low-level driver's interrupt routine to complete
+ * requests normally. ap->qc_active and @qc_active are compared
+ * and commands are completed accordingly.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Number of completed commands on success, -errno otherwise.
+ */
+int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
+{
+ int nr_done = 0;
+ u32 done_mask;
+ int i;
+
+ done_mask = ap->qc_active ^ qc_active;
+
+ if (unlikely(done_mask & qc_active)) {
+ ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
+ "(%08x->%08x)\n", ap->qc_active, qc_active);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ struct ata_queued_cmd *qc;
+
+ if (!(done_mask & (1 << i)))
+ continue;
+
+ if ((qc = ata_qc_from_tag(ap, i))) {
+ ata_qc_complete(qc);
+ nr_done++;
+ }
+ }
+
+ return nr_done;
+}
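+
+/*
+ * A minimal sketch of the intended calling pattern (example_lld_irq
+ * and the mmio_active_tags() helper are hypothetical): the LLD reads
+ * the still-active tag mask from its hardware and lets libata complete
+ * every command that has dropped out of it.
+ */
+#if 0 /* illustrative only, not compiled */
+static void example_lld_irq(struct ata_port *ap)
+{
+ u32 qc_active = mmio_active_tags(ap); /* hypothetical register read */
+
+ if (ata_qc_complete_multiple(ap, qc_active) < 0)
+ ata_port_freeze(ap); /* illegal transition, punt to EH */
+}
+#endif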
+
+/**
+ * ata_qc_issue - issue taskfile to device
+ * @qc: command to issue to device
+ *
+ * Prepare an ATA command for submission to the device.
+ * This includes mapping the data into a DMA-able
+ * area, filling in the S/G table, and finally
+ * writing the taskfile to hardware, starting the command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_link *link = qc->dev->link;
+ u8 prot = qc->tf.protocol;
+
+ /* Make sure only one non-NCQ command is outstanding. The
+ * check is skipped for old EH because it reuses active qc to
+ * request ATAPI sense.
+ */
+ WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
+
+ if (ata_is_ncq(prot)) {
+ WARN_ON(link->sactive & (1 << qc->tag));
+
+ if (!link->sactive)
+ ap->nr_active_links++;
+ link->sactive |= 1 << qc->tag;
+ } else {
+ WARN_ON(link->sactive);
+
+ ap->nr_active_links++;
+ link->active_tag = qc->tag;
+ }
+
+ qc->flags |= ATA_QCFLAG_ACTIVE;
+ ap->qc_active |= 1 << qc->tag;
+
+ /* We guarantee to LLDs that they will have at least one
+ * non-zero sg if the command is a data command.
+ */
+ BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+
+ if (ata_is_dma(prot) || (ata_is_pio(prot) &&
+ (ap->flags & ATA_FLAG_PIO_DMA)))
+ if (ata_sg_setup(qc))
+ goto sg_err;
+
+ /* if device is sleeping, schedule reset and abort the link */
+ if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
+ link->eh_info.action |= ATA_EH_RESET;
+ ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
+ ata_link_abort(link);
+ return;
+ }
+
+ ap->ops->qc_prep(qc);
+
+ qc->err_mask |= ap->ops->qc_issue(qc);
+ if (unlikely(qc->err_mask))
+ goto err;
+ return;
+
+sg_err:
+ qc->err_mask |= AC_ERR_SYSTEM;
+err:
+ ata_qc_complete(qc);
+}
+
+/**
+ * sata_scr_valid - test whether SCRs are accessible
+ * @link: ATA link to test SCR accessibility for
+ *
+ * Test whether SCRs are accessible for @link.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * 1 if SCRs are accessible, 0 otherwise.
+ */
+int sata_scr_valid(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+
+ return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
+}
+
+/**
+ * sata_scr_read - read SCR register of the specified port
+ * @link: ATA link to read SCR for
+ * @reg: SCR to read
+ * @val: Place to store read value
+ *
+ * Read SCR register @reg of @link into *@val. This function is
+ * guaranteed to succeed if @link is ap->link, the cable type of
+ * the port is SATA and the port implements ->scr_read.
+ *
+ * LOCKING:
+ * None if @link is ap->link. Kernel thread context otherwise.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure.
+ */
+int sata_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+ if (ata_is_host_link(link)) {
+ if (sata_scr_valid(link))
+ return link->ap->ops->scr_read(link, reg, val);
+ return -EOPNOTSUPP;
+ }
+
+ return sata_pmp_scr_read(link, reg, val);
+}
+
+/**
+ * sata_scr_write - write SCR register of the specified port
+ * @link: ATA link to write SCR for
+ * @reg: SCR to write
+ * @val: value to write
+ *
+ * Write @val to SCR register @reg of @link. This function is
+ * guaranteed to succeed if @link is ap->link, the cable type of
+ * the port is SATA and the port implements ->scr_write.
+ *
+ * LOCKING:
+ * None if @link is ap->link. Kernel thread context otherwise.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure.
+ */
+int sata_scr_write(struct ata_link *link, int reg, u32 val)
+{
+ if (ata_is_host_link(link)) {
+ if (sata_scr_valid(link))
+ return link->ap->ops->scr_write(link, reg, val);
+ return -EOPNOTSUPP;
+ }
+
+ return sata_pmp_scr_write(link, reg, val);
+}
+
+/**
+ * sata_scr_write_flush - write SCR register of the specified port and flush
+ * @link: ATA link to write SCR for
+ * @reg: SCR to write
+ * @val: value to write
+ *
+ * This function is identical to sata_scr_write() except that it
+ * performs a flush after writing to the register.
+ *
+ * LOCKING:
+ * None if @link is ap->link. Kernel thread context otherwise.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure.
+ */
+int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
+{
+ if (ata_is_host_link(link)) {
+ int rc;
+
+ if (sata_scr_valid(link)) {
+ rc = link->ap->ops->scr_write(link, reg, val);
+ if (rc == 0)
+ rc = link->ap->ops->scr_read(link, reg, &val);
+ return rc;
+ }
+ return -EOPNOTSUPP;
+ }
+
+ return sata_pmp_scr_write(link, reg, val);
+}
+
+/**
+ * ata_phys_link_online - test whether the given link is online
+ * @link: ATA link to test
+ *
+ * Test whether @link is online. Note that this function returns
+ * 0 if online status of @link cannot be obtained, so
+ * ata_link_online(link) != !ata_link_offline(link).
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * True if the port online status is available and online.
+ */
+bool ata_phys_link_online(struct ata_link *link)
+{
+ u32 sstatus;
+
+ if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
+ (sstatus & 0xf) == 0x3)
+ return true;
+ return false;
+}
+
+/**
+ * ata_phys_link_offline - test whether the given link is offline
+ * @link: ATA link to test
+ *
+ * Test whether @link is offline. Note that this function
+ * returns 0 if offline status of @link cannot be obtained, so
+ * ata_link_online(link) != !ata_link_offline(link).
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * True if the port offline status is available and offline.
+ */
+bool ata_phys_link_offline(struct ata_link *link)
+{
+ u32 sstatus;
+
+ if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
+ (sstatus & 0xf) != 0x3)
+ return true;
+ return false;
+}
+
+/**
+ * ata_link_online - test whether the given link is online
+ * @link: ATA link to test
+ *
+ * Test whether @link is online. This is identical to
+ * ata_phys_link_online() when there's no slave link. When
+ * there's a slave link, this function should only be called on
+ * the master link and will return true if any of M/S links is
+ * online.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * True if the port online status is available and online.
+ */
+bool ata_link_online(struct ata_link *link)
+{
+ struct ata_link *slave = link->ap->slave_link;
+
+ WARN_ON(link == slave); /* shouldn't be called on slave link */
+
+ return ata_phys_link_online(link) ||
+ (slave && ata_phys_link_online(slave));
+}
+
+/**
+ * ata_link_offline - test whether the given link is offline
+ * @link: ATA link to test
+ *
+ * Test whether @link is offline. This is identical to
+ * ata_phys_link_offline() when there's no slave link. When
+ * there's a slave link, this function should only be called on
+ * the master link and will return true if both M/S links are
+ * offline.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * True if the port offline status is available and offline.
+ */
+bool ata_link_offline(struct ata_link *link)
+{
+ struct ata_link *slave = link->ap->slave_link;
+
+ WARN_ON(link == slave); /* shouldn't be called on slave link */
+
+ return ata_phys_link_offline(link) &&
+ (!slave || ata_phys_link_offline(slave));
+}
+
+#ifdef CONFIG_PM
+static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
+ unsigned int action, unsigned int ehi_flags,
+ int wait)
+{
+ unsigned long flags;
+ int i, rc;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ struct ata_link *link;
+
+ /* Previous resume operation might still be in
+ * progress. Wait for PM_PENDING to clear.
+ */
+ if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ }
+
+ /* request PM ops to EH */
+ spin_lock_irqsave(ap->lock, flags);
+
+ ap->pm_mesg = mesg;
+ if (wait) {
+ rc = 0;
+ ap->pm_result = &rc;
+ }
+
+ ap->pflags |= ATA_PFLAG_PM_PENDING;
+ __ata_port_for_each_link(link, ap) {
+ link->eh_info.action |= action;
+ link->eh_info.flags |= ehi_flags;
+ }
+
+ ata_port_schedule_eh(ap);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* wait and check result */
+ if (wait) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ata_host_suspend - suspend host
+ * @host: host to suspend
+ * @mesg: PM message
+ *
+ * Suspend @host. Actual operation is performed by EH. This
+ * function requests EH to perform PM operations and waits for EH
+ * to finish.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+{
+ int rc;
+
+ /*
+ * disable link pm on all ports before requesting
+ * any pm activity
+ */
+ ata_lpm_enable(host);
+
+ rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+ if (rc == 0)
+ host->dev->power.power_state = mesg;
+ return rc;
+}
+
+/**
+ * ata_host_resume - resume host
+ * @host: host to resume
+ *
+ * Resume @host. Actual operation is performed by EH. This
+ * function requests EH to perform PM operations and returns.
+ * Note that all resume operations are performed in parallel.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_host_resume(struct ata_host *host)
+{
+ ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
+ ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
+ host->dev->power.power_state = PMSG_ON;
+
+ /* reenable link pm */
+ ata_lpm_disable(host);
+}
+#endif
+
+/**
+ * ata_port_start - Set port up for dma.
+ * @ap: Port to initialize
+ *
+ * Called just after data structures for each port are
+ * initialized. Allocates space for PRD table.
+ *
+ * May be used as the port_start() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+int ata_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->dev;
+
+ ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
+ GFP_KERNEL);
+ if (!ap->prd)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ata_dev_init - Initialize an ata_device structure
+ * @dev: Device structure to initialize
+ *
+ * Initialize @dev in preparation for probing.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_dev_init(struct ata_device *dev)
+{
+ struct ata_link *link = ata_dev_phys_link(dev);
+ struct ata_port *ap = link->ap;
+ unsigned long flags;
+
+ /* SATA spd limit is bound to the attached device, reset together */
+ link->sata_spd_limit = link->hw_sata_spd_limit;
+ link->sata_spd = 0;
+
+ /* High bits of dev->flags are used to record warm plug
+ * requests which occur asynchronously. Synchronize using
+ * host lock.
+ */
+ spin_lock_irqsave(ap->lock, flags);
+ dev->flags &= ~ATA_DFLAG_INIT_MASK;
+ dev->horkage = 0;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
+ sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
+ dev->pio_mask = UINT_MAX;
+ dev->mwdma_mask = UINT_MAX;
+ dev->udma_mask = UINT_MAX;
+}
+
+/**
+ * ata_link_init - Initialize an ata_link structure
+ * @ap: ATA port link is attached to
+ * @link: Link structure to initialize
+ * @pmp: Port multiplier port number
+ *
+ * Initialize @link.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
+{
+ int i;
+
+ /* clear everything except for devices */
+ memset(link, 0, offsetof(struct ata_link, device[0]));
+
+ link->ap = ap;
+ link->pmp = pmp;
+ link->active_tag = ATA_TAG_POISON;
+ link->hw_sata_spd_limit = UINT_MAX;
+
+ /* can't use iterator, ap isn't initialized yet */
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ struct ata_device *dev = &link->device[i];
+
+ dev->link = link;
+ dev->devno = dev - link->device;
+ ata_dev_init(dev);
+ }
+}
+
+/**
+ * sata_link_init_spd - Initialize link->sata_spd_limit
+ * @link: Link to configure sata_spd_limit for
+ *
+ * Initialize @link->[hw_]sata_spd_limit to the currently
+ * configured value.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int sata_link_init_spd(struct ata_link *link)
+{
+ u8 spd;
+ int rc;
+
+ rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
+ if (rc)
+ return rc;
+
+ spd = (link->saved_scontrol >> 4) & 0xf;
+ if (spd)
+ link->hw_sata_spd_limit &= (1 << spd) - 1;
+
+ ata_force_link_limits(link);
+
+ link->sata_spd_limit = link->hw_sata_spd_limit;
+
+ return 0;
+}
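+
+/*
+ * Worked example (annotation, not in the original): if the SPD field
+ * of SControl reads 2 (limit to Gen2), (1 << 2) - 1 = 0x3 masks
+ * hw_sata_spd_limit down to bits 0-1, i.e. 1.5 and 3.0 Gbps remain
+ * allowed.
+ */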
+
+/**
+ * ata_port_alloc - allocate and initialize basic ATA port resources
+ * @host: ATA host this allocated port belongs to
+ *
+ * Allocate and initialize basic ATA port resources.
+ *
+ * RETURNS:
+ * Allocate ATA port on success, NULL on failure.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ */
+struct ata_port *ata_port_alloc(struct ata_host *host)
+{
+ struct ata_port *ap;
+
+ DPRINTK("ENTER\n");
+
+ ap = kzalloc(sizeof(*ap), GFP_KERNEL);
+ if (!ap)
+ return NULL;
+
+ ap->pflags |= ATA_PFLAG_INITIALIZING;
+ ap->lock = &host->lock;
+ ap->flags = ATA_FLAG_DISABLED;
+ ap->print_id = -1;
+ ap->ctl = ATA_DEVCTL_OBS;
+ ap->host = host;
+ ap->dev = host->dev;
+ ap->last_ctl = 0xFF;
+
+#if defined(ATA_VERBOSE_DEBUG)
+ /* turn on all debugging levels */
+ ap->msg_enable = 0x00FF;
+#elif defined(ATA_DEBUG)
+ ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
+#else
+ ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
+#endif
+
+#ifdef CONFIG_ATA_SFF
+ INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
+#else
+ INIT_DELAYED_WORK(&ap->port_task, NULL);
+#endif
+ INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+ INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
+ INIT_LIST_HEAD(&ap->eh_done_q);
+ init_waitqueue_head(&ap->eh_wait_q);
+ init_completion(&ap->park_req_pending);
+ init_timer_deferrable(&ap->fastdrain_timer);
+ ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
+ ap->fastdrain_timer.data = (unsigned long)ap;
+
+ ap->cbl = ATA_CBL_NONE;
+
+ ata_link_init(ap, &ap->link, 0);
+
+#ifdef ATA_IRQ_TRAP
+ ap->stats.unhandled_irq = 1;
+ ap->stats.idle_irq = 1;
+#endif
+ return ap;
+}
+
+static void ata_host_release(struct device *gendev, void *res)
+{
+ struct ata_host *host = dev_get_drvdata(gendev);
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (!ap)
+ continue;
+
+ if (ap->scsi_host)
+ scsi_host_put(ap->scsi_host);
+
+ kfree(ap->pmp_link);
+ kfree(ap->slave_link);
+ kfree(ap);
+ host->ports[i] = NULL;
+ }
+
+ dev_set_drvdata(gendev, NULL);
+}
+
+/**
+ * ata_host_alloc - allocate and init basic ATA host resources
+ * @dev: generic device this host is associated with
+ * @max_ports: maximum number of ATA ports associated with this host
+ *
+ * Allocate and initialize basic ATA host resources. LLD calls
+ * this function to allocate a host, initializes it fully and
+ * attaches it using ata_host_register().
+ *
+ * @max_ports ports are allocated and host->n_ports is
+ * initialized to @max_ports. The caller is allowed to decrease
+ * host->n_ports before calling ata_host_register(). The unused
+ * ports will be automatically freed on registration.
+ *
+ * RETURNS:
+ * Allocate ATA host on success, NULL on failure.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ */
+struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+{
+ struct ata_host *host;
+ size_t sz;
+ int i;
+
+ DPRINTK("ENTER\n");
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ return NULL;
+
+ /* alloc a container for our list of ATA ports (buses) */
+ sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
+ host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
+ if (!host)
+ goto err_out;
+
+ devres_add(dev, host);
+ dev_set_drvdata(dev, host);
+
+ spin_lock_init(&host->lock);
+ host->dev = dev;
+ host->n_ports = max_ports;
+
+ /* allocate ports bound to this host */
+ for (i = 0; i < max_ports; i++) {
+ struct ata_port *ap;
+
+ ap = ata_port_alloc(host);
+ if (!ap)
+ goto err_out;
+
+ ap->port_no = i;
+ host->ports[i] = ap;
+ }
+
+ devres_remove_group(dev, NULL);
+ return host;
+
+ err_out:
+ devres_release_group(dev, NULL);
+ return NULL;
+}
+
+/**
+ * ata_host_alloc_pinfo - alloc host and init with port_info array
+ * @dev: generic device this host is associated with
+ * @ppi: array of ATA port_info to initialize host with
+ * @n_ports: number of ATA ports attached to this host
+ *
+ * Allocate ATA host and initialize with info from @ppi. If NULL
+ * terminated, @ppi may contain fewer entries than @n_ports; the
+ * last entry will then be used for the remaining ports.
+ *
+ * RETURNS:
+ * Allocate ATA host on success, NULL on failure.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ */
+struct ata_host *ata_host_alloc_pinfo(struct device *dev,
+ const struct ata_port_info * const * ppi,
+ int n_ports)
+{
+ const struct ata_port_info *pi;
+ struct ata_host *host;
+ int i, j;
+
+ host = ata_host_alloc(dev, n_ports);
+ if (!host)
+ return NULL;
+
+ for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ppi[j])
+ pi = ppi[j++];
+
+ ap->pio_mask = pi->pio_mask;
+ ap->mwdma_mask = pi->mwdma_mask;
+ ap->udma_mask = pi->udma_mask;
+ ap->flags |= pi->flags;
+ ap->link.flags |= pi->link_flags;
+ ap->ops = pi->port_ops;
+
+ if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
+ host->ops = pi->port_ops;
+ }
+
+ return host;
+}
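+
+/*
+ * A minimal sketch (example_pi and example_alloc are hypothetical, not
+ * part of the original file): because @ppi may be NULL terminated, a
+ * controller with two identical ports needs only one ata_port_info
+ * entry.
+ */
+#if 0 /* illustrative only, not compiled */
+static const struct ata_port_info example_pi = {
+ .flags = ATA_FLAG_SATA,
+ .pio_mask = 0x1f, /* PIO0-4 */
+ .udma_mask = ATA_UDMA6, /* up to UDMA/133 */
+ .port_ops = &ata_dummy_port_ops, /* stand-in ops */
+};
+
+static struct ata_host *example_alloc(struct device *dev)
+{
+ const struct ata_port_info *ppi[] = { &example_pi, NULL };
+
+ /* both ports get example_pi; the NULL ends the array early */
+ return ata_host_alloc_pinfo(dev, ppi, 2);
+}
+#endif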
+
+/**
+ * ata_slave_link_init - initialize slave link
+ * @ap: port to initialize slave link for
+ *
+ * Create and initialize slave link for @ap. This enables slave
+ * link handling on the port.
+ *
+ * In libata, a port contains links and a link contains devices.
+ * There is a single host link but if a PMP is attached to it,
+ * there can be multiple fan-out links. On SATA, there's usually
+ * a single device connected to a link, but PATA and SATA
+ * controllers emulating a TF based interface can have two - master
+ * and slave.
+ *
+ * However, there are a few controllers which don't fit into this
+ * abstraction too well - SATA controllers which emulate TF
+ * interface with both master and slave devices but also have
+ * separate SCR register sets for each device. These controllers
+ * need separate links for physical link handling
+ * (e.g. onlineness, link speed) but should be treated like a
+ * traditional M/S controller for everything else (e.g. command
+ * issue, softreset).
+ *
+ * slave_link is libata's way of handling this class of
+ * controllers without impacting core layer too much. For
+ * anything other than physical link handling, the default host
+ * link is used for both master and slave. For physical link
+ * handling, separate @ap->slave_link is used. All dirty details
+ * are implemented inside libata core layer. From LLD's POV, the
+ * only difference is that prereset, hardreset and postreset are
+ * called once more for the slave link, so the reset sequence
+ * looks like the following.
+ *
+ * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
+ * softreset(M) -> postreset(M) -> postreset(S)
+ *
+ * Note that softreset is called only for the master. Softreset
+ * resets both M/S by definition, so SRST on master should handle
+ * both (the standard method will work just fine).
+ *
+ * LOCKING:
+ * Should be called before host is registered.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int ata_slave_link_init(struct ata_port *ap)
+{
+ struct ata_link *link;
+
+ WARN_ON(ap->slave_link);
+ WARN_ON(ap->flags & ATA_FLAG_PMP);
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ ata_link_init(ap, link, 1);
+ ap->slave_link = link;
+ return 0;
+}
+
+static void ata_host_stop(struct device *gendev, void *res)
+{
+ struct ata_host *host = dev_get_drvdata(gendev);
+ int i;
+
+ WARN_ON(!(host->flags & ATA_HOST_STARTED));
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ }
+
+ if (host->ops->host_stop)
+ host->ops->host_stop(host);
+}
+
+/**
+ * ata_finalize_port_ops - finalize ata_port_operations
+ * @ops: ata_port_operations to finalize
+ *
+ * An ata_port_operations can inherit from another ops and that
+ * ops can again inherit from another. This can go on as many
+ * times as necessary as long as there is no loop in the
+ * inheritance chain.
+ *
+ * Ops tables are finalized when the host is started. NULL or
+ * unspecified entries are inherited from the closest ancestor
+ * which has the method and the entry is populated with it.
+ * After finalization, the ops table directly points to all the
+ * methods and ->inherits is no longer necessary and cleared.
+ *
+ * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_finalize_port_ops(struct ata_port_operations *ops)
+{
+ static DEFINE_SPINLOCK(lock);
+ const struct ata_port_operations *cur;
+ void **begin = (void **)ops;
+ void **end = (void **)&ops->inherits;
+ void **pp;
+
+ if (!ops || !ops->inherits)
+ return;
+
+ spin_lock(&lock);
+
+ for (cur = ops->inherits; cur; cur = cur->inherits) {
+ void **inherit = (void **)cur;
+
+ for (pp = begin; pp < end; pp++, inherit++)
+ if (!*pp)
+ *pp = *inherit;
+ }
+
+ for (pp = begin; pp < end; pp++)
+ if (IS_ERR(*pp))
+ *pp = NULL;
+
+ ops->inherits = NULL;
+
+ spin_unlock(&lock);
+}
+
+/**
+ * ata_host_start - start and freeze ports of an ATA host
+ * @host: ATA host to start ports for
+ *
+ * Start and then freeze ports of @host. Started status is
+ * recorded in host->flags, so this function can be called
+ * multiple times. Ports are guaranteed to get started only
+ * once. If host->ops isn't initialized yet, it's set to the
+ * first non-dummy port ops.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 if all ports are started successfully, -errno otherwise.
+ */
+int ata_host_start(struct ata_host *host)
+{
+ int have_stop = 0;
+ void *start_dr = NULL;
+ int i, rc;
+
+ if (host->flags & ATA_HOST_STARTED)
+ return 0;
+
+ ata_finalize_port_ops(host->ops);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_finalize_port_ops(ap->ops);
+
+ if (!host->ops && !ata_port_is_dummy(ap))
+ host->ops = ap->ops;
+
+ if (ap->ops->port_stop)
+ have_stop = 1;
+ }
+
+ if (host->ops->host_stop)
+ have_stop = 1;
+
+ if (have_stop) {
+ start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
+ if (!start_dr)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ap->ops->port_start) {
+ rc = ap->ops->port_start(ap);
+ if (rc) {
+ if (rc != -ENODEV)
+ dev_printk(KERN_ERR, host->dev,
+ "failed to start port %d "
+ "(errno=%d)\n", i, rc);
+ goto err_out;
+ }
+ }
+ ata_eh_freeze_port(ap);
+ }
+
+ if (start_dr)
+ devres_add(host->dev, start_dr);
+ host->flags |= ATA_HOST_STARTED;
+ return 0;
+
+ err_out:
+ while (--i >= 0) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ }
+ devres_free(start_dr);
+ return rc;
+}
+
+/**
+ * ata_sas_host_init - Initialize a host struct
+ * @host: host to initialize
+ * @dev: device host is attached to
+ * @flags: host flags
+ * @ops: port_ops
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ */
+/* KILLME - the only user left is ipr */
+void ata_host_init(struct ata_host *host, struct device *dev,
+ unsigned long flags, struct ata_port_operations *ops)
+{
+ spin_lock_init(&host->lock);
+ host->dev = dev;
+ host->flags = flags;
+ host->ops = ops;
+}
+
+/**
+ * ata_host_register - register initialized ATA host
+ * @host: ATA host to register
+ * @sht: template for SCSI host
+ *
+ * Register initialized ATA host. @host is allocated using
+ * ata_host_alloc() and fully initialized by LLD. This function
+ * starts ports, registers @host with ATA and SCSI layers and
+ * probes registered devices.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+{
+ int i, rc;
+
+ /* host must have been started */
+ if (!(host->flags & ATA_HOST_STARTED)) {
+ dev_printk(KERN_ERR, host->dev,
+ "BUG: trying to register unstarted host\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Blow away unused ports. This happens when LLD can't
+ * determine the exact number of ports to allocate at
+ * allocation time.
+ */
+ for (i = host->n_ports; host->ports[i]; i++)
+ kfree(host->ports[i]);
+
+ /* give ports names and add SCSI hosts */
+ for (i = 0; i < host->n_ports; i++)
+ host->ports[i]->print_id = ata_print_id++;
+
+ rc = ata_scsi_add_hosts(host, sht);
+ if (rc)
+ return rc;
+
+ /* associate with ACPI nodes */
+ ata_acpi_associate(host);
+
+ /* set cable, sata_spd_limit and report */
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ unsigned long xfer_mask;
+
+ /* set SATA cable type if still unset */
+ if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
+ ap->cbl = ATA_CBL_SATA;
+
+ /* init sata_spd_limit to the current value */
+ sata_link_init_spd(&ap->link);
+ if (ap->slave_link)
+ sata_link_init_spd(ap->slave_link);
+
+ /* print per-port info to dmesg */
+ xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
+ ap->udma_mask);
+
+ if (!ata_port_is_dummy(ap)) {
+ ata_port_printk(ap, KERN_INFO,
+ "%cATA max %s %s\n",
+ (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
+ ata_mode_string(xfer_mask),
+ ap->link.eh_info.desc);
+ ata_ehi_clear_desc(&ap->link.eh_info);
+ } else
+ ata_port_printk(ap, KERN_INFO, "DUMMY\n");
+ }
+
+ /* perform each probe synchronously */
+ DPRINTK("probe begin\n");
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ /* probe */
+ if (ap->ops->error_handler) {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ unsigned long flags;
+
+ ata_port_probe(ap);
+
+ /* kick EH for boot probing */
+ spin_lock_irqsave(ap->lock, flags);
+
+ ehi->probe_mask |= ATA_ALL_DEVICES;
+ ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
+ ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
+
+ ap->pflags &= ~ATA_PFLAG_INITIALIZING;
+ ap->pflags |= ATA_PFLAG_LOADING;
+ ata_port_schedule_eh(ap);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* wait for EH to finish */
+ ata_port_wait_eh(ap);
+ } else {
+ DPRINTK("ata%u: bus probe begin\n", ap->print_id);
+ rc = ata_bus_probe(ap);
+ DPRINTK("ata%u: bus probe end\n", ap->print_id);
+
+ if (rc) {
+ /* FIXME: do something useful here?
+ * Current libata behavior will
+ * tear down everything when
+ * the module is removed
+ * or the h/w is unplugged.
+ */
+ }
+ }
+ }
+
+ /* probes are done, now scan each port's disk(s) */
+ DPRINTK("host probe begin\n");
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_scsi_scan_host(ap, 1);
+ }
+
+ return 0;
+}
+
+/**
+ * ata_host_activate - start host, request IRQ and register it
+ * @host: target ATA host
+ * @irq: IRQ to request
+ * @irq_handler: irq_handler used when requesting IRQ
+ * @irq_flags: irq_flags used when requesting IRQ
+ * @sht: scsi_host_template to use when registering the host
+ *
+ * After allocating an ATA host and initializing it, most libata
+ * LLDs perform three steps to activate the host - start host,
+ * request IRQ and register it. This helper takes the necessary
+ * arguments and performs the three steps in one go.
+ *
+ * An invalid IRQ skips the IRQ registration and expects the host to
+ * have set polling mode on the port. In this case, @irq_handler
+ * should be NULL.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_host_activate(struct ata_host *host, int irq,
+ irq_handler_t irq_handler, unsigned long irq_flags,
+ struct scsi_host_template *sht)
+{
+ int i, rc;
+
+ rc = ata_host_start(host);
+ if (rc)
+ return rc;
+
+ /* Special case for polling mode */
+ if (!irq) {
+ WARN_ON(irq_handler);
+ return ata_host_register(host, sht);
+ }
+
+ rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
+ dev_driver_string(host->dev), host);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < host->n_ports; i++)
+ ata_port_desc(host->ports[i], "irq %d", irq);
+
+ rc = ata_host_register(host, sht);
+ /* if failed, just free the IRQ and leave ports alone */
+ if (rc)
+ devm_free_irq(host->dev, irq, host);
+
+ return rc;
+}
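+
+/*
+ * A minimal activation sketch (example_interrupt, example_sht and
+ * example_probe_tail are hypothetical): most LLD probe routines end
+ * like this once the host is allocated and its iomaps are filled in.
+ */
+#if 0 /* illustrative only, not compiled */
+static int example_probe_tail(struct ata_host *host, int irq)
+{
+ /* starts ports, requests the IRQ, registers with SCSI and probes */
+ return ata_host_activate(host, irq, example_interrupt,
+ IRQF_SHARED, &example_sht);
+}
+#endif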
+
+/**
+ * ata_port_detach - Detach ATA port in preparation for device removal
+ * @ap: ATA port to be detached
+ *
+ * Detach all ATA devices and the associated SCSI devices of @ap;
+ * then, remove the associated SCSI host. @ap is guaranteed to
+ * be quiescent on return from this function.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_port_detach(struct ata_port *ap)
+{
+ unsigned long flags;
+ struct ata_link *link;
+ struct ata_device *dev;
+
+ if (!ap->ops->error_handler)
+ goto skip_eh;
+
+ /* tell EH we're leaving & flush EH */
+ spin_lock_irqsave(ap->lock, flags);
+ ap->pflags |= ATA_PFLAG_UNLOADING;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ ata_port_wait_eh(ap);
+
+ /* EH is now guaranteed to see UNLOADING - EH context belongs
+ * to us. Restore SControl and disable all existing devices.
+ */
+ __ata_port_for_each_link(link, ap) {
+ sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
+ ata_link_for_each_dev(dev, link)
+ ata_dev_disable(dev);
+ }
+
+ /* Final freeze & EH. All in-flight commands are aborted. EH
+ * will be skipped and retries will be terminated with a bad
+ * target.
+ */
+ spin_lock_irqsave(ap->lock, flags);
+ ata_port_freeze(ap); /* won't be thawed */
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ ata_port_wait_eh(ap);
+ cancel_rearming_delayed_work(&ap->hotplug_task);
+
+ skip_eh:
+ /* remove the associated SCSI host */
+ scsi_remove_host(ap->scsi_host);
+}
+
+/**
+ * ata_host_detach - Detach all ports of an ATA host
+ * @host: Host to detach
+ *
+ * Detach all ports of @host.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_host_detach(struct ata_host *host)
+{
+ int i;
+
+ for (i = 0; i < host->n_ports; i++)
+ ata_port_detach(host->ports[i]);
+
+ /* the host is dead now, dissociate ACPI */
+ ata_acpi_dissociate(host);
+}
+
+#ifdef CONFIG_PCI
+
+/**
+ * ata_pci_remove_one - PCI layer callback for device removal
+ * @pdev: PCI device that was removed
+ *
+ * PCI layer indicates to libata via this hook that hot-unplug or
+ * module unload event has occurred. Detach all ports. Resource
+ * release is handled via devres.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ */
+void ata_pci_remove_one(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+}
+
+/* move to PCI subsystem */
+int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
+{
+ unsigned long tmp = 0;
+
+ switch (bits->width) {
+ case 1: {
+ u8 tmp8 = 0;
+ pci_read_config_byte(pdev, bits->reg, &tmp8);
+ tmp = tmp8;
+ break;
+ }
+ case 2: {
+ u16 tmp16 = 0;
+ pci_read_config_word(pdev, bits->reg, &tmp16);
+ tmp = tmp16;
+ break;
+ }
+ case 4: {
+ u32 tmp32 = 0;
+ pci_read_config_dword(pdev, bits->reg, &tmp32);
+ tmp = tmp32;
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ tmp &= bits->mask;
+
+ return (tmp == bits->val) ? 1 : 0;
+}
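
A sketch of typical use in a PATA driver, gating channel probe on an enable bit; the register offset, mask and value below are invented for illustration:

static const struct pci_bits my_enable_bits = {
	0x41,		/* reg: config-space offset */
	1,		/* width: one byte */
	0x80,		/* mask */
	0x80,		/* val: bit set -> channel enabled */
};

static int my_channel_enabled(struct pci_dev *pdev)
{
	/* 1: bits match, 0: no match, -EINVAL: unsupported width */
	return pci_test_config_bits(pdev, &my_enable_bits) == 1;
}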
+
+#ifdef CONFIG_PM
+void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ if (mesg.event & PM_EVENT_SLEEP)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+int ata_pci_device_do_resume(struct pci_dev *pdev)
+{
+ int rc;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ rc = pcim_enable_device(pdev);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to enable device after resume (%d)\n", rc);
+ return rc;
+ }
+
+ pci_set_master(pdev);
+ return 0;
+}
+
+int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc = 0;
+
+ rc = ata_host_suspend(host, mesg);
+ if (rc)
+ return rc;
+
+ ata_pci_device_do_suspend(pdev, mesg);
+
+ return 0;
+}
+
+int ata_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc == 0)
+ ata_host_resume(host);
+ return rc;
+}
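
The suspend/resume pair is deliberately split into do_* halves so that an LLD with controller-specific state can interleave its own work between the PCI-level wakeup and restarting EH. A minimal sketch, assuming a hypothetical my_restore_state() helper:

static int my_pci_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);	/* PCI-level wakeup */
	if (rc)
		return rc;

	my_restore_state(host);			/* LLD-specific step */

	ata_host_resume(host);			/* kick EH to recover links */
	return 0;
}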
+#endif /* CONFIG_PM */
+
+#endif /* CONFIG_PCI */
+
+static int __init ata_parse_force_one(char **cur,
+ struct ata_force_ent *force_ent,
+ const char **reason)
+{
+ /* FIXME: Currently, there's no way to tag init const data and
+ * using __initdata causes build failure on some versions of
+ * gcc. Once __initdataconst is implemented, add const to the
+ * following structure.
+ */
+ static struct ata_force_param force_tbl[] __initdata = {
+ { "40c", .cbl = ATA_CBL_PATA40 },
+ { "80c", .cbl = ATA_CBL_PATA80 },
+ { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
+ { "unk", .cbl = ATA_CBL_PATA_UNK },
+ { "ign", .cbl = ATA_CBL_PATA_IGN },
+ { "sata", .cbl = ATA_CBL_SATA },
+ { "1.5Gbps", .spd_limit = 1 },
+ { "3.0Gbps", .spd_limit = 2 },
+ { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
+ { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
+ { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
+ { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
+ { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
+ { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
+ { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
+ { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
+ { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
+ { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
+ { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
+ { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
+ { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
+ { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
+ { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
+ { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
+ { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
+ { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
+ { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
+ { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
+ { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
+ { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
+ { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
+ { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
+ { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
+ { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
+ { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
+ { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
+ { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
+ { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
+ { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
+ { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
+ { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
+ { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
+ { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
+ { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
+ { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
+ { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
+ { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
+ };
+ char *start = *cur, *p = *cur;
+ char *id, *val, *endp;
+ const struct ata_force_param *match_fp = NULL;
+ int nr_matches = 0, i;
+
+ /* find where this param ends and update *cur */
+ while (*p != '\0' && *p != ',')
+ p++;
+
+ if (*p == '\0')
+ *cur = p;
+ else
+ *cur = p + 1;
+
+ *p = '\0';
+
+ /* parse */
+ p = strchr(start, ':');
+ if (!p) {
+ val = strstrip(start);
+ goto parse_val;
+ }
+ *p = '\0';
+
+ id = strstrip(start);
+ val = strstrip(p + 1);
+
+ /* parse id */
+ p = strchr(id, '.');
+ if (p) {
+ *p++ = '\0';
+ force_ent->device = simple_strtoul(p, &endp, 10);
+ if (p == endp || *endp != '\0') {
+ *reason = "invalid device";
+ return -EINVAL;
+ }
+ }
+
+ force_ent->port = simple_strtoul(id, &endp, 10);
+ if (id == endp || *endp != '\0') {
+ *reason = "invalid port/link";
+ return -EINVAL;
+ }
+
+ parse_val:
+ /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
+ for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
+ const struct ata_force_param *fp = &force_tbl[i];
+
+ if (strncasecmp(val, fp->name, strlen(val)))
+ continue;
+
+ nr_matches++;
+ match_fp = fp;
+
+ if (strcasecmp(val, fp->name) == 0) {
+ nr_matches = 1;
+ break;
+ }
+ }
+
+ if (!nr_matches) {
+ *reason = "unknown value";
+ return -EINVAL;
+ }
+ if (nr_matches > 1) {
+ *reason = "ambigious value";
+ return -EINVAL;
+ }
+
+ force_ent->param = *match_fp;
+
+ return 0;
+}
+
+static void __init ata_parse_force_param(void)
+{
+ int idx = 0, size = 1;
+ int last_port = -1, last_device = -1;
+ char *p, *cur, *next;
+
+ /* calculate maximum number of params and allocate force_tbl */
+ for (p = ata_force_param_buf; *p; p++)
+ if (*p == ',')
+ size++;
+
+ ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
+ if (!ata_force_tbl) {
+ printk(KERN_WARNING "ata: failed to extend force table, "
+ "libata.force ignored\n");
+ return;
+ }
+
+ /* parse and populate the table */
+ for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
+ const char *reason = "";
+ struct ata_force_ent te = { .port = -1, .device = -1 };
+
+ next = cur;
+ if (ata_parse_force_one(&next, &te, &reason)) {
+ printk(KERN_WARNING "ata: failed to parse force "
+ "parameter \"%s\" (%s)\n",
+ cur, reason);
+ continue;
+ }
+
+ if (te.port == -1) {
+ te.port = last_port;
+ te.device = last_device;
+ }
+
+ ata_force_tbl[idx++] = te;
+
+ last_port = te.port;
+ last_device = te.device;
+ }
+
+ ata_force_tbl_size = idx;
+}
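
For orientation, the buffer parsed here carries the libata.force boot parameter. A hypothetical invocation built from the force_tbl entries above:

    libata.force=1:1.5Gbps,2.0:noncq,3:udma/33

caps port 1 at 1.5Gbps, disables NCQ on device 0 of port 2, and limits port 3 to UDMA/33.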
+
+static int __init ata_init(void)
+{
+ ata_parse_force_param();
+
+ ata_wq = create_workqueue("ata");
+ if (!ata_wq)
+ goto free_force_tbl;
+
+ ata_aux_wq = create_singlethread_workqueue("ata_aux");
+ if (!ata_aux_wq)
+ goto free_wq;
+
+ printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
+ return 0;
+
+free_wq:
+ destroy_workqueue(ata_wq);
+free_force_tbl:
+ kfree(ata_force_tbl);
+ return -ENOMEM;
+}
+
+static void __exit ata_exit(void)
+{
+ kfree(ata_force_tbl);
+ destroy_workqueue(ata_wq);
+ destroy_workqueue(ata_aux_wq);
+}
+
+subsys_initcall(ata_init);
+module_exit(ata_exit);
+
+static unsigned long ratelimit_time;
+static DEFINE_SPINLOCK(ata_ratelimit_lock);
+
+int ata_ratelimit(void)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ata_ratelimit_lock, flags);
+
+ if (time_after(jiffies, ratelimit_time)) {
+ rc = 1;
+ ratelimit_time = jiffies + (HZ/5);
+ } else
+ rc = 0;
+
+ spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
+
+ return rc;
+}
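
A minimal usage sketch (the message and helper name are illustrative): at most one caller wins per HZ/5 window, globally across all ports.

static void my_warn_spurious(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt\n");
}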
+
+/**
+ * ata_wait_register - wait until register value changes
+ * @reg: IO-mapped register
+ * @mask: Mask to apply to read register value
+ * @val: Wait condition
+ * @interval: polling interval in milliseconds
+ * @timeout: timeout in milliseconds
+ *
+ * Waiting for some bits of register to change is a common
+ * operation for ATA controllers. This function reads 32bit LE
+ * IO-mapped register @reg and tests for the following condition.
+ *
+ * (*@reg & mask) != val
+ *
+ * If the condition is met, it returns; otherwise, the process is
+ * repeated after @interval milliseconds until @timeout elapses.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * The final register value.
+ */
+u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
+ unsigned long interval, unsigned long timeout)
+{
+ unsigned long deadline;
+ u32 tmp;
+
+ tmp = ioread32(reg);
+
+ /* Calculate timeout _after_ the first read to make sure
+ * preceding writes reach the controller before starting to
+ * eat away the timeout.
+ */
+ deadline = ata_deadline(jiffies, timeout);
+
+ while ((tmp & mask) == val && time_before(jiffies, deadline)) {
+ msleep(interval);
+ tmp = ioread32(reg);
+ }
+
+ return tmp;
+}
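
A usage sketch: poll a hypothetical busy bit at register offset 0x10, every 10 ms, for up to 500 ms (offset and bit are placeholders):

static int my_wait_not_busy(void __iomem *mmio)
{
	/* loops while (reg & 0x1) == 0x1, i.e. until busy clears */
	u32 status = ata_wait_register(mmio + 0x10, 0x1, 0x1, 10, 500);

	return (status & 0x1) ? -EBUSY : 0;
}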
+
+/*
+ * Dummy port_ops
+ */
+static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
+{
+ return AC_ERR_SYSTEM;
+}
+
+static void ata_dummy_error_handler(struct ata_port *ap)
+{
+ /* truly dummy */
+}
+
+struct ata_port_operations ata_dummy_port_ops = {
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = ata_dummy_qc_issue,
+ .error_handler = ata_dummy_error_handler,
+};
+
+const struct ata_port_info ata_dummy_port_info = {
+ .port_ops = &ata_dummy_port_ops,
+};
+
+/*
+ * libata is essentially a library of internal helper functions for
+ * low-level ATA host controller drivers. As such, the API/ABI is
+ * likely to change as new drivers are added and updated.
+ * Do not depend on ABI/API stability.
+ */
+EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
+EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
+EXPORT_SYMBOL_GPL(sata_deb_timing_long);
+EXPORT_SYMBOL_GPL(ata_base_port_ops);
+EXPORT_SYMBOL_GPL(sata_port_ops);
+EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
+EXPORT_SYMBOL_GPL(ata_dummy_port_info);
+EXPORT_SYMBOL_GPL(__ata_port_next_link);
+EXPORT_SYMBOL_GPL(ata_std_bios_param);
+EXPORT_SYMBOL_GPL(ata_host_init);
+EXPORT_SYMBOL_GPL(ata_host_alloc);
+EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
+EXPORT_SYMBOL_GPL(ata_slave_link_init);
+EXPORT_SYMBOL_GPL(ata_host_start);
+EXPORT_SYMBOL_GPL(ata_host_register);
+EXPORT_SYMBOL_GPL(ata_host_activate);
+EXPORT_SYMBOL_GPL(ata_host_detach);
+EXPORT_SYMBOL_GPL(ata_sg_init);
+EXPORT_SYMBOL_GPL(ata_qc_complete);
+EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
+EXPORT_SYMBOL_GPL(atapi_cmd_type);
+EXPORT_SYMBOL_GPL(ata_tf_to_fis);
+EXPORT_SYMBOL_GPL(ata_tf_from_fis);
+EXPORT_SYMBOL_GPL(ata_pack_xfermask);
+EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
+EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
+EXPORT_SYMBOL_GPL(ata_mode_string);
+EXPORT_SYMBOL_GPL(ata_id_xfermask);
+EXPORT_SYMBOL_GPL(ata_port_start);
+EXPORT_SYMBOL_GPL(ata_do_set_mode);
+EXPORT_SYMBOL_GPL(ata_std_qc_defer);
+EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
+EXPORT_SYMBOL_GPL(ata_port_probe);
+EXPORT_SYMBOL_GPL(ata_dev_disable);
+EXPORT_SYMBOL_GPL(sata_set_spd);
+EXPORT_SYMBOL_GPL(ata_wait_after_reset);
+EXPORT_SYMBOL_GPL(sata_link_debounce);
+EXPORT_SYMBOL_GPL(sata_link_resume);
+EXPORT_SYMBOL_GPL(ata_std_prereset);
+EXPORT_SYMBOL_GPL(sata_link_hardreset);
+EXPORT_SYMBOL_GPL(sata_std_hardreset);
+EXPORT_SYMBOL_GPL(ata_std_postreset);
+EXPORT_SYMBOL_GPL(ata_dev_classify);
+EXPORT_SYMBOL_GPL(ata_dev_pair);
+EXPORT_SYMBOL_GPL(ata_port_disable);
+EXPORT_SYMBOL_GPL(ata_ratelimit);
+EXPORT_SYMBOL_GPL(ata_wait_register);
+EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
+EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
+EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
+EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
+EXPORT_SYMBOL_GPL(sata_scr_valid);
+EXPORT_SYMBOL_GPL(sata_scr_read);
+EXPORT_SYMBOL_GPL(sata_scr_write);
+EXPORT_SYMBOL_GPL(sata_scr_write_flush);
+EXPORT_SYMBOL_GPL(ata_link_online);
+EXPORT_SYMBOL_GPL(ata_link_offline);
+#ifdef CONFIG_PM
+EXPORT_SYMBOL_GPL(ata_host_suspend);
+EXPORT_SYMBOL_GPL(ata_host_resume);
+#endif /* CONFIG_PM */
+EXPORT_SYMBOL_GPL(ata_id_string);
+EXPORT_SYMBOL_GPL(ata_id_c_string);
+EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
+EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+
+EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
+EXPORT_SYMBOL_GPL(ata_timing_find_mode);
+EXPORT_SYMBOL_GPL(ata_timing_compute);
+EXPORT_SYMBOL_GPL(ata_timing_merge);
+EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+#ifdef CONFIG_PM
+EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
+EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
+EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
+EXPORT_SYMBOL_GPL(ata_pci_device_resume);
+#endif /* CONFIG_PM */
+#endif /* CONFIG_PCI */
+
+EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
+EXPORT_SYMBOL_GPL(ata_port_desc);
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
+#endif /* CONFIG_PCI */
+EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
+EXPORT_SYMBOL_GPL(ata_link_abort);
+EXPORT_SYMBOL_GPL(ata_port_abort);
+EXPORT_SYMBOL_GPL(ata_port_freeze);
+EXPORT_SYMBOL_GPL(sata_async_notification);
+EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
+EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
+EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
+EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
+EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
+EXPORT_SYMBOL_GPL(ata_do_eh);
+EXPORT_SYMBOL_GPL(ata_std_error_handler);
+
+EXPORT_SYMBOL_GPL(ata_cable_40wire);
+EXPORT_SYMBOL_GPL(ata_cable_80wire);
+EXPORT_SYMBOL_GPL(ata_cable_unknown);
+EXPORT_SYMBOL_GPL(ata_cable_ignore);
+EXPORT_SYMBOL_GPL(ata_cable_sata);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
new file mode 100644
index 0000000..97a7dbc
--- /dev/null
+++ b/drivers/ata/libata-eh.c
@@ -0,0 +1,3423 @@
+/*
+ * libata-eh.c - libata error handling
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2006 Tejun Heo <htejun@gmail.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from http://www.t13.org/ and
+ * http://www.sata-io.org/
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include "../scsi/scsi_transport_api.h"
+
+#include <linux/libata.h>
+
+#include "libata.h"
+
+enum {
+ /* speed down verdicts */
+ ATA_EH_SPDN_NCQ_OFF = (1 << 0),
+ ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
+ ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
+ ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),
+
+ /* error flags */
+ ATA_EFLAG_IS_IO = (1 << 0),
+ ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
+
+ /* error categories */
+ ATA_ECAT_NONE = 0,
+ ATA_ECAT_ATA_BUS = 1,
+ ATA_ECAT_TOUT_HSM = 2,
+ ATA_ECAT_UNK_DEV = 3,
+ ATA_ECAT_DUBIOUS_NONE = 4,
+ ATA_ECAT_DUBIOUS_ATA_BUS = 5,
+ ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
+ ATA_ECAT_DUBIOUS_UNK_DEV = 7,
+ ATA_ECAT_NR = 8,
+
+ ATA_EH_CMD_DFL_TIMEOUT = 5000,
+
+ /* always put at least this amount of time between resets */
+ ATA_EH_RESET_COOL_DOWN = 5000,
+
+ /* Waiting in ->prereset can never be reliable. It's
+ * sometimes nice to wait there but it can't be depended upon;
+ * otherwise, we wouldn't be resetting. Just give it enough
+ * time for most drives to spin up.
+ */
+ ATA_EH_PRERESET_TIMEOUT = 10000,
+ ATA_EH_FASTDRAIN_INTERVAL = 3000,
+
+ ATA_EH_UA_TRIES = 5,
+};
+
+/* The following table determines how we sequence resets. Each entry
+ * represents timeout for that try. The first try can be soft or
+ * hardreset. All others are hardreset if available. In most cases
+ * the first reset w/ 10sec timeout should succeed. Following entries
+ * are mostly for error handling, hotplug and slow devices.
+ */
+static const unsigned long ata_eh_reset_timeouts[] = {
+ 10000, /* most drives spin up by 10sec */
+ 10000, /* > 99% working drives spin up before 20sec */
+ 35000, /* give > 30 secs of idleness for slow devices */
+ 5000, /* and sweet one last chance */
+ ULONG_MAX, /* > 1 min has elapsed, give up */
+};
+
+static const unsigned long ata_eh_identify_timeouts[] = {
+ 5000, /* covers > 99% of successes and not too boring on failures */
+ 10000, /* combined time till here is enough even for media access */
+ 30000, /* for exceptionally slow devices */
+ ULONG_MAX,
+};
+
+static const unsigned long ata_eh_other_timeouts[] = {
+ 5000, /* same rationale as identify timeout */
+ 10000, /* ditto */
+ /* but no merciful 30sec for other commands, it just isn't worth it */
+ ULONG_MAX,
+};
+
+struct ata_eh_cmd_timeout_ent {
+ const u8 *commands;
+ const unsigned long *timeouts;
+};
+
+/* The following table determines timeouts to use for EH internal
+ * commands. Each table entry is a command class and matches the
+ * commands the entry applies to and the timeout table to use.
+ *
+ * On the retry after a command timed out, the next timeout value from
+ * the table is used. If the table doesn't contain further entries,
+ * the last value is used.
+ *
+ * ehc->cmd_timeout_idx keeps track of which timeout to use per
+ * command class, so if SET_FEATURES times out on the first try, the
+ * next try will use the second timeout value only for that class.
+ */
+#define CMDS(cmds...) (const u8 []){ cmds, 0 }
+static const struct ata_eh_cmd_timeout_ent
+ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
+ { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
+ .timeouts = ata_eh_identify_timeouts, },
+ { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
+ .timeouts = ata_eh_other_timeouts, },
+ { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
+ .timeouts = ata_eh_other_timeouts, },
+ { .commands = CMDS(ATA_CMD_SET_FEATURES),
+ .timeouts = ata_eh_other_timeouts, },
+ { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
+ .timeouts = ata_eh_other_timeouts, },
+};
+#undef CMDS
+
+static void __ata_port_freeze(struct ata_port *ap);
+#ifdef CONFIG_PM
+static void ata_eh_handle_port_suspend(struct ata_port *ap);
+static void ata_eh_handle_port_resume(struct ata_port *ap);
+#else /* CONFIG_PM */
+static void ata_eh_handle_port_suspend(struct ata_port *ap)
+{ }
+
+static void ata_eh_handle_port_resume(struct ata_port *ap)
+{ }
+#endif /* CONFIG_PM */
+
+static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
+ va_list args)
+{
+ ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
+ ATA_EH_DESC_LEN - ehi->desc_len,
+ fmt, args);
+}
+
+/**
+ * __ata_ehi_push_desc - push error description without adding separator
+ * @ehi: target EHI
+ * @fmt: printf format string
+ *
+ * Format string according to @fmt and append it to @ehi->desc.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ __ata_ehi_pushv_desc(ehi, fmt, args);
+ va_end(args);
+}
+
+/**
+ * ata_ehi_push_desc - push error description with separator
+ * @ehi: target EHI
+ * @fmt: printf format string
+ *
+ * Format string according to @fmt and append it to @ehi->desc.
+ * If @ehi->desc is not empty, ", " is added in-between.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+ va_list args;
+
+ if (ehi->desc_len)
+ __ata_ehi_push_desc(ehi, ", ");
+
+ va_start(args, fmt);
+ __ata_ehi_pushv_desc(ehi, fmt, args);
+ va_end(args);
+}
+
+/**
+ * ata_ehi_clear_desc - clean error description
+ * @ehi: target EHI
+ *
+ * Clear @ehi->desc.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_ehi_clear_desc(struct ata_eh_info *ehi)
+{
+ ehi->desc[0] = '\0';
+ ehi->desc_len = 0;
+}
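
A usage sketch combining the two helpers above; the host lock is assumed held per the LOCKING notes, and irq_stat plus its bit 0 meaning are illustrative placeholders:

static void my_note_irq(struct ata_eh_info *ehi, u32 irq_stat)
{
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
	if (irq_stat & 0x1)
		ata_ehi_push_desc(ehi, "PHY event"); /* yields ", PHY event" */
}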
+
+/**
+ * ata_port_desc - append port description
+ * @ap: target ATA port
+ * @fmt: printf format string
+ *
+ * Format string according to @fmt and append it to port
+ * description. If port description is not empty, " " is added
+ * in-between. This function is to be used while initializing
+ * ata_host. The description is printed on host registration.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
+{
+ va_list args;
+
+ WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
+
+ if (ap->link.eh_info.desc_len)
+ __ata_ehi_push_desc(&ap->link.eh_info, " ");
+
+ va_start(args, fmt);
+ __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
+ va_end(args);
+}
+
+#ifdef CONFIG_PCI
+
+/**
+ * ata_port_pbar_desc - append PCI BAR description
+ * @ap: target ATA port
+ * @bar: target PCI BAR
+ * @offset: offset into PCI BAR
+ * @name: name of the area
+ *
+ * If @offset is negative, this function formats a string which
+ * contains the name, address, size and type of the BAR and
+ * appends it to the port description. If @offset is zero or
+ * positive, only the name and the offset address are appended.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
+ const char *name)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ char *type = "";
+ unsigned long long start, len;
+
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
+ type = "m";
+ else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
+ type = "i";
+
+ start = (unsigned long long)pci_resource_start(pdev, bar);
+ len = (unsigned long long)pci_resource_len(pdev, bar);
+
+ if (offset < 0)
+ ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
+ else
+ ata_port_desc(ap, "%s 0x%llx", name,
+ start + (unsigned long long)offset);
+}
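
Two illustrative calls showing the two output forms; the BAR number, size and addresses below are invented:

ata_port_pbar_desc(ap, 5, -1, "abar");
	/* appends e.g. "abar m2048@0xf0000000" - name, type+size, start */
ata_port_pbar_desc(ap, 5, 0x100, "port");
	/* appends e.g. "port 0xf0000100" - name, start + offset */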
+
+#endif /* CONFIG_PCI */
+
+static int ata_lookup_timeout_table(u8 cmd)
+{
+ int i;
+
+ for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
+ const u8 *cur;
+
+ for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
+ if (*cur == cmd)
+ return i;
+ }
+
+ return -1;
+}
+
+/**
+ * ata_internal_cmd_timeout - determine timeout for an internal command
+ * @dev: target device
+ * @cmd: internal command to be issued
+ *
+ * Determine timeout for internal command @cmd for @dev.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * Determined timeout.
+ */
+unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ int ent = ata_lookup_timeout_table(cmd);
+ int idx;
+
+ if (ent < 0)
+ return ATA_EH_CMD_DFL_TIMEOUT;
+
+ idx = ehc->cmd_timeout_idx[dev->devno][ent];
+ return ata_eh_cmd_timeout_table[ent].timeouts[idx];
+}
+
+/**
+ * ata_internal_cmd_timed_out - notification for internal command timeout
+ * @dev: target device
+ * @cmd: internal command which timed out
+ *
+ * Notify EH that internal command @cmd for @dev timed out. This
+ * function should be called only for commands whose timeouts are
+ * determined using ata_internal_cmd_timeout().
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ int ent = ata_lookup_timeout_table(cmd);
+ int idx;
+
+ if (ent < 0)
+ return;
+
+ idx = ehc->cmd_timeout_idx[dev->devno][ent];
+ if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
+ ehc->cmd_timeout_idx[dev->devno][ent]++;
+}
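
As an assumed trace of the escalation for IDENTIFY, whose command class maps to ata_eh_identify_timeouts above:

/* try 1: ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA) ->  5000 ms
 *        on timeout, ata_internal_cmd_timed_out() advances the
 *        per-device index because timeouts[1] != ULONG_MAX
 * try 2: -> 10000 ms, index advances again
 * try 3: -> 30000 ms; timeouts[3] == ULONG_MAX, so the index
 *        stays put and any further tries keep using 30000 ms
 */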
+
+static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
+ unsigned int err_mask)
+{
+ struct ata_ering_entry *ent;
+
+ WARN_ON(!err_mask);
+
+ ering->cursor++;
+ ering->cursor %= ATA_ERING_SIZE;
+
+ ent = &ering->ring[ering->cursor];
+ ent->eflags = eflags;
+ ent->err_mask = err_mask;
+ ent->timestamp = get_jiffies_64();
+}
+
+static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
+{
+ struct ata_ering_entry *ent = &ering->ring[ering->cursor];
+
+ if (ent->err_mask)
+ return ent;
+ return NULL;
+}
+
+static void ata_ering_clear(struct ata_ering *ering)
+{
+ memset(ering, 0, sizeof(*ering));
+}
+
+static int ata_ering_map(struct ata_ering *ering,
+ int (*map_fn)(struct ata_ering_entry *, void *),
+ void *arg)
+{
+ int idx, rc = 0;
+ struct ata_ering_entry *ent;
+
+ idx = ering->cursor;
+ do {
+ ent = &ering->ring[idx];
+ if (!ent->err_mask)
+ break;
+ rc = map_fn(ent, arg);
+ if (rc)
+ break;
+ idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
+ } while (idx != ering->cursor);
+
+ return rc;
+}
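
speed_down_verdict_cb() further down in this file is the in-tree user of this iterator. As a minimal sketch of the callback contract (all names here are illustrative): the walk starts at the newest entry and moves backwards, and a non-zero return stops it.

struct count_arg {
	u64 since;		/* cutoff timestamp, jiffies_64 */
	int nr;			/* entries counted so far */
};

static int count_recent_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct count_arg *arg = void_arg;

	if (ent->timestamp < arg->since)
		return -1;	/* older than the window - stop walking */
	arg->nr++;
	return 0;		/* keep walking */
}

A caller would then invoke ata_ering_map(&dev->ering, count_recent_cb, &arg) and read arg.nr.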
+
+static unsigned int ata_eh_dev_action(struct ata_device *dev)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+
+ return ehc->i.action | ehc->i.dev_action[dev->devno];
+}
+
+static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
+ struct ata_eh_info *ehi, unsigned int action)
+{
+ struct ata_device *tdev;
+
+ if (!dev) {
+ ehi->action &= ~action;
+ ata_link_for_each_dev(tdev, link)
+ ehi->dev_action[tdev->devno] &= ~action;
+ } else {
+ /* doesn't make sense for port-wide EH actions */
+ WARN_ON(!(action & ATA_EH_PERDEV_MASK));
+
+ /* break ehi->action into ehi->dev_action */
+ if (ehi->action & action) {
+ ata_link_for_each_dev(tdev, link)
+ ehi->dev_action[tdev->devno] |=
+ ehi->action & action;
+ ehi->action &= ~action;
+ }
+
+ /* turn off the specified per-dev action */
+ ehi->dev_action[dev->devno] &= ~action;
+ }
+}
+
+/**
+ * ata_scsi_timed_out - SCSI layer time out callback
+ * @cmd: timed out SCSI command
+ *
+ * Handles SCSI layer timeout. We race with normal completion of
+ * the qc for @cmd. If the qc is already gone, we lose and let
+ * the scsi command finish (EH_HANDLED). Otherwise, the qc has
+ * timed out and EH should be invoked. Prevent ata_qc_complete()
+ * from finishing it by setting EH_SCHEDULED and return
+ * EH_NOT_HANDLED.
+ *
+ * TODO: kill this function once old EH is gone.
+ *
+ * LOCKING:
+ * Called from timer context
+ *
+ * RETURNS:
+ * EH_HANDLED or EH_NOT_HANDLED
+ */
+enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *host = cmd->device->host;
+ struct ata_port *ap = ata_shost_to_port(host);
+ unsigned long flags;
+ struct ata_queued_cmd *qc;
+ enum blk_eh_timer_return ret;
+
+ DPRINTK("ENTER\n");
+
+ if (ap->ops->error_handler) {
+ ret = BLK_EH_NOT_HANDLED;
+ goto out;
+ }
+
+ ret = BLK_EH_HANDLED;
+ spin_lock_irqsave(ap->lock, flags);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc) {
+ WARN_ON(qc->scsicmd != cmd);
+ qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ ret = BLK_EH_NOT_HANDLED;
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ out:
+ DPRINTK("EXIT, ret=%d\n", ret);
+ return ret;
+}
+
+/**
+ * ata_scsi_error - SCSI layer error handler callback
+ * @host: SCSI host on which error occurred
+ *
+ * Handles SCSI-layer-thrown error events.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ *
+ * RETURNS:
+ * Zero.
+ */
+void ata_scsi_error(struct Scsi_Host *host)
+{
+ struct ata_port *ap = ata_shost_to_port(host);
+ int i;
+ unsigned long flags;
+
+ DPRINTK("ENTER\n");
+
+ /* synchronize with port task */
+ ata_port_flush_task(ap);
+
+ /* synchronize with host lock and sort out timeouts */
+
+ /* For new EH, all qcs are finished in one of three ways -
+ * normal completion, error completion, and SCSI timeout.
+ * Both completions can race against SCSI timeout. When normal
+ * completion wins, the qc never reaches EH. When error
+ * completion wins, the qc has ATA_QCFLAG_FAILED set.
+ *
+ * When SCSI timeout wins, things are a bit more complex.
+ * Normal or error completion can occur after the timeout but
+ * before this point. In such cases, both types of
+ * completions are honored. A scmd is determined to have
+ * timed out iff its associated qc is active and not failed.
+ */
+ if (ap->ops->error_handler) {
+ struct scsi_cmnd *scmd, *tmp;
+ int nr_timedout = 0;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
+ struct ata_queued_cmd *qc;
+
+ for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ qc = __ata_qc_from_tag(ap, i);
+ if (qc->flags & ATA_QCFLAG_ACTIVE &&
+ qc->scsicmd == scmd)
+ break;
+ }
+
+ if (i < ATA_MAX_QUEUE) {
+ /* the scmd has an associated qc */
+ if (!(qc->flags & ATA_QCFLAG_FAILED)) {
+ /* which hasn't failed yet, timeout */
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ qc->flags |= ATA_QCFLAG_FAILED;
+ nr_timedout++;
+ }
+ } else {
+ /* Normal completion occurred after
+ * SCSI timeout but before this point.
+ * Successfully complete it.
+ */
+ scmd->retries = scmd->allowed;
+ scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+ }
+ }
+
+ /* If we have timed out qcs, they belong to EH from
+ * this point but the state of the controller is
+ * unknown. Freeze the port to make sure the IRQ
+ * handler doesn't diddle with those qcs. This must
+ * be done atomically w.r.t. setting QCFLAG_FAILED.
+ */
+ if (nr_timedout)
+ __ata_port_freeze(ap);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* initialize eh_tries */
+ ap->eh_tries = ATA_EH_MAX_TRIES;
+ } else
+ spin_unlock_wait(ap->lock);
+
+ repeat:
+ /* invoke error handler */
+ if (ap->ops->error_handler) {
+ struct ata_link *link;
+
+ /* kill fast drain timer */
+ del_timer_sync(&ap->fastdrain_timer);
+
+ /* process port resume request */
+ ata_eh_handle_port_resume(ap);
+
+ /* fetch & clear EH info */
+ spin_lock_irqsave(ap->lock, flags);
+
+ __ata_port_for_each_link(link, ap) {
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev;
+
+ memset(&link->eh_context, 0, sizeof(link->eh_context));
+ link->eh_context.i = link->eh_info;
+ memset(&link->eh_info, 0, sizeof(link->eh_info));
+
+ ata_link_for_each_dev(dev, link) {
+ int devno = dev->devno;
+
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ ehc->saved_xfer_mode[devno] = dev->xfer_mode;
+ if (ata_ncq_enabled(dev))
+ ehc->saved_ncq_enabled |= 1 << devno;
+ }
+ }
+
+ ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
+ ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ ap->excl_link = NULL; /* don't maintain exclusion over EH */
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* invoke EH, skip if unloading or suspended */
+ if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
+ ap->ops->error_handler(ap);
+ else
+ ata_eh_finish(ap);
+
+ /* process port suspend request */
+ ata_eh_handle_port_suspend(ap);
+
+ /* Exception might have happened after ->error_handler
+ * recovered the port but before this point. Repeat
+ * EH in such case.
+ */
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (ap->pflags & ATA_PFLAG_EH_PENDING) {
+ if (--ap->eh_tries) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ goto repeat;
+ }
+ ata_port_printk(ap, KERN_ERR, "EH pending after %d "
+ "tries, giving up\n", ATA_EH_MAX_TRIES);
+ ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ }
+
+ /* this run is complete, make sure EH info is clear */
+ __ata_port_for_each_link(link, ap)
+ memset(&link->eh_info, 0, sizeof(link->eh_info));
+
+ /* Clear host_eh_scheduled while holding ap->lock such
+ * that if exception occurs after this point but
+ * before EH completion, SCSI midlayer will
+ * re-initiate EH.
+ */
+ host->host_eh_scheduled = 0;
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ } else {
+ WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
+ ap->ops->eng_timeout(ap);
+ }
+
+ /* finish or retry handled scmd's and clean up */
+ WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
+
+ scsi_eh_flush_done_q(&ap->eh_done_q);
+
+ /* clean up */
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (ap->pflags & ATA_PFLAG_LOADING)
+ ap->pflags &= ~ATA_PFLAG_LOADING;
+ else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
+ queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
+
+ if (ap->pflags & ATA_PFLAG_RECOVERED)
+ ata_port_printk(ap, KERN_INFO, "EH complete\n");
+
+ ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
+
+ /* tell wait_eh that we're done */
+ ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
+ wake_up_all(&ap->eh_wait_q);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_port_wait_eh - Wait for the currently pending EH to complete
+ * @ap: Port to wait EH for
+ *
+ * Wait until the currently pending EH is complete.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_port_wait_eh(struct ata_port *ap)
+{
+ unsigned long flags;
+ DEFINE_WAIT(wait);
+
+ retry:
+ spin_lock_irqsave(ap->lock, flags);
+
+ while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
+ prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_irqrestore(ap->lock, flags);
+ schedule();
+ spin_lock_irqsave(ap->lock, flags);
+ }
+ finish_wait(&ap->eh_wait_q, &wait);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* make sure SCSI EH is complete */
+ if (scsi_host_in_recovery(ap->scsi_host)) {
+ msleep(10);
+ goto retry;
+ }
+}
+
+static int ata_eh_nr_in_flight(struct ata_port *ap)
+{
+ unsigned int tag;
+ int nr = 0;
+
+ /* count only non-internal commands */
+ for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
+ if (ata_qc_from_tag(ap, tag))
+ nr++;
+
+ return nr;
+}
+
+void ata_eh_fastdrain_timerfn(unsigned long arg)
+{
+ struct ata_port *ap = (void *)arg;
+ unsigned long flags;
+ int cnt;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ cnt = ata_eh_nr_in_flight(ap);
+
+ /* are we done? */
+ if (!cnt)
+ goto out_unlock;
+
+ if (cnt == ap->fastdrain_cnt) {
+ unsigned int tag;
+
+ /* No progress during the last interval, tag all
+ * in-flight qcs as timed out and freeze the port.
+ */
+ for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+ if (qc)
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ }
+
+ ata_port_freeze(ap);
+ } else {
+ /* some qcs have finished, give it another chance */
+ ap->fastdrain_cnt = cnt;
+ ap->fastdrain_timer.expires =
+ ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
+ add_timer(&ap->fastdrain_timer);
+ }
+
+ out_unlock:
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
+ * @ap: target ATA port
+ * @fastdrain: activate fast drain
+ *
+ * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
+ * is non-zero and EH wasn't pending before. Fast drain ensures
+ * that EH kicks in in a timely manner.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
+{
+ int cnt;
+
+ /* already scheduled? */
+ if (ap->pflags & ATA_PFLAG_EH_PENDING)
+ return;
+
+ ap->pflags |= ATA_PFLAG_EH_PENDING;
+
+ if (!fastdrain)
+ return;
+
+ /* do we have in-flight qcs? */
+ cnt = ata_eh_nr_in_flight(ap);
+ if (!cnt)
+ return;
+
+ /* activate fast drain */
+ ap->fastdrain_cnt = cnt;
+ ap->fastdrain_timer.expires =
+ ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
+ add_timer(&ap->fastdrain_timer);
+}
+
+/**
+ * ata_qc_schedule_eh - schedule qc for error handling
+ * @qc: command to schedule error handling for
+ *
+ * Schedule error handling for @qc. EH will kick in as soon as
+ * other commands are drained.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ WARN_ON(!ap->ops->error_handler);
+
+ qc->flags |= ATA_QCFLAG_FAILED;
+ ata_eh_set_pending(ap, 1);
+
+ /* The following will fail if timeout has already expired.
+ * ata_scsi_error() takes care of such scmds on EH entry.
+ * Note that ATA_QCFLAG_FAILED is unconditionally set after
+ * this function completes.
+ */
+ blk_abort_request(qc->scsicmd->request);
+}
+
+/**
+ * ata_port_schedule_eh - schedule error handling without a qc
+ * @ap: ATA port to schedule EH for
+ *
+ * Schedule error handling for @ap. EH will kick in as soon as
+ * all commands are drained.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_port_schedule_eh(struct ata_port *ap)
+{
+ WARN_ON(!ap->ops->error_handler);
+
+ if (ap->pflags & ATA_PFLAG_INITIALIZING)
+ return;
+
+ ata_eh_set_pending(ap, 1);
+ scsi_schedule_eh(ap->scsi_host);
+
+ DPRINTK("port EH scheduled\n");
+}
+
+static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
+{
+ int tag, nr_aborted = 0;
+
+ WARN_ON(!ap->ops->error_handler);
+
+ /* we're gonna abort all commands, no need for fast drain */
+ ata_eh_set_pending(ap, 0);
+
+ for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+
+ if (qc && (!link || qc->dev->link == link)) {
+ qc->flags |= ATA_QCFLAG_FAILED;
+ ata_qc_complete(qc);
+ nr_aborted++;
+ }
+ }
+
+ if (!nr_aborted)
+ ata_port_schedule_eh(ap);
+
+ return nr_aborted;
+}
+
+/**
+ * ata_link_abort - abort all qc's on the link
+ * @link: ATA link to abort qc's for
+ *
+ * Abort all active qc's on @link and schedule EH.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Number of aborted qc's.
+ */
+int ata_link_abort(struct ata_link *link)
+{
+ return ata_do_link_abort(link->ap, link);
+}
+
+/**
+ * ata_port_abort - abort all qc's on the port
+ * @ap: ATA port to abort qc's for
+ *
+ * Abort all active qc's of @ap and schedule EH.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Number of aborted qc's.
+ */
+int ata_port_abort(struct ata_port *ap)
+{
+ return ata_do_link_abort(ap, NULL);
+}
+
+/**
+ * __ata_port_freeze - freeze port
+ * @ap: ATA port to freeze
+ *
+ * This function is called when HSM violation or some other
+ * condition disrupts normal operation of the port. Frozen port
+ * is not allowed to perform any operation until the port is
+ * thawed, which usually follows a successful reset.
+ *
+ * ap->ops->freeze() callback can be used for freezing the port
+ * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
+ * port cannot be frozen hardware-wise, the interrupt handler
+ * must ack and clear interrupts unconditionally while the port
+ * is frozen.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static void __ata_port_freeze(struct ata_port *ap)
+{
+ WARN_ON(!ap->ops->error_handler);
+
+ if (ap->ops->freeze)
+ ap->ops->freeze(ap);
+
+ ap->pflags |= ATA_PFLAG_FROZEN;
+
+ DPRINTK("ata%u port frozen\n", ap->print_id);
+}
+
+/**
+ * ata_port_freeze - abort & freeze port
+ * @ap: ATA port to freeze
+ *
+ * Abort and freeze @ap.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Number of aborted commands.
+ */
+int ata_port_freeze(struct ata_port *ap)
+{
+ int nr_aborted;
+
+ WARN_ON(!ap->ops->error_handler);
+
+ nr_aborted = ata_port_abort(ap);
+ __ata_port_freeze(ap);
+
+ return nr_aborted;
+}
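
A sketch of the usual interrupt-handler reaction to a fatal error, combining the description helpers with the freeze; irq_stat, the error class, and the function name are placeholders, and the host lock is assumed held as the LOCKING sections require:

static void my_fatal_irq(struct ata_port *ap, u32 irq_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "fatal irq_stat 0x%08x", irq_stat);
	ehi->err_mask |= AC_ERR_HSM;

	ata_port_freeze(ap);	/* aborts in-flight qcs, then freezes */
}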
+
+/**
+ * sata_async_notification - SATA async notification handler
+ * @ap: ATA port where async notification is received
+ *
+ * Handler to be called when async notification via SDB FIS is
+ * received. This function schedules EH if necessary.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * 1 if EH is scheduled, 0 otherwise.
+ */
+int sata_async_notification(struct ata_port *ap)
+{
+ u32 sntf;
+ int rc;
+
+ if (!(ap->flags & ATA_FLAG_AN))
+ return 0;
+
+ rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+ if (rc == 0)
+ sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
+
+ if (!sata_pmp_attached(ap) || rc) {
+ /* PMP is not attached or SNTF is not available */
+ if (!sata_pmp_attached(ap)) {
+ /* PMP is not attached. Check whether ATAPI
+ * AN is configured. If so, notify media
+ * change.
+ */
+ struct ata_device *dev = ap->link.device;
+
+ if ((dev->class == ATA_DEV_ATAPI) &&
+ (dev->flags & ATA_DFLAG_AN))
+ ata_scsi_media_change_notify(dev);
+ return 0;
+ } else {
+ /* PMP is attached but SNTF is not available.
+ * ATAPI async media change notification is
+ * not used. The PMP must be reporting PHY
+ * status change, schedule EH.
+ */
+ ata_port_schedule_eh(ap);
+ return 1;
+ }
+ } else {
+ /* PMP is attached and SNTF is available */
+ struct ata_link *link;
+
+ /* check and notify ATAPI AN */
+ ata_port_for_each_link(link, ap) {
+ if (!(sntf & (1 << link->pmp)))
+ continue;
+
+ if ((link->device->class == ATA_DEV_ATAPI) &&
+ (link->device->flags & ATA_DFLAG_AN))
+ ata_scsi_media_change_notify(link->device);
+ }
+
+ /* If PMP is reporting that PHY status of some
+ * downstream ports has changed, schedule EH.
+ */
+ if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
+ ata_port_schedule_eh(ap);
+ return 1;
+ }
+
+ return 0;
+ }
+}
+
+/**
+ * ata_eh_freeze_port - EH helper to freeze port
+ * @ap: ATA port to freeze
+ *
+ * Freeze @ap.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_eh_freeze_port(struct ata_port *ap)
+{
+ unsigned long flags;
+
+ if (!ap->ops->error_handler)
+ return;
+
+ spin_lock_irqsave(ap->lock, flags);
+ __ata_port_freeze(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ * ata_eh_thaw_port - EH helper to thaw port
+ * @ap: ATA port to thaw
+ *
+ * Thaw frozen port @ap.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_eh_thaw_port(struct ata_port *ap)
+{
+ unsigned long flags;
+
+ if (!ap->ops->error_handler)
+ return;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ ap->pflags &= ~ATA_PFLAG_FROZEN;
+
+ if (ap->ops->thaw)
+ ap->ops->thaw(ap);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ DPRINTK("ata%u port thawed\n", ap->print_id);
+}
+
+static void ata_eh_scsidone(struct scsi_cmnd *scmd)
+{
+ /* nada */
+}
+
+static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(ap->lock, flags);
+ qc->scsidone = ata_eh_scsidone;
+ __ata_qc_complete(qc);
+ WARN_ON(ata_tag_valid(qc->tag));
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+}
+
+/**
+ * ata_eh_qc_complete - Complete an active ATA command from EH
+ * @qc: Command to complete
+ *
+ * Indicate to the mid and upper layers that an ATA command has
+ * completed. To be used from EH.
+ */
+void ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ scmd->retries = scmd->allowed;
+ __ata_eh_qc_complete(qc);
+}
+
+/**
+ * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
+ * @qc: Command to retry
+ *
+ * Indicate to the mid and upper layers that an ATA command
+ * should be retried. To be used from EH.
+ *
+ * SCSI midlayer limits the number of retries to scmd->allowed.
+ * scmd->retries is decremented for commands which get retried
+ * due to unrelated failures (qc->err_mask is zero).
+ */
+void ata_eh_qc_retry(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ if (!qc->err_mask && scmd->retries)
+ scmd->retries--;
+ __ata_eh_qc_complete(qc);
+}
+
+/**
+ * ata_eh_detach_dev - detach ATA device
+ * @dev: ATA device to detach
+ *
+ * Detach @dev.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_eh_detach_dev(struct ata_device *dev)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &link->eh_context;
+ unsigned long flags;
+
+ ata_dev_disable(dev);
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ dev->flags &= ~ATA_DFLAG_DETACH;
+
+ if (ata_scsi_offline_dev(dev)) {
+ dev->flags |= ATA_DFLAG_DETACHED;
+ ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
+ }
+
+ /* clear per-dev EH info */
+ ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
+ ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
+ ehc->saved_xfer_mode[dev->devno] = 0;
+ ehc->saved_ncq_enabled &= ~(1 << dev->devno);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ * ata_eh_about_to_do - about to perform eh_action
+ * @link: target ATA link
+ * @dev: target ATA dev for per-dev action (can be NULL)
+ * @action: action about to be performed
+ *
+ * Called just before performing EH actions to clear related bits
+ * in @link->eh_info such that eh actions are not unnecessarily
+ * repeated.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
+ unsigned int action)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_eh_info *ehi = &link->eh_info;
+ struct ata_eh_context *ehc = &link->eh_context;
+ unsigned long flags;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ ata_eh_clear_action(link, dev, ehi, action);
+
+ /* About to take EH action, set RECOVERED. Ignore actions on
+ * slave links as master will do them again.
+ */
+ if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
+ ap->pflags |= ATA_PFLAG_RECOVERED;
+
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ * ata_eh_done - EH action complete
+ * @link: target ATA link
+ * @dev: target ATA dev for per-dev action (can be NULL)
+ * @action: action just completed
+ *
+ * Called right after performing EH actions to clear related bits
+ * in @link->eh_context.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_eh_done(struct ata_link *link, struct ata_device *dev,
+ unsigned int action)
+{
+ struct ata_eh_context *ehc = &link->eh_context;
+
+ ata_eh_clear_action(link, dev, &ehc->i, action);
+}
+
+/**
+ * ata_err_string - convert err_mask to descriptive string
+ * @err_mask: error mask to convert to string
+ *
+ * Convert @err_mask to descriptive string. Errors are
+ * prioritized according to severity and only the most severe
+ * error is reported.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Descriptive string for @err_mask
+ */
+static const char *ata_err_string(unsigned int err_mask)
+{
+ if (err_mask & AC_ERR_HOST_BUS)
+ return "host bus error";
+ if (err_mask & AC_ERR_ATA_BUS)
+ return "ATA bus error";
+ if (err_mask & AC_ERR_TIMEOUT)
+ return "timeout";
+ if (err_mask & AC_ERR_HSM)
+ return "HSM violation";
+ if (err_mask & AC_ERR_SYSTEM)
+ return "internal error";
+ if (err_mask & AC_ERR_MEDIA)
+ return "media error";
+ if (err_mask & AC_ERR_INVALID)
+ return "invalid argument";
+ if (err_mask & AC_ERR_DEV)
+ return "device error";
+ return "unknown error";
+}
+
+/**
+ * ata_read_log_page - read a specific log page
+ * @dev: target device
+ * @page: page to read
+ * @buf: buffer to store read page
+ * @sectors: number of sectors to read
+ *
+ * Read log page using READ_LOG_EXT command.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask otherwise.
+ */
+static unsigned int ata_read_log_page(struct ata_device *dev,
+ u8 page, void *buf, unsigned int sectors)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ DPRINTK("read log page - page %d\n", page);
+
+ ata_tf_init(dev, &tf);
+ tf.command = ATA_CMD_READ_LOG_EXT;
+ tf.lbal = page;
+ tf.nsect = sectors;
+ tf.hob_nsect = sectors >> 8;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
+ tf.protocol = ATA_PROT_PIO;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
+ buf, sectors * ATA_SECT_SIZE, 0);
+
+ DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ return err_mask;
+}
+
+/**
+ * ata_eh_read_log_10h - Read log page 10h for NCQ error details
+ * @dev: Device to read log page 10h from
+ * @tag: Resulting tag of the failed command
+ * @tf: Resulting taskfile registers of the failed command
+ *
+ * Read log page 10h to obtain NCQ error details and clear error
+ * condition.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int ata_eh_read_log_10h(struct ata_device *dev,
+ int *tag, struct ata_taskfile *tf)
+{
+ u8 *buf = dev->link->ap->sector_buf;
+ unsigned int err_mask;
+ u8 csum;
+ int i;
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
+ if (err_mask)
+ return -EIO;
+
+ csum = 0;
+ for (i = 0; i < ATA_SECT_SIZE; i++)
+ csum += buf[i];
+ if (csum)
+ ata_dev_printk(dev, KERN_WARNING,
+ "invalid checksum 0x%x on log page 10h\n", csum);
+
+ if (buf[0] & 0x80)
+ return -ENOENT;
+
+ *tag = buf[0] & 0x1f;
+
+ tf->command = buf[2];
+ tf->feature = buf[3];
+ tf->lbal = buf[4];
+ tf->lbam = buf[5];
+ tf->lbah = buf[6];
+ tf->device = buf[7];
+ tf->hob_lbal = buf[8];
+ tf->hob_lbam = buf[9];
+ tf->hob_lbah = buf[10];
+ tf->nsect = buf[12];
+ tf->hob_nsect = buf[13];
+
+ return 0;
+}
+
+/**
+ * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
+ * @dev: target ATAPI device
+ * @r_sense_key: out parameter for sense_key
+ *
+ * Perform ATAPI TEST_UNIT_READY.
+ *
+ * LOCKING:
+ * EH context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+{
+ u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(dev, &tf);
+
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+ tf.protocol = ATAPI_PROT_NODATA;
+
+ err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+ if (err_mask == AC_ERR_DEV)
+ *r_sense_key = tf.feature >> 4;
+ return err_mask;
+}
+
+/**
+ * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
+ * @dev: device to perform REQUEST_SENSE to
+ * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
+ * @dfl_sense_key: default sense key to use
+ *
+ * Perform ATAPI REQUEST_SENSE after the device reported CHECK
+ * SENSE. This function is an EH helper.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask on failure
+ */
+static unsigned int atapi_eh_request_sense(struct ata_device *dev,
+ u8 *sense_buf, u8 dfl_sense_key)
+{
+ u8 cdb[ATAPI_CDB_LEN] =
+ { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
+ struct ata_port *ap = dev->link->ap;
+ struct ata_taskfile tf;
+
+ DPRINTK("ATAPI request sense\n");
+
+ /* FIXME: is this needed? */
+ memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
+
+ /* initialize sense_buf with the error register,
+ * for the case where they are -not- overwritten
+ */
+ sense_buf[0] = 0x70;
+ sense_buf[2] = dfl_sense_key;
+
+ /* some devices time out if garbage left in tf */
+ ata_tf_init(dev, &tf);
+
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+
+ /* is it pointless to prefer PIO for "safety reasons"? */
+ if (ap->flags & ATA_FLAG_PIO_DMA) {
+ tf.protocol = ATAPI_PROT_DMA;
+ tf.feature |= ATAPI_PKT_DMA;
+ } else {
+ tf.protocol = ATAPI_PROT_PIO;
+ tf.lbam = SCSI_SENSE_BUFFERSIZE;
+ tf.lbah = 0;
+ }
+
+ return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
+ sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
+}
+
+/**
+ * ata_eh_analyze_serror - analyze SError for a failed port
+ * @link: ATA link to analyze SError for
+ *
+ * Analyze SError if available and further determine cause of
+ * failure.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_eh_analyze_serror(struct ata_link *link)
+{
+ struct ata_eh_context *ehc = &link->eh_context;
+ u32 serror = ehc->i.serror;
+ unsigned int err_mask = 0, action = 0;
+ u32 hotplug_mask;
+
+ if (serror & (SERR_PERSISTENT | SERR_DATA)) {
+ err_mask |= AC_ERR_ATA_BUS;
+ action |= ATA_EH_RESET;
+ }
+ if (serror & SERR_PROTOCOL) {
+ err_mask |= AC_ERR_HSM;
+ action |= ATA_EH_RESET;
+ }
+ if (serror & SERR_INTERNAL) {
+ err_mask |= AC_ERR_SYSTEM;
+ action |= ATA_EH_RESET;
+ }
+
+ /* Determine whether a hotplug event has occurred. Both
+ * SError.N/X are considered hotplug events for enabled or
+ * host links. For disabled PMP links, only N bit is
+ * considered as X bit is left at 1 for link plugging.
+ */
+ hotplug_mask = 0;
+
+ if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
+ hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
+ else
+ hotplug_mask = SERR_PHYRDY_CHG;
+
+ if (serror & hotplug_mask)
+ ata_ehi_hotplugged(&ehc->i);
+
+ ehc->i.err_mask |= err_mask;
+ ehc->i.action |= action;
+}
+
+/**
+ * ata_eh_analyze_ncq_error - analyze NCQ error
+ * @link: ATA link to analyze NCQ error for
+ *
+ * Read log page 10h, determine the offending qc and acquire
+ * error status TF. For NCQ device errors, all an LLD has to do
+ * is set AC_ERR_DEV in ehi->err_mask. This function takes
+ * care of the rest.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_eh_analyze_ncq_error(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev = link->device;
+ struct ata_queued_cmd *qc;
+ struct ata_taskfile tf;
+ int tag, rc;
+
+ /* if frozen, we can't do much */
+ if (ap->pflags & ATA_PFLAG_FROZEN)
+ return;
+
+ /* is it NCQ device error? */
+ if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
+ return;
+
+ /* has LLDD analyzed already? */
+ for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+ qc = __ata_qc_from_tag(ap, tag);
+
+ if (!(qc->flags & ATA_QCFLAG_FAILED))
+ continue;
+
+ if (qc->err_mask)
+ return;
+ }
+
+ /* okay, this error is ours */
+ rc = ata_eh_read_log_10h(dev, &tag, &tf);
+ if (rc) {
+ ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
+ "(errno=%d)\n", rc);
+ return;
+ }
+
+ if (!(link->sactive & (1 << tag))) {
+ ata_link_printk(link, KERN_ERR, "log page 10h reported "
+ "inactive tag %d\n", tag);
+ return;
+ }
+
+ /* we've got the perpetrator, condemn it */
+ qc = __ata_qc_from_tag(ap, tag);
+ memcpy(&qc->result_tf, &tf, sizeof(tf));
+ qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+ qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+ ehc->i.err_mask &= ~AC_ERR_DEV;
+}
+
+/**
+ * ata_eh_analyze_tf - analyze taskfile of a failed qc
+ * @qc: qc to analyze
+ * @tf: Taskfile registers to analyze
+ *
+ * Analyze taskfile of @qc and further determine cause of
+ * failure. This function also requests ATAPI sense data if
+ * available.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * Determined recovery action
+ */
+static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
+ const struct ata_taskfile *tf)
+{
+ unsigned int tmp, action = 0;
+ u8 stat = tf->command, err = tf->feature;
+
+ if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
+ qc->err_mask |= AC_ERR_HSM;
+ return ATA_EH_RESET;
+ }
+
+ if (stat & (ATA_ERR | ATA_DF))
+ qc->err_mask |= AC_ERR_DEV;
+ else
+ return 0;
+
+ switch (qc->dev->class) {
+ case ATA_DEV_ATA:
+ if (err & ATA_ICRC)
+ qc->err_mask |= AC_ERR_ATA_BUS;
+ if (err & ATA_UNC)
+ qc->err_mask |= AC_ERR_MEDIA;
+ if (err & ATA_IDNF)
+ qc->err_mask |= AC_ERR_INVALID;
+ break;
+
+ case ATA_DEV_ATAPI:
+ if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
+ tmp = atapi_eh_request_sense(qc->dev,
+ qc->scsicmd->sense_buffer,
+ qc->result_tf.feature >> 4);
+ if (!tmp) {
+ /* ATA_QCFLAG_SENSE_VALID is used to
+ * tell atapi_qc_complete() that sense
+ * data is already valid.
+ *
+ * TODO: interpret sense data and set
+ * appropriate err_mask.
+ */
+ qc->flags |= ATA_QCFLAG_SENSE_VALID;
+ } else
+ qc->err_mask |= tmp;
+ }
+ }
+
+ if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
+ action |= ATA_EH_RESET;
+
+ return action;
+}
+
+static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
+ int *xfer_ok)
+{
+ int base = 0;
+
+ if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
+ *xfer_ok = 1;
+
+ if (!*xfer_ok)
+ base = ATA_ECAT_DUBIOUS_NONE;
+
+ if (err_mask & AC_ERR_ATA_BUS)
+ return base + ATA_ECAT_ATA_BUS;
+
+ if (err_mask & AC_ERR_TIMEOUT)
+ return base + ATA_ECAT_TOUT_HSM;
+
+ if (eflags & ATA_EFLAG_IS_IO) {
+ if (err_mask & AC_ERR_HSM)
+ return base + ATA_ECAT_TOUT_HSM;
+ if ((err_mask &
+ (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
+ return base + ATA_ECAT_UNK_DEV;
+ }
+
+ return 0;
+}
+
+struct speed_down_verdict_arg {
+ u64 since;
+ int xfer_ok;
+ int nr_errors[ATA_ECAT_NR];
+};
+
+static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
+{
+ struct speed_down_verdict_arg *arg = void_arg;
+ int cat;
+
+ if (ent->timestamp < arg->since)
+ return -1;
+
+ cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
+ &arg->xfer_ok);
+ arg->nr_errors[cat]++;
+
+ return 0;
+}
+
+/**
+ * ata_eh_speed_down_verdict - Determine speed down verdict
+ * @dev: Device of interest
+ *
+ * This function examines the error ring of @dev and determines
+ * whether NCQ needs to be turned off, the transfer speed should
+ * be stepped down, or a fallback to PIO is necessary.
+ *
+ * ECAT_ATA_BUS : ATA_BUS error for any command
+ *
+ * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
+ * IO commands
+ *
+ * ECAT_UNK_DEV : Unknown DEV error for IO commands
+ *
+ * ECAT_DUBIOUS_* : Identical to the above three, but occurring
+ * before the data transfer has been verified.
+ *
+ * Verdicts are
+ *
+ * NCQ_OFF : Turn off NCQ.
+ *
+ * SPEED_DOWN : Speed down transfer speed but don't fall back
+ * to PIO.
+ *
+ * FALLBACK_TO_PIO : Fall back to PIO.
+ *
+ * Even if multiple verdicts are returned, only one action is
+ * taken per error. An action triggered by non-DUBIOUS errors
+ * clears ering, while one triggered by DUBIOUS_* errors doesn't.
+ * This is to expedite speed down decisions right after device is
+ * initially configured.
+ *
+ * The following are the speed down rules. Rules #1 and #2 deal
+ * with DUBIOUS errors.
+ *
+ * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
+ * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
+ *
+ * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
+ * occurred during last 5 mins, NCQ_OFF.
+ *
+ * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
+ * occurred during last 5 mins, FALLBACK_TO_PIO.
+ *
+ * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
+ * during last 10 mins, NCQ_OFF.
+ *
+ * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
+ * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * OR of ATA_EH_SPDN_* flags.
+ */
+static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
+{
+ const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
+ u64 j64 = get_jiffies_64();
+ struct speed_down_verdict_arg arg;
+ unsigned int verdict = 0;
+
+ /* scan past 5 mins of error history */
+ memset(&arg, 0, sizeof(arg));
+ arg.since = j64 - min(j64, j5mins);
+ ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
+
+ if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
+ arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
+ verdict |= ATA_EH_SPDN_SPEED_DOWN |
+ ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
+
+ if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
+ arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
+ verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
+
+ if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
+ arg.nr_errors[ATA_ECAT_TOUT_HSM] +
+ arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
+ verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
+
+ /* scan past 10 mins of error history */
+ memset(&arg, 0, sizeof(arg));
+ arg.since = j64 - min(j64, j10mins);
+ ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
+
+ if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
+ arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
+ verdict |= ATA_EH_SPDN_NCQ_OFF;
+
+ if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
+ arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
+ arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
+ verdict |= ATA_EH_SPDN_SPEED_DOWN;
+
+ return verdict;
+}
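+
+/* Worked example (hypothetical error history): four UNK_DEV errors in
+ * the last 10 minutes (all older than 5 minutes) trip rule #4 only,
+ * yielding ATA_EH_SPDN_NCQ_OFF; seven such errors also exceed the
+ * "more than 6" test of rule #5 and add ATA_EH_SPDN_SPEED_DOWN.
+ */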
+
+/**
+ * ata_eh_speed_down - record error and speed down if necessary
+ * @dev: Failed device
+ * @eflags: mask of ATA_EFLAG_* flags
+ * @err_mask: err_mask of the error
+ *
+ * Record error and examine error history to determine whether
+ * adjusting transmission speed is necessary. It also sets
+ * transmission limits appropriately if such adjustment is
+ * necessary.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * Determined recovery action.
+ */
+static unsigned int ata_eh_speed_down(struct ata_device *dev,
+ unsigned int eflags, unsigned int err_mask)
+{
+ struct ata_link *link = ata_dev_phys_link(dev);
+ int xfer_ok = 0;
+ unsigned int verdict;
+ unsigned int action = 0;
+
+ /* don't bother if Cat-0 error */
+ if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
+ return 0;
+
+ /* record error and determine whether speed down is necessary */
+ ata_ering_record(&dev->ering, eflags, err_mask);
+ verdict = ata_eh_speed_down_verdict(dev);
+
+ /* turn off NCQ? */
+ if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
+ (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
+ ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
+ dev->flags |= ATA_DFLAG_NCQ_OFF;
+ ata_dev_printk(dev, KERN_WARNING,
+ "NCQ disabled due to excessive errors\n");
+ goto done;
+ }
+
+ /* speed down? */
+ if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
+ /* speed down SATA link speed if possible */
+ if (sata_down_spd_limit(link) == 0) {
+ action |= ATA_EH_RESET;
+ goto done;
+ }
+
+ /* lower transfer mode */
+ if (dev->spdn_cnt < 2) {
+ static const int dma_dnxfer_sel[] =
+ { ATA_DNXFER_DMA, ATA_DNXFER_40C };
+ static const int pio_dnxfer_sel[] =
+ { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
+ int sel;
+
+ if (dev->xfer_shift != ATA_SHIFT_PIO)
+ sel = dma_dnxfer_sel[dev->spdn_cnt];
+ else
+ sel = pio_dnxfer_sel[dev->spdn_cnt];
+
+ dev->spdn_cnt++;
+
+ if (ata_down_xfermask_limit(dev, sel) == 0) {
+ action |= ATA_EH_RESET;
+ goto done;
+ }
+ }
+ }
+
+ /* Fall back to PIO? Slowing down to PIO is meaningless for
+ * SATA ATA devices. Consider it only for PATA and SATA ATAPI
+ * devices.
+ */
+ if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
+ (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
+ (dev->xfer_shift != ATA_SHIFT_PIO)) {
+ if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
+ dev->spdn_cnt = 0;
+ action |= ATA_EH_RESET;
+ goto done;
+ }
+ }
+
+ return 0;
+ done:
+ /* device has been slowed down, blow error history */
+ if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
+ ata_ering_clear(&dev->ering);
+ return action;
+}
+
+/**
+ * ata_eh_link_autopsy - analyze error and determine recovery action
+ * @link: host link to perform autopsy on
+ *
+ * Analyze why @link failed and determine which recovery actions
+ * are needed. This function also sets more detailed AC_ERR_*
+ * values and fills sense data for ATAPI CHECK SENSE.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_eh_link_autopsy(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev;
+ unsigned int all_err_mask = 0, eflags = 0;
+ int tag;
+ u32 serror;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
+ return;
+
+ /* obtain and analyze SError */
+ rc = sata_scr_read(link, SCR_ERROR, &serror);
+ if (rc == 0) {
+ ehc->i.serror |= serror;
+ ata_eh_analyze_serror(link);
+ } else if (rc != -EOPNOTSUPP) {
+ /* SError read failed, force reset and probing */
+ ehc->i.probe_mask |= ATA_ALL_DEVICES;
+ ehc->i.action |= ATA_EH_RESET;
+ ehc->i.err_mask |= AC_ERR_OTHER;
+ }
+
+ /* analyze NCQ failure */
+ ata_eh_analyze_ncq_error(link);
+
+ /* any real error trumps AC_ERR_OTHER */
+ if (ehc->i.err_mask & ~AC_ERR_OTHER)
+ ehc->i.err_mask &= ~AC_ERR_OTHER;
+
+ all_err_mask |= ehc->i.err_mask;
+
+ for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+ struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+
+ if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ ata_dev_phys_link(qc->dev) != link)
+ continue;
+
+ /* inherit upper level err_mask */
+ qc->err_mask |= ehc->i.err_mask;
+
+ /* analyze TF */
+ ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
+
+ /* DEV errors are probably spurious in case of ATA_BUS error */
+ if (qc->err_mask & AC_ERR_ATA_BUS)
+ qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
+ AC_ERR_INVALID);
+
+ /* any real error trumps unknown error */
+ if (qc->err_mask & ~AC_ERR_OTHER)
+ qc->err_mask &= ~AC_ERR_OTHER;
+
+ /* SENSE_VALID trumps dev/unknown error and revalidation */
+ if (qc->flags & ATA_QCFLAG_SENSE_VALID)
+ qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
+
+ /* determine whether the command is worth retrying */
+ if (!(qc->err_mask & AC_ERR_INVALID) &&
+ ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
+ qc->flags |= ATA_QCFLAG_RETRY;
+
+ /* accumulate error info */
+ ehc->i.dev = qc->dev;
+ all_err_mask |= qc->err_mask;
+ if (qc->flags & ATA_QCFLAG_IO)
+ eflags |= ATA_EFLAG_IS_IO;
+ }
+
+ /* enforce default EH actions */
+ if (ap->pflags & ATA_PFLAG_FROZEN ||
+ all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
+ ehc->i.action |= ATA_EH_RESET;
+ else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
+ (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
+ ehc->i.action |= ATA_EH_REVALIDATE;
+
+ /* If we have offending qcs and the associated failed device,
+ * perform per-dev EH action only on the offending device.
+ */
+ if (ehc->i.dev) {
+ ehc->i.dev_action[ehc->i.dev->devno] |=
+ ehc->i.action & ATA_EH_PERDEV_MASK;
+ ehc->i.action &= ~ATA_EH_PERDEV_MASK;
+ }
+
+ /* propagate timeout to host link */
+ if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
+ ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
+
+ /* record error and consider speeding down */
+ dev = ehc->i.dev;
+ if (!dev && ((ata_link_max_devices(link) == 1 &&
+ ata_dev_enabled(link->device))))
+ dev = link->device;
+
+ if (dev) {
+ if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
+ eflags |= ATA_EFLAG_DUBIOUS_XFER;
+ ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
+ }
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_eh_autopsy - analyze error and determine recovery action
+ * @ap: host port to perform autopsy on
+ *
+ * Analyze all links of @ap and determine why they failed and
+ * which recovery actions are needed.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_eh_autopsy(struct ata_port *ap)
+{
+ struct ata_link *link;
+
+ ata_port_for_each_link(link, ap)
+ ata_eh_link_autopsy(link);
+
+ /* Handle the frigging slave link. Autopsy is done similarly
+ * but actions and flags are transferred over to the master
+ * link and handled from there.
+ */
+ if (ap->slave_link) {
+ struct ata_eh_context *mehc = &ap->link.eh_context;
+ struct ata_eh_context *sehc = &ap->slave_link->eh_context;
+
+ /* transfer control flags from master to slave */
+ sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
+
+ /* perform autopsy on the slave link */
+ ata_eh_link_autopsy(ap->slave_link);
+
+ /* transfer actions from slave to master and clear slave */
+ ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+ mehc->i.action |= sehc->i.action;
+ mehc->i.dev_action[1] |= sehc->i.dev_action[1];
+ mehc->i.flags |= sehc->i.flags;
+ ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+ }
+
+ /* Autopsy of fanout ports can affect host link autopsy.
+ * Perform host link autopsy last.
+ */
+ if (sata_pmp_attached(ap))
+ ata_eh_link_autopsy(&ap->link);
+}
+
+/**
+ * ata_eh_link_report - report error handling to user
+ * @link: ATA link EH is going on
+ *
+ * Report EH to user.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_eh_link_report(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &link->eh_context;
+ const char *frozen, *desc;
+ char tries_buf[6];
+ int tag, nr_failed = 0;
+
+ if (ehc->i.flags & ATA_EHI_QUIET)
+ return;
+
+ desc = NULL;
+ if (ehc->i.desc[0] != '\0')
+ desc = ehc->i.desc;
+
+ for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+ struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+
+ if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ ata_dev_phys_link(qc->dev) != link ||
+ ((qc->flags & ATA_QCFLAG_QUIET) &&
+ qc->err_mask == AC_ERR_DEV))
+ continue;
+ if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
+ continue;
+
+ nr_failed++;
+ }
+
+ if (!nr_failed && !ehc->i.err_mask)
+ return;
+
+ frozen = "";
+ if (ap->pflags & ATA_PFLAG_FROZEN)
+ frozen = " frozen";
+
+ memset(tries_buf, 0, sizeof(tries_buf));
+ if (ap->eh_tries < ATA_EH_MAX_TRIES)
+ snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
+ ap->eh_tries);
+
+ if (ehc->i.dev) {
+ ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
+ "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+ ehc->i.err_mask, link->sactive, ehc->i.serror,
+ ehc->i.action, frozen, tries_buf);
+ if (desc)
+ ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
+ } else {
+ ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
+ "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+ ehc->i.err_mask, link->sactive, ehc->i.serror,
+ ehc->i.action, frozen, tries_buf);
+ if (desc)
+ ata_link_printk(link, KERN_ERR, "%s\n", desc);
+ }
+
+ if (ehc->i.serror)
+ ata_link_printk(link, KERN_ERR,
+ "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
+ ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
+ ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
+ ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
+ ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
+ ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
+ ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
+ ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
+ ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
+ ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
+ ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
+ ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
+ ehc->i.serror & SERR_CRC ? "BadCRC " : "",
+ ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
+ ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
+ ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
+ ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
+ ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
+
+ for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+ struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+ struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
+ const u8 *cdb = qc->cdb;
+ char data_buf[20] = "";
+ char cdb_buf[70] = "";
+
+ if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
+ continue;
+
+ if (qc->dma_dir != DMA_NONE) {
+ static const char *dma_str[] = {
+ [DMA_BIDIRECTIONAL] = "bidi",
+ [DMA_TO_DEVICE] = "out",
+ [DMA_FROM_DEVICE] = "in",
+ };
+ static const char *prot_str[] = {
+ [ATA_PROT_PIO] = "pio",
+ [ATA_PROT_DMA] = "dma",
+ [ATA_PROT_NCQ] = "ncq",
+ [ATAPI_PROT_PIO] = "pio",
+ [ATAPI_PROT_DMA] = "dma",
+ };
+
+ snprintf(data_buf, sizeof(data_buf), " %s %u %s",
+ prot_str[qc->tf.protocol], qc->nbytes,
+ dma_str[qc->dma_dir]);
+ }
+
+ if (ata_is_atapi(qc->tf.protocol))
+ snprintf(cdb_buf, sizeof(cdb_buf),
+ "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
+ cdb[0], cdb[1], cdb[2], cdb[3],
+ cdb[4], cdb[5], cdb[6], cdb[7],
+ cdb[8], cdb[9], cdb[10], cdb[11],
+ cdb[12], cdb[13], cdb[14], cdb[15]);
+
+ ata_dev_printk(qc->dev, KERN_ERR,
+ "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
+ "tag %d%s\n %s"
+ "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
+ "Emask 0x%x (%s)%s\n",
+ cmd->command, cmd->feature, cmd->nsect,
+ cmd->lbal, cmd->lbam, cmd->lbah,
+ cmd->hob_feature, cmd->hob_nsect,
+ cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
+ cmd->device, qc->tag, data_buf, cdb_buf,
+ res->command, res->feature, res->nsect,
+ res->lbal, res->lbam, res->lbah,
+ res->hob_feature, res->hob_nsect,
+ res->hob_lbal, res->hob_lbam, res->hob_lbah,
+ res->device, qc->err_mask, ata_err_string(qc->err_mask),
+ qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
+
+ if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
+ ATA_ERR)) {
+ if (res->command & ATA_BUSY)
+ ata_dev_printk(qc->dev, KERN_ERR,
+ "status: { Busy }\n");
+ else
+ ata_dev_printk(qc->dev, KERN_ERR,
+ "status: { %s%s%s%s}\n",
+ res->command & ATA_DRDY ? "DRDY " : "",
+ res->command & ATA_DF ? "DF " : "",
+ res->command & ATA_DRQ ? "DRQ " : "",
+ res->command & ATA_ERR ? "ERR " : "");
+ }
+
+ if (cmd->command != ATA_CMD_PACKET &&
+ (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
+ ATA_ABORTED)))
+ ata_dev_printk(qc->dev, KERN_ERR,
+ "error: { %s%s%s%s}\n",
+ res->feature & ATA_ICRC ? "ICRC " : "",
+ res->feature & ATA_UNC ? "UNC " : "",
+ res->feature & ATA_IDNF ? "IDNF " : "",
+ res->feature & ATA_ABORTED ? "ABRT " : "");
+ }
+}
+
+/**
+ * ata_eh_report - report error handling to user
+ * @ap: ATA port to report EH about
+ *
+ * Report EH to user.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_eh_report(struct ata_port *ap)
+{
+ struct ata_link *link;
+
+ __ata_port_for_each_link(link, ap)
+ ata_eh_link_report(link);
+}
+
+static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
+ unsigned int *classes, unsigned long deadline,
+ bool clear_classes)
+{
+ struct ata_device *dev;
+
+ if (clear_classes)
+ ata_link_for_each_dev(dev, link)
+ classes[dev->devno] = ATA_DEV_UNKNOWN;
+
+ return reset(link, classes, deadline);
+}
+
+static int ata_eh_followup_srst_needed(struct ata_link *link,
+ int rc, const unsigned int *classes)
+{
+ if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
+ return 0;
+ if (rc == -EAGAIN)
+ return 1;
+ if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
+ return 1;
+ return 0;
+}
+
+int ata_eh_reset(struct ata_link *link, int classify,
+ ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+ ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_link *slave = ap->slave_link;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_eh_context *sehc = &slave->eh_context;
+ unsigned int *classes = ehc->classes;
+ unsigned int lflags = link->flags;
+ int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
+ int max_tries = 0, try = 0;
+ struct ata_link *failed_link;
+ struct ata_device *dev;
+ unsigned long deadline, now;
+ ata_reset_fn_t reset;
+ unsigned long flags;
+ u32 sstatus;
+ int nr_unknown, rc;
+
+ /*
+ * Prepare to reset
+ */
+ while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
+ max_tries++;
+ if (link->flags & ATA_LFLAG_NO_HRST)
+ hardreset = NULL;
+ if (link->flags & ATA_LFLAG_NO_SRST)
+ softreset = NULL;
+
+ /* make sure reset attempts are at least COOL_DOWN apart */
+ if (ehc->i.flags & ATA_EHI_DID_RESET) {
+ now = jiffies;
+ WARN_ON(time_after(ehc->last_reset, now));
+ deadline = ata_deadline(ehc->last_reset,
+ ATA_EH_RESET_COOL_DOWN);
+ if (time_before(now, deadline))
+ schedule_timeout_uninterruptible(deadline - now);
+ }
+
+ spin_lock_irqsave(ap->lock, flags);
+ ap->pflags |= ATA_PFLAG_RESETTING;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
+
+ ata_link_for_each_dev(dev, link) {
+ /* If we issue an SRST then an ATA drive (not ATAPI)
+ * may change configuration and be in PIO0 timing. If
+ * we do a hard reset (or are coming from power on)
+ * this is true for ATA or ATAPI. Until we've set a
+ * suitable controller mode we should not touch the
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+ * touch the DMA setup as that will be dealt with when
+ * configuring devices.
+ */
+ if (ap->ops->set_piomode)
+ ap->ops->set_piomode(ap, dev);
+ }
+
+ /* prefer hardreset */
+ reset = NULL;
+ ehc->i.action &= ~ATA_EH_RESET;
+ if (hardreset) {
+ reset = hardreset;
+ ehc->i.action |= ATA_EH_HARDRESET;
+ } else if (softreset) {
+ reset = softreset;
+ ehc->i.action |= ATA_EH_SOFTRESET;
+ }
+
+ if (prereset) {
+ unsigned long deadline = ata_deadline(jiffies,
+ ATA_EH_PRERESET_TIMEOUT);
+
+ if (slave) {
+ sehc->i.action &= ~ATA_EH_RESET;
+ sehc->i.action |= ehc->i.action;
+ }
+
+ rc = prereset(link, deadline);
+
+ /* If present, do prereset on slave link too. Reset
+ * is skipped iff both master and slave links report
+ * -ENOENT or clear ATA_EH_RESET.
+ */
+ if (slave && (rc == 0 || rc == -ENOENT)) {
+ int tmp;
+
+ tmp = prereset(slave, deadline);
+ if (tmp != -ENOENT)
+ rc = tmp;
+
+ ehc->i.action |= sehc->i.action;
+ }
+
+ if (rc) {
+ if (rc == -ENOENT) {
+ ata_link_printk(link, KERN_DEBUG,
+ "port disabled. ignoring.\n");
+ ehc->i.action &= ~ATA_EH_RESET;
+
+ ata_link_for_each_dev(dev, link)
+ classes[dev->devno] = ATA_DEV_NONE;
+
+ rc = 0;
+ } else
+ ata_link_printk(link, KERN_ERR,
+ "prereset failed (errno=%d)\n", rc);
+ goto out;
+ }
+
+ /* prereset() might have cleared ATA_EH_RESET. If so,
+ * bang classes, thaw and return.
+ */
+ if (reset && !(ehc->i.action & ATA_EH_RESET)) {
+ ata_link_for_each_dev(dev, link)
+ classes[dev->devno] = ATA_DEV_NONE;
+ if ((ap->pflags & ATA_PFLAG_FROZEN) &&
+ ata_is_host_link(link))
+ ata_eh_thaw_port(ap);
+ rc = 0;
+ goto out;
+ }
+ }
+
+ retry:
+ /*
+ * Perform reset
+ */
+ if (ata_is_host_link(link))
+ ata_eh_freeze_port(ap);
+
+ deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
+
+ if (reset) {
+ if (verbose)
+ ata_link_printk(link, KERN_INFO, "%s resetting link\n",
+ reset == softreset ? "soft" : "hard");
+
+ /* mark that this EH session started with reset */
+ ehc->last_reset = jiffies;
+ if (reset == hardreset)
+ ehc->i.flags |= ATA_EHI_DID_HARDRESET;
+ else
+ ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
+
+ rc = ata_do_reset(link, reset, classes, deadline, true);
+ if (rc && rc != -EAGAIN) {
+ failed_link = link;
+ goto fail;
+ }
+
+ /* hardreset slave link if existent */
+ if (slave && reset == hardreset) {
+ int tmp;
+
+ if (verbose)
+ ata_link_printk(slave, KERN_INFO,
+ "hard resetting link\n");
+
+ ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
+ tmp = ata_do_reset(slave, reset, classes, deadline,
+ false);
+ switch (tmp) {
+ case -EAGAIN:
+ rc = -EAGAIN;
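+ /* fall through */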
+ case 0:
+ break;
+ default:
+ failed_link = slave;
+ rc = tmp;
+ goto fail;
+ }
+ }
+
+ /* perform follow-up SRST if necessary */
+ if (reset == hardreset &&
+ ata_eh_followup_srst_needed(link, rc, classes)) {
+ reset = softreset;
+
+ if (!reset) {
+ ata_link_printk(link, KERN_ERR,
+ "follow-up softreset required "
+ "but no softreset avaliable\n");
+ failed_link = link;
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
+ rc = ata_do_reset(link, reset, classes, deadline, true);
+ }
+ } else {
+ if (verbose)
+ ata_link_printk(link, KERN_INFO, "no reset method "
+ "available, skipping reset\n");
+ if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
+ lflags |= ATA_LFLAG_ASSUME_ATA;
+ }
+
+ /*
+ * Post-reset processing
+ */
+ ata_link_for_each_dev(dev, link) {
+ /* After the reset, the device state is PIO 0 and the
+ * controller state is undefined. Reset also wakes up
+ * drives from sleeping mode.
+ */
+ dev->pio_mode = XFER_PIO_0;
+ dev->flags &= ~ATA_DFLAG_SLEEPING;
+
+ if (!ata_phys_link_offline(ata_dev_phys_link(dev))) {
+ /* apply class override */
+ if (lflags & ATA_LFLAG_ASSUME_ATA)
+ classes[dev->devno] = ATA_DEV_ATA;
+ else if (lflags & ATA_LFLAG_ASSUME_SEMB)
+ classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
+ } else
+ classes[dev->devno] = ATA_DEV_NONE;
+ }
+
+ /* record current link speed */
+ if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
+ link->sata_spd = (sstatus >> 4) & 0xf;
+ if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
+ slave->sata_spd = (sstatus >> 4) & 0xf;
+
+ /* thaw the port */
+ if (ata_is_host_link(link))
+ ata_eh_thaw_port(ap);
+
+ /* postreset() should clear hardware SError. Although SError
+ * is cleared during link resume, clearing SError here is
+ * necessary as some PHYs raise hotplug events after SRST.
+ * This introduces a race condition where hotplug can occur
+ * between the reset and this point; the race is mitigated by
+ * cross-checking link onlineness against the classification
+ * result later.
+ */
+ if (postreset) {
+ postreset(link, classes);
+ if (slave)
+ postreset(slave, classes);
+ }
+
+ /* clear cached SError */
+ spin_lock_irqsave(link->ap->lock, flags);
+ link->eh_info.serror = 0;
+ if (slave)
+ slave->eh_info.serror = 0;
+ spin_unlock_irqrestore(link->ap->lock, flags);
+
+ /* Make sure onlineness and the classification result correspond.
+ * Hotplug could have happened during the reset, and some
+ * controllers fail to wait while a drive is spinning up after
+ * being hotplugged, causing misdetection. By cross-checking
+ * link onlineness against the classification result, those
+ * conditions can be reliably detected and retried.
+ */
+ nr_unknown = 0;
+ ata_link_for_each_dev(dev, link) {
+ /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
+ if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
+ classes[dev->devno] = ATA_DEV_NONE;
+ if (ata_phys_link_online(ata_dev_phys_link(dev)))
+ nr_unknown++;
+ }
+ }
+
+ if (classify && nr_unknown) {
+ if (try < max_tries) {
+ ata_link_printk(link, KERN_WARNING, "link online but "
+ "device misclassified, retrying\n");
+ failed_link = link;
+ rc = -EAGAIN;
+ goto fail;
+ }
+ ata_link_printk(link, KERN_WARNING,
+ "link online but device misclassified, "
+ "device detection might fail\n");
+ }
+
+ /* reset successful, schedule revalidation */
+ ata_eh_done(link, NULL, ATA_EH_RESET);
+ if (slave)
+ ata_eh_done(slave, NULL, ATA_EH_RESET);
+ ehc->last_reset = jiffies; /* update to completion time */
+ ehc->i.action |= ATA_EH_REVALIDATE;
+
+ rc = 0;
+ out:
+ /* clear hotplug flag */
+ ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
+ if (slave)
+ sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
+
+ spin_lock_irqsave(ap->lock, flags);
+ ap->pflags &= ~ATA_PFLAG_RESETTING;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+
+ fail:
+ /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
+ if (!ata_is_host_link(link) &&
+ sata_scr_read(link, SCR_STATUS, &sstatus))
+ rc = -ERESTART;
+
+ if (rc == -ERESTART || try >= max_tries)
+ goto out;
+
+ now = jiffies;
+ if (time_before(now, deadline)) {
+ unsigned long delta = deadline - now;
+
+ ata_link_printk(failed_link, KERN_WARNING,
+ "reset failed (errno=%d), retrying in %u secs\n",
+ rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
+
+ while (delta)
+ delta = schedule_timeout_uninterruptible(delta);
+ }
+
+ if (try == max_tries - 1) {
+ sata_down_spd_limit(link);
+ if (slave)
+ sata_down_spd_limit(slave);
+ } else if (rc == -EPIPE)
+ sata_down_spd_limit(failed_link);
+
+ if (hardreset)
+ reset = hardreset;
+ goto retry;
+}
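+
+/* Typical invocation (usage sketch only; the real call sites are in
+ * ata_eh_recover() below and in the PMP EH): classify when vacant
+ * device slots exist and pass the standard reset methods.
+ *
+ *	rc = ata_eh_reset(link, ata_link_nr_vacant(link),
+ *			  ata_std_prereset, NULL, sata_std_hardreset,
+ *			  ata_std_postreset);
+ */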
+
+static inline void ata_eh_pull_park_action(struct ata_port *ap)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+ unsigned long flags;
+
+ /*
+ * This function can be thought of as an extended version of
+ * ata_eh_about_to_do() specially crafted to accommodate the
+ * requirements of ATA_EH_PARK handling. Since the EH thread
+ * does not leave the do {} while () loop in ata_eh_recover as
+ * long as the timeout for a park request to *one* device on
+ * the port has not expired, and since we still want to pick
+ * up park requests to other devices on the same port or
+ * timeout updates for the same device, we have to pull
+ * ATA_EH_PARK actions from eh_info into eh_context.i
+ * ourselves at the beginning of each pass over the loop.
+ *
+ * Additionally, all write accesses to &ap->park_req_pending
+ * through INIT_COMPLETION() (see below) or complete_all()
+ * (see ata_scsi_park_store()) are protected by the host lock.
+ * As a result we have that park_req_pending.done is zero on
+ * exit from this function, i.e. when ATA_EH_PARK actions for
+ * *all* devices on port ap have been pulled into the
+ * respective eh_context structs. If, and only if,
+ * park_req_pending.done is non-zero by the time we reach
+ * wait_for_completion_timeout(), another ATA_EH_PARK action
+ * has been scheduled for at least one of the devices on port
+ * ap and we have to cycle over the do {} while () loop in
+ * ata_eh_recover() again.
+ */
+
+ spin_lock_irqsave(ap->lock, flags);
+ INIT_COMPLETION(ap->park_req_pending);
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ struct ata_eh_info *ehi = &link->eh_info;
+
+ link->eh_context.i.dev_action[dev->devno] |=
+ ehi->dev_action[dev->devno] & ATA_EH_PARK;
+ ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
+ }
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+}
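+
+/* Consumer side of this handshake (see the do {} while (deadline) loop
+ * in ata_eh_recover() below); a minimal sketch:
+ *
+ *	ata_eh_pull_park_action(ap);	(re-arms park_req_pending)
+ *	... issue or refresh head-unload commands ...
+ *	if (wait_for_completion_timeout(&ap->park_req_pending, timeout))
+ *		another ATA_EH_PARK request arrived - loop again
+ */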
+
+static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(dev, &tf);
+ if (park) {
+ ehc->unloaded_mask |= 1 << dev->devno;
+ tf.command = ATA_CMD_IDLEIMMEDIATE;
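+ /* IDLE IMMEDIATE with UNLOAD FEATURE per the ATA spec:
+ * feature 0x44 plus LBA 0x554E4C requests a head unload; the
+ * device is expected to echo 0xc4 in LBA low, which is
+ * checked below.
+ */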
+ tf.feature = 0x44;
+ tf.lbal = 0x4c;
+ tf.lbam = 0x4e;
+ tf.lbah = 0x55;
+ } else {
+ ehc->unloaded_mask &= ~(1 << dev->devno);
+ tf.command = ATA_CMD_CHK_POWER;
+ }
+
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol |= ATA_PROT_NODATA;
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (park && (err_mask || tf.lbal != 0xc4)) {
+ ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
+ ehc->unloaded_mask &= ~(1 << dev->devno);
+ }
+}
+
+static int ata_eh_revalidate_and_attach(struct ata_link *link,
+ struct ata_device **r_failed_dev)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev;
+ unsigned int new_mask = 0;
+ unsigned long flags;
+ int rc = 0;
+
+ DPRINTK("ENTER\n");
+
+ /* For PATA drive side cable detection to work, IDENTIFY must
+ * be done backwards such that PDIAG- is released by the slave
+ * device before the master device is identified.
+ */
+ ata_link_for_each_dev_reverse(dev, link) {
+ unsigned int action = ata_eh_dev_action(dev);
+ unsigned int readid_flags = 0;
+
+ if (ehc->i.flags & ATA_EHI_DID_RESET)
+ readid_flags |= ATA_READID_POSTRESET;
+
+ if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
+ WARN_ON(dev->class == ATA_DEV_PMP);
+
+ if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
+ rc = -EIO;
+ goto err;
+ }
+
+ ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
+ rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
+ readid_flags);
+ if (rc)
+ goto err;
+
+ ata_eh_done(link, dev, ATA_EH_REVALIDATE);
+
+ /* Configuration may have changed, reconfigure
+ * transfer mode.
+ */
+ ehc->i.flags |= ATA_EHI_SETMODE;
+
+ /* schedule the scsi_rescan_device() here */
+ queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
+ } else if (dev->class == ATA_DEV_UNKNOWN &&
+ ehc->tries[dev->devno] &&
+ ata_class_enabled(ehc->classes[dev->devno])) {
+ dev->class = ehc->classes[dev->devno];
+
+ if (dev->class == ATA_DEV_PMP)
+ rc = sata_pmp_attach(dev);
+ else
+ rc = ata_dev_read_id(dev, &dev->class,
+ readid_flags, dev->id);
+ switch (rc) {
+ case 0:
+ new_mask |= 1 << dev->devno;
+ break;
+ case -ENOENT:
+ /* IDENTIFY was issued to non-existent
+ * device. No need to reset. Just
+ * thaw and kill the device.
+ */
+ ata_eh_thaw_port(ap);
+ dev->class = ATA_DEV_UNKNOWN;
+ break;
+ default:
+ dev->class = ATA_DEV_UNKNOWN;
+ goto err;
+ }
+ }
+ }
+
+ /* PDIAG- should have been released, ask cable type if post-reset */
+ if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
+ if (ap->ops->cable_detect)
+ ap->cbl = ap->ops->cable_detect(ap);
+ ata_force_cbl(ap);
+ }
+
+ /* Configure new devices forward such that user doesn't see
+ * device detection messages backwards.
+ */
+ ata_link_for_each_dev(dev, link) {
+ if (!(new_mask & (1 << dev->devno)) ||
+ dev->class == ATA_DEV_PMP)
+ continue;
+
+ ehc->i.flags |= ATA_EHI_PRINTINFO;
+ rc = ata_dev_configure(dev);
+ ehc->i.flags &= ~ATA_EHI_PRINTINFO;
+ if (rc)
+ goto err;
+
+ spin_lock_irqsave(ap->lock, flags);
+ ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* new device discovered, configure xfermode */
+ ehc->i.flags |= ATA_EHI_SETMODE;
+ }
+
+ return 0;
+
+ err:
+ *r_failed_dev = dev;
+ DPRINTK("EXIT rc=%d\n", rc);
+ return rc;
+}
+
+/**
+ * ata_set_mode - Program timings and issue SET FEATURES - XFER
+ * @link: link on which timings will be programmed
+ * @r_failed_dev: out parameter for failed device
+ *
+ * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
+ * ata_set_mode() fails, pointer to the failing device is
+ * returned in @r_failed_dev.
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * 0 on success, negative errno otherwise
+ */
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_device *dev;
+ int rc;
+
+ /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
+ ata_link_for_each_dev(dev, link) {
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
+ struct ata_ering_entry *ent;
+
+ ent = ata_ering_top(&dev->ering);
+ if (ent)
+ ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
+ }
+ }
+
+ /* has private set_mode? */
+ if (ap->ops->set_mode)
+ rc = ap->ops->set_mode(link, r_failed_dev);
+ else
+ rc = ata_do_set_mode(link, r_failed_dev);
+
+ /* if transfer mode has changed, set DUBIOUS_XFER on device */
+ ata_link_for_each_dev(dev, link) {
+ struct ata_eh_context *ehc = &link->eh_context;
+ u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
+ u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
+
+ if (!ata_dev_enabled(dev))
+ continue;
+
+ if (dev->xfer_mode != saved_xfer_mode ||
+ ata_ncq_enabled(dev) != saved_ncq)
+ dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
+ }
+
+ return rc;
+}
+
+/**
+ * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
+ * @dev: ATAPI device to clear UA for
+ *
+ * Resets and other operations can make an ATAPI device raise
+ * UNIT ATTENTION which causes the next operation to fail. This
+ * function clears UA.
+ *
+ * LOCKING:
+ * EH context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int atapi_eh_clear_ua(struct ata_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ATA_EH_UA_TRIES; i++) {
+ u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
+ u8 sense_key = 0;
+ unsigned int err_mask;
+
+ err_mask = atapi_eh_tur(dev, &sense_key);
+ if (err_mask != 0 && err_mask != AC_ERR_DEV) {
+ ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
+ "failed (err_mask=0x%x)\n", err_mask);
+ return -EIO;
+ }
+
+ if (!err_mask || sense_key != UNIT_ATTENTION)
+ return 0;
+
+ err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_WARNING, "failed to clear "
+ "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
+ return -EIO;
+ }
+ }
+
+ ata_dev_printk(dev, KERN_WARNING,
+ "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
+
+ return 0;
+}
+
+static int ata_link_nr_enabled(struct ata_link *link)
+{
+ struct ata_device *dev;
+ int cnt = 0;
+
+ ata_link_for_each_dev(dev, link)
+ if (ata_dev_enabled(dev))
+ cnt++;
+ return cnt;
+}
+
+static int ata_link_nr_vacant(struct ata_link *link)
+{
+ struct ata_device *dev;
+ int cnt = 0;
+
+ ata_link_for_each_dev(dev, link)
+ if (dev->class == ATA_DEV_UNKNOWN)
+ cnt++;
+ return cnt;
+}
+
+static int ata_eh_skip_recovery(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev;
+
+ /* skip disabled links */
+ if (link->flags & ATA_LFLAG_DISABLED)
+ return 1;
+
+ /* thaw frozen port and recover failed devices */
+ if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
+ return 0;
+
+ /* reset at least once if reset is requested */
+ if ((ehc->i.action & ATA_EH_RESET) &&
+ !(ehc->i.flags & ATA_EHI_DID_RESET))
+ return 0;
+
+ /* skip if class codes for all vacant slots are ATA_DEV_NONE */
+ ata_link_for_each_dev(dev, link) {
+ if (dev->class == ATA_DEV_UNKNOWN &&
+ ehc->classes[dev->devno] != ATA_DEV_NONE)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ata_eh_schedule_probe(struct ata_device *dev)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+
+ if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
+ (ehc->did_probe_mask & (1 << dev->devno)))
+ return 0;
+
+ ata_eh_detach_dev(dev);
+ ata_dev_init(dev);
+ ehc->did_probe_mask |= (1 << dev->devno);
+ ehc->i.action |= ATA_EH_RESET;
+ ehc->saved_xfer_mode[dev->devno] = 0;
+ ehc->saved_ncq_enabled &= ~(1 << dev->devno);
+
+ return 1;
+}
+
+static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+
+ ehc->tries[dev->devno]--;
+
+ switch (err) {
+ case -ENODEV:
+ /* device missing or wrong IDENTIFY data, schedule probing */
+ ehc->i.probe_mask |= (1 << dev->devno);
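+ /* fall through */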
+ case -EINVAL:
+ /* give it just one more chance */
+ ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
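+ /* fall through */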
+ case -EIO:
+ if (ehc->tries[dev->devno] == 1) {
+ /* This is the last chance, better to slow
+ * down than lose it.
+ */
+ sata_down_spd_limit(ata_dev_phys_link(dev));
+ if (dev->pio_mode > XFER_PIO_0)
+ ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+ }
+ }
+
+ if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
+ /* disable device if it has used up all its chances */
+ ata_dev_disable(dev);
+
+ /* detach if offline */
+ if (ata_phys_link_offline(ata_dev_phys_link(dev)))
+ ata_eh_detach_dev(dev);
+
+ /* schedule probe if necessary */
+ if (ata_eh_schedule_probe(dev)) {
+ ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+ memset(ehc->cmd_timeout_idx[dev->devno], 0,
+ sizeof(ehc->cmd_timeout_idx[dev->devno]));
+ }
+
+ return 1;
+ } else {
+ ehc->i.action |= ATA_EH_RESET;
+ return 0;
+ }
+}
+
+/**
+ * ata_eh_recover - recover host port after error
+ * @ap: host port to recover
+ * @prereset: prereset method (can be NULL)
+ * @softreset: softreset method (can be NULL)
+ * @hardreset: hardreset method (can be NULL)
+ * @postreset: postreset method (can be NULL)
+ * @r_failed_link: out parameter for failed link
+ *
+ * This is the alpha and omega, yin and yang, heart and soul of
+ * libata exception handling. On entry, actions required to
+ * recover each link and hotplug requests are recorded in the
+ * link's eh_context. This function executes all the operations
+ * with appropriate retries and fallbacks to resurrect failed
+ * devices, detach goners and greet newcomers.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+ ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+ ata_postreset_fn_t postreset,
+ struct ata_link **r_failed_link)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+ int nr_failed_devs;
+ int rc;
+ unsigned long flags, deadline;
+
+ DPRINTK("ENTER\n");
+
+ /* prep for recovery */
+ ata_port_for_each_link(link, ap) {
+ struct ata_eh_context *ehc = &link->eh_context;
+
+ /* re-enable link? */
+ if (ehc->i.action & ATA_EH_ENABLE_LINK) {
+ ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
+ spin_lock_irqsave(ap->lock, flags);
+ link->flags &= ~ATA_LFLAG_DISABLED;
+ spin_unlock_irqrestore(ap->lock, flags);
+ ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
+ }
+
+ ata_link_for_each_dev(dev, link) {
+ if (link->flags & ATA_LFLAG_NO_RETRY)
+ ehc->tries[dev->devno] = 1;
+ else
+ ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+
+ /* collect port action mask recorded in dev actions */
+ ehc->i.action |= ehc->i.dev_action[dev->devno] &
+ ~ATA_EH_PERDEV_MASK;
+ ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
+
+ /* process hotplug request */
+ if (dev->flags & ATA_DFLAG_DETACH)
+ ata_eh_detach_dev(dev);
+
+ /* schedule probe if necessary */
+ if (!ata_dev_enabled(dev))
+ ata_eh_schedule_probe(dev);
+ }
+ }
+
+ retry:
+ rc = 0;
+ nr_failed_devs = 0;
+
+ /* if UNLOADING, finish immediately */
+ if (ap->pflags & ATA_PFLAG_UNLOADING)
+ goto out;
+
+ /* prep for EH */
+ ata_port_for_each_link(link, ap) {
+ struct ata_eh_context *ehc = &link->eh_context;
+
+ /* skip EH if possible. */
+ if (ata_eh_skip_recovery(link))
+ ehc->i.action = 0;
+
+ ata_link_for_each_dev(dev, link)
+ ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
+ }
+
+ /* reset */
+ ata_port_for_each_link(link, ap) {
+ struct ata_eh_context *ehc = &link->eh_context;
+
+ if (!(ehc->i.action & ATA_EH_RESET))
+ continue;
+
+ rc = ata_eh_reset(link, ata_link_nr_vacant(link),
+ prereset, softreset, hardreset, postreset);
+ if (rc) {
+ ata_link_printk(link, KERN_ERR,
+ "reset failed, giving up\n");
+ goto out;
+ }
+ }
+
+ do {
+ unsigned long now;
+
+ /*
+ * clears ATA_EH_PARK in eh_info and resets
+ * ap->park_req_pending
+ */
+ ata_eh_pull_park_action(ap);
+
+ deadline = jiffies;
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ struct ata_eh_context *ehc = &link->eh_context;
+ unsigned long tmp;
+
+ if (dev->class != ATA_DEV_ATA)
+ continue;
+ if (!(ehc->i.dev_action[dev->devno] &
+ ATA_EH_PARK))
+ continue;
+ tmp = dev->unpark_deadline;
+ if (time_before(deadline, tmp))
+ deadline = tmp;
+ else if (time_before_eq(tmp, jiffies))
+ continue;
+ if (ehc->unloaded_mask & (1 << dev->devno))
+ continue;
+
+ ata_eh_park_issue_cmd(dev, 1);
+ }
+ }
+
+ now = jiffies;
+ if (time_before_eq(deadline, now))
+ break;
+
+ deadline = wait_for_completion_timeout(&ap->park_req_pending,
+ deadline - now);
+ } while (deadline);
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ if (!(link->eh_context.unloaded_mask &
+ (1 << dev->devno)))
+ continue;
+
+ ata_eh_park_issue_cmd(dev, 0);
+ ata_eh_done(link, dev, ATA_EH_PARK);
+ }
+ }
+
+ /* the rest */
+ ata_port_for_each_link(link, ap) {
+ struct ata_eh_context *ehc = &link->eh_context;
+
+ /* revalidate existing devices and attach new ones */
+ rc = ata_eh_revalidate_and_attach(link, &dev);
+ if (rc)
+ goto dev_fail;
+
+ /* if PMP got attached, return, pmp EH will take care of it */
+ if (link->device->class == ATA_DEV_PMP) {
+ ehc->i.action = 0;
+ return 0;
+ }
+
+ /* configure transfer mode if necessary */
+ if (ehc->i.flags & ATA_EHI_SETMODE) {
+ rc = ata_set_mode(link, &dev);
+ if (rc)
+ goto dev_fail;
+ ehc->i.flags &= ~ATA_EHI_SETMODE;
+ }
+
+ /* If reset has been issued, clear UA to avoid
+ * disrupting the current users of the device.
+ */
+ if (ehc->i.flags & ATA_EHI_DID_RESET) {
+ ata_link_for_each_dev(dev, link) {
+ if (dev->class != ATA_DEV_ATAPI)
+ continue;
+ rc = atapi_eh_clear_ua(dev);
+ if (rc)
+ goto dev_fail;
+ }
+ }
+
+ /* configure link power saving */
+ if (ehc->i.action & ATA_EH_LPM)
+ ata_link_for_each_dev(dev, link)
+ ata_dev_enable_pm(dev, ap->pm_policy);
+
+ /* this link is okay now */
+ ehc->i.flags = 0;
+ continue;
+
+dev_fail:
+ nr_failed_devs++;
+ ata_eh_handle_dev_fail(dev, rc);
+
+ if (ap->pflags & ATA_PFLAG_FROZEN) {
+ /* PMP reset requires working host port.
+ * Can't retry if it's frozen.
+ */
+ if (sata_pmp_attached(ap))
+ goto out;
+ break;
+ }
+ }
+
+ if (nr_failed_devs)
+ goto retry;
+
+ out:
+ if (rc && r_failed_link)
+ *r_failed_link = link;
+
+ DPRINTK("EXIT, rc=%d\n", rc);
+ return rc;
+}
+
+/**
+ * ata_eh_finish - finish up EH
+ * @ap: host port to finish EH for
+ *
+ * Recovery is complete. Clean up EH states and retry or finish
+ * failed qcs.
+ *
+ * LOCKING:
+ * None.
+ */
+void ata_eh_finish(struct ata_port *ap)
+{
+ int tag;
+
+ /* retry or finish qcs */
+ for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+ struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+
+ if (!(qc->flags & ATA_QCFLAG_FAILED))
+ continue;
+
+ if (qc->err_mask) {
+ /* FIXME: Once EH migration is complete,
+ * generate sense data in this function,
+ * considering both err_mask and tf.
+ */
+ if (qc->flags & ATA_QCFLAG_RETRY)
+ ata_eh_qc_retry(qc);
+ else
+ ata_eh_qc_complete(qc);
+ } else {
+ if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+ ata_eh_qc_complete(qc);
+ } else {
+ /* feed zero TF to sense generation */
+ memset(&qc->result_tf, 0, sizeof(qc->result_tf));
+ ata_eh_qc_retry(qc);
+ }
+ }
+ }
+
+ /* make sure nr_active_links is zero after EH */
+ WARN_ON(ap->nr_active_links);
+ ap->nr_active_links = 0;
+}
+
+/**
+ * ata_do_eh - do standard error handling
+ * @ap: host port to handle error for
+ *
+ * @prereset: prereset method (can be NULL)
+ * @softreset: softreset method (can be NULL)
+ * @hardreset: hardreset method (can be NULL)
+ * @postreset: postreset method (can be NULL)
+ *
+ * Perform standard error handling sequence.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
+ ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+ ata_postreset_fn_t postreset)
+{
+ struct ata_device *dev;
+ int rc;
+
+ ata_eh_autopsy(ap);
+ ata_eh_report(ap);
+
+ rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
+ NULL);
+ if (rc) {
+ ata_link_for_each_dev(dev, &ap->link)
+ ata_dev_disable(dev);
+ }
+
+ ata_eh_finish(ap);
+}
+
+/**
+ * ata_std_error_handler - standard error handler
+ * @ap: host port to handle error for
+ *
+ * Standard error handler
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_std_error_handler(struct ata_port *ap)
+{
+ struct ata_port_operations *ops = ap->ops;
+ ata_reset_fn_t hardreset = ops->hardreset;
+
+ /* ignore built-in hardreset if SCR access is not available */
+ if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+ hardreset = NULL;
+
+ ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
+}
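+
+/* Wiring sketch (names illustrative, not from this patch): most
+ * drivers simply point .error_handler at the standard handler.
+ *
+ *	static struct ata_port_operations demo_port_ops = {
+ *		.inherits	= &sata_port_ops,
+ *		.error_handler	= ata_std_error_handler,
+ *	};
+ */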
+
+#ifdef CONFIG_PM
+/**
+ * ata_eh_handle_port_suspend - perform port suspend operation
+ * @ap: port to suspend
+ *
+ * Suspend @ap.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_eh_handle_port_suspend(struct ata_port *ap)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ /* are we suspending? */
+ spin_lock_irqsave(ap->lock, flags);
+ if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
+ ap->pm_mesg.event == PM_EVENT_ON) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+
+ /* tell ACPI we're suspending */
+ rc = ata_acpi_on_suspend(ap);
+ if (rc)
+ goto out;
+
+ /* suspend */
+ ata_eh_freeze_port(ap);
+
+ if (ap->ops->port_suspend)
+ rc = ap->ops->port_suspend(ap, ap->pm_mesg);
+
+ ata_acpi_set_state(ap, PMSG_SUSPEND);
+ out:
+ /* report result */
+ spin_lock_irqsave(ap->lock, flags);
+
+ ap->pflags &= ~ATA_PFLAG_PM_PENDING;
+ if (rc == 0)
+ ap->pflags |= ATA_PFLAG_SUSPENDED;
+ else if (ap->pflags & ATA_PFLAG_FROZEN)
+ ata_port_schedule_eh(ap);
+
+ if (ap->pm_result) {
+ *ap->pm_result = rc;
+ ap->pm_result = NULL;
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return;
+}
+
+/**
+ * ata_eh_handle_port_resume - perform port resume operation
+ * @ap: port to resume
+ *
+ * Resume @ap.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_eh_handle_port_resume(struct ata_port *ap)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ /* are we resuming? */
+ spin_lock_irqsave(ap->lock, flags);
+ if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
+ ap->pm_mesg.event != PM_EVENT_ON) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
+
+ ata_acpi_set_state(ap, PMSG_ON);
+
+ if (ap->ops->port_resume)
+ rc = ap->ops->port_resume(ap);
+
+ /* tell ACPI that we're resuming */
+ ata_acpi_on_resume(ap);
+
+ /* report result */
+ spin_lock_irqsave(ap->lock, flags);
+ ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
+ if (ap->pm_result) {
+ *ap->pm_result = rc;
+ ap->pm_result = NULL;
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
new file mode 100644
index 0000000..b65db30
--- /dev/null
+++ b/drivers/ata/libata-pmp.c
@@ -0,0 +1,1020 @@
+/*
+ * libata-pmp.c - libata port multiplier support
+ *
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include "libata.h"
+
+const struct ata_port_operations sata_pmp_port_ops = {
+ .inherits = &sata_port_ops,
+ .pmp_prereset = ata_std_prereset,
+ .pmp_hardreset = sata_std_hardreset,
+ .pmp_postreset = ata_std_postreset,
+ .error_handler = sata_pmp_error_handler,
+};
+
+/**
+ * sata_pmp_read - read PMP register
+ * @link: link to read PMP register for
+ * @reg: register to read
+ * @r_val: resulting value
+ *
+ * Read PMP register.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_device *pmp_dev = ap->link.device;
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(pmp_dev, &tf);
+ tf.command = ATA_CMD_PMP_READ;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+ tf.feature = reg;
+ tf.device = link->pmp;
+
+ err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
+ SATA_PMP_RW_TIMEOUT);
+ if (err_mask)
+ return err_mask;
+
+ *r_val = tf.nsect | tf.lbal << 8 | tf.lbam << 16 | tf.lbah << 24;
+ return 0;
+}
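+
+/* Usage sketch (hypothetical caller): fetch the PMP revision GSCR the
+ * same way sata_pmp_read_gscr() below does for its whole register set.
+ *
+ *	u32 rev;
+ *	if (sata_pmp_read(link, SATA_PMP_GSCR_REV, &rev) == 0)
+ *		rev now holds the 32-bit register value
+ */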
+
+/**
+ * sata_pmp_write - write PMP register
+ * @link: link to write PMP register for
+ * @reg: register to write
+ * @val: value to write
+ *
+ * Write PMP register.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_device *pmp_dev = ap->link.device;
+ struct ata_taskfile tf;
+
+ ata_tf_init(pmp_dev, &tf);
+ tf.command = ATA_CMD_PMP_WRITE;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+ tf.feature = reg;
+ tf.device = link->pmp;
+ tf.nsect = val & 0xff;
+ tf.lbal = (val >> 8) & 0xff;
+ tf.lbam = (val >> 16) & 0xff;
+ tf.lbah = (val >> 24) & 0xff;
+
+ return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
+ SATA_PMP_RW_TIMEOUT);
+}
+
+/**
+ * sata_pmp_qc_defer_cmd_switch - qc_defer for command switching PMP
+ * @qc: ATA command in question
+ *
+ * A host which has command switching PMP support cannot issue
+ * commands to multiple links simultaneously.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * ATA_DEFER_* if deferring is needed, 0 otherwise.
+ */
+int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc)
+{
+ struct ata_link *link = qc->dev->link;
+ struct ata_port *ap = link->ap;
+
+ if (ap->excl_link == NULL || ap->excl_link == link) {
+ if (ap->nr_active_links == 0 || ata_link_active(link)) {
+ qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+ return ata_std_qc_defer(qc);
+ }
+
+ ap->excl_link = link;
+ }
+
+ return ATA_DEFER_PORT;
+}
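+
+/* Worked example: with commands still active on fan-out link A, a qc
+ * bound for link B records ap->excl_link = B and returns
+ * ATA_DEFER_PORT. When link A drains (nr_active_links == 0), the
+ * retried qc is admitted, and ATA_QCFLAG_CLEAR_EXCL releases the
+ * exclusive claim on completion.
+ */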
+
+/**
+ * sata_pmp_scr_read - read PSCR
+ * @link: ATA link to read PSCR for
+ * @reg: PSCR to read
+ * @r_val: resulting value
+ *
+ * Read PSCR @reg into @r_val for @link, to be called from
+ * ata_scr_read().
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *r_val)
+{
+ unsigned int err_mask;
+
+ if (reg > SATA_PMP_PSCR_CONTROL)
+ return -EINVAL;
+
+ err_mask = sata_pmp_read(link, reg, r_val);
+ if (err_mask) {
+ ata_link_printk(link, KERN_WARNING, "failed to read SCR %d "
+ "(Emask=0x%x)\n", reg, err_mask);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * sata_pmp_scr_write - write PSCR
+ * @link: ATA link to write PSCR for
+ * @reg: PSCR to write
+ * @val: value to be written
+ *
+ * Write @val to PSCR @reg for @link, to be called from
+ * ata_scr_write() and ata_scr_write_flush().
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
+{
+ unsigned int err_mask;
+
+ if (reg > SATA_PMP_PSCR_CONTROL)
+ return -EINVAL;
+
+ err_mask = sata_pmp_write(link, reg, val);
+ if (err_mask) {
+ ata_link_printk(link, KERN_WARNING, "failed to write SCR %d "
+ "(Emask=0x%x)\n", reg, err_mask);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * sata_pmp_read_gscr - read GSCR block of SATA PMP
+ * @dev: PMP device
+ * @gscr: buffer to read GSCR block into
+ *
+ * Read selected PMP GSCRs from the PMP at @dev. This will serve
+ * as configuration and identification info for the PMP.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int sata_pmp_read_gscr(struct ata_device *dev, u32 *gscr)
+{
+ static const int gscr_to_read[] = { 0, 1, 2, 32, 33, 64, 96 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gscr_to_read); i++) {
+ int reg = gscr_to_read[i];
+ unsigned int err_mask;
+
+ err_mask = sata_pmp_read(dev->link, reg, &gscr[reg]);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to read PMP "
+ "GSCR[%d] (Emask=0x%x)\n", reg, err_mask);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static const char *sata_pmp_spec_rev_str(const u32 *gscr)
+{
+ u32 rev = gscr[SATA_PMP_GSCR_REV];
+
+ if (rev & (1 << 2))
+ return "1.1";
+ if (rev & (1 << 1))
+ return "1.0";
+ return "<unknown>";
+}
+
+static int sata_pmp_configure(struct ata_device *dev, int print_info)
+{
+ struct ata_port *ap = dev->link->ap;
+ u32 *gscr = dev->gscr;
+ unsigned int err_mask = 0;
+ const char *reason;
+ int nr_ports, rc;
+
+ nr_ports = sata_pmp_gscr_ports(gscr);
+
+ if (nr_ports <= 0 || nr_ports > SATA_PMP_MAX_PORTS) {
+ rc = -EINVAL;
+ reason = "invalid nr_ports";
+ goto fail;
+ }
+
+ if ((ap->flags & ATA_FLAG_AN) &&
+ (gscr[SATA_PMP_GSCR_FEAT] & SATA_PMP_FEAT_NOTIFY))
+ dev->flags |= ATA_DFLAG_AN;
+
+ /* monitor SERR_PHYRDY_CHG on fan-out ports */
+ err_mask = sata_pmp_write(dev->link, SATA_PMP_GSCR_ERROR_EN,
+ SERR_PHYRDY_CHG);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "failed to write GSCR_ERROR_EN";
+ goto fail;
+ }
+
+ if (print_info) {
+ ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
+ "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
+ sata_pmp_spec_rev_str(gscr),
+ sata_pmp_gscr_vendor(gscr),
+ sata_pmp_gscr_devid(gscr),
+ sata_pmp_gscr_rev(gscr),
+ nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
+ gscr[SATA_PMP_GSCR_FEAT]);
+
+ if (!(dev->flags & ATA_DFLAG_AN))
+ ata_dev_printk(dev, KERN_INFO,
+ "Asynchronous notification not supported, "
+ "hotplug won't\n work on fan-out "
+ "ports. Use warm-plug instead.\n");
+ }
+
+ return 0;
+
+ fail:
+ ata_dev_printk(dev, KERN_ERR,
+ "failed to configure Port Multiplier (%s, Emask=0x%x)\n",
+ reason, err_mask);
+ return rc;
+}
+
+static int sata_pmp_init_links(struct ata_port *ap, int nr_ports)
+{
+ struct ata_link *pmp_link = ap->pmp_link;
+ int i;
+
+ if (!pmp_link) {
+ pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS,
+ GFP_NOIO);
+ if (!pmp_link)
+ return -ENOMEM;
+
+ for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+ ata_link_init(ap, &pmp_link[i], i);
+
+ ap->pmp_link = pmp_link;
+ }
+
+ for (i = 0; i < nr_ports; i++) {
+ struct ata_link *link = &pmp_link[i];
+ struct ata_eh_context *ehc = &link->eh_context;
+
+ link->flags = 0;
+ ehc->i.probe_mask |= ATA_ALL_DEVICES;
+ ehc->i.action |= ATA_EH_RESET;
+ }
+
+ return 0;
+}
+
+static void sata_pmp_quirks(struct ata_port *ap)
+{
+ u32 *gscr = ap->link.device->gscr;
+ u16 vendor = sata_pmp_gscr_vendor(gscr);
+ u16 devid = sata_pmp_gscr_devid(gscr);
+ struct ata_link *link;
+
+ if (vendor == 0x1095 && devid == 0x3726) {
+ /* sil3726 quirks */
+ ata_port_for_each_link(link, ap) {
+ /* Class code report is unreliable and SRST
+ * times out under certain configurations.
+ */
+ if (link->pmp < 5)
+ link->flags |= ATA_LFLAG_NO_SRST |
+ ATA_LFLAG_ASSUME_ATA;
+
+ /* port 5 is for SEMB device and it doesn't like SRST */
+ if (link->pmp == 5)
+ link->flags |= ATA_LFLAG_NO_SRST |
+ ATA_LFLAG_ASSUME_SEMB;
+ }
+ } else if (vendor == 0x1095 && devid == 0x4723) {
+ /* sil4723 quirks */
+ ata_port_for_each_link(link, ap) {
+ /* class code report is unreliable */
+ if (link->pmp < 2)
+ link->flags |= ATA_LFLAG_ASSUME_ATA;
+
+ /* the config device at port 2 locks up on SRST */
+ if (link->pmp == 2)
+ link->flags |= ATA_LFLAG_NO_SRST |
+ ATA_LFLAG_ASSUME_ATA;
+ }
+ } else if (vendor == 0x1095 && devid == 0x4726) {
+ /* sil4726 quirks */
+ ata_port_for_each_link(link, ap) {
+ /* Class code report is unreliable and SRST
+ * times out under certain configurations.
+ * Config device can be at port 0 or 5 and
+ * locks up on SRST.
+ */
+ if (link->pmp <= 5)
+ link->flags |= ATA_LFLAG_NO_SRST |
+ ATA_LFLAG_ASSUME_ATA;
+
+ /* Port 6 is for SEMB device which doesn't
+ * like SRST either.
+ */
+ if (link->pmp == 6)
+ link->flags |= ATA_LFLAG_NO_SRST |
+ ATA_LFLAG_ASSUME_SEMB;
+ }
+ } else if (vendor == 0x1095 && (devid == 0x5723 || devid == 0x5733 ||
+ devid == 0x5734 || devid == 0x5744)) {
+ /* sil5723/5744 quirks */
+
+ /* sil5723/5744 has either two or three downstream
+ * ports depending on operation mode. The last port
+ * is empty when an actual IO device is attached and
+ * is occupied by a pseudo configuration device
+ * otherwise. Don't try hard to recover it.
+ */
+ ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
+ }
+}
+
+/**
+ * sata_pmp_attach - attach a SATA PMP device
+ * @dev: SATA PMP device to attach
+ *
+ * Configure and attach SATA PMP device @dev. This function is
+ * also responsible for allocating and initializing PMP links.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int sata_pmp_attach(struct ata_device *dev)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ unsigned long flags;
+ struct ata_link *tlink;
+ int rc;
+
+ /* is it hanging off the right place? */
+ if (!sata_pmp_supported(ap)) {
+ ata_dev_printk(dev, KERN_ERR,
+ "host does not support Port Multiplier\n");
+ return -EINVAL;
+ }
+
+ if (!ata_is_host_link(link)) {
+ ata_dev_printk(dev, KERN_ERR,
+ "Port Multipliers cannot be nested\n");
+ return -EINVAL;
+ }
+
+ if (dev->devno) {
+ ata_dev_printk(dev, KERN_ERR,
+ "Port Multiplier must be the first device\n");
+ return -EINVAL;
+ }
+
+ WARN_ON(link->pmp != 0);
+ link->pmp = SATA_PMP_CTRL_PORT;
+
+ /* read GSCR block */
+ rc = sata_pmp_read_gscr(dev, dev->gscr);
+ if (rc)
+ goto fail;
+
+ /* config PMP */
+ rc = sata_pmp_configure(dev, 1);
+ if (rc)
+ goto fail;
+
+ rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr));
+ if (rc) {
+ ata_dev_printk(dev, KERN_INFO,
+ "failed to initialize PMP links\n");
+ goto fail;
+ }
+
+ /* attach it */
+ spin_lock_irqsave(ap->lock, flags);
+ WARN_ON(ap->nr_pmp_links);
+ ap->nr_pmp_links = sata_pmp_gscr_ports(dev->gscr);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ sata_pmp_quirks(ap);
+
+ if (ap->ops->pmp_attach)
+ ap->ops->pmp_attach(ap);
+
+ ata_port_for_each_link(tlink, ap)
+ sata_link_init_spd(tlink);
+
+ ata_acpi_associate_sata_port(ap);
+
+ return 0;
+
+ fail:
+ link->pmp = 0;
+ return rc;
+}
+
+/**
+ * sata_pmp_detach - detach a SATA PMP device
+ * @dev: SATA PMP device to detach
+ *
+ * Detach SATA PMP device @dev. This function is also
+ * responsible for deconfiguring PMP links.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void sata_pmp_detach(struct ata_device *dev)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ata_link *tlink;
+ unsigned long flags;
+
+ ata_dev_printk(dev, KERN_INFO, "Port Multiplier detaching\n");
+
+ WARN_ON(!ata_is_host_link(link) || dev->devno ||
+ link->pmp != SATA_PMP_CTRL_PORT);
+
+ if (ap->ops->pmp_detach)
+ ap->ops->pmp_detach(ap);
+
+ ata_port_for_each_link(tlink, ap)
+ ata_eh_detach_dev(tlink->device);
+
+ spin_lock_irqsave(ap->lock, flags);
+ ap->nr_pmp_links = 0;
+ link->pmp = 0;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ ata_acpi_associate_sata_port(ap);
+}
+
+/**
+ *	sata_pmp_same_pmp - does the new GSCR match the configured PMP?
+ * @dev: PMP device to compare against
+ * @new_gscr: GSCR block of the new device
+ *
+ * Compare @new_gscr against @dev and determine whether @dev is
+ * the PMP described by @new_gscr.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * 1 if @dev matches @new_gscr, 0 otherwise.
+ */
+static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
+{
+ const u32 *old_gscr = dev->gscr;
+ u16 old_vendor, new_vendor, old_devid, new_devid;
+ int old_nr_ports, new_nr_ports;
+
+ old_vendor = sata_pmp_gscr_vendor(old_gscr);
+ new_vendor = sata_pmp_gscr_vendor(new_gscr);
+ old_devid = sata_pmp_gscr_devid(old_gscr);
+ new_devid = sata_pmp_gscr_devid(new_gscr);
+ old_nr_ports = sata_pmp_gscr_ports(old_gscr);
+ new_nr_ports = sata_pmp_gscr_ports(new_gscr);
+
+ if (old_vendor != new_vendor) {
+ ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
+ "vendor mismatch '0x%x' != '0x%x'\n",
+ old_vendor, new_vendor);
+ return 0;
+ }
+
+ if (old_devid != new_devid) {
+ ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
+ "device ID mismatch '0x%x' != '0x%x'\n",
+ old_devid, new_devid);
+ return 0;
+ }
+
+ if (old_nr_ports != new_nr_ports) {
+ ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
+ "nr_ports mismatch '0x%x' != '0x%x'\n",
+ old_nr_ports, new_nr_ports);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * sata_pmp_revalidate - revalidate SATA PMP
+ * @dev: PMP device to revalidate
+ * @new_class: new class code
+ *
+ * Re-read GSCR block and make sure @dev is still attached to the
+ * port and properly configured.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ u32 *gscr = (void *)ap->sector_buf;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
+
+ if (!ata_dev_enabled(dev)) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ /* wrong class? */
+ if (ata_class_enabled(new_class) && new_class != ATA_DEV_PMP) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ /* read GSCR */
+ rc = sata_pmp_read_gscr(dev, gscr);
+ if (rc)
+ goto fail;
+
+ /* is the pmp still there? */
+ if (!sata_pmp_same_pmp(dev, gscr)) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ memcpy(dev->gscr, gscr, sizeof(gscr[0]) * SATA_PMP_GSCR_DWORDS);
+
+ rc = sata_pmp_configure(dev, 0);
+ if (rc)
+ goto fail;
+
+ ata_eh_done(link, NULL, ATA_EH_REVALIDATE);
+
+ DPRINTK("EXIT, rc=0\n");
+ return 0;
+
+ fail:
+ ata_dev_printk(dev, KERN_ERR,
+ "PMP revalidation failed (errno=%d)\n", rc);
+ DPRINTK("EXIT, rc=%d\n", rc);
+ return rc;
+}
+
+/**
+ * sata_pmp_revalidate_quick - revalidate SATA PMP quickly
+ * @dev: PMP device to revalidate
+ *
+ * Make sure the attached PMP is accessible.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int sata_pmp_revalidate_quick(struct ata_device *dev)
+{
+ unsigned int err_mask;
+ u32 prod_id;
+
+ err_mask = sata_pmp_read(dev->link, SATA_PMP_GSCR_PROD_ID, &prod_id);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to read PMP product ID "
+ "(Emask=0x%x)\n", err_mask);
+ return -EIO;
+ }
+
+ if (prod_id != dev->gscr[SATA_PMP_GSCR_PROD_ID]) {
+ ata_dev_printk(dev, KERN_ERR, "PMP product ID mismatch\n");
+ /* something weird is going on, request full PMP recovery */
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * sata_pmp_eh_recover_pmp - recover PMP
+ * @ap: ATA port PMP is attached to
+ * @prereset: prereset method (can be NULL)
+ * @softreset: softreset method
+ * @hardreset: hardreset method
+ * @postreset: postreset method (can be NULL)
+ *
+ * Recover PMP attached to @ap. Recovery procedure is somewhat
+ * similar to that of ata_eh_recover() except that reset should
+ * always be performed in hard->soft sequence and recovery
+ * failure results in PMP detachment.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
+ ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+ ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+{
+ struct ata_link *link = &ap->link;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev = link->device;
+ int tries = ATA_EH_PMP_TRIES;
+ int detach = 0, rc = 0;
+ int reval_failed = 0;
+
+ DPRINTK("ENTER\n");
+
+ if (dev->flags & ATA_DFLAG_DETACH) {
+ detach = 1;
+ goto fail;
+ }
+
+ retry:
+ ehc->classes[0] = ATA_DEV_UNKNOWN;
+
+ if (ehc->i.action & ATA_EH_RESET) {
+ struct ata_link *tlink;
+
+ /* reset */
+ rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
+ postreset);
+ if (rc) {
+ ata_link_printk(link, KERN_ERR,
+ "failed to reset PMP, giving up\n");
+ goto fail;
+ }
+
+ /* PMP is reset, SErrors cannot be trusted, scan all */
+ ata_port_for_each_link(tlink, ap) {
+ struct ata_eh_context *ehc = &tlink->eh_context;
+
+ ehc->i.probe_mask |= ATA_ALL_DEVICES;
+ ehc->i.action |= ATA_EH_RESET;
+ }
+ }
+
+ /* If revalidation is requested, revalidate and reconfigure;
+ * otherwise, do quick revalidation.
+ */
+ if (ehc->i.action & ATA_EH_REVALIDATE)
+ rc = sata_pmp_revalidate(dev, ehc->classes[0]);
+ else
+ rc = sata_pmp_revalidate_quick(dev);
+
+ if (rc) {
+ tries--;
+
+ if (rc == -ENODEV) {
+ ehc->i.probe_mask |= ATA_ALL_DEVICES;
+ detach = 1;
+ /* give it just two more chances */
+ tries = min(tries, 2);
+ }
+
+ if (tries) {
+ /* consecutive revalidation failures? speed down */
+ if (reval_failed)
+ sata_down_spd_limit(link);
+ else
+ reval_failed = 1;
+
+ ehc->i.action |= ATA_EH_RESET;
+ goto retry;
+ } else {
+ ata_dev_printk(dev, KERN_ERR, "failed to recover PMP "
+ "after %d tries, giving up\n",
+ ATA_EH_PMP_TRIES);
+ goto fail;
+ }
+ }
+
+ /* okay, PMP resurrected */
+ ehc->i.flags = 0;
+
+ DPRINTK("EXIT, rc=0\n");
+ return 0;
+
+ fail:
+ sata_pmp_detach(dev);
+ if (detach)
+ ata_eh_detach_dev(dev);
+ else
+ ata_dev_disable(dev);
+
+ DPRINTK("EXIT, rc=%d\n", rc);
+ return rc;
+}
+
+static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
+{
+ struct ata_link *link;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ ata_port_for_each_link(link, ap) {
+ if (!(link->flags & ATA_LFLAG_DISABLED))
+ continue;
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+		/* Some PMPs require a hardreset sequence to get
+ * SError.N working.
+ */
+ sata_link_hardreset(link, sata_deb_timing_normal,
+ ata_deadline(jiffies, ATA_TMOUT_INTERNAL_QUICK),
+ NULL, NULL);
+
+ /* unconditionally clear SError.N */
+ rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
+ if (rc) {
+ ata_link_printk(link, KERN_ERR, "failed to clear "
+ "SError.N (errno=%d)\n", rc);
+ return rc;
+ }
+
+ spin_lock_irqsave(ap->lock, flags);
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return 0;
+}
+
+static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
+{
+ struct ata_port *ap = link->ap;
+ unsigned long flags;
+
+ if (link_tries[link->pmp] && --link_tries[link->pmp])
+ return 1;
+
+ /* disable this link */
+ if (!(link->flags & ATA_LFLAG_DISABLED)) {
+ ata_link_printk(link, KERN_WARNING,
+ "failed to recover link after %d tries, disabling\n",
+ ATA_EH_PMP_LINK_TRIES);
+
+ spin_lock_irqsave(ap->lock, flags);
+ link->flags |= ATA_LFLAG_DISABLED;
+ spin_unlock_irqrestore(ap->lock, flags);
+ }
+
+ ata_dev_disable(link->device);
+ link->eh_context.i.action = 0;
+
+ return 0;
+}
+
+/**
+ * sata_pmp_eh_recover - recover PMP-enabled port
+ * @ap: ATA port to recover
+ *
+ * Drive EH recovery operation for PMP enabled port @ap. This
+ * function recovers host and PMP ports with proper retries and
+ * fallbacks. Actual recovery operations are performed using
+ * ata_eh_recover() and sata_pmp_eh_recover_pmp().
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int sata_pmp_eh_recover(struct ata_port *ap)
+{
+ struct ata_port_operations *ops = ap->ops;
+ int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
+ struct ata_link *pmp_link = &ap->link;
+ struct ata_device *pmp_dev = pmp_link->device;
+ struct ata_eh_context *pmp_ehc = &pmp_link->eh_context;
+ u32 *gscr = pmp_dev->gscr;
+ struct ata_link *link;
+ struct ata_device *dev;
+ unsigned int err_mask;
+ u32 gscr_error, sntf;
+ int cnt, rc;
+
+ pmp_tries = ATA_EH_PMP_TRIES;
+ ata_port_for_each_link(link, ap)
+ link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
+
+ retry:
+ /* PMP attached? */
+ if (!sata_pmp_attached(ap)) {
+ rc = ata_eh_recover(ap, ops->prereset, ops->softreset,
+ ops->hardreset, ops->postreset, NULL);
+ if (rc) {
+ ata_link_for_each_dev(dev, &ap->link)
+ ata_dev_disable(dev);
+ return rc;
+ }
+
+ if (pmp_dev->class != ATA_DEV_PMP)
+ return 0;
+
+ /* new PMP online */
+ ata_port_for_each_link(link, ap)
+ link_tries[link->pmp] = ATA_EH_PMP_LINK_TRIES;
+
+ /* fall through */
+ }
+
+ /* recover pmp */
+ rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset,
+ ops->hardreset, ops->postreset);
+ if (rc)
+ goto pmp_fail;
+
+ /* PHY event notification can disturb reset and other recovery
+ * operations. Turn it off.
+ */
+ if (gscr[SATA_PMP_GSCR_FEAT_EN] & SATA_PMP_FEAT_NOTIFY) {
+ gscr[SATA_PMP_GSCR_FEAT_EN] &= ~SATA_PMP_FEAT_NOTIFY;
+
+ err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
+ gscr[SATA_PMP_GSCR_FEAT_EN]);
+ if (err_mask) {
+ ata_link_printk(pmp_link, KERN_WARNING,
+ "failed to disable NOTIFY (err_mask=0x%x)\n",
+ err_mask);
+ goto pmp_fail;
+ }
+ }
+
+ /* handle disabled links */
+ rc = sata_pmp_eh_handle_disabled_links(ap);
+ if (rc)
+ goto pmp_fail;
+
+ /* recover links */
+ rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset,
+ ops->pmp_hardreset, ops->pmp_postreset, &link);
+ if (rc)
+ goto link_fail;
+
+ /* Connection status might have changed while resetting other
+ * links, check SATA_PMP_GSCR_ERROR before returning.
+ */
+
+ /* clear SNotification */
+ rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+ if (rc == 0)
+ sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
+
+ /* enable notification */
+ if (pmp_dev->flags & ATA_DFLAG_AN) {
+ gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY;
+
+ err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
+ gscr[SATA_PMP_GSCR_FEAT_EN]);
+ if (err_mask) {
+ ata_dev_printk(pmp_dev, KERN_ERR, "failed to write "
+ "PMP_FEAT_EN (Emask=0x%x)\n", err_mask);
+ rc = -EIO;
+ goto pmp_fail;
+ }
+ }
+
+ /* check GSCR_ERROR */
+ err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error);
+ if (err_mask) {
+ ata_dev_printk(pmp_dev, KERN_ERR, "failed to read "
+ "PMP_GSCR_ERROR (Emask=0x%x)\n", err_mask);
+ rc = -EIO;
+ goto pmp_fail;
+ }
+
+ cnt = 0;
+ ata_port_for_each_link(link, ap) {
+ if (!(gscr_error & (1 << link->pmp)))
+ continue;
+
+ if (sata_pmp_handle_link_fail(link, link_tries)) {
+ ata_ehi_hotplugged(&link->eh_context.i);
+ cnt++;
+ } else {
+ ata_link_printk(link, KERN_WARNING,
+ "PHY status changed but maxed out on retries, "
+ "giving up\n");
+ ata_link_printk(link, KERN_WARNING,
+				"Manually issue scan to resume this link\n");
+ }
+ }
+
+ if (cnt) {
+ ata_port_printk(ap, KERN_INFO, "PMP SError.N set for some "
+ "ports, repeating recovery\n");
+ goto retry;
+ }
+
+ return 0;
+
+ link_fail:
+ if (sata_pmp_handle_link_fail(link, link_tries)) {
+ pmp_ehc->i.action |= ATA_EH_RESET;
+ goto retry;
+ }
+
+ /* fall through */
+ pmp_fail:
+ /* Control always ends up here after detaching PMP. Shut up
+ * and return if we're unloading.
+ */
+ if (ap->pflags & ATA_PFLAG_UNLOADING)
+ return rc;
+
+ if (!sata_pmp_attached(ap))
+ goto retry;
+
+ if (--pmp_tries) {
+ pmp_ehc->i.action |= ATA_EH_RESET;
+ goto retry;
+ }
+
+ ata_port_printk(ap, KERN_ERR,
+ "failed to recover PMP after %d tries, giving up\n",
+ ATA_EH_PMP_TRIES);
+ sata_pmp_detach(pmp_dev);
+ ata_dev_disable(pmp_dev);
+
+ return rc;
+}
+
+/**
+ * sata_pmp_error_handler - do standard error handling for PMP-enabled host
+ * @ap: host port to handle error for
+ *
+ * Perform standard error handling sequence for PMP-enabled host
+ * @ap.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void sata_pmp_error_handler(struct ata_port *ap)
+{
+ ata_eh_autopsy(ap);
+ ata_eh_report(ap);
+ sata_pmp_eh_recover(ap);
+ ata_eh_finish(ap);
+}
+
+EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
+EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
+EXPORT_SYMBOL_GPL(sata_pmp_error_handler);
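+
+/*
+ * Editorial usage sketch (not part of the original file): a PMP-aware
+ * low-level driver is expected to inherit the exported
+ * sata_pmp_port_ops, which wires ->error_handler to
+ * sata_pmp_error_handler above, roughly:
+ *
+ *	static struct ata_port_operations foo_ops = {
+ *		.inherits	= &sata_pmp_port_ops,
+ *	};
+ *
+ * "foo_ops" is a placeholder name.
+ */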
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
new file mode 100644
index 0000000..47c7afc
--- /dev/null
+++ b/drivers/ata/libata-scsi.c
@@ -0,0 +1,3716 @@
+/*
+ * libata-scsi.c - helper library for ATA
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from
+ * - http://www.t10.org/
+ * - http://www.t13.org/
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport.h>
+#include <linux/libata.h>
+#include <linux/hdreg.h>
+#include <linux/uaccess.h>
+
+#include "libata.h"
+
+#define SECTOR_SIZE 512
+#define ATA_SCSI_RBUF_SIZE 4096
+
+static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
+static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
+
+typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
+
+static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
+ const struct scsi_device *scsidev);
+static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
+ const struct scsi_device *scsidev);
+static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
+ unsigned int id, unsigned int lun);
+
+
+#define RW_RECOVERY_MPAGE 0x1
+#define RW_RECOVERY_MPAGE_LEN 12
+#define CACHE_MPAGE 0x8
+#define CACHE_MPAGE_LEN 20
+#define CONTROL_MPAGE 0xa
+#define CONTROL_MPAGE_LEN 12
+#define ALL_MPAGES 0x3f
+#define ALL_SUB_MPAGES 0xff
+
+
+static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
+ RW_RECOVERY_MPAGE,
+ RW_RECOVERY_MPAGE_LEN - 2,
+ (1 << 7), /* AWRE */
+ 0, /* read retry count */
+ 0, 0, 0, 0,
+ 0, /* write retry count */
+ 0, 0, 0
+};
+
+static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
+ CACHE_MPAGE,
+ CACHE_MPAGE_LEN - 2,
+ 0, /* contains WCE, needs to be 0 for logic */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, /* contains DRA, needs to be 0 for logic */
+ 0, 0, 0, 0, 0, 0, 0
+};
+
+static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
+ CONTROL_MPAGE,
+ CONTROL_MPAGE_LEN - 2,
+ 2, /* DSENSE=0, GLTSD=1 */
+ 0, /* [QAM+QERR may be 1, see 05-359r1] */
+ 0, 0, 0, 0, 0xff, 0xff,
+ 0, 30 /* extended self test time, see 05-359r1 */
+};
+
+/*
+ * libata transport template. libata doesn't do real transport stuff.
+ * It just needs the eh_timed_out hook.
+ */
+static struct scsi_transport_template ata_scsi_transport_template = {
+ .eh_strategy_handler = ata_scsi_error,
+ .eh_timed_out = ata_scsi_timed_out,
+ .user_scan = ata_scsi_user_scan,
+};
+
+
+static const struct {
+ enum link_pm value;
+ const char *name;
+} link_pm_policy[] = {
+ { NOT_AVAILABLE, "max_performance" },
+ { MIN_POWER, "min_power" },
+ { MAX_PERFORMANCE, "max_performance" },
+ { MEDIUM_POWER, "medium_power" },
+};
+
+static const char *ata_scsi_lpm_get(enum link_pm policy)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
+ if (link_pm_policy[i].value == policy)
+ return link_pm_policy[i].name;
+
+ return NULL;
+}
+
+static ssize_t ata_scsi_lpm_put(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ enum link_pm policy = 0;
+ int i;
+
+ /*
+ * we are skipping array location 0 on purpose - this
+ * is because a value of NOT_AVAILABLE is displayed
+ * to the user as max_performance, but when the user
+ * writes "max_performance", they actually want the
+ * value to match MAX_PERFORMANCE.
+ */
+ for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
+ const int len = strlen(link_pm_policy[i].name);
+ if (strncmp(link_pm_policy[i].name, buf, len) == 0 &&
+ buf[len] == '\n') {
+ policy = link_pm_policy[i].value;
+ break;
+ }
+ }
+ if (!policy)
+ return -EINVAL;
+
+ ata_lpm_schedule(ap, policy);
+ return count;
+}
+
+static ssize_t
+ata_scsi_lpm_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ const char *policy =
+ ata_scsi_lpm_get(ap->pm_policy);
+
+ if (!policy)
+ return -EINVAL;
+
+ return snprintf(buf, 23, "%s\n", policy);
+}
+DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
+ ata_scsi_lpm_show, ata_scsi_lpm_put);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
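+
+/*
+ * Editorial usage sketch for the attribute above; "host0" is a
+ * placeholder for whichever SCSI host registered it:
+ *
+ *	# cat /sys/class/scsi_host/host0/link_power_management_policy
+ *	max_performance
+ *	# echo min_power > /sys/class/scsi_host/host0/link_power_management_policy
+ */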
+
+static ssize_t ata_scsi_park_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_link *link;
+ struct ata_device *dev;
+ unsigned long flags, now;
+ unsigned int uninitialized_var(msecs);
+ int rc = 0;
+
+ ap = ata_shost_to_port(sdev->host);
+
+ spin_lock_irqsave(ap->lock, flags);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+ if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
+ rc = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ link = dev->link;
+ now = jiffies;
+ if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
+ link->eh_context.unloaded_mask & (1 << dev->devno) &&
+ time_after(dev->unpark_deadline, now))
+ msecs = jiffies_to_msecs(dev->unpark_deadline - now);
+ else
+ msecs = 0;
+
+unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
+}
+
+static ssize_t ata_scsi_park_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ long int input;
+ unsigned long flags;
+ int rc;
+
+ rc = strict_strtol(buf, 10, &input);
+ if (rc || input < -2)
+ return -EINVAL;
+ if (input > ATA_TMOUT_MAX_PARK) {
+ rc = -EOVERFLOW;
+ input = ATA_TMOUT_MAX_PARK;
+ }
+
+ ap = ata_shost_to_port(sdev->host);
+
+ spin_lock_irqsave(ap->lock, flags);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (unlikely(!dev)) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+ if (dev->class != ATA_DEV_ATA) {
+ rc = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (input >= 0) {
+ if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
+ rc = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ dev->unpark_deadline = ata_deadline(jiffies, input);
+ dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
+ ata_port_schedule_eh(ap);
+ complete(&ap->park_req_pending);
+ } else {
+ switch (input) {
+ case -1:
+ dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
+ break;
+ case -2:
+ dev->flags |= ATA_DFLAG_NO_UNLOAD;
+ break;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc ? rc : len;
+}
+DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
+ ata_scsi_park_show, ata_scsi_park_store);
+EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
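+
+/*
+ * Editorial usage sketch, mirroring the store handler above: -2
+ * forbids head unloading, -1 re-allows it, and 0..ATA_TMOUT_MAX_PARK
+ * parks the heads for that many milliseconds.  "sda" is a placeholder:
+ *
+ *	# echo 3000 > /sys/block/sda/device/unload_heads
+ */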
+
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+{
+ cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+ scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
+}
+
+static ssize_t
+ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
+ return ap->ops->em_store(ap, buf, count);
+ return -EINVAL;
+}
+
+static ssize_t
+ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+
+ if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
+ return ap->ops->em_show(ap, buf);
+ return -EINVAL;
+}
+DEVICE_ATTR(em_message, S_IRUGO | S_IWUGO,
+ ata_scsi_em_message_show, ata_scsi_em_message_store);
+EXPORT_SYMBOL_GPL(dev_attr_em_message);
+
+static ssize_t
+ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+
+ return snprintf(buf, 23, "%d\n", ap->em_message_type);
+}
+DEVICE_ATTR(em_message_type, S_IRUGO,
+ ata_scsi_em_message_type_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
+
+static ssize_t
+ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+
+ if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
+ return ap->ops->sw_activity_show(atadev, buf);
+ return -EINVAL;
+}
+
+static ssize_t
+ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+ enum sw_activity val;
+ int rc;
+
+ if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+ val = simple_strtoul(buf, NULL, 0);
+ switch (val) {
+ case OFF: case BLINK_ON: case BLINK_OFF:
+ rc = ap->ops->sw_activity_store(atadev, val);
+ if (!rc)
+ return count;
+ else
+ return rc;
+ }
+ }
+ return -EINVAL;
+}
+DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
+ ata_scsi_activity_store);
+EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
+
+struct device_attribute *ata_common_sdev_attrs[] = {
+ &dev_attr_unload_heads,
+ NULL
+};
+EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
+
+static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	/* "Invalid field in cdb" */
+ done(cmd);
+}
+
+/**
+ * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
+ * @sdev: SCSI device for which BIOS geometry is to be determined
+ * @bdev: block device associated with @sdev
+ * @capacity: capacity of SCSI device
+ * @geom: location to which geometry will be output
+ *
+ * Generic bios head/sector/cylinder calculator
+ * used by sd. Most BIOSes nowadays expect an XXX/255/63 (CHS)
+ * mapping. Some situations may arise where the disk is not
+ * bootable if this is not used.
+ *
+ * LOCKING:
+ * Defined by the SCSI layer. We don't really care.
+ *
+ * RETURNS:
+ * Zero.
+ */
+int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ geom[0] = 255;
+ geom[1] = 63;
+ sector_div(capacity, 255*63);
+ geom[2] = capacity;
+
+ return 0;
+}
+
+/**
+ * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
+ * @sdev: SCSI device to get identify data for
+ * @arg: User buffer area for identify data
+ *
+ * LOCKING:
+ * Defined by the SCSI layer. We don't really care.
+ *
+ * RETURNS:
+ * Zero on success, negative errno on error.
+ */
+static int ata_get_identity(struct scsi_device *sdev, void __user *arg)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
+ u16 __user *dst = arg;
+ char buf[40];
+
+ if (!dev)
+ return -ENOMSG;
+
+ if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
+ return -EFAULT;
+
+ ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
+ if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
+ return -EFAULT;
+
+ ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
+ if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
+ return -EFAULT;
+
+ ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+ if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
+ * @scsidev: Device to which we are issuing command
+ * @arg: User provided data for issuing command
+ *
+ * LOCKING:
+ * Defined by the SCSI layer. We don't really care.
+ *
+ * RETURNS:
+ * Zero on success, negative errno on error.
+ */
+int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
+{
+ int rc = 0;
+ u8 scsi_cmd[MAX_COMMAND_SIZE];
+ u8 args[4], *argbuf = NULL, *sensebuf = NULL;
+ int argsize = 0;
+ enum dma_data_direction data_dir;
+ int cmd_result;
+
+ if (arg == NULL)
+ return -EINVAL;
+
+ if (copy_from_user(args, arg, sizeof(args)))
+ return -EFAULT;
+
+ sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
+ if (!sensebuf)
+ return -ENOMEM;
+
+ memset(scsi_cmd, 0, sizeof(scsi_cmd));
+
+ if (args[3]) {
+ argsize = SECTOR_SIZE * args[3];
+ argbuf = kmalloc(argsize, GFP_KERNEL);
+ if (argbuf == NULL) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ scsi_cmd[1] = (4 << 1); /* PIO Data-in */
+ scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
+ block count in sector count field */
+ data_dir = DMA_FROM_DEVICE;
+ } else {
+ scsi_cmd[1] = (3 << 1); /* Non-data */
+ scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
+ data_dir = DMA_NONE;
+ }
+
+ scsi_cmd[0] = ATA_16;
+
+ scsi_cmd[4] = args[2];
+ if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */
+ scsi_cmd[6] = args[3];
+ scsi_cmd[8] = args[1];
+ scsi_cmd[10] = 0x4f;
+ scsi_cmd[12] = 0xc2;
+ } else {
+ scsi_cmd[6] = args[1];
+ }
+ scsi_cmd[14] = args[0];
+
+ /* Good values for timeout and retries? Values below
+ from scsi_ioctl_send_command() for default case... */
+ cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
+ sensebuf, (10*HZ), 5, 0);
+
+ if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+ u8 *desc = sensebuf + 8;
+ cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+
+ /* If we set cc then ATA pass-through will cause a
+ * check condition even if no error. Filter that. */
+ if (cmd_result & SAM_STAT_CHECK_CONDITION) {
+ struct scsi_sense_hdr sshdr;
+ scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
+ &sshdr);
+ if (sshdr.sense_key == 0 &&
+ sshdr.asc == 0 && sshdr.ascq == 0)
+ cmd_result &= ~SAM_STAT_CHECK_CONDITION;
+ }
+
+ /* Send userspace a few ATA registers (same as drivers/ide) */
+ if (sensebuf[0] == 0x72 && /* format is "descriptor" */
+ desc[0] == 0x09) { /* code is "ATA Descriptor" */
+ args[0] = desc[13]; /* status */
+ args[1] = desc[3]; /* error */
+ args[2] = desc[5]; /* sector count (0:7) */
+ if (copy_to_user(arg, args, sizeof(args)))
+ rc = -EFAULT;
+ }
+ }
+
+
+ if (cmd_result) {
+ rc = -EIO;
+ goto error;
+ }
+
+ if ((argbuf)
+ && copy_to_user(arg + sizeof(args), argbuf, argsize))
+ rc = -EFAULT;
+error:
+ kfree(sensebuf);
+ kfree(argbuf);
+ return rc;
+}
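+
+/*
+ * Editorial sketch of the userspace view of the ioctl above, grounded
+ * in the argument mapping in the code: args[0] is the ATA command
+ * (scsi_cmd[14]), args[1] the sector count register for non-SMART
+ * commands, args[2] the features register, and args[3] the number of
+ * 512-byte blocks to read back.  The classic hdparm-style IDENTIFY
+ * invocation is therefore:
+ *
+ *	unsigned char args[4 + 512] = { 0xec, 0, 0, 1 };
+ *	ioctl(fd, HDIO_DRIVE_CMD, args);
+ */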
+
+/**
+ * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
+ * @scsidev: Device to which we are issuing command
+ * @arg: User provided data for issuing command
+ *
+ * LOCKING:
+ * Defined by the SCSI layer. We don't really care.
+ *
+ * RETURNS:
+ * Zero on success, negative errno on error.
+ */
+int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
+{
+ int rc = 0;
+ u8 scsi_cmd[MAX_COMMAND_SIZE];
+ u8 args[7], *sensebuf = NULL;
+ int cmd_result;
+
+ if (arg == NULL)
+ return -EINVAL;
+
+ if (copy_from_user(args, arg, sizeof(args)))
+ return -EFAULT;
+
+ sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
+ if (!sensebuf)
+ return -ENOMEM;
+
+ memset(scsi_cmd, 0, sizeof(scsi_cmd));
+ scsi_cmd[0] = ATA_16;
+ scsi_cmd[1] = (3 << 1); /* Non-data */
+ scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
+ scsi_cmd[4] = args[1];
+ scsi_cmd[6] = args[2];
+ scsi_cmd[8] = args[3];
+ scsi_cmd[10] = args[4];
+ scsi_cmd[12] = args[5];
+ scsi_cmd[13] = args[6] & 0x4f;
+ scsi_cmd[14] = args[0];
+
+ /* Good values for timeout and retries? Values below
+ from scsi_ioctl_send_command() for default case... */
+ cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
+ sensebuf, (10*HZ), 5, 0);
+
+ if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+ u8 *desc = sensebuf + 8;
+ cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+
+ /* If we set cc then ATA pass-through will cause a
+ * check condition even if no error. Filter that. */
+ if (cmd_result & SAM_STAT_CHECK_CONDITION) {
+ struct scsi_sense_hdr sshdr;
+ scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
+ &sshdr);
+ if (sshdr.sense_key == 0 &&
+ sshdr.asc == 0 && sshdr.ascq == 0)
+ cmd_result &= ~SAM_STAT_CHECK_CONDITION;
+ }
+
+ /* Send userspace ATA registers */
+ if (sensebuf[0] == 0x72 && /* format is "descriptor" */
+ desc[0] == 0x09) {/* code is "ATA Descriptor" */
+ args[0] = desc[13]; /* status */
+ args[1] = desc[3]; /* error */
+ args[2] = desc[5]; /* sector count (0:7) */
+ args[3] = desc[7]; /* lbal */
+ args[4] = desc[9]; /* lbam */
+ args[5] = desc[11]; /* lbah */
+ args[6] = desc[12]; /* select */
+ if (copy_to_user(arg, args, sizeof(args)))
+ rc = -EFAULT;
+ }
+ }
+
+ if (cmd_result) {
+ rc = -EIO;
+ goto error;
+ }
+
+ error:
+ kfree(sensebuf);
+ return rc;
+}
+
+int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
+{
+ int val = -EINVAL, rc = -EINVAL;
+
+ switch (cmd) {
+ case ATA_IOC_GET_IO32:
+ val = 0;
+ if (copy_to_user(arg, &val, 1))
+ return -EFAULT;
+ return 0;
+
+ case ATA_IOC_SET_IO32:
+ val = (unsigned long) arg;
+ if (val != 0)
+ return -EINVAL;
+ return 0;
+
+ case HDIO_GET_IDENTITY:
+ return ata_get_identity(scsidev, arg);
+
+ case HDIO_DRIVE_CMD:
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ return ata_cmd_ioctl(scsidev, arg);
+
+ case HDIO_DRIVE_TASK:
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ return ata_task_ioctl(scsidev, arg);
+
+ default:
+ rc = -ENOTTY;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ata_scsi_qc_new - acquire new ata_queued_cmd reference
+ * @dev: ATA device to which the new command is attached
+ * @cmd: SCSI command that originated this ATA command
+ * @done: SCSI command completion function
+ *
+ * Obtain a reference to an unused ata_queued_cmd structure,
+ * which is the basic libata structure representing a single
+ * ATA command sent to the hardware.
+ *
+ * If a command was available, fill in the SCSI-specific
+ * portions of the structure with information on the
+ * current command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Command allocated, or %NULL if none available.
+ */
+static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
+ struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_new_init(dev);
+ if (qc) {
+ qc->scsicmd = cmd;
+ qc->scsidone = done;
+
+ qc->sg = scsi_sglist(cmd);
+ qc->n_elem = scsi_sg_count(cmd);
+ } else {
+ cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
+ done(cmd);
+ }
+
+ return qc;
+}
+
+static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+
+ qc->extrabytes = scmd->request->extra_len;
+ qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
+}
+
+/**
+ * ata_dump_status - user friendly display of error info
+ * @id: id of the port in question
+ * @tf: ptr to filled out taskfile
+ *
+ * Decode and dump the ATA error/status registers for the user so
+ * that they have some idea what really happened at the
+ * non-make-believe layer.
+ *
+ * LOCKING:
+ * inherited from caller
+ */
+static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
+{
+ u8 stat = tf->command, err = tf->feature;
+
+ printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
+ if (stat & ATA_BUSY) {
+ printk("Busy }\n"); /* Data is not valid in this case */
+ } else {
+ if (stat & 0x40) printk("DriveReady ");
+ if (stat & 0x20) printk("DeviceFault ");
+ if (stat & 0x10) printk("SeekComplete ");
+ if (stat & 0x08) printk("DataRequest ");
+ if (stat & 0x04) printk("CorrectedError ");
+ if (stat & 0x02) printk("Index ");
+ if (stat & 0x01) printk("Error ");
+ printk("}\n");
+
+ if (err) {
+ printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
+ if (err & 0x04) printk("DriveStatusError ");
+ if (err & 0x80) {
+ if (err & 0x04) printk("BadCRC ");
+ else printk("Sector ");
+ }
+ if (err & 0x40) printk("UncorrectableError ");
+ if (err & 0x10) printk("SectorIdNotFound ");
+ if (err & 0x02) printk("TrackZeroNotFound ");
+ if (err & 0x01) printk("AddrMarkNotFound ");
+ printk("}\n");
+ }
+ }
+}
+
+/**
+ * ata_to_sense_error - convert ATA error to SCSI error
+ * @id: ATA device number
+ * @drv_stat: value contained in ATA status register
+ * @drv_err: value contained in ATA error register
+ * @sk: the sense key we'll fill out
+ * @asc: the additional sense code we'll fill out
+ * @ascq: the additional sense code qualifier we'll fill out
+ * @verbose: be verbose
+ *
+ * Converts an ATA error into a SCSI error. Fill out pointers to
+ * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
+ * format sense blocks.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
+ u8 *asc, u8 *ascq, int verbose)
+{
+ int i;
+
+ /* Based on the 3ware driver translation table */
+ static const unsigned char sense_table[][4] = {
+ /* BBD|ECC|ID|MAR */
+ {0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
+ /* BBD|ECC|ID */
+ {0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
+ /* ECC|MC|MARK */
+ {0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error
+ /* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */
+ {0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error
+ /* MC|ID|ABRT|TRK0|MARK */
+ {0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready
+ /* MCR|MARK */
+ {0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready
+ /* Bad address mark */
+ {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field
+ /* TRK0 */
+ {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error
+ /* Abort & !ICRC */
+ {0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command
+ /* Media change request */
+ {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline
+ /* SRV */
+ {0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found
+ /* Media change */
+ {0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline
+ /* ECC */
+ {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error
+ /* BBD - block marked bad */
+ {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error
+ {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+ };
+ static const unsigned char stat_table[][4] = {
+ /* Must be first because BUSY means no other bits valid */
+ {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now
+ {0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault
+ {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now
+ {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered
+ {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+ };
+
+ /*
+ * Is this an error we can process/parse
+ */
+ if (drv_stat & ATA_BUSY) {
+ drv_err = 0; /* Ignore the err bits, they're invalid */
+ }
+
+ if (drv_err) {
+ /* Look for drv_err */
+ for (i = 0; sense_table[i][0] != 0xFF; i++) {
+ /* Look for best matches first */
+ if ((sense_table[i][0] & drv_err) ==
+ sense_table[i][0]) {
+ *sk = sense_table[i][1];
+ *asc = sense_table[i][2];
+ *ascq = sense_table[i][3];
+ goto translate_done;
+ }
+ }
+ /* No immediate match */
+ if (verbose)
+ printk(KERN_WARNING "ata%u: no sense translation for "
+ "error 0x%02x\n", id, drv_err);
+ }
+
+ /* Fall back to interpreting status bits */
+ for (i = 0; stat_table[i][0] != 0xFF; i++) {
+ if (stat_table[i][0] & drv_stat) {
+ *sk = stat_table[i][1];
+ *asc = stat_table[i][2];
+ *ascq = stat_table[i][3];
+ goto translate_done;
+ }
+ }
+ /* No error? Undecoded? */
+ if (verbose)
+ printk(KERN_WARNING "ata%u: no sense translation for "
+ "status: 0x%02x\n", id, drv_stat);
+
+ /* We need a sensible error return here, which is tricky, and one
+ that won't cause people to do things like return a disk wrongly */
+ *sk = ABORTED_COMMAND;
+ *asc = 0x00;
+ *ascq = 0x00;
+
+ translate_done:
+ if (verbose)
+ printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
+ "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
+ id, drv_stat, drv_err, *sk, *asc, *ascq);
+ return;
+}
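+
+/*
+ * Worked example (editorial): a drive reporting error register 0x40
+ * (UNC) matches the {0x40, MEDIUM_ERROR, 0x11, 0x04} row above and is
+ * surfaced to SCSI as SK/ASC/ASCQ 03/11/04, "medium error, unrecovered
+ * read error".
+ */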
+
+/*
+ * ata_gen_passthru_sense - Generate check condition sense block.
+ * @qc: Command that completed.
+ *
+ * This function is specific to the ATA descriptor format sense
+ * block specified for the ATA pass through commands. Regardless
+ * of whether the command errored or not, return a sense
+ * block. Copy all controller registers into the sense
+ * block. Clear sense key, ASC & ASCQ if there is no error.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ struct ata_taskfile *tf = &qc->result_tf;
+ unsigned char *sb = cmd->sense_buffer;
+ unsigned char *desc = sb + 8;
+ int verbose = qc->ap->ops->error_handler == NULL;
+
+ memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
+
+ cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+ /*
+ * Use ata_to_sense_error() to map status register bits
+ * onto sense key, asc & ascq.
+ */
+ if (qc->err_mask ||
+ tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+ ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+ &sb[1], &sb[2], &sb[3], verbose);
+ sb[1] &= 0x0f;
+ }
+
+ /*
+ * Sense data is current and format is descriptor.
+ */
+ sb[0] = 0x72;
+
+ desc[0] = 0x09;
+
+ /* set length of additional sense data */
+ sb[7] = 14;
+ desc[1] = 12;
+
+ /*
+ * Copy registers into sense buffer.
+ */
+ desc[2] = 0x00;
+ desc[3] = tf->feature; /* == error reg */
+ desc[5] = tf->nsect;
+ desc[7] = tf->lbal;
+ desc[9] = tf->lbam;
+ desc[11] = tf->lbah;
+ desc[12] = tf->device;
+ desc[13] = tf->command; /* == status reg */
+
+ /*
+ * Fill in Extend bit, and the high order bytes
+ * if applicable.
+ */
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ desc[2] |= 0x01;
+ desc[4] = tf->hob_nsect;
+ desc[6] = tf->hob_lbal;
+ desc[8] = tf->hob_lbam;
+ desc[10] = tf->hob_lbah;
+ }
+}
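+
+/*
+ * Editorial layout note for the block built above: sb[0] = 0x72
+ * (current, descriptor format), sb[1..3] = SK/ASC/ASCQ, sb[7] = 14
+ * (additional length), followed at sb + 8 by an ATA Status Return
+ * descriptor (code 0x09, length 12) carrying the error, count, LBA,
+ * device and status taskfile bytes copied in above.
+ */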
+
+/**
+ * ata_gen_ata_sense - generate a SCSI fixed sense block
+ * @qc: Command that we are erroring out
+ *
+ * Generate sense block for a failed ATA command @qc. Descriptor
+ * format is used to accommodate LBA48 block address.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
+{
+ struct ata_device *dev = qc->dev;
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ struct ata_taskfile *tf = &qc->result_tf;
+ unsigned char *sb = cmd->sense_buffer;
+ unsigned char *desc = sb + 8;
+ int verbose = qc->ap->ops->error_handler == NULL;
+ u64 block;
+
+ memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
+
+ cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+ /* sense data is current and format is descriptor */
+ sb[0] = 0x72;
+
+ /* Use ata_to_sense_error() to map status register bits
+ * onto sense key, asc & ascq.
+ */
+ if (qc->err_mask ||
+ tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+ ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
+ &sb[1], &sb[2], &sb[3], verbose);
+ sb[1] &= 0x0f;
+ }
+
+ block = ata_tf_read_block(&qc->result_tf, dev);
+
+ /* information sense data descriptor */
+ sb[7] = 12;
+ desc[0] = 0x00;
+ desc[1] = 10;
+
+ desc[2] |= 0x80; /* valid */
+ desc[6] = block >> 40;
+ desc[7] = block >> 32;
+ desc[8] = block >> 24;
+ desc[9] = block >> 16;
+ desc[10] = block >> 8;
+ desc[11] = block;
+}
+
+static void ata_scsi_sdev_config(struct scsi_device *sdev)
+{
+ sdev->use_10_for_rw = 1;
+ sdev->use_10_for_ms = 1;
+
+ /* Schedule policy is determined by ->qc_defer() callback and
+ * it needs to see every deferred qc. Set dev_blocked to 1 to
+ * prevent SCSI midlayer from automatically deferring
+ * requests.
+ */
+ sdev->max_device_blocked = 1;
+}
+
+/**
+ * atapi_drain_needed - Check whether data transfer may overflow
+ * @rq: request to be checked
+ *
+ * ATAPI commands which transfer variable length data to host
+ * might overflow due to application error or hardware bug. This
+ * function checks whether overflow should be drained and ignored
+ * for @request.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * 1 if the request needs draining; otherwise, 0.
+ */
+static int atapi_drain_needed(struct request *rq)
+{
+ if (likely(!blk_pc_request(rq)))
+ return 0;
+
+ if (!rq->data_len || (rq->cmd_flags & REQ_RW))
+ return 0;
+
+ return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+}
+
+static int ata_scsi_dev_config(struct scsi_device *sdev,
+ struct ata_device *dev)
+{
+ if (!ata_id_has_unload(dev->id))
+ dev->flags |= ATA_DFLAG_NO_UNLOAD;
+
+ /* configure max sectors */
+ blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
+
+ if (dev->class == ATA_DEV_ATAPI) {
+ struct request_queue *q = sdev->request_queue;
+ void *buf;
+
+ /* set the min alignment and padding */
+ blk_queue_update_dma_alignment(sdev->request_queue,
+ ATA_DMA_PAD_SZ - 1);
+ blk_queue_update_dma_pad(sdev->request_queue,
+ ATA_DMA_PAD_SZ - 1);
+
+ /* configure draining */
+ buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
+ if (!buf) {
+ ata_dev_printk(dev, KERN_ERR,
+ "drain buffer allocation failed\n");
+ return -ENOMEM;
+ }
+
+ blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
+ } else {
+ if (ata_id_is_ssd(dev->id))
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
+ sdev->request_queue);
+
+ /* ATA devices must be sector aligned */
+ blk_queue_update_dma_alignment(sdev->request_queue,
+ ATA_SECT_SIZE - 1);
+ sdev->manage_start_stop = 1;
+ }
+
+ if (dev->flags & ATA_DFLAG_AN)
+ set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
+
+ if (dev->flags & ATA_DFLAG_NCQ) {
+ int depth;
+
+ depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
+ depth = min(ATA_MAX_QUEUE - 1, depth);
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
+ }
+
+ return 0;
+}
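+
+/*
+ * Editorial note on the NCQ clamp above: with a host can_queue of 31
+ * and a drive advertising queue depth 32, the configured depth is
+ * min(31, 32), further limited to ATA_MAX_QUEUE - 1, i.e. 31 assuming
+ * ATA_MAX_QUEUE is 32 in this tree.
+ */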
+
+/**
+ * ata_scsi_slave_config - Set SCSI device attributes
+ * @sdev: SCSI device to examine
+ *
+ * This is called before we actually start reading
+ * and writing to the device, to configure certain
+ * SCSI mid-layer behaviors.
+ *
+ * LOCKING:
+ * Defined by SCSI layer. We don't really care.
+ */
+
+int ata_scsi_slave_config(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+ int rc = 0;
+
+ ata_scsi_sdev_config(sdev);
+
+ if (dev)
+ rc = ata_scsi_dev_config(sdev, dev);
+
+ return rc;
+}
+
+/**
+ * ata_scsi_slave_destroy - SCSI device is about to be destroyed
+ * @sdev: SCSI device to be destroyed
+ *
+ * @sdev is about to be destroyed for hot/warm unplugging. If
+ * this unplugging was initiated by libata as indicated by NULL
+ * dev->sdev, this function doesn't have to do anything.
+ * Otherwise, SCSI layer initiated warm-unplug is in progress.
+ * Clear dev->sdev, schedule the device for ATA detach and invoke
+ * EH.
+ *
+ * LOCKING:
+ * Defined by SCSI layer. We don't really care.
+ */
+void ata_scsi_slave_destroy(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct request_queue *q = sdev->request_queue;
+ unsigned long flags;
+ struct ata_device *dev;
+
+ if (!ap->ops->error_handler)
+ return;
+
+ spin_lock_irqsave(ap->lock, flags);
+ dev = __ata_scsi_find_dev(ap, sdev);
+ if (dev && dev->sdev) {
+ /* SCSI device already in CANCEL state, no need to offline it */
+ dev->sdev = NULL;
+ dev->flags |= ATA_DFLAG_DETACH;
+ ata_port_schedule_eh(ap);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ kfree(q->dma_drain_buffer);
+ q->dma_drain_buffer = NULL;
+ q->dma_drain_size = 0;
+}
+
+/**
+ * ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ * @sdev: SCSI device to configure queue depth for
+ * @queue_depth: new queue depth
+ *
+ * This is libata standard hostt->change_queue_depth callback.
+ * SCSI will call into this callback when user tries to set queue
+ * depth via sysfs.
+ *
+ * LOCKING:
+ * SCSI layer (we don't care)
+ *
+ * RETURNS:
+ * Newly configured queue depth.
+ */
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *dev;
+ unsigned long flags;
+
+ if (queue_depth < 1 || queue_depth == sdev->queue_depth)
+ return sdev->queue_depth;
+
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev || !ata_dev_enabled(dev))
+ return sdev->queue_depth;
+
+ /* NCQ enabled? */
+ spin_lock_irqsave(ap->lock, flags);
+ dev->flags &= ~ATA_DFLAG_NCQ_OFF;
+ if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
+ dev->flags |= ATA_DFLAG_NCQ_OFF;
+ queue_depth = 1;
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* limit and apply queue depth */
+ queue_depth = min(queue_depth, sdev->host->can_queue);
+ queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
+ queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);
+
+ if (sdev->queue_depth == queue_depth)
+ return -EINVAL;
+
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
+ return queue_depth;
+}
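+
+/*
+ * Editorial usage sketch: the SCSI midlayer invokes this callback when
+ * the sysfs queue_depth attribute is written; "sda" is a placeholder:
+ *
+ *	# echo 31 > /sys/block/sda/device/queue_depth
+ */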
+
+/* XXX: for spindown warning */
+static void ata_delayed_done_timerfn(unsigned long arg)
+{
+ struct scsi_cmnd *scmd = (void *)arg;
+
+ scmd->scsi_done(scmd);
+}
+
+/* XXX: for spindown warning */
+static void ata_delayed_done(struct scsi_cmnd *scmd)
+{
+ static struct timer_list timer;
+
+ setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd);
+ mod_timer(&timer, jiffies + 5 * HZ);
+}
+
+/**
+ * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
+ * @qc: Storage for translated ATA taskfile
+ *
+ * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
+ * (to start). Perhaps these commands should be preceded by
+ * CHECK POWER MODE to see what power mode the device is already in.
+ * [See SAT revision 5 at www.t10.org]
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Zero on success, non-zero on error.
+ */
+static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ struct ata_taskfile *tf = &qc->tf;
+ const u8 *cdb = scmd->cmnd;
+
+ if (scmd->cmd_len < 5)
+ goto invalid_fld;
+
+ tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf->protocol = ATA_PROT_NODATA;
+ if (cdb[1] & 0x1) {
+ ; /* ignore IMMED bit, violates sat-r05 */
+ }
+ if (cdb[4] & 0x2)
+ goto invalid_fld; /* LOEJ bit set not supported */
+ if (((cdb[4] >> 4) & 0xf) != 0)
+ goto invalid_fld; /* power conditions not supported */
+
+ if (cdb[4] & 0x1) {
+ tf->nsect = 1; /* 1 sector, lba=0 */
+
+ if (qc->dev->flags & ATA_DFLAG_LBA) {
+ tf->flags |= ATA_TFLAG_LBA;
+
+ tf->lbah = 0x0;
+ tf->lbam = 0x0;
+ tf->lbal = 0x0;
+ tf->device |= ATA_LBA;
+ } else {
+ /* CHS */
+ tf->lbal = 0x1; /* sect */
+ tf->lbam = 0x0; /* cyl low */
+ tf->lbah = 0x0; /* cyl high */
+ }
+
+ tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
+ } else {
+ /* XXX: This is for backward compatibility, will be
+ * removed. Read Documentation/feature-removal-schedule.txt
+ * for more info.
+ */
+ if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
+ (system_state == SYSTEM_HALT ||
+ system_state == SYSTEM_POWER_OFF)) {
+ static unsigned long warned;
+
+ if (!test_and_set_bit(0, &warned)) {
+ ata_dev_printk(qc->dev, KERN_WARNING,
+ "DISK MIGHT NOT BE SPUN DOWN PROPERLY. "
+ "UPDATE SHUTDOWN UTILITY\n");
+ ata_dev_printk(qc->dev, KERN_WARNING,
+ "For more info, visit "
+ "http://linux-ata.org/shutdown.html\n");
+
+ /* ->scsi_done is not used, use it for
+ * delayed completion.
+ */
+ scmd->scsi_done = qc->scsidone;
+ qc->scsidone = ata_delayed_done;
+ }
+ scmd->result = SAM_STAT_GOOD;
+ return 1;
+ }
+
+ /* Issue ATA STANDBY IMMEDIATE command */
+ tf->command = ATA_CMD_STANDBYNOW1;
+ }
+
+ /*
+ * Standby and Idle condition timers could be implemented but that
+ * would require libata to implement the Power condition mode page
+ * and allow the user to change it. Changing mode pages requires
+ * MODE SELECT to be implemented.
+ */
+
+ return 0;
+
+invalid_fld:
+ ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	/* "Invalid field in cdb" */
+ return 1;
+}
+
+
+/**
+ * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
+ * @qc: Storage for translated ATA taskfile
+ *
+ * Sets up an ATA taskfile to issue FLUSH CACHE or
+ * FLUSH CACHE EXT.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Zero on success, non-zero on error.
+ */
+static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
+{
+ struct ata_taskfile *tf = &qc->tf;
+
+ tf->flags |= ATA_TFLAG_DEVICE;
+ tf->protocol = ATA_PROT_NODATA;
+
+ if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
+ tf->command = ATA_CMD_FLUSH_EXT;
+ else
+ tf->command = ATA_CMD_FLUSH;
+
+ /* flush is critical for IO integrity, consider it an IO command */
+ qc->flags |= ATA_QCFLAG_IO;
+
+ return 0;
+}
+
+/**
+ * scsi_6_lba_len - Get LBA and transfer length
+ * @cdb: SCSI command to translate
+ *
+ * Calculate LBA and transfer length for 6-byte commands.
+ *
+ * RETURNS:
+ * @plba: the LBA
+ * @plen: the transfer length
+ */
+static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+{
+ u64 lba = 0;
+ u32 len;
+
+ VPRINTK("six-byte command\n");
+
+ lba |= ((u64)(cdb[1] & 0x1f)) << 16;
+ lba |= ((u64)cdb[2]) << 8;
+ lba |= ((u64)cdb[3]);
+
+ len = cdb[4];
+
+ *plba = lba;
+ *plen = len;
+}
+
+/**
+ * scsi_10_lba_len - Get LBA and transfer length
+ * @cdb: SCSI command to translate
+ *
+ * Calculate LBA and transfer length for 10-byte commands.
+ *
+ * RETURNS:
+ * @plba: the LBA
+ * @plen: the transfer length
+ */
+static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+{
+ u64 lba = 0;
+ u32 len = 0;
+
+ VPRINTK("ten-byte command\n");
+
+ lba |= ((u64)cdb[2]) << 24;
+ lba |= ((u64)cdb[3]) << 16;
+ lba |= ((u64)cdb[4]) << 8;
+ lba |= ((u64)cdb[5]);
+
+ len |= ((u32)cdb[7]) << 8;
+ len |= ((u32)cdb[8]);
+
+ *plba = lba;
+ *plen = len;
+}
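+
+/*
+ * Worked example (editorial): a READ(10) CDB of
+ * 28 00 00 00 08 00 00 00 10 00 decodes via scsi_10_lba_len() above to
+ * lba = 0x800 (2048) and len = 0x10 (16 sectors).
+ */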
+
+/**
+ * scsi_16_lba_len - Get LBA and transfer length
+ * @cdb: SCSI command to translate
+ *
+ * Calculate LBA and transfer length for 16-byte commands.
+ *
+ * RETURNS:
+ * @plba: the LBA
+ * @plen: the transfer length
+ */
+static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
+{
+ u64 lba = 0;
+ u32 len = 0;
+
+ VPRINTK("sixteen-byte command\n");
+
+ lba |= ((u64)cdb[2]) << 56;
+ lba |= ((u64)cdb[3]) << 48;
+ lba |= ((u64)cdb[4]) << 40;
+ lba |= ((u64)cdb[5]) << 32;
+ lba |= ((u64)cdb[6]) << 24;
+ lba |= ((u64)cdb[7]) << 16;
+ lba |= ((u64)cdb[8]) << 8;
+ lba |= ((u64)cdb[9]);
+
+ len |= ((u32)cdb[10]) << 24;
+ len |= ((u32)cdb[11]) << 16;
+ len |= ((u32)cdb[12]) << 8;
+ len |= ((u32)cdb[13]);
+
+ *plba = lba;
+ *plen = len;
+}
+
+/**
+ * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
+ * @qc: Storage for translated ATA taskfile
+ *
+ * Converts SCSI VERIFY command to an ATA READ VERIFY command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Zero on success, non-zero on error.
+ */
+static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ struct ata_taskfile *tf = &qc->tf;
+ struct ata_device *dev = qc->dev;
+ u64 dev_sectors = qc->dev->n_sectors;
+ const u8 *cdb = scmd->cmnd;
+ u64 block;
+ u32 n_block;
+
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf->protocol = ATA_PROT_NODATA;
+
+ if (cdb[0] == VERIFY) {
+ if (scmd->cmd_len < 10)
+ goto invalid_fld;
+ scsi_10_lba_len(cdb, &block, &n_block);
+ } else if (cdb[0] == VERIFY_16) {
+ if (scmd->cmd_len < 16)
+ goto invalid_fld;
+ scsi_16_lba_len(cdb, &block, &n_block);
+ } else
+ goto invalid_fld;
+
+ if (!n_block)
+ goto nothing_to_do;
+ if (block >= dev_sectors)
+ goto out_of_range;
+ if ((block + n_block) > dev_sectors)
+ goto out_of_range;
+
+ if (dev->flags & ATA_DFLAG_LBA) {
+ tf->flags |= ATA_TFLAG_LBA;
+
+ if (lba_28_ok(block, n_block)) {
+ /* use LBA28 */
+ tf->command = ATA_CMD_VERIFY;
+ tf->device |= (block >> 24) & 0xf;
+ } else if (lba_48_ok(block, n_block)) {
+ if (!(dev->flags & ATA_DFLAG_LBA48))
+ goto out_of_range;
+
+ /* use LBA48 */
+ tf->flags |= ATA_TFLAG_LBA48;
+ tf->command = ATA_CMD_VERIFY_EXT;
+
+ tf->hob_nsect = (n_block >> 8) & 0xff;
+
+ tf->hob_lbah = (block >> 40) & 0xff;
+ tf->hob_lbam = (block >> 32) & 0xff;
+ tf->hob_lbal = (block >> 24) & 0xff;
+ } else
+ /* request too large even for LBA48 */
+ goto out_of_range;
+
+ tf->nsect = n_block & 0xff;
+
+ tf->lbah = (block >> 16) & 0xff;
+ tf->lbam = (block >> 8) & 0xff;
+ tf->lbal = block & 0xff;
+
+ tf->device |= ATA_LBA;
+ } else {
+ /* CHS */
+ u32 sect, head, cyl, track;
+
+ if (!lba_28_ok(block, n_block))
+ goto out_of_range;
+
+ /* Convert LBA to CHS */
+ track = (u32)block / dev->sectors;
+ cyl = track / dev->heads;
+ head = track % dev->heads;
+ sect = (u32)block % dev->sectors + 1;
+
+ DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+ (u32)block, track, cyl, head, sect);
+
+ /* Check whether the converted CHS can fit.
+ Cylinder: 0-65535
+ Head: 0-15
+ Sector: 1-255 */
+ if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+ goto out_of_range;
+
+ tf->command = ATA_CMD_VERIFY;
+ tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+ tf->lbal = sect;
+ tf->lbam = cyl;
+ tf->lbah = cyl >> 8;
+ tf->device |= head;
+ }
+
+ return 0;
+
+invalid_fld:
+ ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
+ /* "Invalid field in cbd" */
+ return 1;
+
+out_of_range:
+ ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
+ /* "Logical Block Address out of range" */
+ return 1;
+
+nothing_to_do:
+ scmd->result = SAM_STAT_GOOD;
+ return 1;
+}
+
+/**
+ * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
+ * @qc: Storage for translated ATA taskfile
+ *
+ * Converts any of six SCSI read/write commands into the
+ * ATA counterpart, including starting sector (LBA),
+ * sector count, and taking into account the device's LBA48
+ * support.
+ *
+ * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
+ * %WRITE_16 are currently supported.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Zero on success, non-zero on error.
+ */
+static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ const u8 *cdb = scmd->cmnd;
+ unsigned int tf_flags = 0;
+ u64 block;
+ u32 n_block;
+ int rc;
+
+ if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
+ tf_flags |= ATA_TFLAG_WRITE;
+
+ /* Calculate the SCSI LBA, transfer length and FUA. */
+ switch (cdb[0]) {
+ case READ_10:
+ case WRITE_10:
+ if (unlikely(scmd->cmd_len < 10))
+ goto invalid_fld;
+ scsi_10_lba_len(cdb, &block, &n_block);
+ if (unlikely(cdb[1] & (1 << 3)))
+ tf_flags |= ATA_TFLAG_FUA;
+ break;
+ case READ_6:
+ case WRITE_6:
+ if (unlikely(scmd->cmd_len < 6))
+ goto invalid_fld;
+ scsi_6_lba_len(cdb, &block, &n_block);
+
+ /* for 6-byte r/w commands, transfer length 0
+ * means 256 blocks of data, not 0 block.
+ */
+ if (!n_block)
+ n_block = 256;
+ break;
+ case READ_16:
+ case WRITE_16:
+ if (unlikely(scmd->cmd_len < 16))
+ goto invalid_fld;
+ scsi_16_lba_len(cdb, &block, &n_block);
+ if (unlikely(cdb[1] & (1 << 3)))
+ tf_flags |= ATA_TFLAG_FUA;
+ break;
+ default:
+ DPRINTK("no-byte command\n");
+ goto invalid_fld;
+ }
+
+ /* Check and compose ATA command */
+ if (!n_block)
+ /* For 10-byte and 16-byte SCSI R/W commands, transfer
+ * length 0 means transfer 0 block of data.
+ * However, for ATA R/W commands, sector count 0 means
+ * 256 or 65536 sectors, not 0 sectors as in SCSI.
+ *
+ * WARNING: one or two older ATA drives treat 0 as 0...
+ */
+ goto nothing_to_do;
+
+ qc->flags |= ATA_QCFLAG_IO;
+ qc->nbytes = n_block * ATA_SECT_SIZE;
+
+ rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
+ qc->tag);
+ if (likely(rc == 0))
+ return 0;
+
+ if (rc == -ERANGE)
+ goto out_of_range;
+ /* treat all other errors as -EINVAL, fall through */
+invalid_fld:
+ ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
+ /* "Invalid field in cbd" */
+ return 1;
+
+out_of_range:
+ ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
+ /* "Logical Block Address out of range" */
+ return 1;
+
+nothing_to_do:
+ scmd->result = SAM_STAT_GOOD;
+ return 1;
+}
+
+static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ u8 *cdb = cmd->cmnd;
+ int need_sense = (qc->err_mask != 0);
+
+ /* For ATA pass thru (SAT) commands, generate a sense block if
+ * the user mandated it or if there's an error. Note that if we
+ * generate one because the user forced us to, a check condition
+ * is generated and the ATA register values are returned
+ * whether the command completed successfully or not. If there
+ * was no error, SK, ASC and ASCQ will all be zero.
+ */
+ if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
+ ((cdb[2] & 0x20) || need_sense)) {
+ ata_gen_passthru_sense(qc);
+ } else {
+ if (!need_sense) {
+ cmd->result = SAM_STAT_GOOD;
+ } else {
+ /* TODO: decide which descriptor format to use
+ * for 48b LBA devices and call that here
+ * instead of the fixed desc, which is only
+ * good for smaller LBA (and maybe CHS?)
+ * devices.
+ */
+ ata_gen_ata_sense(qc);
+ }
+ }
+
+ /* XXX: track spindown state for spindown skipping and warning */
+ if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
+ qc->tf.command == ATA_CMD_STANDBYNOW1))
+ qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
+ else if (likely(system_state != SYSTEM_HALT &&
+ system_state != SYSTEM_POWER_OFF))
+ qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN;
+
+ if (need_sense && !ap->ops->error_handler)
+ ata_dump_status(ap->print_id, &qc->result_tf);
+
+ qc->scsidone(cmd);
+
+ ata_qc_free(qc);
+}
+
+/**
+ * ata_scsi_translate - Translate then issue SCSI command to ATA device
+ * @dev: ATA device to which the command is addressed
+ * @cmd: SCSI command to execute
+ * @done: SCSI command completion function
+ * @xlat_func: Actor which translates @cmd to an ATA taskfile
+ *
+ * Our ->queuecommand() function has decided that the SCSI
+ * command issued can be directly translated into an ATA
+ * command, rather than handled internally.
+ *
+ * This function sets up an ata_queued_cmd structure for the
+ * SCSI command, and sends that ata_queued_cmd to the hardware.
+ *
+ * The xlat_func argument (actor) returns 0 if ready to execute
+ * ATA command, else 1 to finish translation. If 1 is returned
+ * then cmd->result (and possibly cmd->sense_buffer) are assumed
+ * to be set reflecting an error condition or clean (early)
+ * termination.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * 0 on success, SCSI_MLQUEUE_DEVICE_BUSY or SCSI_MLQUEUE_HOST_BUSY
+ * if the command needs to be deferred.
+ */
+static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *),
+ ata_xlat_func_t xlat_func)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct ata_queued_cmd *qc;
+ int rc;
+
+ VPRINTK("ENTER\n");
+
+ qc = ata_scsi_qc_new(dev, cmd, done);
+ if (!qc)
+ goto err_mem;
+
+ /* data is present; dma-map it */
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+ cmd->sc_data_direction == DMA_TO_DEVICE) {
+ if (unlikely(scsi_bufflen(cmd) < 1)) {
+ ata_dev_printk(dev, KERN_WARNING,
+ "WARNING: zero len r/w req\n");
+ goto err_did;
+ }
+
+ ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
+
+ qc->dma_dir = cmd->sc_data_direction;
+ }
+
+ qc->complete_fn = ata_scsi_qc_complete;
+
+ if (xlat_func(qc))
+ goto early_finish;
+
+ if (ap->ops->qc_defer) {
+ if ((rc = ap->ops->qc_defer(qc)))
+ goto defer;
+ }
+
+ /* select device, send command to hardware */
+ ata_qc_issue(qc);
+
+ VPRINTK("EXIT\n");
+ return 0;
+
+early_finish:
+ ata_qc_free(qc);
+ done(cmd); /* use the saved callback; qc must not be touched after free */
+ DPRINTK("EXIT - early finish (good or error)\n");
+ return 0;
+
+err_did:
+ ata_qc_free(qc);
+ cmd->result = (DID_ERROR << 16);
+ done(cmd);
+err_mem:
+ DPRINTK("EXIT - internal\n");
+ return 0;
+
+defer:
+ ata_qc_free(qc);
+ DPRINTK("EXIT - defer\n");
+ if (rc == ATA_DEFER_LINK)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ else
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/**
+ * ata_scsi_rbuf_get - Map response buffer.
+ * @cmd: SCSI command containing buffer to be mapped.
+ * @copy_in: copy in from user buffer
+ * @flags: unsigned long variable to store irq enable status
+ *
+ * Prepare buffer for simulated SCSI commands.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(ata_scsi_rbuf_lock) on success
+ *
+ * RETURNS:
+ * Pointer to response buffer.
+ */
+static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
+ unsigned long *flags)
+{
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);
+
+ memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
+ if (copy_in)
+ sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+ ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+ return ata_scsi_rbuf;
+}
+
+/**
+ * ata_scsi_rbuf_put - Unmap response buffer.
+ * @cmd: SCSI command containing buffer to be unmapped.
+ * @copy_out: copy out result
+ * @flags: @flags passed to ata_scsi_rbuf_get()
+ *
+ * Returns rbuf buffer. The result is copied to @cmd's buffer if
+ * @copy_out is true.
+ *
+ * LOCKING:
+ * Unlocks ata_scsi_rbuf_lock.
+ */
+static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
+ unsigned long *flags)
+{
+ if (copy_out)
+ sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+ ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
+}
+
+/**
+ * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @actor: Callback hook for desired SCSI command simulator
+ *
+ * Takes care of the hard work of simulating a SCSI command...
+ * Mapping the response buffer, calling the command's handler,
+ * and handling the handler's return value. This return value
+ * indicates whether the handler wishes the SCSI command to be
+ * completed successfully (0), or not (in which case cmd->result
+ * and sense buffer are assumed to be set).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
+ unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
+{
+ u8 *rbuf;
+ unsigned int rc;
+ struct scsi_cmnd *cmd = args->cmd;
+ unsigned long flags;
+
+ rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
+ rc = actor(args, rbuf);
+ ata_scsi_rbuf_put(cmd, rc == 0, &flags);
+
+ if (rc == 0)
+ cmd->result = SAM_STAT_GOOD;
+ args->done(cmd);
+}
+
+/**
+ * ata_scsiop_inq_std - Simulate INQUIRY command
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns standard device identification data associated
+ * with non-VPD INQUIRY command output.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+{
+ const u8 versions[] = {
+ 0x60, /* SAM-3 (no version claimed) */
+
+ 0x03,
+ 0x20, /* SBC-2 (no version claimed) */
+
+ 0x02,
+ 0x60 /* SPC-3 (no version claimed) */
+ };
+ u8 hdr[] = {
+ TYPE_DISK,
+ 0,
+ 0x5, /* claim SPC-3 version compatibility */
+ 2,
+ 95 - 4
+ };
+
+ VPRINTK("ENTER\n");
+
+ /* set scsi removable (RMB) bit per ata bit */
+ if (ata_id_removeable(args->id))
+ hdr[1] |= (1 << 7);
+
+ memcpy(rbuf, hdr, sizeof(hdr));
+ memcpy(&rbuf[8], "ATA ", 8);
+ ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
+ ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
+
+ if (rbuf[32] == 0 || rbuf[32] == ' ')
+ memcpy(&rbuf[32], "n/a ", 4);
+
+ memcpy(rbuf + 59, versions, sizeof(versions));
+
+ return 0;
+}
+
+/**
+ * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns list of inquiry VPD pages available.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
+{
+ const u8 pages[] = {
+ 0x00, /* page 0x00, this page */
+ 0x80, /* page 0x80, unit serial no page */
+ 0x83, /* page 0x83, device ident page */
+ 0x89, /* page 0x89, ata info page */
+ 0xb1, /* page 0xb1, block device characteristics page */
+ };
+
+ rbuf[3] = sizeof(pages); /* number of supported VPD pages */
+ memcpy(rbuf + 4, pages, sizeof(pages));
+ return 0;
+}
+
+/**
+ * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Returns ATA device serial number.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
+{
+ const u8 hdr[] = {
+ 0,
+ 0x80, /* this page code */
+ 0,
+ ATA_ID_SERNO_LEN, /* page len */
+ };
+
+ memcpy(rbuf, hdr, sizeof(hdr));
+ ata_id_string(args->id, (unsigned char *) &rbuf[4],
+ ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+ return 0;
+}
+
+/**
+ * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Yields two logical unit device identification designators:
+ * - vendor specific ASCII containing the ATA serial number
+ * - SAT defined "t10 vendor id based" containing ASCII vendor
+ * name ("ATA "), model and serial numbers.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+{
+ const int sat_model_serial_desc_len = 68;
+ int num;
+
+ rbuf[1] = 0x83; /* this page code */
+ num = 4;
+
+ /* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
+ rbuf[num + 0] = 2;
+ rbuf[num + 3] = ATA_ID_SERNO_LEN;
+ num += 4;
+ ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+ num += ATA_ID_SERNO_LEN;
+
+ /* SAT defined lu model and serial numbers descriptor */
+ /* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
+ rbuf[num + 0] = 2;
+ rbuf[num + 1] = 1;
+ rbuf[num + 3] = sat_model_serial_desc_len;
+ num += 4;
+ memcpy(rbuf + num, "ATA ", 8);
+ num += 8;
+ ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
+ ATA_ID_PROD_LEN);
+ num += ATA_ID_PROD_LEN;
+ ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
+ ATA_ID_SERNO_LEN);
+ num += ATA_ID_SERNO_LEN;
+
+ rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
+ return 0;
+}
+
+/**
+ * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Yields SAT-specified ATA VPD page.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+{
+ struct ata_taskfile tf;
+
+ memset(&tf, 0, sizeof(tf));
+
+ rbuf[1] = 0x89; /* our page code */
+ rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
+ rbuf[3] = (0x238 & 0xff);
+
+ memcpy(&rbuf[8], "linux ", 8);
+ memcpy(&rbuf[16], "libata ", 16);
+ memcpy(&rbuf[32], DRV_VERSION, 4);
+ ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
+
+ /* we don't store the ATA device signature, so we fake it */
+
+ tf.command = ATA_DRDY; /* really, this is Status reg */
+ tf.lbal = 0x1;
+ tf.nsect = 0x1;
+
+ ata_tf_to_fis(&tf, 0, 1, &rbuf[36]); /* TODO: PMP? */
+ rbuf[36] = 0x34; /* force D2H Reg FIS (34h) */
+
+ rbuf[56] = ATA_CMD_ID_ATA;
+
+ memcpy(&rbuf[60], &args->id[0], 512);
+ return 0;
+}
+
+static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+{
+ rbuf[1] = 0xb1;
+ rbuf[3] = 0x3c;
+ if (ata_id_major_version(args->id) > 7) {
+ rbuf[4] = args->id[217] >> 8;
+ rbuf[5] = args->id[217];
+ rbuf[7] = args->id[168] & 0xf;
+ }
+
+ return 0;
+}
+
+/**
+ * ata_scsiop_noop - Command handler that simply returns success.
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * No operation. Simply returns success to caller, to indicate
+ * that the caller should successfully complete this SCSI command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
+{
+ VPRINTK("ENTER\n");
+ return 0;
+}
+
+/**
+ * ata_msense_caching - Simulate MODE SENSE caching info page
+ * @id: device IDENTIFY data
+ * @buf: output buffer
+ *
+ * Generate a caching info page, which conditionally indicates
+ * write caching to the SCSI layer, depending on device
+ * capabilities.
+ *
+ * LOCKING:
+ * None.
+ */
+static unsigned int ata_msense_caching(u16 *id, u8 *buf)
+{
+ memcpy(buf, def_cache_mpage, sizeof(def_cache_mpage));
+ if (ata_id_wcache_enabled(id))
+ buf[2] |= (1 << 2); /* write cache enable */
+ if (!ata_id_rahead_enabled(id))
+ buf[12] |= (1 << 5); /* disable read ahead */
+ return sizeof(def_cache_mpage);
+}
+
+/**
+ * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
+ * @buf: output buffer
+ *
+ * Generate a generic MODE SENSE control mode page.
+ *
+ * LOCKING:
+ * None.
+ */
+static unsigned int ata_msense_ctl_mode(u8 *buf)
+{
+ memcpy(buf, def_control_mpage, sizeof(def_control_mpage));
+ return sizeof(def_control_mpage);
+}
+
+/**
+ * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
+ * @buf: output buffer
+ *
+ * Generate a generic MODE SENSE r/w error recovery page.
+ *
+ * LOCKING:
+ * None.
+ */
+static unsigned int ata_msense_rw_recovery(u8 *buf)
+{
+ memcpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage));
+ return sizeof(def_rw_recovery_mpage);
+}
+
+/*
+ * We can turn this into a real blacklist if it's needed; for now, just
+ * blacklist any Maxtor drive with BANC1G10 revision firmware.
+ */
+static int ata_dev_supports_fua(u16 *id)
+{
+ unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
+
+ if (!libata_fua)
+ return 0;
+ if (!ata_id_has_fua(id))
+ return 0;
+
+ ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
+ ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
+
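+ /* strcmp() returning non-zero means the string differs, i.e. any
+ * drive that is not the blacklisted Maxtor/BANC1G10 combination
+ * passes the checks below and is allowed to use FUA.
+ */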
+ if (strcmp(model, "Maxtor"))
+ return 1;
+ if (strcmp(fw, "BANC1G10"))
+ return 1;
+
+ return 0; /* blacklisted */
+}
+
+/**
+ * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Simulate MODE SENSE commands. Assume this is invoked for direct
+ * access devices (e.g. disks) only. There should be no block
+ * descriptor for other device types.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+{
+ struct ata_device *dev = args->dev;
+ u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
+ const u8 sat_blk_desc[] = {
+ 0, 0, 0, 0, /* number of blocks: sat unspecified */
+ 0,
+ 0, 0x2, 0x0 /* block length: 512 bytes */
+ };
+ u8 pg, spg;
+ unsigned int ebd, page_control, six_byte;
+ u8 dpofua;
+
+ VPRINTK("ENTER\n");
+
+ six_byte = (scsicmd[0] == MODE_SENSE);
+ ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
+ /*
+ * LLBA bit in msense(10) ignored (compliant)
+ */
+
+ page_control = scsicmd[2] >> 6;
+ switch (page_control) {
+ case 0: /* current */
+ break; /* supported */
+ case 3: /* saved */
+ goto saving_not_supp;
+ case 1: /* changeable */
+ case 2: /* defaults */
+ default:
+ goto invalid_fld;
+ }
+
+ if (six_byte)
+ p += 4 + (ebd ? 8 : 0);
+ else
+ p += 8 + (ebd ? 8 : 0);
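+
+ /* Offset example (illustrative): for MODE SENSE(6) with DBD clear
+ * (ebd set), mode page data starts after the 4-byte header and the
+ * 8-byte block descriptor, i.e. at rbuf + 12; for MODE SENSE(10)
+ * the 8-byte header puts it at rbuf + 16.
+ */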
+
+ pg = scsicmd[2] & 0x3f;
+ spg = scsicmd[3];
+ /*
+ * No mode subpages supported (yet) but asking for _all_
+ * subpages may be valid
+ */
+ if (spg && (spg != ALL_SUB_MPAGES))
+ goto invalid_fld;
+
+ switch(pg) {
+ case RW_RECOVERY_MPAGE:
+ p += ata_msense_rw_recovery(p);
+ break;
+
+ case CACHE_MPAGE:
+ p += ata_msense_caching(args->id, p);
+ break;
+
+ case CONTROL_MPAGE:
+ p += ata_msense_ctl_mode(p);
+ break;
+
+ case ALL_MPAGES:
+ p += ata_msense_rw_recovery(p);
+ p += ata_msense_caching(args->id, p);
+ p += ata_msense_ctl_mode(p);
+ break;
+
+ default: /* invalid page code */
+ goto invalid_fld;
+ }
+
+ dpofua = 0;
+ if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
+ (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
+ dpofua = 1 << 4;
+
+ if (six_byte) {
+ rbuf[0] = p - rbuf - 1;
+ rbuf[2] |= dpofua;
+ if (ebd) {
+ rbuf[3] = sizeof(sat_blk_desc);
+ memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
+ }
+ } else {
+ unsigned int output_len = p - rbuf - 2;
+
+ rbuf[0] = output_len >> 8;
+ rbuf[1] = output_len;
+ rbuf[3] |= dpofua;
+ if (ebd) {
+ rbuf[7] = sizeof(sat_blk_desc);
+ memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
+ }
+ }
+ return 0;
+
+invalid_fld:
+ ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+ /* "Invalid field in cbd" */
+ return 1;
+
+saving_not_supp:
+ ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+ /* "Saving parameters not supported" */
+ return 1;
+}
+
+/**
+ * ata_scsiop_read_cap - Simulate READ CAPACITY and READ CAPACITY(16) commands
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Simulate READ CAPACITY commands.
+ *
+ * LOCKING:
+ * None.
+ */
+static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+{
+ u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */
+
+ VPRINTK("ENTER\n");
+
+ if (args->cmd->cmnd[0] == READ_CAPACITY) {
+ if (last_lba >= 0xffffffffULL)
+ last_lba = 0xffffffff;
+
+ /* sector count, 32-bit */
+ rbuf[0] = last_lba >> (8 * 3);
+ rbuf[1] = last_lba >> (8 * 2);
+ rbuf[2] = last_lba >> (8 * 1);
+ rbuf[3] = last_lba;
+
+ /* sector size */
+ rbuf[6] = ATA_SECT_SIZE >> 8;
+ rbuf[7] = ATA_SECT_SIZE & 0xff;
+ } else {
+ /* sector count, 64-bit */
+ rbuf[0] = last_lba >> (8 * 7);
+ rbuf[1] = last_lba >> (8 * 6);
+ rbuf[2] = last_lba >> (8 * 5);
+ rbuf[3] = last_lba >> (8 * 4);
+ rbuf[4] = last_lba >> (8 * 3);
+ rbuf[5] = last_lba >> (8 * 2);
+ rbuf[6] = last_lba >> (8 * 1);
+ rbuf[7] = last_lba;
+
+ /* sector size */
+ rbuf[10] = ATA_SECT_SIZE >> 8;
+ rbuf[11] = ATA_SECT_SIZE & 0xff;
+ }
+
+ return 0;
+}
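+
+/*
+ * Illustrative example (not part of the driver): a device with
+ * n_sectors == 0x10000 has last_lba = 0xffff, so READ CAPACITY(10)
+ * returns 00 00 ff ff for the last LBA followed by 00 00 02 00 for
+ * the 512-byte sector size; READ CAPACITY(16) encodes the same
+ * values in 8-byte and 4-byte big-endian fields.
+ */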
+
+/**
+ * ata_scsiop_report_luns - Simulate REPORT LUNS command
+ * @args: device IDENTIFY data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Simulate REPORT LUNS command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
+{
+ VPRINTK("ENTER\n");
+ rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
+
+ return 0;
+}
+
+static void atapi_sense_complete(struct ata_queued_cmd *qc)
+{
+ if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
+ /* FIXME: not quite right; we don't want the
+ * translation of taskfile registers into
+ * sense descriptors, since that's only
+ * correct for ATA, not ATAPI
+ */
+ ata_gen_passthru_sense(qc);
+ }
+
+ qc->scsidone(qc->scsicmd);
+ ata_qc_free(qc);
+}
+
+/* is it pointless to prefer PIO for "safety reasons"? */
+static inline int ata_pio_use_silly(struct ata_port *ap)
+{
+ return (ap->flags & ATA_FLAG_PIO_DMA);
+}
+
+static void atapi_request_sense(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scsi_cmnd *cmd = qc->scsicmd;
+
+ DPRINTK("ATAPI request sense\n");
+
+ /* FIXME: is this needed? */
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+
+#ifdef CONFIG_ATA_SFF
+ if (ap->ops->sff_tf_read)
+ ap->ops->sff_tf_read(ap, &qc->tf);
+#endif
+
+ /* fill these in, for the case where they are -not- overwritten */
+ cmd->sense_buffer[0] = 0x70;
+ cmd->sense_buffer[2] = qc->tf.feature >> 4;
+
+ ata_qc_reinit(qc);
+
+ /* setup sg table and init transfer direction */
+ sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+ ata_sg_init(qc, &qc->sgent, 1);
+ qc->dma_dir = DMA_FROM_DEVICE;
+
+ memset(&qc->cdb, 0, qc->dev->cdb_len);
+ qc->cdb[0] = REQUEST_SENSE;
+ qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+ qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ qc->tf.command = ATA_CMD_PACKET;
+
+ if (ata_pio_use_silly(ap)) {
+ qc->tf.protocol = ATAPI_PROT_DMA;
+ qc->tf.feature |= ATAPI_PKT_DMA;
+ } else {
+ qc->tf.protocol = ATAPI_PROT_PIO;
+ qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
+ qc->tf.lbah = 0;
+ }
+ qc->nbytes = SCSI_SENSE_BUFFERSIZE;
+
+ qc->complete_fn = atapi_sense_complete;
+
+ ata_qc_issue(qc);
+
+ DPRINTK("EXIT\n");
+}
+
+static void atapi_qc_complete(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ unsigned int err_mask = qc->err_mask;
+
+ VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
+
+ /* handle completion from new EH */
+ if (unlikely(qc->ap->ops->error_handler &&
+ (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
+
+ if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
+ /* FIXME: not quite right; we don't want the
+ * translation of taskfile registers into
+ * sense descriptors, since that's only
+ * correct for ATA, not ATAPI
+ */
+ ata_gen_passthru_sense(qc);
+ }
+
+ /* SCSI EH automatically locks door if sdev->locked is
+ * set. Sometimes door lock request continues to
+ * fail, for example, when no media is present. This
+ * creates a loop - SCSI EH issues door lock which
+ * fails and gets invoked again to acquire sense data
+ * for the failed command.
+ *
+ * If door lock fails, always clear sdev->locked to
+ * avoid this infinite loop.
+ */
+ if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
+ qc->dev->sdev->locked = 0;
+
+ qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
+ qc->scsidone(cmd);
+ ata_qc_free(qc);
+ return;
+ }
+
+ /* successful completion or old EH failure path */
+ if (unlikely(err_mask & AC_ERR_DEV)) {
+ cmd->result = SAM_STAT_CHECK_CONDITION;
+ atapi_request_sense(qc);
+ return;
+ } else if (unlikely(err_mask)) {
+ /* FIXME: not quite right; we don't want the
+ * translation of taskfile registers into
+ * sense descriptors, since that's only
+ * correct for ATA, not ATAPI
+ */
+ ata_gen_passthru_sense(qc);
+ } else {
+ u8 *scsicmd = cmd->cmnd;
+
+ if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
+ unsigned long flags;
+ u8 *buf;
+
+ buf = ata_scsi_rbuf_get(cmd, true, &flags);
+
+ /* ATAPI devices typically report zero for their SCSI version,
+ * and sometimes deviate from the spec WRT response data
+ * format. If SCSI version is reported as zero like normal,
+ * then we make the following fixups: 1) Fake MMC-5 version,
+ * to indicate to the Linux scsi midlayer this is a modern
+ * device. 2) Ensure response data format / ATAPI information
+ * are always correct.
+ */
+ if (buf[2] == 0) {
+ buf[2] = 0x5;
+ buf[3] = 0x32;
+ }
+
+ ata_scsi_rbuf_put(cmd, true, &flags);
+ }
+
+ cmd->result = SAM_STAT_GOOD;
+ }
+
+ qc->scsidone(cmd);
+ ata_qc_free(qc);
+}
+/**
+ * atapi_xlat - Initialize PACKET taskfile
+ * @qc: command structure to be initialized
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Zero on success, non-zero on failure.
+ */
+static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ struct ata_device *dev = qc->dev;
+ int nodata = (scmd->sc_data_direction == DMA_NONE);
+ int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
+ unsigned int nbytes;
+
+ memset(qc->cdb, 0, dev->cdb_len);
+ memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);
+
+ qc->complete_fn = atapi_qc_complete;
+
+ qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ qc->tf.flags |= ATA_TFLAG_WRITE;
+ DPRINTK("direction: write\n");
+ }
+
+ qc->tf.command = ATA_CMD_PACKET;
+ ata_qc_set_pc_nbytes(qc);
+
+ /* check whether ATAPI DMA is safe */
+ if (!nodata && !using_pio && atapi_check_dma(qc))
+ using_pio = 1;
+
+ /* Some controller variants snoop this value for Packet
+ * transfers to do state machine and FIFO management. Thus we
+ * want to set it properly, even for DMA where it is
+ * effectively meaningless.
+ */
+ nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);
+
+ /* Most ATAPI devices which honor transfer chunk size don't
+ * behave according to the spec when an odd chunk size which
+ * matches the transfer length is specified, i.e. when the
+ * number of bytes to transfer is 2n+1. According to the spec,
+ * what should happen is to indicate that 2n+1 is going to be
+ * transferred and then transfer 2n+2 bytes where the last
+ * byte is padding.
+ *
+ * In practice, this doesn't happen. ATAPI devices first
+ * indicate and transfer 2n bytes and then indicate and
+ * transfer 2 bytes where the last byte is padding.
+ *
+ * This inconsistency confuses several controllers which
+ * perform PIO using DMA such as Intel AHCIs and sil3124/32.
+ * These controllers use the actual number of transferred bytes to
+ * update the DMA pointer, and a transfer of 4n+2 bytes makes those
+ * controllers push the DMA pointer by 4n+4 bytes because SATA data
+ * FISes are aligned to 4 bytes. This causes data corruption
+ * and buffer overrun.
+ *
+ * Always setting nbytes to even number solves this problem
+ * because then ATAPI devices don't have to split data at 2n
+ * boundaries.
+ */
+ if (nbytes & 0x1)
+ nbytes++;
+
+ qc->tf.lbam = (nbytes & 0xFF);
+ qc->tf.lbah = (nbytes >> 8);
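+
+ /* Example (illustrative): with the 63 * 1024 cap above, a large
+ * request yields nbytes = 0xfc00, so lbam = 0x00 and lbah = 0xfc.
+ * These are the ATAPI byte count limit registers the device reads
+ * to size each data transfer phase.
+ */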
+
+ if (nodata)
+ qc->tf.protocol = ATAPI_PROT_NODATA;
+ else if (using_pio)
+ qc->tf.protocol = ATAPI_PROT_PIO;
+ else {
+ /* DMA data xfer */
+ qc->tf.protocol = ATAPI_PROT_DMA;
+ qc->tf.feature |= ATAPI_PKT_DMA;
+
+ if ((dev->flags & ATA_DFLAG_DMADIR) &&
+ (scmd->sc_data_direction != DMA_TO_DEVICE))
+ /* some SATA bridges need us to indicate data xfer direction */
+ qc->tf.feature |= ATAPI_DMADIR;
+ }
+
+
+ /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
+ as ATAPI tape drives don't get this right otherwise */
+ return 0;
+}
+
+static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+{
+ if (!sata_pmp_attached(ap)) {
+ if (likely(devno < ata_link_max_devices(&ap->link)))
+ return &ap->link.device[devno];
+ } else {
+ if (likely(devno < ap->nr_pmp_links))
+ return &ap->pmp_link[devno].device[0];
+ }
+
+ return NULL;
+}
+
+static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
+ const struct scsi_device *scsidev)
+{
+ int devno;
+
+ /* skip commands not addressed to targets we simulate */
+ if (!sata_pmp_attached(ap)) {
+ if (unlikely(scsidev->channel || scsidev->lun))
+ return NULL;
+ devno = scsidev->id;
+ } else {
+ if (unlikely(scsidev->id || scsidev->lun))
+ return NULL;
+ devno = scsidev->channel;
+ }
+
+ return ata_find_dev(ap, devno);
+}
+
+/**
+ * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
+ * @ap: ATA port to which the device is attached
+ * @scsidev: SCSI device from which we derive the ATA device
+ *
+ * Given various information provided in struct scsi_cmnd,
+ * map that onto an ATA bus, and using that mapping
+ * determine which ata_device is associated with the
+ * SCSI command to be sent.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Associated ATA device, or %NULL if not found.
+ */
+static struct ata_device *
+ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
+{
+ struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
+
+ if (unlikely(!dev || !ata_dev_enabled(dev)))
+ return NULL;
+
+ return dev;
+}
+
+/**
+ * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
+ * @byte1: Byte 1 from pass-thru CDB.
+ *
+ * RETURNS:
+ * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
+ */
+static u8
+ata_scsi_map_proto(u8 byte1)
+{
+ switch((byte1 & 0x1e) >> 1) {
+ case 3: /* Non-data */
+ return ATA_PROT_NODATA;
+
+ case 6: /* DMA */
+ case 10: /* UDMA Data-in */
+ case 11: /* UDMA Data-Out */
+ return ATA_PROT_DMA;
+
+ case 4: /* PIO Data-in */
+ case 5: /* PIO Data-out */
+ return ATA_PROT_PIO;
+
+ case 0: /* Hard Reset */
+ case 1: /* SRST */
+ case 8: /* Device Diagnostic */
+ case 9: /* Device Reset */
+ case 7: /* DMA Queued */
+ case 12: /* FPDMA */
+ case 15: /* Return Response Info */
+ default: /* Reserved */
+ break;
+ }
+
+ return ATA_PROT_UNKNOWN;
+}
+
+/**
+ * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
+ * @qc: command structure to be initialized
+ *
+ * Handles either 12 or 16-byte versions of the CDB.
+ *
+ * RETURNS:
+ * Zero on success, non-zero on failure.
+ */
+static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
+{
+ struct ata_taskfile *tf = &(qc->tf);
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ struct ata_device *dev = qc->dev;
+ const u8 *cdb = scmd->cmnd;
+
+ if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
+ goto invalid_fld;
+
+ /*
+ * Filter TPM commands by default. These provide an
+ * essentially uncontrolled encrypted "back door" between
+ * applications and the disk. Set libata.allow_tpm=1 if you
+ * have a real reason for wanting to use them. This ensures
+ * that installed software cannot easily mess stuff up without
+ * user intent. DVR type users will probably ship with this enabled
+ * for movie content management.
+ *
+ * Note that for ATA8 we can issue a DCS change and DCS freeze lock
+ * for this and should do in future but that it is not sufficient as
+ * DCS is an optional feature set. Thus we also do the software filter
+ * so that we comply with the TC consortium stated goal that the user
+ * can turn off TC features of their system.
+ */
+ {
+ /* tf->command is not populated yet at this point; take the
+ * ATA opcode straight from the CDB (byte 14 for ATA_16,
+ * byte 9 for ATA_12, matching the parsing further below).
+ */
+ u8 ata_cmd = (cdb[0] == ATA_16) ? cdb[14] : cdb[9];
+
+ if (ata_cmd >= 0x5C && ata_cmd <= 0x5F && !libata_allow_tpm)
+ goto invalid_fld;
+ }
+
+ /* We may not issue DMA commands if no DMA mode is set */
+ if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
+ goto invalid_fld;
+
+ /*
+ * 12 and 16 byte CDBs use different offsets to
+ * provide the various register values.
+ */
+ if (cdb[0] == ATA_16) {
+ /*
+ * 16-byte CDB - may contain extended commands.
+ *
+ * If that is the case, copy the upper byte register values.
+ */
+ if (cdb[1] & 0x01) {
+ tf->hob_feature = cdb[3];
+ tf->hob_nsect = cdb[5];
+ tf->hob_lbal = cdb[7];
+ tf->hob_lbam = cdb[9];
+ tf->hob_lbah = cdb[11];
+ tf->flags |= ATA_TFLAG_LBA48;
+ } else
+ tf->flags &= ~ATA_TFLAG_LBA48;
+
+ /*
+ * Always copy low byte, device and command registers.
+ */
+ tf->feature = cdb[4];
+ tf->nsect = cdb[6];
+ tf->lbal = cdb[8];
+ tf->lbam = cdb[10];
+ tf->lbah = cdb[12];
+ tf->device = cdb[13];
+ tf->command = cdb[14];
+ } else {
+ /*
+ * 12-byte CDB - incapable of extended commands.
+ */
+ tf->flags &= ~ATA_TFLAG_LBA48;
+
+ tf->feature = cdb[3];
+ tf->nsect = cdb[4];
+ tf->lbal = cdb[5];
+ tf->lbam = cdb[6];
+ tf->lbah = cdb[7];
+ tf->device = cdb[8];
+ tf->command = cdb[9];
+ }
+
+ /* enforce correct master/slave bit */
+ tf->device = dev->devno ?
+ tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
+
+ /* sanity check for pio multi commands */
+ if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
+ goto invalid_fld;
+
+ if (is_multi_taskfile(tf)) {
+ unsigned int multi_count = 1 << (cdb[1] >> 5);
+
+ /* compare the passed through multi_count
+ * with the cached multi_count of libata
+ */
+ if (multi_count != dev->multi_count)
+ ata_dev_printk(dev, KERN_WARNING,
+ "invalid multi_count %u ignored\n",
+ multi_count);
+ }
+
+ /* READ/WRITE LONG use a non-standard sect_size */
+ qc->sect_size = ATA_SECT_SIZE;
+ switch (tf->command) {
+ case ATA_CMD_READ_LONG:
+ case ATA_CMD_READ_LONG_ONCE:
+ case ATA_CMD_WRITE_LONG:
+ case ATA_CMD_WRITE_LONG_ONCE:
+ if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
+ goto invalid_fld;
+ qc->sect_size = scsi_bufflen(scmd);
+ }
+
+ /*
+ * Filter SET_FEATURES - XFER MODE command -- otherwise,
+ * SET_FEATURES - XFER MODE must be preceded/succeeded
+ * by an update to hardware-specific registers for each
+ * controller (i.e. the reason for ->set_piomode(),
+ * ->set_dmamode(), and ->post_set_mode() hooks).
+ */
+ if ((tf->command == ATA_CMD_SET_FEATURES)
+ && (tf->feature == SETFEATURES_XFER))
+ goto invalid_fld;
+
+ /*
+ * Set flags so that all registers will be written,
+ * and pass on write indication (used for PIO/DMA
+ * setup.)
+ */
+ tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ tf->flags |= ATA_TFLAG_WRITE;
+
+ /*
+ * Set transfer length.
+ *
+ * TODO: find out if we need to do more here to
+ * cover scatter/gather case.
+ */
+ ata_qc_set_pc_nbytes(qc);
+
+ /* request result TF and be quiet about device error */
+ qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
+
+ return 0;
+
+ invalid_fld:
+ ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
+ /* "Invalid field in cdb" */
+ return 1;
+}
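+
+/*
+ * Illustrative SAT example (not part of the driver): an ATA_16 CDB of
+ * { 0x85, 0x08, 0x0e, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0xec, 0 }
+ * requests IDENTIFY DEVICE via PIO Data-In; ata_scsi_pass_thru() above
+ * copies byte 6 into tf->nsect and byte 14 (0xec) into tf->command,
+ * while the CK_COND bit in byte 2 is examined at completion time in
+ * ata_scsi_qc_complete().
+ */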
+
+/**
+ * ata_get_xlat_func - check if SCSI to ATA translation is possible
+ * @dev: ATA device
+ * @cmd: SCSI command opcode to consider
+ *
+ * Look up the SCSI command given, and determine whether the
+ * SCSI command is to be translated or simulated.
+ *
+ * RETURNS:
+ * Pointer to translation function if possible, %NULL if not.
+ */
+
+static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
+{
+ switch (cmd) {
+ case READ_6:
+ case READ_10:
+ case READ_16:
+
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_16:
+ return ata_scsi_rw_xlat;
+
+ case SYNCHRONIZE_CACHE:
+ if (ata_try_flush_cache(dev))
+ return ata_scsi_flush_xlat;
+ break;
+
+ case VERIFY:
+ case VERIFY_16:
+ return ata_scsi_verify_xlat;
+
+ case ATA_12:
+ case ATA_16:
+ return ata_scsi_pass_thru;
+
+ case START_STOP:
+ return ata_scsi_start_stop_xlat;
+ }
+
+ return NULL;
+}
+
+/**
+ * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
+ * @ap: ATA port to which the command was being sent
+ * @cmd: SCSI command to dump
+ *
+ * Prints the contents of a SCSI command via printk().
+ */
+
+static inline void ata_scsi_dump_cdb(struct ata_port *ap,
+ struct scsi_cmnd *cmd)
+{
+#ifdef ATA_DEBUG
+ struct scsi_device *scsidev = cmd->device;
+ u8 *scsicmd = cmd->cmnd;
+
+ DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ ap->print_id,
+ scsidev->channel, scsidev->id, scsidev->lun,
+ scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
+ scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
+ scsicmd[8]);
+#endif
+}
+
+static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
+ void (*done)(struct scsi_cmnd *),
+ struct ata_device *dev)
+{
+ u8 scsi_op = scmd->cmnd[0];
+ ata_xlat_func_t xlat_func;
+ int rc = 0;
+
+ if (dev->class == ATA_DEV_ATA) {
+ if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
+ goto bad_cdb_len;
+
+ xlat_func = ata_get_xlat_func(dev, scsi_op);
+ } else {
+ if (unlikely(!scmd->cmd_len))
+ goto bad_cdb_len;
+
+ xlat_func = NULL;
+ if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
+ /* relay SCSI command to ATAPI device */
+ int len = COMMAND_SIZE(scsi_op);
+ if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
+ goto bad_cdb_len;
+
+ xlat_func = atapi_xlat;
+ } else {
+ /* ATA_16 passthru, treat as an ATA command */
+ if (unlikely(scmd->cmd_len > 16))
+ goto bad_cdb_len;
+
+ xlat_func = ata_get_xlat_func(dev, scsi_op);
+ }
+ }
+
+ if (xlat_func)
+ rc = ata_scsi_translate(dev, scmd, done, xlat_func);
+ else
+ ata_scsi_simulate(dev, scmd, done);
+
+ return rc;
+
+ bad_cdb_len:
+ DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
+ scmd->cmd_len, scsi_op, dev->cdb_len);
+ scmd->result = DID_ERROR << 16;
+ done(scmd);
+ return 0;
+}
+
+/**
+ * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
+ * @cmd: SCSI command to be sent
+ * @done: Completion function, called when command is complete
+ *
+ * In some cases, this function translates SCSI commands into
+ * ATA taskfiles, and queues the taskfiles to be sent to
+ * hardware. In other cases, this function simulates a
+ * SCSI device by evaluating and responding to certain
+ * SCSI commands. This creates the overall effect of
+ * ATA and ATAPI devices appearing as SCSI devices.
+ *
+ * LOCKING:
+ * Releases scsi-layer-held lock, and obtains host lock.
+ *
+ * RETURNS:
+ * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
+ * 0 otherwise.
+ */
+int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct ata_port *ap;
+ struct ata_device *dev;
+ struct scsi_device *scsidev = cmd->device;
+ struct Scsi_Host *shost = scsidev->host;
+ int rc = 0;
+
+ ap = ata_shost_to_port(shost);
+
+ spin_unlock(shost->host_lock);
+ spin_lock(ap->lock);
+
+ ata_scsi_dump_cdb(ap, cmd);
+
+ dev = ata_scsi_find_dev(ap, scsidev);
+ if (likely(dev))
+ rc = __ata_scsi_queuecmd(cmd, done, dev);
+ else {
+ cmd->result = (DID_BAD_TARGET << 16);
+ done(cmd);
+ }
+
+ spin_unlock(ap->lock);
+ spin_lock(shost->host_lock);
+ return rc;
+}
+
+/**
+ * ata_scsi_simulate - simulate SCSI command on ATA device
+ * @dev: the target device
+ * @cmd: SCSI command being sent to device.
+ * @done: SCSI command completion function.
+ *
+ * Interprets and directly executes a select list of SCSI commands
+ * that can be handled internally.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+
+void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct ata_scsi_args args;
+ const u8 *scsicmd = cmd->cmnd;
+ u8 tmp8;
+
+ args.dev = dev;
+ args.id = dev->id;
+ args.cmd = cmd;
+ args.done = done;
+
+ switch(scsicmd[0]) {
+ /* TODO: worth improving? */
+ case FORMAT_UNIT:
+ ata_scsi_invalid_field(cmd, done);
+ break;
+
+ case INQUIRY:
+ if (scsicmd[1] & 2) /* is CmdDt set? */
+ ata_scsi_invalid_field(cmd, done);
+ else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
+ else switch (scsicmd[2]) {
+ case 0x00:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
+ break;
+ case 0x80:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
+ break;
+ case 0x83:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
+ break;
+ case 0x89:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
+ break;
+ case 0xb1:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
+ break;
+ default:
+ ata_scsi_invalid_field(cmd, done);
+ break;
+ }
+ break;
+
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
+ break;
+
+ case MODE_SELECT: /* unconditionally return */
+ case MODE_SELECT_10: /* bad-field-in-cdb */
+ ata_scsi_invalid_field(cmd, done);
+ break;
+
+ case READ_CAPACITY:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+ break;
+
+ case SERVICE_ACTION_IN:
+ if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
+ else
+ ata_scsi_invalid_field(cmd, done);
+ break;
+
+ case REPORT_LUNS:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
+ break;
+
+ case REQUEST_SENSE:
+ ata_scsi_set_sense(cmd, 0, 0, 0);
+ cmd->result = (DRIVER_SENSE << 24);
+ done(cmd);
+ break;
+
+ /* if we reach this, then writeback caching is disabled,
+ * turning this into a no-op.
+ */
+ case SYNCHRONIZE_CACHE:
+ /* fall through */
+
+ /* no-op's, complete with success */
+ case REZERO_UNIT:
+ case SEEK_6:
+ case SEEK_10:
+ case TEST_UNIT_READY:
+ ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
+ break;
+
+ case SEND_DIAGNOSTIC:
+ tmp8 = scsicmd[1] & ~(1 << 3);
+ if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
+ ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
+ else
+ ata_scsi_invalid_field(cmd, done);
+ break;
+
+ /* all other commands */
+ default:
+ ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
+ /* "Invalid command operation code" */
+ done(cmd);
+ break;
+ }
+}
+
+int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
+{
+ int i, rc;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ struct Scsi_Host *shost;
+
+ rc = -ENOMEM;
+ shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
+ if (!shost)
+ goto err_alloc;
+
+ *(struct ata_port **)&shost->hostdata[0] = ap;
+ ap->scsi_host = shost;
+
+ shost->transportt = &ata_scsi_transport_template;
+ shost->unique_id = ap->print_id;
+ shost->max_id = 16;
+ shost->max_lun = 1;
+ shost->max_channel = 1;
+ shost->max_cmd_len = 16;
+
+ /* Schedule policy is determined by ->qc_defer()
+ * callback and it needs to see every deferred qc.
+ * Set host_blocked to 1 to prevent SCSI midlayer from
+ * automatically deferring requests.
+ */
+ shost->max_host_blocked = 1;
+
+ rc = scsi_add_host(ap->scsi_host, ap->host->dev);
+ if (rc)
+ goto err_add;
+ }
+
+ return 0;
+
+ err_add:
+ scsi_host_put(host->ports[i]->scsi_host);
+ err_alloc:
+ while (--i >= 0) {
+ struct Scsi_Host *shost = host->ports[i]->scsi_host;
+
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+ }
+ return rc;
+}
+
+void ata_scsi_scan_host(struct ata_port *ap, int sync)
+{
+ int tries = 5;
+ struct ata_device *last_failed_dev = NULL;
+ struct ata_link *link;
+ struct ata_device *dev;
+
+ if (ap->flags & ATA_FLAG_DISABLED)
+ return;
+
+ repeat:
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ struct scsi_device *sdev;
+ int channel = 0, id = 0;
+
+ if (!ata_dev_enabled(dev) || dev->sdev)
+ continue;
+
+ if (ata_is_host_link(link))
+ id = dev->devno;
+ else
+ channel = link->pmp;
+
+ sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
+ NULL);
+ if (!IS_ERR(sdev)) {
+ dev->sdev = sdev;
+ scsi_device_put(sdev);
+ }
+ }
+ }
+
+ /* If we scanned while EH was in progress or allocation
+ * failure occurred, scan would have failed silently. Check
+ * whether all devices are attached.
+ */
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ if (ata_dev_enabled(dev) && !dev->sdev)
+ goto exit_loop;
+ }
+ }
+ exit_loop:
+ if (!link)
+ return;
+
+ /* we're missing some SCSI devices */
+ if (sync) {
+ /* If caller requested synchronous scan && we've made
+ * any progress, sleep briefly and repeat.
+ */
+ if (dev != last_failed_dev) {
+ msleep(100);
+ last_failed_dev = dev;
+ goto repeat;
+ }
+
+ /* We might be failing to detect boot device, give it
+ * a few more chances.
+ */
+ if (--tries) {
+ msleep(100);
+ goto repeat;
+ }
+
+ ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
+ "failed without making any progress,\n"
+ " switching to async\n");
+ }
+
+ queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
+ round_jiffies_relative(HZ));
+}
+
+/**
+ * ata_scsi_offline_dev - offline attached SCSI device
+ * @dev: ATA device to offline attached SCSI device for
+ *
+ * This function is called from ata_eh_hotplug() and responsible
+ * for taking the SCSI device attached to @dev offline. This
+ * function is called with host lock which protects dev->sdev
+ * against clearing.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * 1 if attached SCSI device exists, 0 otherwise.
+ */
+int ata_scsi_offline_dev(struct ata_device *dev)
+{
+ if (dev->sdev) {
+ scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * ata_scsi_remove_dev - remove attached SCSI device
+ * @dev: ATA device to remove attached SCSI device for
+ *
+ * This function is called from ata_eh_scsi_hotplug() and
+ * responsible for removing the SCSI device attached to @dev.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_scsi_remove_dev(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ /* Alas, we need to grab scan_mutex to ensure SCSI device
+ * state doesn't change underneath us and thus
+ * scsi_device_get() always succeeds. The mutex locking can
+ * be removed if there is a __scsi_device_get() interface which
+ * increments reference counts regardless of device state.
+ */
+ mutex_lock(&ap->scsi_host->scan_mutex);
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* clearing dev->sdev is protected by host lock */
+ sdev = dev->sdev;
+ dev->sdev = NULL;
+
+ if (sdev) {
+ /* If user initiated unplug races with us, sdev can go
+ * away underneath us after the host lock and
+ * scan_mutex are released. Hold onto it.
+ */
+ if (scsi_device_get(sdev) == 0) {
+ /* The following ensures the attached sdev is
+ * offline on return from ata_scsi_offline_dev()
+ * regardless of whether it wins or loses the race
+ * against this function.
+ */
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ } else {
+ WARN_ON(1);
+ sdev = NULL;
+ }
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ mutex_unlock(&ap->scsi_host->scan_mutex);
+
+ if (sdev) {
+ ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
+ sdev->sdev_gendev.bus_id);
+
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+}
+
+static void ata_scsi_handle_link_detach(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_device *dev;
+
+ ata_link_for_each_dev(dev, link) {
+ unsigned long flags;
+
+ if (!(dev->flags & ATA_DFLAG_DETACHED))
+ continue;
+
+ spin_lock_irqsave(ap->lock, flags);
+ dev->flags &= ~ATA_DFLAG_DETACHED;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ ata_scsi_remove_dev(dev);
+ }
+}
+
+/**
+ * ata_scsi_media_change_notify - send media change event
+ * @dev: Pointer to the disk device with media change event
+ *
+ * Tell the block layer to send a media change notification
+ * event.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_scsi_media_change_notify(struct ata_device *dev)
+{
+ if (dev->sdev)
+ sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
+ GFP_ATOMIC);
+}
+
+/**
+ * ata_scsi_hotplug - SCSI part of hotplug
+ * @work: Pointer to ATA port to perform SCSI hotplug on
+ *
+ * Perform SCSI part of hotplug. It's executed from a separate
+ * workqueue after EH completes. This is necessary because SCSI
+ * hot plugging requires working EH and hot unplugging is
+ * synchronized with hot plugging via a mutex.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_scsi_hotplug(struct work_struct *work)
+{
+ struct ata_port *ap =
+ container_of(work, struct ata_port, hotplug_task.work);
+ int i;
+
+ if (ap->pflags & ATA_PFLAG_UNLOADING) {
+ DPRINTK("ENTER/EXIT - unloading\n");
+ return;
+ }
+
+ DPRINTK("ENTER\n");
+
+ /* Unplug detached devices. We cannot use link iterator here
+ * because PMP links have to be scanned even if PMP is
+ * currently not attached. Iterate manually.
+ */
+ ata_scsi_handle_link_detach(&ap->link);
+ if (ap->pmp_link)
+ for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
+ ata_scsi_handle_link_detach(&ap->pmp_link[i]);
+
+ /* scan for new ones */
+ ata_scsi_scan_host(ap, 0);
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_scsi_user_scan - indication for user-initiated bus scan
+ * @shost: SCSI host to scan
+ * @channel: Channel to scan
+ * @id: ID to scan
+ * @lun: LUN to scan
+ *
+ * This function is called when user explicitly requests bus
+ * scan. Set probe pending flag and invoke EH.
+ *
+ * LOCKING:
+ * SCSI layer (we don't care)
+ *
+ * RETURNS:
+ * Zero.
+ */
+static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
+ unsigned int id, unsigned int lun)
+{
+ struct ata_port *ap = ata_shost_to_port(shost);
+ unsigned long flags;
+ int devno, rc = 0;
+
+ if (!ap->ops->error_handler)
+ return -EOPNOTSUPP;
+
+ if (lun != SCAN_WILD_CARD && lun)
+ return -EINVAL;
+
+ if (!sata_pmp_attached(ap)) {
+ if (channel != SCAN_WILD_CARD && channel)
+ return -EINVAL;
+ devno = id;
+ } else {
+ if (id != SCAN_WILD_CARD && id)
+ return -EINVAL;
+ devno = channel;
+ }
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (devno == SCAN_WILD_CARD) {
+ struct ata_link *link;
+
+ ata_port_for_each_link(link, ap) {
+ struct ata_eh_info *ehi = &link->eh_info;
+ ehi->probe_mask |= ATA_ALL_DEVICES;
+ ehi->action |= ATA_EH_RESET;
+ }
+ } else {
+ struct ata_device *dev = ata_find_dev(ap, devno);
+
+ if (dev) {
+ struct ata_eh_info *ehi = &dev->link->eh_info;
+ ehi->probe_mask |= 1 << dev->devno;
+ ehi->action |= ATA_EH_RESET;
+ } else
+ rc = -EINVAL;
+ }
+
+ if (rc == 0) {
+ ata_port_schedule_eh(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+ ata_port_wait_eh(ap);
+ } else
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+
+/**
+ * ata_scsi_dev_rescan - initiate scsi_rescan_device()
+ * @work: Pointer to ATA port to perform scsi_rescan_device()
+ *
+ * After ATA pass thru (SAT) commands are executed successfully,
+ * libata needs to propagate the changes to the SCSI layer. This
+ * function must be executed from ata_aux_wq such that sdev
+ * attach/detach don't race with rescan.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_scsi_dev_rescan(struct work_struct *work)
+{
+ struct ata_port *ap =
+ container_of(work, struct ata_port, scsi_rescan_task);
+ struct ata_link *link;
+ struct ata_device *dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ struct scsi_device *sdev = dev->sdev;
+
+ if (!ata_dev_enabled(dev) || !sdev)
+ continue;
+ if (scsi_device_get(sdev))
+ continue;
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ scsi_rescan_device(&(sdev->sdev_gendev));
+ scsi_device_put(sdev);
+ spin_lock_irqsave(ap->lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
+ * @host: ATA host container for all SAS ports
+ * @port_info: Information from low-level host driver
+ * @shost: SCSI host that the scsi device is attached to
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * ata_port pointer on success / NULL on failure.
+ */
+
+struct ata_port *ata_sas_port_alloc(struct ata_host *host,
+ struct ata_port_info *port_info,
+ struct Scsi_Host *shost)
+{
+ struct ata_port *ap;
+
+ ap = ata_port_alloc(host);
+ if (!ap)
+ return NULL;
+
+ ap->port_no = 0;
+ ap->lock = shost->host_lock;
+ ap->pio_mask = port_info->pio_mask;
+ ap->mwdma_mask = port_info->mwdma_mask;
+ ap->udma_mask = port_info->udma_mask;
+ ap->flags |= port_info->flags;
+ ap->ops = port_info->port_ops;
+ ap->cbl = ATA_CBL_SATA;
+
+ return ap;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
+
+/**
+ * ata_sas_port_start - Set port up for dma.
+ * @ap: Port to initialize
+ *
+ * Called just after data structures for each port are
+ * initialized.
+ *
+ * May be used as the port_start() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+int ata_sas_port_start(struct ata_port *ap)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_start);
+
+/**
+ * ata_sas_port_stop - Undo ata_sas_port_start()
+ * @ap: Port to shut down
+ *
+ * May be used as the port_stop() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_sas_port_stop(struct ata_port *ap)
+{
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_stop);
+
+/**
+ * ata_sas_port_init - Initialize a SATA device
+ * @ap: SATA port to initialize
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ *
+ * RETURNS:
+ * Zero on success, non-zero on error.
+ */
+
+int ata_sas_port_init(struct ata_port *ap)
+{
+ int rc = ap->ops->port_start(ap);
+
+ if (!rc) {
+ ap->print_id = ata_print_id++;
+ rc = ata_bus_probe(ap);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_init);
+
+/**
+ * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
+ * @ap: SATA port to destroy
+ *
+ */
+
+void ata_sas_port_destroy(struct ata_port *ap)
+{
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ kfree(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
+
+/**
+ * ata_sas_slave_configure - Default slave_config routine for libata devices
+ * @sdev: SCSI device to configure
+ * @ap: ATA port to which SCSI device is attached
+ *
+ * RETURNS:
+ * Zero.
+ */
+
+int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
+{
+ ata_scsi_sdev_config(sdev);
+ ata_scsi_dev_config(sdev, ap->link.device);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
+
+/**
+ * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
+ * @cmd: SCSI command to be sent
+ * @done: Completion function, called when command is complete
+ * @ap: ATA port to which the command is being sent
+ *
+ * RETURNS:
+ * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
+ * 0 otherwise.
+ */
+
+int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
+ struct ata_port *ap)
+{
+ int rc = 0;
+
+ ata_scsi_dump_cdb(ap, cmd);
+
+ if (likely(ata_dev_enabled(ap->link.device)))
+ rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
+ else {
+ cmd->result = (DID_BAD_TARGET << 16);
+ done(cmd);
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
new file mode 100644
index 0000000..9033d16
--- /dev/null
+++ b/drivers/ata/libata-sff.c
@@ -0,0 +1,2876 @@
+/*
+ * libata-sff.c - helper library for PCI IDE BMDMA
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2006 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from http://www.t13.org/ and
+ * http://www.sata-io.org/
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/libata.h>
+#include <linux/highmem.h>
+
+#include "libata.h"
+
+const struct ata_port_operations ata_sff_port_ops = {
+ .inherits = &ata_base_port_ops,
+
+ .qc_prep = ata_sff_qc_prep,
+ .qc_issue = ata_sff_qc_issue,
+ .qc_fill_rtf = ata_sff_qc_fill_rtf,
+
+ .freeze = ata_sff_freeze,
+ .thaw = ata_sff_thaw,
+ .prereset = ata_sff_prereset,
+ .softreset = ata_sff_softreset,
+ .hardreset = sata_sff_hardreset,
+ .postreset = ata_sff_postreset,
+ .error_handler = ata_sff_error_handler,
+ .post_internal_cmd = ata_sff_post_internal_cmd,
+
+ .sff_dev_select = ata_sff_dev_select,
+ .sff_check_status = ata_sff_check_status,
+ .sff_tf_load = ata_sff_tf_load,
+ .sff_tf_read = ata_sff_tf_read,
+ .sff_exec_command = ata_sff_exec_command,
+ .sff_data_xfer = ata_sff_data_xfer,
+ .sff_irq_on = ata_sff_irq_on,
+ .sff_irq_clear = ata_sff_irq_clear,
+
+ .port_start = ata_sff_port_start,
+};
+
+const struct ata_port_operations ata_bmdma_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .mode_filter = ata_bmdma_mode_filter,
+
+ .bmdma_setup = ata_bmdma_setup,
+ .bmdma_start = ata_bmdma_start,
+ .bmdma_stop = ata_bmdma_stop,
+ .bmdma_status = ata_bmdma_status,
+};
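+
+/*
+ * Drivers normally layer their own ata_port_operations on top of the
+ * tables above via .inherits; a sketch (the my_* names are hypothetical):
+ *
+ *	static struct ata_port_operations my_pata_ops = {
+ *		.inherits	= &ata_bmdma_port_ops,
+ *		.set_piomode	= my_set_piomode,
+ *		.set_dmamode	= my_set_dmamode,
+ *	};
+ */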
+
+/**
+ * ata_fill_sg - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scatterlist *sg;
+ unsigned int si, pi;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ /* determine if physical DMA addr spans 64K boundary.
+ * Note h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ offset = addr & 0xffff;
+ len = sg_len;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ ap->prd[pi].addr = cpu_to_le32(addr);
+ ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
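+
+/*
+ * Worked example for the boundary split above: a segment with
+ * addr = 0x1ff00 and sg_len = 0x300 crosses the 64K mark at 0x20000,
+ * so it is emitted as two PRD entries, (0x1ff00, 0x100) followed by
+ * (0x20000, 0x200).
+ */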
+
+/**
+ * ata_fill_sg_dumb - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command. Perform the fill
+ * so that we avoid writing a zero-length record (which the spec
+ * defines to mean 64K) for controllers that don't follow the spec.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scatterlist *sg;
+ unsigned int si, pi;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, offset;
+ u32 sg_len, len, blen;
+
+ /* determine if physical DMA addr spans 64K boundary.
+ * Note h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ offset = addr & 0xffff;
+ len = sg_len;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ blen = len & 0xffff;
+ ap->prd[pi].addr = cpu_to_le32(addr);
+ if (blen == 0) {
+ /* Some PATA chipsets like the CS5530 can't
+ cope with 0x0000 meaning 64K as the spec says */
+ ap->prd[pi].flags_len = cpu_to_le32(0x8000);
+ blen = 0x8000;
+ ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
+ }
+ ap->prd[pi].flags_len = cpu_to_le32(blen);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
+
+ pi++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
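+
+/*
+ * Worked example for the zero-length quirk above: a full 64K segment
+ * has len = 0x10000, so blen = len & 0xffff = 0, which would encode as
+ * the 0x0000 length field that broken chipsets mishandle; it is
+ * therefore emitted as two 32K PRD entries at addr and addr + 0x8000.
+ */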
+
+/**
+ * ata_sff_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_sff_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ ata_fill_sg(qc);
+}
+
+/**
+ * ata_sff_dumb_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * Prepare ATA taskfile for submission.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ ata_fill_sg_dumb(qc);
+}
+
+/**
+ * ata_sff_check_status - Read device status reg & clear interrupt
+ * @ap: port where the device is
+ *
+ * Reads the ATA taskfile status register for the currently-selected
+ * device and returns its value. This also clears pending interrupts
+ * from this device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+u8 ata_sff_check_status(struct ata_port *ap)
+{
+ return ioread8(ap->ioaddr.status_addr);
+}
+
+/**
+ * ata_sff_altstatus - Read device alternate status reg
+ * @ap: port where the device is
+ *
+ * Reads the ATA taskfile alternate status register for the
+ * currently-selected device and returns its value.
+ *
+ * Note: may NOT be used as the check_altstatus() entry in
+ * ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static u8 ata_sff_altstatus(struct ata_port *ap)
+{
+ if (ap->ops->sff_check_altstatus)
+ return ap->ops->sff_check_altstatus(ap);
+
+ return ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ * ata_sff_irq_status - Check if the device is busy
+ * @ap: port where the device is
+ *
+ * Determine if the port is currently busy. Uses altstatus
+ * if available in order to avoid clearing shared IRQ status
+ * when finding an IRQ source. Fortunately for us, devices
+ * without a ctl register don't share interrupt lines.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static u8 ata_sff_irq_status(struct ata_port *ap)
+{
+ u8 status;
+
+ if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+ status = ata_sff_altstatus(ap);
+ /* Not us: We are busy */
+ if (status & ATA_BUSY)
+ return status;
+ }
+ /* Clear INTRQ latch */
+ status = ap->ops->sff_check_status(ap);
+ return status;
+}
+
+/**
+ * ata_sff_sync - Flush writes
+ * @ap: Port to wait for.
+ *
+ * CAUTION:
+ * If we have an mmio device with no ctl and no altstatus
+ * method, this will fail. No such devices are known to exist.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+static void ata_sff_sync(struct ata_port *ap)
+{
+ if (ap->ops->sff_check_altstatus)
+ ap->ops->sff_check_altstatus(ap);
+ else if (ap->ioaddr.altstatus_addr)
+ ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ * ata_sff_pause - Flush writes and wait 400ns
+ * @ap: Port to pause for.
+ *
+ * CAUTION:
+ * If we have an mmio device with no ctl and no altstatus
+ * method, this will fail. No such devices are known to exist.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_sff_pause(struct ata_port *ap)
+{
+ ata_sff_sync(ap);
+ ndelay(400);
+}
+
+/**
+ * ata_sff_dma_pause - Pause before commencing DMA
+ * @ap: Port to pause for.
+ *
+ * Perform I/O fencing and ensure sufficient cycle delays occur
+ * for the HDMA1:0 transition
+ */
+
+void ata_sff_dma_pause(struct ata_port *ap)
+{
+ if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+ /* An altstatus read will cause the needed delay without
+ messing up the IRQ status */
+ ata_sff_altstatus(ap);
+ return;
+ }
+ /* There are no DMA controllers without ctl. BUG here to ensure
+ we never violate the HDMA1:0 transition timing and risk
+ corruption. */
+ BUG();
+}
+
+/**
+ * ata_sff_busy_sleep - sleep until BSY clears, or timeout
+ * @ap: port containing status register to be polled
+ * @tmout_pat: impatience timeout in msecs
+ * @tmout: overall timeout in msecs
+ *
+ * Sleep until ATA Status register bit BSY clears,
+ * or a timeout occurs.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_sff_busy_sleep(struct ata_port *ap,
+ unsigned long tmout_pat, unsigned long tmout)
+{
+ unsigned long timer_start, timeout;
+ u8 status;
+
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
+ timer_start = jiffies;
+ timeout = ata_deadline(timer_start, tmout_pat);
+ while (status != 0xff && (status & ATA_BUSY) &&
+ time_before(jiffies, timeout)) {
+ msleep(50);
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
+ }
+
+ if (status != 0xff && (status & ATA_BUSY))
+ ata_port_printk(ap, KERN_WARNING,
+ "port is slow to respond, please be patient "
+ "(Status 0x%x)\n", status);
+
+ timeout = ata_deadline(timer_start, tmout);
+ while (status != 0xff && (status & ATA_BUSY) &&
+ time_before(jiffies, timeout)) {
+ msleep(50);
+ status = ap->ops->sff_check_status(ap);
+ }
+
+ if (status == 0xff)
+ return -ENODEV;
+
+ if (status & ATA_BUSY) {
+ ata_port_printk(ap, KERN_ERR, "port failed to respond "
+ "(%lu secs, Status 0x%x)\n",
+ DIV_ROUND_UP(tmout, 1000), status);
+ return -EBUSY;
+ }
+
+ return 0;
+}
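+
+/*
+ * A typical call, assuming the ATA_TMOUT_* constants from
+ * <linux/libata.h> (both values are in msecs):
+ *
+ *	rc = ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+ */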
+
+static int ata_sff_check_ready(struct ata_link *link)
+{
+ u8 status = link->ap->ops->sff_check_status(link->ap);
+
+ return ata_check_ready(status);
+}
+
+/**
+ * ata_sff_wait_ready - sleep until BSY clears, or timeout
+ * @link: SFF link to wait ready status for
+ * @deadline: deadline jiffies for the operation
+ *
+ * Sleep until ATA Status register bit BSY clears, or timeout
+ * occurs.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
+{
+ return ata_wait_ready(link, deadline, ata_sff_check_ready);
+}
+
+/**
+ * ata_sff_dev_select - Select device 0/1 on ATA bus
+ * @ap: ATA channel to manipulate
+ * @device: ATA device (numbered from zero) to select
+ *
+ * Use the method defined in the ATA specification to
+ * make either device 0, or device 1, active on the
+ * ATA channel. Works with both PIO and MMIO.
+ *
+ * May be used as the dev_select() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * caller.
+ */
+void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
+{
+ u8 tmp;
+
+ if (device == 0)
+ tmp = ATA_DEVICE_OBS;
+ else
+ tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+ iowrite8(tmp, ap->ioaddr.device_addr);
+ ata_sff_pause(ap); /* needed; also flushes, for mmio */
+}
+
+/**
+ * ata_dev_select - Select device 0/1 on ATA bus
+ * @ap: ATA channel to manipulate
+ * @device: ATA device (numbered from zero) to select
+ * @wait: non-zero to wait for Status register BSY bit to clear
+ * @can_sleep: non-zero if context allows sleeping
+ *
+ * Use the method defined in the ATA specification to
+ * make either device 0, or device 1, active on the
+ * ATA channel.
+ *
+ * This is a high-level version of ata_sff_dev_select(), which
+ * additionally provides the services of inserting the proper
+ * pauses and status polling, where needed.
+ *
+ * LOCKING:
+ * caller.
+ */
+void ata_dev_select(struct ata_port *ap, unsigned int device,
+ unsigned int wait, unsigned int can_sleep)
+{
+ if (ata_msg_probe(ap))
+ ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
+ "device %u, wait %u\n", device, wait);
+
+ if (wait)
+ ata_wait_idle(ap);
+
+ ap->ops->sff_dev_select(ap, device);
+
+ if (wait) {
+ if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
+ msleep(150);
+ ata_wait_idle(ap);
+ }
+}
+
+/**
+ * ata_sff_irq_on - Enable interrupts on a port.
+ * @ap: Port on which interrupts are enabled.
+ *
+ * Enable interrupts on a legacy IDE device using MMIO or PIO,
+ * wait for idle, clear any pending interrupts.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+u8 ata_sff_irq_on(struct ata_port *ap)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 tmp;
+
+ ap->ctl &= ~ATA_NIEN;
+ ap->last_ctl = ap->ctl;
+
+ if (ioaddr->ctl_addr)
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+ tmp = ata_wait_idle(ap);
+
+ ap->ops->sff_irq_clear(ap);
+
+ return tmp;
+}
+
+/**
+ * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Clear interrupt and error flags in DMA status register.
+ *
+ * May be used as the irq_clear() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_sff_irq_clear(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ if (!mmio)
+ return;
+
+ iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
+}
+
+/**
+ * ata_sff_tf_load - send taskfile registers to host controller
+ * @ap: Port to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Outputs ATA taskfile to standard ATA host controller.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ if (ioaddr->ctl_addr)
+ iowrite8(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ WARN_ON(!ioaddr->ctl_addr);
+ iowrite8(tf->hob_feature, ioaddr->feature_addr);
+ iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
+ iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
+ iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
+ iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
+ VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+ tf->hob_feature,
+ tf->hob_nsect,
+ tf->hob_lbal,
+ tf->hob_lbam,
+ tf->hob_lbah);
+ }
+
+ if (is_addr) {
+ iowrite8(tf->feature, ioaddr->feature_addr);
+ iowrite8(tf->nsect, ioaddr->nsect_addr);
+ iowrite8(tf->lbal, ioaddr->lbal_addr);
+ iowrite8(tf->lbam, ioaddr->lbam_addr);
+ iowrite8(tf->lbah, ioaddr->lbah_addr);
+ VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+ tf->feature,
+ tf->nsect,
+ tf->lbal,
+ tf->lbam,
+ tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ iowrite8(tf->device, ioaddr->device_addr);
+ VPRINTK("device 0x%X\n", tf->device);
+ }
+
+ ata_wait_idle(ap);
+}
+
+/**
+ * ata_sff_tf_read - input device's ATA taskfile shadow registers
+ * @ap: Port from which input is read
+ * @tf: ATA taskfile register set for storing input
+ *
+ * Reads ATA taskfile registers for currently-selected device
+ * into @tf. Assumes the device has a fully SFF compliant task file
+ * layout and behaviour. If your device does not (e.g. has a different
+ * status method), then you will need to provide a replacement tf_read.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->command = ata_sff_check_status(ap);
+ tf->feature = ioread8(ioaddr->error_addr);
+ tf->nsect = ioread8(ioaddr->nsect_addr);
+ tf->lbal = ioread8(ioaddr->lbal_addr);
+ tf->lbam = ioread8(ioaddr->lbam_addr);
+ tf->lbah = ioread8(ioaddr->lbah_addr);
+ tf->device = ioread8(ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ if (likely(ioaddr->ctl_addr)) {
+ iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+ tf->hob_feature = ioread8(ioaddr->error_addr);
+ tf->hob_nsect = ioread8(ioaddr->nsect_addr);
+ tf->hob_lbal = ioread8(ioaddr->lbal_addr);
+ tf->hob_lbam = ioread8(ioaddr->lbam_addr);
+ tf->hob_lbah = ioread8(ioaddr->lbah_addr);
+ iowrite8(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ } else
+ WARN_ON(1);
+ }
+}
+
+/**
+ * ata_sff_exec_command - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA command, with proper synchronization with interrupt
+ * handler / other threads.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+ iowrite8(tf->command, ap->ioaddr.command_addr);
+ ata_sff_pause(ap);
+}
+
+/**
+ * ata_tf_to_host - issue ATA taskfile to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA taskfile register set to ATA host controller,
+ * with proper synchronization with interrupt handler and
+ * other threads.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static inline void ata_tf_to_host(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ ap->ops->sff_tf_load(ap, tf);
+ ap->ops->sff_exec_command(ap, tf);
+}
+
+/**
+ * ata_sff_data_xfer - Transfer data by PIO
+ * @dev: device to target
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @rw: read/write
+ *
+ * Transfer data from/to the device data register by PIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * Bytes consumed.
+ */
+unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+ void __iomem *data_addr = ap->ioaddr.data_addr;
+ unsigned int words = buflen >> 1;
+
+ /* Transfer multiple of 2 bytes */
+ if (rw == READ)
+ ioread16_rep(data_addr, buf, words);
+ else
+ iowrite16_rep(data_addr, buf, words);
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ __le16 align_buf[1] = { 0 };
+ unsigned char *trailing_buf = buf + buflen - 1;
+
+ if (rw == READ) {
+ align_buf[0] = cpu_to_le16(ioread16(data_addr));
+ memcpy(trailing_buf, align_buf, 1);
+ } else {
+ memcpy(align_buf, trailing_buf, 1);
+ iowrite16(le16_to_cpu(align_buf[0]), data_addr);
+ }
+ words++;
+ }
+
+ return words << 1;
+}
+
+/**
+ * ata_sff_data_xfer_noirq - Transfer data by PIO
+ * @dev: device to target
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @rw: read/write
+ *
+ * Transfer data from/to the device data register by PIO. Do the
+ * transfer with interrupts disabled.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * Bytes consumed.
+ */
+unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ unsigned long flags;
+ unsigned int consumed;
+
+ local_irq_save(flags);
+ consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
+ local_irq_restore(flags);
+
+ return consumed;
+}
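+
+/*
+ * Either data transfer routine above can be plugged into the
+ * .sff_data_xfer hook; a sketch for a controller that can't tolerate
+ * interrupts during PIO (my_ops is hypothetical):
+ *
+ *	static struct ata_port_operations my_ops = {
+ *		.inherits	= &ata_sff_port_ops,
+ *		.sff_data_xfer	= ata_sff_data_xfer_noirq,
+ *	};
+ */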
+
+/**
+ * ata_pio_sector - Transfer a sector of data.
+ * @qc: Command on going
+ *
+ * Transfer qc->sect_size bytes of data from/to the ATA device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void ata_pio_sector(struct ata_queued_cmd *qc)
+{
+ int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ struct ata_port *ap = qc->ap;
+ struct page *page;
+ unsigned int offset;
+ unsigned char *buf;
+
+ if (qc->curbytes == qc->nbytes - qc->sect_size)
+ ap->hsm_task_state = HSM_ST_LAST;
+
+ page = sg_page(qc->cursg);
+ offset = qc->cursg->offset + qc->cursg_ofs;
+
+ /* get the current page and offset */
+ page = nth_page(page, (offset >> PAGE_SHIFT));
+ offset %= PAGE_SIZE;
+
+ DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+ if (PageHighMem(page)) {
+ unsigned long flags;
+
+ /* FIXME: use a bounce buffer */
+ local_irq_save(flags);
+ buf = kmap_atomic(page, KM_IRQ0);
+
+ /* do the actual data transfer */
+ ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+ do_write);
+
+ kunmap_atomic(buf, KM_IRQ0);
+ local_irq_restore(flags);
+ } else {
+ buf = page_address(page);
+ ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+ do_write);
+ }
+
+ qc->curbytes += qc->sect_size;
+ qc->cursg_ofs += qc->sect_size;
+
+ if (qc->cursg_ofs == qc->cursg->length) {
+ qc->cursg = sg_next(qc->cursg);
+ qc->cursg_ofs = 0;
+ }
+}
+
+/**
+ * ata_pio_sectors - Transfer one or many sectors.
+ * @qc: Command on going
+ *
+ * Transfer one or many sectors of data from/to the
+ * ATA device for the DRQ request.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void ata_pio_sectors(struct ata_queued_cmd *qc)
+{
+ if (is_multi_taskfile(&qc->tf)) {
+ /* READ/WRITE MULTIPLE */
+ unsigned int nsect;
+
+ WARN_ON(qc->dev->multi_count == 0);
+
+ nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
+ qc->dev->multi_count);
+ while (nsect--)
+ ata_pio_sector(qc);
+ } else
+ ata_pio_sector(qc);
+
+ ata_sff_sync(qc->ap); /* flush */
+}
+
+/**
+ * atapi_send_cdb - Write CDB bytes to hardware
+ * @ap: Port to which ATAPI device is attached.
+ * @qc: Taskfile currently active
+ *
+ * When the device has indicated its readiness to accept
+ * a CDB, this function is called to send the CDB.
+ *
+ * LOCKING:
+ * caller.
+ */
+static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ /* send SCSI cdb */
+ DPRINTK("send cdb\n");
+ WARN_ON(qc->dev->cdb_len < 12);
+
+ ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
+ ata_sff_sync(ap);
+ /* FIXME: If the CDB is for DMA do we need to do the transition delay
+ or is bmdma_start guaranteed to do it ? */
+ switch (qc->tf.protocol) {
+ case ATAPI_PROT_PIO:
+ ap->hsm_task_state = HSM_ST;
+ break;
+ case ATAPI_PROT_NODATA:
+ ap->hsm_task_state = HSM_ST_LAST;
+ break;
+ case ATAPI_PROT_DMA:
+ ap->hsm_task_state = HSM_ST_LAST;
+ /* initiate bmdma */
+ ap->ops->bmdma_start(qc);
+ break;
+ }
+}
+
+/**
+ * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ * @qc: Command on going
+ * @bytes: number of bytes
+ *
+ * Transfer data from/to the ATAPI device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+{
+ int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
+ struct ata_port *ap = qc->ap;
+ struct ata_device *dev = qc->dev;
+ struct ata_eh_info *ehi = &dev->link->eh_info;
+ struct scatterlist *sg;
+ struct page *page;
+ unsigned char *buf;
+ unsigned int offset, count, consumed;
+
+next_sg:
+ sg = qc->cursg;
+ if (unlikely(!sg)) {
+ ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
+ "buf=%u cur=%u bytes=%u",
+ qc->nbytes, qc->curbytes, bytes);
+ return -1;
+ }
+
+ page = sg_page(sg);
+ offset = sg->offset + qc->cursg_ofs;
+
+ /* get the current page and offset */
+ page = nth_page(page, (offset >> PAGE_SHIFT));
+ offset %= PAGE_SIZE;
+
+ /* don't overrun current sg */
+ count = min(sg->length - qc->cursg_ofs, bytes);
+
+ /* don't cross page boundaries */
+ count = min(count, (unsigned int)PAGE_SIZE - offset);
+
+ DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+ if (PageHighMem(page)) {
+ unsigned long flags;
+
+ /* FIXME: use bounce buffer */
+ local_irq_save(flags);
+ buf = kmap_atomic(page, KM_IRQ0);
+
+ /* do the actual data transfer */
+ consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw);
+
+ kunmap_atomic(buf, KM_IRQ0);
+ local_irq_restore(flags);
+ } else {
+ buf = page_address(page);
+ consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw);
+ }
+
+ bytes -= min(bytes, consumed);
+ qc->curbytes += count;
+ qc->cursg_ofs += count;
+
+ if (qc->cursg_ofs == sg->length) {
+ qc->cursg = sg_next(qc->cursg);
+ qc->cursg_ofs = 0;
+ }
+
+ /* consumed can be larger than count only for the last transfer */
+ WARN_ON(qc->cursg && count != consumed);
+
+ if (bytes)
+ goto next_sg;
+ return 0;
+}
+
+/**
+ * atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ * @qc: Command on going
+ *
+ * Transfer data from/to the ATAPI device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void atapi_pio_bytes(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *dev = qc->dev;
+ struct ata_eh_info *ehi = &dev->link->eh_info;
+ unsigned int ireason, bc_lo, bc_hi, bytes;
+ int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
+
+ /* Abuse qc->result_tf for temp storage of intermediate TF
+ * here to save some kernel stack usage.
+ * For normal completion, qc->result_tf is not relevant. For
+ * error, qc->result_tf is later overwritten by ata_qc_complete().
+ * So, the correctness of qc->result_tf is not affected.
+ */
+ ap->ops->sff_tf_read(ap, &qc->result_tf);
+ ireason = qc->result_tf.nsect;
+ bc_lo = qc->result_tf.lbam;
+ bc_hi = qc->result_tf.lbah;
+ bytes = (bc_hi << 8) | bc_lo;
+
+ /* shall be cleared to zero, indicating xfer of data */
+ if (unlikely(ireason & (1 << 0)))
+ goto atapi_check;
+
+ /* make sure transfer direction matches expected */
+ i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
+ if (unlikely(do_write != i_write))
+ goto atapi_check;
+
+ if (unlikely(!bytes))
+ goto atapi_check;
+
+ VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
+
+ if (unlikely(__atapi_pio_bytes(qc, bytes)))
+ goto err_out;
+ ata_sff_sync(ap); /* flush */
+
+ return;
+
+ atapi_check:
+ ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
+ ireason, bytes);
+ err_out:
+ qc->err_mask |= AC_ERR_HSM;
+ ap->hsm_task_state = HSM_ST_ERR;
+}
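+
+/*
+ * For reference, the ireason bits tested above follow the ATAPI spec:
+ * bit 0 (CoD) must be 0 for a data transfer and bit 1 (IO) gives the
+ * direction (1 = device to host), which is why i_write is derived as
+ * its inverse.
+ */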
+
+/**
+ * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
+ * @ap: the target ata_port
+ * @qc: qc on going
+ *
+ * RETURNS:
+ * 1 if ok in workqueue, 0 otherwise.
+ */
+static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ return 1;
+
+ if (ap->hsm_task_state == HSM_ST_FIRST) {
+ if (qc->tf.protocol == ATA_PROT_PIO &&
+ (qc->tf.flags & ATA_TFLAG_WRITE))
+ return 1;
+
+ if (ata_is_atapi(qc->tf.protocol) &&
+ !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * ata_hsm_qc_complete - finish a qc running on standard HSM
+ * @qc: Command to complete
+ * @in_wq: 1 if called from workqueue, 0 otherwise
+ *
+ * Finish @qc which is running on standard HSM.
+ *
+ * LOCKING:
+ * If @in_wq is zero, spin_lock_irqsave(host lock).
+ * Otherwise, none on entry and grabs host lock.
+ */
+static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned long flags;
+
+ if (ap->ops->error_handler) {
+ if (in_wq) {
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* EH might have kicked in while host lock is
+ * released.
+ */
+ qc = ata_qc_from_tag(ap, qc->tag);
+ if (qc) {
+ if (likely(!(qc->err_mask & AC_ERR_HSM))) {
+ ap->ops->sff_irq_on(ap);
+ ata_qc_complete(qc);
+ } else
+ ata_port_freeze(ap);
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ } else {
+ if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ ata_qc_complete(qc);
+ else
+ ata_port_freeze(ap);
+ }
+ } else {
+ if (in_wq) {
+ spin_lock_irqsave(ap->lock, flags);
+ ap->ops->sff_irq_on(ap);
+ ata_qc_complete(qc);
+ spin_unlock_irqrestore(ap->lock, flags);
+ } else
+ ata_qc_complete(qc);
+ }
+}
+
+/**
+ * ata_sff_hsm_move - move the HSM to the next state.
+ * @ap: the target ata_port
+ * @qc: qc on going
+ * @status: current device status
+ * @in_wq: 1 if called from workqueue, 0 otherwise
+ *
+ * RETURNS:
+ * 1 when poll next status needed, 0 otherwise.
+ */
+int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u8 status, int in_wq)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ unsigned long flags = 0;
+ int poll_next;
+
+ WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+
+ /* Make sure ata_sff_qc_issue() does not throw things
+ * like DMA polling into the workqueue. Notice that
+ * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
+ */
+ WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
+
+fsm_start:
+ DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
+ ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
+
+ switch (ap->hsm_task_state) {
+ case HSM_ST_FIRST:
+ /* Send first data block or PACKET CDB */
+
+ /* If polling, we will stay in the work queue after
+ * sending the data. Otherwise, interrupt handler
+ * takes over after sending the data.
+ */
+ poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
+
+ /* check device status */
+ if (unlikely((status & ATA_DRQ) == 0)) {
+ /* handle BSY=0, DRQ=0 as error */
+ if (likely(status & (ATA_ERR | ATA_DF)))
+ /* device stops HSM for abort/error */
+ qc->err_mask |= AC_ERR_DEV;
+ else {
+ /* HSM violation. Let EH handle this */
+ ata_ehi_push_desc(ehi,
+ "ST_FIRST: !(DRQ|ERR|DF)");
+ qc->err_mask |= AC_ERR_HSM;
+ }
+
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto fsm_start;
+ }
+
+ /* Device should not ask for data transfer (DRQ=1)
+ * when it finds something wrong.
+ * We ignore DRQ here and stop the HSM by
+ * changing hsm_task_state to HSM_ST_ERR and
+ * let the EH abort the command or reset the device.
+ */
+ if (unlikely(status & (ATA_ERR | ATA_DF))) {
+ /* Some ATAPI tape drives forget to clear the ERR bit
+ * when doing the next command (mostly request sense).
+ * We ignore ERR here as a workaround and proceed with
+ * sending the CDB.
+ */
+ if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
+ ata_ehi_push_desc(ehi, "ST_FIRST: "
+ "DRQ=1 with device error, "
+ "dev_stat 0x%X", status);
+ qc->err_mask |= AC_ERR_HSM;
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto fsm_start;
+ }
+ }
+
+ /* Send the CDB (atapi) or the first data block (ata pio out).
+ * During the state transition, interrupt handler shouldn't
+ * be invoked before the data transfer is complete and
+ * hsm_task_state is changed. Hence, the following locking.
+ */
+ if (in_wq)
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (qc->tf.protocol == ATA_PROT_PIO) {
+ /* PIO data out protocol.
+ * send first data block.
+ */
+
+ /* ata_pio_sectors() might change the state
+ * to HSM_ST_LAST. so, the state is changed here
+ * before ata_pio_sectors().
+ */
+ ap->hsm_task_state = HSM_ST;
+ ata_pio_sectors(qc);
+ } else
+ /* send CDB */
+ atapi_send_cdb(ap, qc);
+
+ if (in_wq)
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* if polling, ata_pio_task() handles the rest.
+ * otherwise, interrupt handler takes over from here.
+ */
+ break;
+
+ case HSM_ST:
+ /* complete command or read/write the data register */
+ if (qc->tf.protocol == ATAPI_PROT_PIO) {
+ /* ATAPI PIO protocol */
+ if ((status & ATA_DRQ) == 0) {
+ /* No more data to transfer or device error.
+ * Device error will be tagged in HSM_ST_LAST.
+ */
+ ap->hsm_task_state = HSM_ST_LAST;
+ goto fsm_start;
+ }
+
+ /* Device should not ask for data transfer (DRQ=1)
+ * when it finds something wrong.
+ * We ignore DRQ here and stop the HSM by
+ * changing hsm_task_state to HSM_ST_ERR and
+ * let the EH abort the command or reset the device.
+ */
+ if (unlikely(status & (ATA_ERR | ATA_DF))) {
+ ata_ehi_push_desc(ehi, "ST-ATAPI: "
+ "DRQ=1 with device error, "
+ "dev_stat 0x%X", status);
+ qc->err_mask |= AC_ERR_HSM;
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto fsm_start;
+ }
+
+ atapi_pio_bytes(qc);
+
+ if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
+ /* bad ireason reported by device */
+ goto fsm_start;
+
+ } else {
+ /* ATA PIO protocol */
+ if (unlikely((status & ATA_DRQ) == 0)) {
+ /* handle BSY=0, DRQ=0 as error */
+ if (likely(status & (ATA_ERR | ATA_DF))) {
+ /* device stops HSM for abort/error */
+ qc->err_mask |= AC_ERR_DEV;
+
+ /* If diagnostic failed and this is
+ * IDENTIFY, it's likely a phantom
+ * device. Mark hint.
+ */
+ if (qc->dev->horkage &
+ ATA_HORKAGE_DIAGNOSTIC)
+ qc->err_mask |=
+ AC_ERR_NODEV_HINT;
+ } else {
+ /* HSM violation. Let EH handle this.
+ * Phantom devices also trigger this
+ * condition. Mark hint.
+ */
+ ata_ehi_push_desc(ehi, "ST-ATA: "
+ "DRQ=1 with device error, "
+ "dev_stat 0x%X", status);
+ qc->err_mask |= AC_ERR_HSM |
+ AC_ERR_NODEV_HINT;
+ }
+
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto fsm_start;
+ }
+
+ /* For PIO reads, some devices may ask for
+ * data transfer (DRQ=1) along with ERR=1.
+ * We respect DRQ here and transfer one
+ * block of junk data before changing the
+ * hsm_task_state to HSM_ST_ERR.
+ *
+ * For PIO writes, ERR=1 DRQ=1 doesn't make
+ * sense since the data block has been
+ * transferred to the device.
+ */
+ if (unlikely(status & (ATA_ERR | ATA_DF))) {
+ /* data might be corrupted */
+ qc->err_mask |= AC_ERR_DEV;
+
+ if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
+ ata_pio_sectors(qc);
+ status = ata_wait_idle(ap);
+ }
+
+ if (status & (ATA_BUSY | ATA_DRQ)) {
+ ata_ehi_push_desc(ehi, "ST-ATA: "
+ "BUSY|DRQ persists on ERR|DF, "
+ "dev_stat 0x%X", status);
+ qc->err_mask |= AC_ERR_HSM;
+ }
+
+ /* ata_pio_sectors() might change the
+ * state to HSM_ST_LAST. so, the state
+ * is changed after ata_pio_sectors().
+ */
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto fsm_start;
+ }
+
+ ata_pio_sectors(qc);
+
+ if (ap->hsm_task_state == HSM_ST_LAST &&
+ (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+ /* all data read */
+ status = ata_wait_idle(ap);
+ goto fsm_start;
+ }
+ }
+
+ poll_next = 1;
+ break;
+
+ case HSM_ST_LAST:
+ if (unlikely(!ata_ok(status))) {
+ qc->err_mask |= __ac_err_mask(status);
+ ap->hsm_task_state = HSM_ST_ERR;
+ goto fsm_start;
+ }
+
+ /* no more data to transfer */
+ DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
+ ap->print_id, qc->dev->devno, status);
+
+ WARN_ON(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
+
+ ap->hsm_task_state = HSM_ST_IDLE;
+
+ /* complete taskfile transaction */
+ ata_hsm_qc_complete(qc, in_wq);
+
+ poll_next = 0;
+ break;
+
+ case HSM_ST_ERR:
+ ap->hsm_task_state = HSM_ST_IDLE;
+
+ /* complete taskfile transaction */
+ ata_hsm_qc_complete(qc, in_wq);
+
+ poll_next = 0;
+ break;
+ default:
+ poll_next = 0;
+ BUG();
+ }
+
+ return poll_next;
+}
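+
+/*
+ * Summary of the HSM transitions driven above (derived from the code
+ * in this file, not from any external spec):
+ *
+ *	HSM_ST_FIRST -> HSM_ST       first PIO data block or PIO CDB sent
+ *	HSM_ST_FIRST -> HSM_ST_LAST  CDB sent for NODATA/DMA protocols
+ *	HSM_ST       -> HSM_ST_LAST  no more data to transfer
+ *	HSM_ST_LAST  -> HSM_ST_IDLE  command completed
+ *	any state    -> HSM_ST_ERR   error; completes qc and goes idle
+ */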
+
+void ata_pio_task(struct work_struct *work)
+{
+ struct ata_port *ap =
+ container_of(work, struct ata_port, port_task.work);
+ struct ata_queued_cmd *qc = ap->port_task_data;
+ u8 status;
+ int poll_next;
+
+fsm_start:
+ WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
+
+ /*
+ * This is purely heuristic. This is a fast path.
+ * Sometimes when we enter, BSY will be cleared in
+ * a chk-status or two. If not, the drive is probably seeking
+ * or something. Snooze for a couple msecs, then
+ * chk-status again. If still busy, queue delayed work.
+ */
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+ if (status & ATA_BUSY) {
+ msleep(2);
+ status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+ if (status & ATA_BUSY) {
+ ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
+ return;
+ }
+ }
+
+ /* move the HSM */
+ poll_next = ata_sff_hsm_move(ap, qc, status, 1);
+
+ /* another command or interrupt handler
+ * may be running at this point.
+ */
+ if (poll_next)
+ goto fsm_start;
+}
+
+/**
+ * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
+ * @qc: command to issue to device
+ *
+ * Using various libata functions and hooks, this function
+ * starts an ATA command. ATA commands are grouped into
+ * classes called "protocols", and issuing each type of protocol
+ * is slightly different.
+ *
+ * May be used as the qc_issue() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Zero on success, AC_ERR_* mask on failure
+ */
+unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* Use polling PIO if the LLD doesn't handle
+ * interrupt-driven PIO and the ATAPI CDB interrupt.
+ */
+ if (ap->flags & ATA_FLAG_PIO_POLLING) {
+ switch (qc->tf.protocol) {
+ case ATA_PROT_PIO:
+ case ATA_PROT_NODATA:
+ case ATAPI_PROT_PIO:
+ case ATAPI_PROT_NODATA:
+ qc->tf.flags |= ATA_TFLAG_POLLING;
+ break;
+ case ATAPI_PROT_DMA:
+ if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+ /* see ata_dma_blacklisted() */
+ BUG();
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* select the device */
+ ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+ /* start the command */
+ switch (qc->tf.protocol) {
+ case ATA_PROT_NODATA:
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_qc_set_polling(qc);
+
+ ata_tf_to_host(ap, &qc->tf);
+ ap->hsm_task_state = HSM_ST_LAST;
+
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_pio_queue_task(ap, qc, 0);
+
+ break;
+
+ case ATA_PROT_DMA:
+ WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
+ ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ ap->ops->bmdma_setup(qc); /* set up bmdma */
+ ap->ops->bmdma_start(qc); /* initiate bmdma */
+ ap->hsm_task_state = HSM_ST_LAST;
+ break;
+
+ case ATA_PROT_PIO:
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_qc_set_polling(qc);
+
+ ata_tf_to_host(ap, &qc->tf);
+
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ /* PIO data out protocol */
+ ap->hsm_task_state = HSM_ST_FIRST;
+ ata_pio_queue_task(ap, qc, 0);
+
+ /* always send first data block using
+ * the ata_pio_task() codepath.
+ */
+ } else {
+ /* PIO data in protocol */
+ ap->hsm_task_state = HSM_ST;
+
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_pio_queue_task(ap, qc, 0);
+
+ /* if polling, ata_pio_task() handles the rest.
+ * otherwise, interrupt handler takes over from here.
+ */
+ }
+
+ break;
+
+ case ATAPI_PROT_PIO:
+ case ATAPI_PROT_NODATA:
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ ata_qc_set_polling(qc);
+
+ ata_tf_to_host(ap, &qc->tf);
+
+ ap->hsm_task_state = HSM_ST_FIRST;
+
+ /* send cdb by polling if no cdb interrupt */
+ if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
+ (qc->tf.flags & ATA_TFLAG_POLLING))
+ ata_pio_queue_task(ap, qc, 0);
+ break;
+
+ case ATAPI_PROT_DMA:
+ WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+
+ ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ ap->ops->bmdma_setup(qc); /* set up bmdma */
+ ap->hsm_task_state = HSM_ST_FIRST;
+
+ /* send cdb by polling if no cdb interrupt */
+ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ ata_pio_queue_task(ap, qc, 0);
+ break;
+
+ default:
+ WARN_ON(1);
+ return AC_ERR_SYSTEM;
+ }
+
+ return 0;
+}
+
+/**
+ * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
+ * @qc: qc to fill result TF for
+ *
+ * @qc is finished and result TF needs to be filled. Fill it
+ * using ->sff_tf_read.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * true indicating that result TF is successfully filled.
+ */
+bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
+ return true;
+}
+
+/**
+ * ata_sff_host_intr - Handle host interrupt for given (port, task)
+ * @ap: Port on which interrupt arrived (possibly...)
+ * @qc: Taskfile currently active in engine
+ *
+ * Handle host interrupt for given queued command. Currently,
+ * only DMA interrupts are handled. All other commands are
+ * handled via polling with interrupts disabled (nIEN bit).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * One if interrupt was handled, zero if not (shared irq).
+ */
+inline unsigned int ata_sff_host_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u8 status, host_stat = 0;
+
+ VPRINTK("ata%u: protocol %d task_state %d\n",
+ ap->print_id, qc->tf.protocol, ap->hsm_task_state);
+
+ /* Check whether we are expecting interrupt in this state */
+ switch (ap->hsm_task_state) {
+ case HSM_ST_FIRST:
+ /* Some pre-ATAPI-4 devices assert INTRQ
+ * at this state when ready to receive CDB.
+ */
+
+ /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
+ * The flag is set only for ATAPI devices, so there is no
+ * need to check ata_is_atapi(qc->tf.protocol) again.
+ */
+ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ goto idle_irq;
+ break;
+ case HSM_ST_LAST:
+ if (qc->tf.protocol == ATA_PROT_DMA ||
+ qc->tf.protocol == ATAPI_PROT_DMA) {
+ /* check status of DMA engine */
+ host_stat = ap->ops->bmdma_status(ap);
+ VPRINTK("ata%u: host_stat 0x%X\n",
+ ap->print_id, host_stat);
+
+ /* if it's not our irq... */
+ if (!(host_stat & ATA_DMA_INTR))
+ goto idle_irq;
+
+ /* before we do anything else, clear DMA-Start bit */
+ ap->ops->bmdma_stop(qc);
+
+ if (unlikely(host_stat & ATA_DMA_ERR)) {
+ /* error when transferring data to/from memory */
+ qc->err_mask |= AC_ERR_HOST_BUS;
+ ap->hsm_task_state = HSM_ST_ERR;
+ }
+ }
+ break;
+ case HSM_ST:
+ break;
+ default:
+ goto idle_irq;
+ }
+
+
+ /* check main status, clearing INTRQ if needed */
+ status = ata_sff_irq_status(ap);
+ if (status & ATA_BUSY)
+ goto idle_irq;
+
+ /* ack bmdma irq events */
+ ap->ops->sff_irq_clear(ap);
+
+ ata_sff_hsm_move(ap, qc, status, 0);
+
+ if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+ qc->tf.protocol == ATAPI_PROT_DMA))
+ ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
+ return 1; /* irq handled */
+
+idle_irq:
+ ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+ if ((ap->stats.idle_irq % 1000) == 0) {
+ ap->ops->sff_check_status(ap);
+ ap->ops->sff_irq_clear(ap);
+ ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+ return 1;
+ }
+#endif
+ return 0; /* irq not handled */
+}
+
+/**
+ * ata_sff_interrupt - Default ATA host interrupt handler
+ * @irq: irq line (unused)
+ * @dev_instance: pointer to our ata_host information structure
+ *
+ * Default interrupt handler for PCI IDE devices. Calls
+ * ata_sff_host_intr() for each port that is not disabled.
+ *
+ * LOCKING:
+ * Obtains host lock during operation.
+ *
+ * RETURNS:
+ * IRQ_NONE or IRQ_HANDLED.
+ */
+irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ unsigned int i;
+ unsigned int handled = 0;
+ unsigned long flags;
+
+ /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
+ spin_lock_irqsave(&host->lock, flags);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ ap = host->ports[i];
+ if (ap &&
+ !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
+ (qc->flags & ATA_QCFLAG_ACTIVE))
+ handled |= ata_sff_host_intr(ap, qc);
+ }
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
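+
+/*
+ * LLDs usually install this handler when activating the host; a sketch,
+ * assuming the usual ata_host_activate() flow (my_sht is hypothetical):
+ *
+ *	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ *			       IRQF_SHARED, &my_sht);
+ */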
+
+/**
+ * ata_sff_freeze - Freeze SFF controller port
+ * @ap: port to freeze
+ *
+ * Freeze SFF controller port.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_sff_freeze(struct ata_port *ap)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ ap->ctl |= ATA_NIEN;
+ ap->last_ctl = ap->ctl;
+
+ if (ioaddr->ctl_addr)
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+
+ /* Under certain circumstances, some controllers raise IRQ on
+ * ATA_NIEN manipulation. Also, many controllers fail to mask
+ * previously pending IRQ on ATA_NIEN assertion. Clear it.
+ */
+ ap->ops->sff_check_status(ap);
+
+ ap->ops->sff_irq_clear(ap);
+}
+
+/**
+ * ata_sff_thaw - Thaw SFF controller port
+ * @ap: port to thaw
+ *
+ * Thaw SFF controller port.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_sff_thaw(struct ata_port *ap)
+{
+ /* clear & re-enable interrupts */
+ ap->ops->sff_check_status(ap);
+ ap->ops->sff_irq_clear(ap);
+ ap->ops->sff_irq_on(ap);
+}
+
+/**
+ * ata_sff_prereset - prepare SFF link for reset
+ * @link: SFF link to be reset
+ * @deadline: deadline jiffies for the operation
+ *
+ * SFF link @link is about to be reset. Initialize it. It first
+ * calls ata_std_prereset() and waits for !BSY if the port is
+ * being softreset.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_eh_context *ehc = &link->eh_context;
+ int rc;
+
+ rc = ata_std_prereset(link, deadline);
+ if (rc)
+ return rc;
+
+ /* if we're about to do hardreset, nothing more to do */
+ if (ehc->i.action & ATA_EH_HARDRESET)
+ return 0;
+
+ /* wait for !BSY if we don't know that no device is attached */
+ if (!ata_link_offline(link)) {
+ rc = ata_sff_wait_ready(link, deadline);
+ if (rc && rc != -ENODEV) {
+ ata_link_printk(link, KERN_WARNING, "device not ready "
+ "(errno=%d), forcing hardreset\n", rc);
+ ehc->i.action |= ATA_EH_HARDRESET;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ata_devchk - PATA device presence detection
+ * @ap: ATA channel to examine
+ * @device: Device to examine (starting at zero)
+ *
+ * This technique was originally described in
+ * Hale Landis's ATADRVR (www.ata-atapi.com), and
+ * later found its way into the ATA/ATAPI spec.
+ *
+ * Write a pattern to the ATA shadow registers,
+ * and if a device is present, it will respond by
+ * correctly storing and echoing back the
+ * ATA shadow register contents.
+ *
+ * LOCKING:
+ * caller.
+ */
+static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 nsect, lbal;
+
+ ap->ops->sff_dev_select(ap, device);
+
+ iowrite8(0x55, ioaddr->nsect_addr);
+ iowrite8(0xaa, ioaddr->lbal_addr);
+
+ iowrite8(0xaa, ioaddr->nsect_addr);
+ iowrite8(0x55, ioaddr->lbal_addr);
+
+ iowrite8(0x55, ioaddr->nsect_addr);
+ iowrite8(0xaa, ioaddr->lbal_addr);
+
+ nsect = ioread8(ioaddr->nsect_addr);
+ lbal = ioread8(ioaddr->lbal_addr);
+
+ if ((nsect == 0x55) && (lbal == 0xaa))
+ return 1; /* we found a device */
+
+ return 0; /* nothing found */
+}
+
+/**
+ * ata_sff_dev_classify - Parse returned ATA device signature
+ * @dev: ATA device to classify (starting at zero)
+ * @present: device seems present
+ * @r_err: Value of error register on completion
+ *
+ * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
+ * an ATA/ATAPI-defined set of values is placed in the ATA
+ * shadow registers, indicating the results of device detection
+ * and diagnostics.
+ *
+ * Select the ATA device, and read the values from the ATA shadow
+ * registers. Then parse according to the Error register value,
+ * and the spec-defined values examined by ata_dev_classify().
+ *
+ * LOCKING:
+ * caller.
+ *
+ * RETURNS:
+ * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
+ */
+unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
+ u8 *r_err)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct ata_taskfile tf;
+ unsigned int class;
+ u8 err;
+
+ ap->ops->sff_dev_select(ap, dev->devno);
+
+ memset(&tf, 0, sizeof(tf));
+
+ ap->ops->sff_tf_read(ap, &tf);
+ err = tf.feature;
+ if (r_err)
+ *r_err = err;
+
+ /* see if device passed diags: continue and warn later */
+ if (err == 0)
+ /* diagnostic fail: do nothing _YET_ */
+ dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
+ else if (err == 1)
+ /* do nothing */ ;
+ else if ((dev->devno == 0) && (err == 0x81))
+ /* do nothing */ ;
+ else
+ return ATA_DEV_NONE;
+
+ /* determine if device is ATA or ATAPI */
+ class = ata_dev_classify(&tf);
+
+ if (class == ATA_DEV_UNKNOWN) {
+ /* If the device failed diagnostic, it's likely to
+ * have reported incorrect device signature too.
+ * Assume ATA device if the device seems present but
+ * device signature is invalid with diagnostic
+ * failure.
+ */
+ if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
+ class = ATA_DEV_ATA;
+ else
+ class = ATA_DEV_NONE;
+ } else if ((class == ATA_DEV_ATA) &&
+ (ap->ops->sff_check_status(ap) == 0))
+ class = ATA_DEV_NONE;
+
+ return class;
+}
+
+/**
+ * ata_sff_wait_after_reset - wait for devices to become ready after reset
+ * @link: SFF link which is just reset
+ * @devmask: mask of present devices
+ * @deadline: deadline jiffies for the operation
+ *
+ * Wait for devices attached to SFF @link to become ready after
+ * reset. This includes a preceding 150ms wait to avoid accessing
+ * the TF status register too early.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -ENODEV if some or all of devices in @devmask
+ * don't seem to exist. -errno on other errors.
+ */
+int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int dev0 = devmask & (1 << 0);
+ unsigned int dev1 = devmask & (1 << 1);
+ int rc, ret = 0;
+
+ msleep(ATA_WAIT_AFTER_RESET);
+
+ /* always check readiness of the master device */
+ rc = ata_sff_wait_ready(link, deadline);
+ /* -ENODEV means the odd clown forgot the D7 pulldown resistor
+ * and TF status is 0xff, bail out on it too.
+ */
+ if (rc)
+ return rc;
+
+ /* if device 1 was found in ata_devchk, wait for register
+ * access briefly, then wait for BSY to clear.
+ */
+ if (dev1) {
+ int i;
+
+ ap->ops->sff_dev_select(ap, 1);
+
+ /* Wait for register access. Some ATAPI devices fail
+ * to set nsect/lbal after reset, so don't waste too
+ * much time on it. We're gonna wait for !BSY anyway.
+ */
+ for (i = 0; i < 2; i++) {
+ u8 nsect, lbal;
+
+ nsect = ioread8(ioaddr->nsect_addr);
+ lbal = ioread8(ioaddr->lbal_addr);
+ if ((nsect == 1) && (lbal == 1))
+ break;
+ msleep(50); /* give drive a breather */
+ }
+
+ rc = ata_sff_wait_ready(link, deadline);
+ if (rc) {
+ if (rc != -ENODEV)
+ return rc;
+ ret = rc;
+ }
+ }
+
+ /* is all this really necessary? */
+ ap->ops->sff_dev_select(ap, 0);
+ if (dev1)
+ ap->ops->sff_dev_select(ap, 1);
+ if (dev0)
+ ap->ops->sff_dev_select(ap, 0);
+
+ return ret;
+}
+
+static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ unsigned long deadline)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+ /* software reset. causes dev0 to be selected */
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+ udelay(20); /* FIXME: flush */
+ iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+ udelay(20); /* FIXME: flush */
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+
+ /* wait for the port to become ready */
+ return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+}
+
+/**
+ * ata_sff_softreset - reset host port via ATA SRST
+ * @link: ATA link to reset
+ * @classes: resulting classes of attached devices
+ * @deadline: deadline jiffies for the operation
+ *
+ * Reset host port using ATA SRST.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+ unsigned int devmask = 0;
+ int rc;
+ u8 err;
+
+ DPRINTK("ENTER\n");
+
+ /* determine if device 0/1 are present */
+ if (ata_devchk(ap, 0))
+ devmask |= (1 << 0);
+ if (slave_possible && ata_devchk(ap, 1))
+ devmask |= (1 << 1);
+
+ /* select device 0 again */
+ ap->ops->sff_dev_select(ap, 0);
+
+ /* issue bus reset */
+ DPRINTK("about to softreset, devmask=%x\n", devmask);
+ rc = ata_bus_softreset(ap, devmask, deadline);
+ /* if link is occupied, -ENODEV too is an error */
+ if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
+ ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ return rc;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&link->device[0],
+ devmask & (1 << 0), &err);
+ if (slave_possible && err != 0x81)
+ classes[1] = ata_sff_dev_classify(&link->device[1],
+ devmask & (1 << 1), &err);
+
+ DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
+ return 0;
+}
+
+/**
+ * sata_sff_hardreset - reset host port via SATA phy reset
+ * @link: link to reset
+ * @class: resulting class of attached device
+ * @deadline: deadline jiffies for the operation
+ *
+ * SATA phy-reset host port using DET bits of SControl register,
+ * wait for !BSY and classify the attached device.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_eh_context *ehc = &link->eh_context;
+ const unsigned long *timing = sata_ehc_deb_timing(ehc);
+ bool online;
+ int rc;
+
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ata_sff_check_ready);
+ if (online)
+ *class = ata_sff_dev_classify(link->device, 1, NULL);
+
+ DPRINTK("EXIT, class=%u\n", *class);
+ return rc;
+}
+
+/**
+ * ata_sff_postreset - SFF postreset callback
+ * @link: the target SFF ata_link
+ * @classes: classes of attached devices
+ *
+ * This function is invoked after a successful reset. It first
+ * calls ata_std_postreset() and performs SFF specific postreset
+ * processing.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
+{
+ struct ata_port *ap = link->ap;
+
+ ata_std_postreset(link, classes);
+
+ /* is double-select really necessary? */
+ if (classes[0] != ATA_DEV_NONE)
+ ap->ops->sff_dev_select(ap, 1);
+ if (classes[1] != ATA_DEV_NONE)
+ ap->ops->sff_dev_select(ap, 0);
+
+ /* bail out if no device is present */
+ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+ DPRINTK("EXIT, no device\n");
+ return;
+ }
+
+ /* set up device control */
+ if (ap->ioaddr.ctl_addr)
+ iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+}
+
+/**
+ * ata_sff_error_handler - Stock error handler for SFF controller
+ * @ap: port to handle error for
+ *
+ * Stock error handler for SFF controller. It can handle both
+ * PATA and SATA controllers. Many controllers should be able to
+ * use this EH as-is or with some added handling before and
+ * after.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_sff_error_handler(struct ata_port *ap)
+{
+ ata_reset_fn_t softreset = ap->ops->softreset;
+ ata_reset_fn_t hardreset = ap->ops->hardreset;
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+ int thaw = 0;
+
+ qc = __ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+ qc = NULL;
+
+ /* reset PIO HSM and stop DMA engine */
+ spin_lock_irqsave(ap->lock, flags);
+
+ ap->hsm_task_state = HSM_ST_IDLE;
+
+ if (ap->ioaddr.bmdma_addr &&
+ qc && (qc->tf.protocol == ATA_PROT_DMA ||
+ qc->tf.protocol == ATAPI_PROT_DMA)) {
+ u8 host_stat;
+
+ host_stat = ap->ops->bmdma_status(ap);
+
+ /* BMDMA controllers indicate host bus error by
+ * setting DMA_ERR bit and timing out. As it wasn't
+ * really a timeout event, adjust error mask and
+ * cancel frozen state.
+ */
+ if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
+ qc->err_mask = AC_ERR_HOST_BUS;
+ thaw = 1;
+ }
+
+ ap->ops->bmdma_stop(qc);
+ }
+
+ ata_sff_sync(ap); /* FIXME: We don't need this */
+ ap->ops->sff_check_status(ap);
+ ap->ops->sff_irq_clear(ap);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ if (thaw)
+ ata_eh_thaw_port(ap);
+
+ /* PIO and DMA engines have been stopped, perform recovery */
+
+ /* Ignore ata_sff_softreset if ctl isn't accessible and
+ * built-in hardresets if SCR access isn't available.
+ */
+ if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
+ softreset = NULL;
+ if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+ hardreset = NULL;
+
+ ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
+ ap->ops->postreset);
+}
+
+/**
+ * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
+ * @qc: internal command to clean up
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned long flags;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ ap->hsm_task_state = HSM_ST_IDLE;
+
+ if (ap->ioaddr.bmdma_addr)
+ ata_bmdma_stop(qc);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ * ata_sff_port_start - Set port up for dma.
+ * @ap: Port to initialize
+ *
+ * Called just after data structures for each port are
+ * initialized. Allocates space for PRD table if the device
+ * is DMA capable SFF.
+ *
+ * May be used as the port_start() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+int ata_sff_port_start(struct ata_port *ap)
+{
+ if (ap->ioaddr.bmdma_addr)
+ return ata_port_start(ap);
+ return 0;
+}
+
+/**
+ * ata_sff_std_ports - initialize ioaddr with standard port offsets.
+ * @ioaddr: IO address structure to be initialized
+ *
+ * Utility function which initializes data_addr, error_addr,
+ * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
+ * device_addr, status_addr, and command_addr to standard offsets
+ * relative to cmd_addr.
+ *
+ * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
+ */
+void ata_sff_std_ports(struct ata_ioports *ioaddr)
+{
+ ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
+ ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
+ ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
+ ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
+ ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
+ ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
+ ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
+ ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
+ ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
+ ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
+}
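+
+/*
+ * Typical use when a driver has mapped a legacy/SFF region (a sketch;
+ * base and ctl are whatever the driver ioremapped):
+ *
+ *	ap->ioaddr.cmd_addr = base;
+ *	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr = ctl;
+ *	ata_sff_std_ports(&ap->ioaddr);
+ */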
+
+unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
+ unsigned long xfer_mask)
+{
+ /* Filter out DMA modes if the device has been configured by
+ the BIOS as PIO only */
+
+ if (adev->link->ap->ioaddr.bmdma_addr == NULL)
+ xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+ return xfer_mask;
+}
+
+/**
+ * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+ u8 dmactl;
+
+ /* load PRD table addr. */
+ mb(); /* make sure PRD table writes are visible to controller */
+ iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+ if (!rw)
+ dmactl |= ATA_DMA_WR;
+ iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+/**
+ * ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ u8 dmactl;
+
+ /* start host DMA transaction */
+ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ /* Strictly, one may wish to issue an ioread8() here, to
+ * flush the mmio write. However, control also passes
+ * to the hardware at this point, and it will interrupt
+ * us when we are to resume control. So, in effect,
+ * we don't care when the mmio write flushes.
+ * Further, a read of the DMA status register _immediately_
+ * following the write may not be what certain flaky hardware
+ * is expecting, so it is best not to add a readb()
+ * without first checking all the MMIO ATA cards/mobos.
+ * Or maybe I'm just being paranoid.
+ *
+ * FIXME: The posting of this write means I/O starts are
+ * unnecessarily delayed for MMIO.
+ */
+}
+
+/**
+ * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
+ * @qc: Command we are ending DMA for
+ *
+ * Clears the ATA_DMA_START flag in the dma control register
+ *
+ * May be used as the bmdma_stop() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ /* clear start/stop bit */
+ iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+ mmio + ATA_DMA_CMD);
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
+}
+
+/**
+ * ata_bmdma_status - Read PCI IDE BMDMA status
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Read and return BMDMA status register.
+ *
+ * May be used as the bmdma_status() entry in ata_port_operations.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+ return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
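+
+/* Taken together, bmdma_setup/start/status/stop implement the standard
+ * BMDMA life cycle. A rough sketch of the order in which libata's SFF
+ * code drives the hooks for one DMA command (error handling omitted):
+ *
+ *	ap->ops->bmdma_setup(qc);		load PRD address, direction
+ *	ap->ops->bmdma_start(qc);		set ATA_DMA_START
+ *	(completion interrupt arrives)
+ *	host_stat = ap->ops->bmdma_status(ap);	read ATA_DMA_STATUS
+ *	ap->ops->bmdma_stop(qc);		clear ATA_DMA_START
+ */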
+
+/**
+ * ata_bus_reset - reset host port and associated ATA channel
+ * @ap: port to reset
+ *
+ * This is typically the first time we actually start issuing
+ * commands to the ATA channel. We wait for BSY to clear, then
+ * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
+ * result. Determine what devices, if any, are on the channel
+ * by looking at the device 0/1 error register. Look at the signature
+ * stored in each device's taskfile registers, to determine if
+ * the device is ATA or ATAPI.
+ *
+ * LOCKING:
+ * PCI/etc. bus probe sem.
+ * Obtains host lock.
+ *
+ * SIDE EFFECTS:
+ * Sets ATA_FLAG_DISABLED if bus reset fails.
+ *
+ * DEPRECATED:
+ * This function is only for drivers which still use old EH and
+ * will be removed soon.
+ */
+void ata_bus_reset(struct ata_port *ap)
+{
+ struct ata_device *device = ap->link.device;
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+ u8 err;
+ unsigned int dev0, dev1 = 0, devmask = 0;
+ int rc;
+
+ DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
+
+ /* determine if device 0/1 are present */
+ if (ap->flags & ATA_FLAG_SATA_RESET)
+ dev0 = 1;
+ else {
+ dev0 = ata_devchk(ap, 0);
+ if (slave_possible)
+ dev1 = ata_devchk(ap, 1);
+ }
+
+ if (dev0)
+ devmask |= (1 << 0);
+ if (dev1)
+ devmask |= (1 << 1);
+
+ /* select device 0 again */
+ ap->ops->sff_dev_select(ap, 0);
+
+ /* issue bus reset */
+ if (ap->flags & ATA_FLAG_SRST) {
+ rc = ata_bus_softreset(ap, devmask,
+ ata_deadline(jiffies, 40000));
+ if (rc && rc != -ENODEV)
+ goto err_out;
+ }
+
+ /*
+ * determine by signature whether we have ATA or ATAPI devices
+ */
+ device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
+ if ((slave_possible) && (err != 0x81))
+ device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
+
+ /* is double-select really necessary? */
+ if (device[1].class != ATA_DEV_NONE)
+ ap->ops->sff_dev_select(ap, 1);
+ if (device[0].class != ATA_DEV_NONE)
+ ap->ops->sff_dev_select(ap, 0);
+
+ /* if no devices were detected, disable this port */
+ if ((device[0].class == ATA_DEV_NONE) &&
+ (device[1].class == ATA_DEV_NONE))
+ goto err_out;
+
+ if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
+ /* set up device control for ATA_FLAG_SATA_RESET */
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+ }
+
+ DPRINTK("EXIT\n");
+ return;
+
+err_out:
+ ata_port_printk(ap, KERN_ERR, "disabling port\n");
+ ata_port_disable(ap);
+
+ DPRINTK("EXIT\n");
+}
+
+#ifdef CONFIG_PCI
+
+/**
+ * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
+ * @pdev: PCI device
+ *
+ * Some PCI ATA devices report simplex mode but in fact can be told to
+ * enter non simplex mode. This implements the necessary logic to
+ * perform the task on such devices. Calling it on other devices will
+ * have -undefined- behaviour.
+ */
+int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
+{
+ unsigned long bmdma = pci_resource_start(pdev, 4);
+ u8 simplex;
+
+ if (bmdma == 0)
+ return -ENOENT;
+
+ simplex = inb(bmdma + 0x02);
+ outb(simplex & 0x60, bmdma + 0x02);
+ simplex = inb(bmdma + 0x02);
+ if (simplex & 0x80)
+ return -EOPNOTSUPP;
+ return 0;
+}
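+
+/* Illustrative sketch (an assumed caller, not code from this tree):
+ * chipset init paths typically invoke the helper and fall back to
+ * shared (simplex) DMA when the flag will not clear:
+ *
+ *	if (ata_pci_bmdma_clear_simplex(pdev) < 0)
+ *		dev_printk(KERN_WARNING, &pdev->dev,
+ *			   "simplex flag stuck, sharing the DMA engine\n");
+ */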
+
+/**
+ * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
+ * @host: target ATA host
+ *
+ * Acquire PCI BMDMA resources and initialize @host accordingly.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_pci_bmdma_init(struct ata_host *host)
+{
+ struct device *gdev = host->dev;
+ struct pci_dev *pdev = to_pci_dev(gdev);
+ int i, rc;
+
+ /* No BAR4 allocation: No DMA */
+ if (pci_resource_start(pdev, 4) == 0)
+ return 0;
+
+ /* TODO: If we get no DMA mask we should fall back to PIO */
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ /* request and iomap DMA region */
+ rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
+ if (rc) {
+ dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
+ return -ENOMEM;
+ }
+ host->iomap = pcim_iomap_table(pdev);
+
+ for (i = 0; i < 2; i++) {
+ struct ata_port *ap = host->ports[i];
+ void __iomem *bmdma = host->iomap[4] + 8 * i;
+
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ ap->ioaddr.bmdma_addr = bmdma;
+ if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
+ (ioread8(bmdma + 2) & 0x80))
+ host->flags |= ATA_HOST_SIMPLEX;
+
+ ata_port_desc(ap, "bmdma 0x%llx",
+ (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
+ }
+
+ return 0;
+}
+
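+/**
+ * ata_resources_present - check PCI BARs for an ATA channel
+ * @pdev: PCI device to check
+ * @port: channel (0 or 1) to check
+ *
+ * Returns 1 if both the command and control block resources for
+ * @port are populated, 0 otherwise.
+ */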
+static int ata_resources_present(struct pci_dev *pdev, int port)
+{
+ int i;
+
+ /* Check the PCI resources for this channel are enabled */
+ port = port * 2;
+ for (i = 0; i < 2; i ++) {
+ if (pci_resource_start(pdev, port + i) == 0 ||
+ pci_resource_len(pdev, port + i) == 0)
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * ata_pci_sff_init_host - acquire native PCI ATA resources and init host
+ * @host: target ATA host
+ *
+ * Acquire native PCI ATA resources for @host and initialize the
+ * first two ports of @host accordingly. Ports marked dummy are
+ * skipped and allocation failure makes the port dummy.
+ *
+ * Note that native PCI resources are valid even for legacy hosts
+ * as we fix up pdev resources array early in boot, so this
+ * function can be used for both native and legacy SFF hosts.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 if at least one port is initialized, -ENODEV if no port is
+ * available.
+ */
+int ata_pci_sff_init_host(struct ata_host *host)
+{
+ struct device *gdev = host->dev;
+ struct pci_dev *pdev = to_pci_dev(gdev);
+ unsigned int mask = 0;
+ int i, rc;
+
+ /* request, iomap BARs and init port addresses accordingly */
+ for (i = 0; i < 2; i++) {
+ struct ata_port *ap = host->ports[i];
+ int base = i * 2;
+ void __iomem * const *iomap;
+
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ /* Discard disabled ports. Some controllers show
+ * their unused channels this way. Disabled ports are
+ * made dummy.
+ */
+ if (!ata_resources_present(pdev, i)) {
+ ap->ops = &ata_dummy_port_ops;
+ continue;
+ }
+
+ rc = pcim_iomap_regions(pdev, 0x3 << base,
+ dev_driver_string(gdev));
+ if (rc) {
+ dev_printk(KERN_WARNING, gdev,
+ "failed to request/iomap BARs for port %d "
+ "(errno=%d)\n", i, rc);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ ap->ops = &ata_dummy_port_ops;
+ continue;
+ }
+ host->iomap = iomap = pcim_iomap_table(pdev);
+
+ ap->ioaddr.cmd_addr = iomap[base];
+ ap->ioaddr.altstatus_addr =
+ ap->ioaddr.ctl_addr = (void __iomem *)
+ ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+ (unsigned long long)pci_resource_start(pdev, base),
+ (unsigned long long)pci_resource_start(pdev, base + 1));
+
+ mask |= 1 << i;
+ }
+
+ if (!mask) {
+ dev_printk(KERN_ERR, gdev, "no available native port\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
+ * @pdev: target PCI device
+ * @ppi: array of port_info, must be enough for two ports
+ * @r_host: out argument for the initialized ATA host
+ *
+ * Helper to allocate ATA host for @pdev, acquire all native PCI
+ * resources and initialize it accordingly in one go.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_pci_sff_prepare_host(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct ata_host **r_host)
+{
+ struct ata_host *host;
+ int rc;
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+ if (!host) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to allocate ATA host\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ rc = ata_pci_sff_init_host(host);
+ if (rc)
+ goto err_out;
+
+ /* init DMA related stuff */
+ rc = ata_pci_bmdma_init(host);
+ if (rc)
+ goto err_bmdma;
+
+ devres_remove_group(&pdev->dev, NULL);
+ *r_host = host;
+ return 0;
+
+ err_bmdma:
+ /* This is necessary because PCI and iomap resources are
+ * merged and releasing the top group won't release the
+ * acquired resources if some of those have been acquired
+ * before entering this function.
+ */
+ pcim_iounmap_regions(pdev, 0xf);
+ err_out:
+ devres_release_group(&pdev->dev, NULL);
+ return rc;
+}
+
+/**
+ * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
+ * @host: target SFF ATA host
+ * @irq_handler: irq_handler used when requesting IRQ(s)
+ * @sht: scsi_host_template to use when registering the host
+ *
+ * This is the counterpart of ata_host_activate() for SFF ATA
+ * hosts. This separate helper is necessary because SFF hosts
+ * use two separate interrupts in legacy mode.
+ *
+ * LOCKING:
+ * Inherited from calling layer (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_pci_sff_activate_host(struct ata_host *host,
+ irq_handler_t irq_handler,
+ struct scsi_host_template *sht)
+{
+ struct device *dev = host->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ const char *drv_name = dev_driver_string(host->dev);
+ int legacy_mode = 0, rc;
+
+ rc = ata_host_start(host);
+ if (rc)
+ return rc;
+
+ if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+ u8 tmp8, mask;
+
+ /* TODO: What if one channel is in native mode ... */
+ pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+ mask = (1 << 2) | (1 << 0);
+ if ((tmp8 & mask) != mask)
+ legacy_mode = 1;
+#if defined(CONFIG_NO_ATA_LEGACY)
+ /* Some platforms with PCI limits cannot address compat
+ port space. In that case we punt if their firmware has
+ left a device in compatibility mode */
+ if (legacy_mode) {
+ printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
+ return -EOPNOTSUPP;
+ }
+#endif
+ }
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!legacy_mode && pdev->irq) {
+ rc = devm_request_irq(dev, pdev->irq, irq_handler,
+ IRQF_SHARED, drv_name, host);
+ if (rc)
+ goto out;
+
+ ata_port_desc(host->ports[0], "irq %d", pdev->irq);
+ ata_port_desc(host->ports[1], "irq %d", pdev->irq);
+ } else if (legacy_mode) {
+ if (!ata_port_is_dummy(host->ports[0])) {
+ rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
+ irq_handler, IRQF_SHARED,
+ drv_name, host);
+ if (rc)
+ goto out;
+
+ ata_port_desc(host->ports[0], "irq %d",
+ ATA_PRIMARY_IRQ(pdev));
+ }
+
+ if (!ata_port_is_dummy(host->ports[1])) {
+ rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
+ irq_handler, IRQF_SHARED,
+ drv_name, host);
+ if (rc)
+ goto out;
+
+ ata_port_desc(host->ports[1], "irq %d",
+ ATA_SECONDARY_IRQ(pdev));
+ }
+ }
+
+ rc = ata_host_register(host, sht);
+ out:
+ if (rc == 0)
+ devres_remove_group(dev, NULL);
+ else
+ devres_release_group(dev, NULL);
+
+ return rc;
+}
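+
+/* Illustrative sketch: a driver that must touch the host between
+ * resource acquisition and registration calls the prepare/activate
+ * pair itself rather than ata_pci_sff_init_one(). "my_sht", "my_priv"
+ * and the ppi array are hypothetical driver-side names:
+ *
+ *	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ *	if (rc)
+ *		return rc;
+ *	host->private_data = my_priv;	(driver specific fixups go here)
+ *	pci_set_master(pdev);
+ *	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &my_sht);
+ */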
+
+/**
+ * ata_pci_sff_init_one - Initialize/register PCI IDE host controller
+ * @pdev: Controller to be initialized
+ * @ppi: array of port_info, must be enough for two ports
+ * @sht: scsi_host_template to use when registering the host
+ * @host_priv: host private_data
+ *
+ * This is a helper function which can be called from a driver's
+ * xxx_init_one() probe function if the hardware uses traditional
+ * IDE taskfile registers.
+ *
+ * This function calls pci_enable_device(), reserves its register
+ * regions, sets the dma mask, enables bus master mode, and calls
+ * ata_device_add()
+ *
+ * ASSUMPTION:
+ * Nobody makes a single channel controller that appears solely as
+ * the secondary legacy port on PCI.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno value on error.
+ */
+int ata_pci_sff_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct scsi_host_template *sht, void *host_priv)
+{
+ struct device *dev = &pdev->dev;
+ const struct ata_port_info *pi = NULL;
+ struct ata_host *host = NULL;
+ int i, rc;
+
+ DPRINTK("ENTER\n");
+
+ /* look up the first valid port_info */
+ for (i = 0; i < 2 && ppi[i]; i++) {
+ if (ppi[i]->port_ops != &ata_dummy_port_ops) {
+ pi = ppi[i];
+ break;
+ }
+ }
+
+ if (!pi) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "no valid port_info specified\n");
+ return -EINVAL;
+ }
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ goto out;
+
+ /* prepare and activate SFF host */
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ goto out;
+ host->private_data = host_priv;
+
+ pci_set_master(pdev);
+ rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
+ out:
+ if (rc == 0)
+ devres_remove_group(&pdev->dev, NULL);
+ else
+ devres_release_group(&pdev->dev, NULL);
+
+ return rc;
+}
+
+#endif /* CONFIG_PCI */
+
+EXPORT_SYMBOL_GPL(ata_sff_port_ops);
+EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
+EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
+EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
+EXPORT_SYMBOL_GPL(ata_sff_dev_select);
+EXPORT_SYMBOL_GPL(ata_sff_check_status);
+EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
+EXPORT_SYMBOL_GPL(ata_sff_pause);
+EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
+EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
+EXPORT_SYMBOL_GPL(ata_sff_tf_load);
+EXPORT_SYMBOL_GPL(ata_sff_tf_read);
+EXPORT_SYMBOL_GPL(ata_sff_exec_command);
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
+EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
+EXPORT_SYMBOL_GPL(ata_sff_irq_on);
+EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
+EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
+EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
+EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
+EXPORT_SYMBOL_GPL(ata_sff_host_intr);
+EXPORT_SYMBOL_GPL(ata_sff_interrupt);
+EXPORT_SYMBOL_GPL(ata_sff_freeze);
+EXPORT_SYMBOL_GPL(ata_sff_thaw);
+EXPORT_SYMBOL_GPL(ata_sff_prereset);
+EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
+EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
+EXPORT_SYMBOL_GPL(ata_sff_softreset);
+EXPORT_SYMBOL_GPL(sata_sff_hardreset);
+EXPORT_SYMBOL_GPL(ata_sff_postreset);
+EXPORT_SYMBOL_GPL(ata_sff_error_handler);
+EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
+EXPORT_SYMBOL_GPL(ata_sff_port_start);
+EXPORT_SYMBOL_GPL(ata_sff_std_ports);
+EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+EXPORT_SYMBOL_GPL(ata_bus_reset);
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
+EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
+EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
+EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
+EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
+EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
+#endif /* CONFIG_PCI */
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
new file mode 100644
index 0000000..fe2839e
--- /dev/null
+++ b/drivers/ata/libata.h
@@ -0,0 +1,210 @@
+/*
+ * libata.h - helper library for ATA
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ */
+
+#ifndef __LIBATA_H__
+#define __LIBATA_H__
+
+#define DRV_NAME "libata"
+#define DRV_VERSION "3.00" /* must be exactly four chars */
+
+struct ata_scsi_args {
+ struct ata_device *dev;
+ u16 *id;
+ struct scsi_cmnd *cmd;
+ void (*done)(struct scsi_cmnd *);
+};
+
+static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset)
+{
+ if (reset == sata_std_hardreset)
+ return 1;
+#ifdef CONFIG_ATA_SFF
+ if (reset == sata_sff_hardreset)
+ return 1;
+#endif
+ return 0;
+}
+
+/* libata-core.c */
+enum {
+ /* flags for ata_dev_read_id() */
+ ATA_READID_POSTRESET = (1 << 0), /* reading ID after reset */
+
+ /* selector for ata_down_xfermask_limit() */
+ ATA_DNXFER_PIO = 0, /* speed down PIO */
+ ATA_DNXFER_DMA = 1, /* speed down DMA */
+ ATA_DNXFER_40C = 2, /* apply 40c cable limit */
+ ATA_DNXFER_FORCE_PIO = 3, /* force PIO */
+ ATA_DNXFER_FORCE_PIO0 = 4, /* force PIO0 */
+
+ ATA_DNXFER_QUIET = (1 << 31),
+};
+
+extern unsigned int ata_print_id;
+extern struct workqueue_struct *ata_aux_wq;
+extern int atapi_passthru16;
+extern int libata_fua;
+extern int libata_noacpi;
+extern int libata_allow_tpm;
+extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
+extern void ata_force_cbl(struct ata_port *ap);
+extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
+extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
+extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
+extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+ u64 block, u32 n_block, unsigned int tf_flags,
+ unsigned int tag);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
+extern void ata_dev_disable(struct ata_device *dev);
+extern void ata_pio_queue_task(struct ata_port *ap, void *data,
+ unsigned long delay);
+extern void ata_port_flush_task(struct ata_port *ap);
+extern unsigned ata_exec_internal(struct ata_device *dev,
+ struct ata_taskfile *tf, const u8 *cdb,
+ int dma_dir, void *buf, unsigned int buflen,
+ unsigned long timeout);
+extern unsigned ata_exec_internal_sg(struct ata_device *dev,
+ struct ata_taskfile *tf, const u8 *cdb,
+ int dma_dir, struct scatterlist *sg,
+ unsigned int n_elem, unsigned long timeout);
+extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
+extern int ata_wait_ready(struct ata_link *link, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link));
+extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
+ unsigned int flags, u16 *id);
+extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
+extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
+ unsigned int readid_flags);
+extern int ata_dev_configure(struct ata_device *dev);
+extern int sata_down_spd_limit(struct ata_link *link);
+extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
+extern void ata_sg_clean(struct ata_queued_cmd *qc);
+extern void ata_qc_free(struct ata_queued_cmd *qc);
+extern void ata_qc_issue(struct ata_queued_cmd *qc);
+extern void __ata_qc_complete(struct ata_queued_cmd *qc);
+extern int atapi_check_dma(struct ata_queued_cmd *qc);
+extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
+extern bool ata_phys_link_online(struct ata_link *link);
+extern bool ata_phys_link_offline(struct ata_link *link);
+extern void ata_dev_init(struct ata_device *dev);
+extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
+extern int sata_link_init_spd(struct ata_link *link);
+extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
+extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
+extern struct ata_port *ata_port_alloc(struct ata_host *host);
+extern void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy);
+extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm);
+
+/* libata-acpi.c */
+#ifdef CONFIG_ATA_ACPI
+extern void ata_acpi_associate_sata_port(struct ata_port *ap);
+extern void ata_acpi_associate(struct ata_host *host);
+extern void ata_acpi_dissociate(struct ata_host *host);
+extern int ata_acpi_on_suspend(struct ata_port *ap);
+extern void ata_acpi_on_resume(struct ata_port *ap);
+extern int ata_acpi_on_devcfg(struct ata_device *dev);
+extern void ata_acpi_on_disable(struct ata_device *dev);
+extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
+#else
+static inline void ata_acpi_associate_sata_port(struct ata_port *ap) { }
+static inline void ata_acpi_associate(struct ata_host *host) { }
+static inline void ata_acpi_dissociate(struct ata_host *host) { }
+static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
+static inline void ata_acpi_on_resume(struct ata_port *ap) { }
+static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
+static inline void ata_acpi_on_disable(struct ata_device *dev) { }
+static inline void ata_acpi_set_state(struct ata_port *ap,
+ pm_message_t state) { }
+#endif
+
+/* libata-scsi.c */
+extern int ata_scsi_add_hosts(struct ata_host *host,
+ struct scsi_host_template *sht);
+extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
+extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern void ata_scsi_media_change_notify(struct ata_device *dev);
+extern void ata_scsi_hotplug(struct work_struct *work);
+extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
+extern void ata_scsi_dev_rescan(struct work_struct *work);
+extern int ata_bus_probe(struct ata_port *ap);
+
+/* libata-eh.c */
+extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
+extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
+extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
+extern void ata_scsi_error(struct Scsi_Host *host);
+extern void ata_port_wait_eh(struct ata_port *ap);
+extern void ata_eh_fastdrain_timerfn(unsigned long arg);
+extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
+extern void ata_eh_detach_dev(struct ata_device *dev);
+extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
+ unsigned int action);
+extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
+ unsigned int action);
+extern void ata_eh_autopsy(struct ata_port *ap);
+extern void ata_eh_report(struct ata_port *ap);
+extern int ata_eh_reset(struct ata_link *link, int classify,
+ ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
+ ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
+extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
+ ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+ ata_postreset_fn_t postreset,
+ struct ata_link **r_failed_disk);
+extern void ata_eh_finish(struct ata_port *ap);
+
+/* libata-pmp.c */
+#ifdef CONFIG_SATA_PMP
+extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_pmp_attach(struct ata_device *dev);
+#else /* CONFIG_SATA_PMP */
+static inline int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+ return -EINVAL;
+}
+
+static inline int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
+{
+ return -EINVAL;
+}
+
+static inline int sata_pmp_attach(struct ata_device *dev)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_SATA_PMP */
+
+/* libata-sff.c */
+#ifdef CONFIG_ATA_SFF
+extern void ata_dev_select(struct ata_port *ap, unsigned int device,
+ unsigned int wait, unsigned int can_sleep);
+extern u8 ata_irq_on(struct ata_port *ap);
+extern void ata_pio_task(struct work_struct *work);
+#endif /* CONFIG_ATA_SFF */
+
+#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
new file mode 100644
index 0000000..e2e332d
--- /dev/null
+++ b/drivers/ata/pata_acpi.c
@@ -0,0 +1,305 @@
+/*
+ * ACPI PATA driver
+ *
+ * (c) 2007 Red Hat
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acnames.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acparser.h>
+#include <acpi/acexcep.h>
+#include <acpi/acmacros.h>
+#include <acpi/actypes.h>
+
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_acpi"
+#define DRV_VERSION "0.2.3"
+
+struct pata_acpi {
+ struct ata_acpi_gtm gtm;
+ void *last;
+ unsigned long mask[2];
+};
+
+/**
+ * pacpi_pre_reset - perform reset handling
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Perform the PATA port setup we need.
+ */
+
+static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pata_acpi *acpi = ap->private_data;
+ if (ap->acpi_handle == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
+ return -ENODEV;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * pacpi_cable_detect - cable type detection
+ * @ap: port to detect
+ *
+ * Perform device specific cable detection
+ */
+
+static int pacpi_cable_detect(struct ata_port *ap)
+{
+ struct pata_acpi *acpi = ap->private_data;
+
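+	/* Any mode above UDMA2 (the 0xF8 nibble of the UDMA mask) needs
+	   an 80 wire cable */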
+ if ((acpi->mask[0] | acpi->mask[1]) & (0xF8 << ATA_SHIFT_UDMA))
+ return ATA_CBL_PATA80;
+ else
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * pacpi_discover_modes - filter non ACPI modes
+ * @ap: ATA port
+ * @adev: ATA device
+ *
+ * Try the modes available and see which ones the ACPI method will
+ * set up sensibly. From this we get a mask of ACPI modes we can use.
+ */
+
+static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pata_acpi *acpi = ap->private_data;
+ struct ata_acpi_gtm probe;
+ unsigned int xfer_mask;
+
+ probe = acpi->gtm;
+
+ ata_acpi_gtm(ap, &probe);
+
+ xfer_mask = ata_acpi_gtm_xfermask(adev, &probe);
+
+ if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
+ ap->cbl = ATA_CBL_PATA80;
+
+ return xfer_mask;
+}
+
+/**
+ * pacpi_mode_filter - mode filter for ACPI
+ * @adev: device
+ * @mask: mask of valid modes
+ *
+ * Filter the valid mode list according to our own specific rules, in
+ * this case the list of discovered valid modes obtained by ACPI probing
+ */
+
+static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+ struct pata_acpi *acpi = adev->link->ap->private_data;
+ return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]);
+}
+
+/**
+ * pacpi_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ */
+
+static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ int unit = adev->devno;
+ struct pata_acpi *acpi = ap->private_data;
+ const struct ata_timing *t;
+
+ if (!(acpi->gtm.flags & 0x10))
+ unit = 0;
+
+ /* Now stuff the nS values into the structure */
+ t = ata_timing_find_mode(adev->pio_mode);
+ acpi->gtm.drive[unit].pio = t->cycle;
+ ata_acpi_stm(ap, &acpi->gtm);
+ /* See what mode we actually got */
+ ata_acpi_gtm(ap, &acpi->gtm);
+}
+
+/**
+ * pacpi_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ */
+
+static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ int unit = adev->devno;
+ struct pata_acpi *acpi = ap->private_data;
+ const struct ata_timing *t;
+
+ if (!(acpi->gtm.flags & 0x10))
+ unit = 0;
+
+ /* Now stuff the nS values into the structure */
+ t = ata_timing_find_mode(adev->dma_mode);
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ acpi->gtm.drive[unit].dma = t->udma;
+ acpi->gtm.flags |= (1 << (2 * unit));
+ } else {
+ acpi->gtm.drive[unit].dma = t->cycle;
+ acpi->gtm.flags &= ~(1 << (2 * unit));
+ }
+ ata_acpi_stm(ap, &acpi->gtm);
+ /* See what mode we actually got */
+ ata_acpi_gtm(ap, &acpi->gtm);
+}
+
+/**
+ * pacpi_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings if
+ * necessary.
+ */
+
+static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct pata_acpi *acpi = ap->private_data;
+
+ if (acpi->gtm.flags & 0x10)
+ return ata_sff_qc_issue(qc);
+
+ if (adev != acpi->last) {
+ pacpi_set_piomode(ap, adev);
+ if (ata_dma_enabled(adev))
+ pacpi_set_dmamode(ap, adev);
+ acpi->last = adev;
+ }
+ return ata_sff_qc_issue(qc);
+}
+
+/**
+ * pacpi_port_start - port setup
+ * @ap: ATA port being set up
+ *
+ * Use the port_start hook to maintain private control structures
+ */
+
+static int pacpi_port_start(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pata_acpi *acpi;
+
+ if (ap->acpi_handle == NULL)
+ return -ENODEV;
+
+ acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL);
+ if (ap->private_data == NULL)
+ return -ENOMEM;
+ acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
+ acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
+	return ata_sff_port_start(ap);
+}
+
+static struct scsi_host_template pacpi_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pacpi_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_issue = pacpi_qc_issue,
+ .cable_detect = pacpi_cable_detect,
+ .mode_filter = pacpi_mode_filter,
+ .set_piomode = pacpi_set_piomode,
+ .set_dmamode = pacpi_set_dmamode,
+ .prereset = pacpi_pre_reset,
+ .port_start = pacpi_port_start,
+};
+
+
+/**
+ * pacpi_init_one - Register ACPI ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in pacpi_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int pacpi_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = 0x7f,
+
+ .port_ops = &pacpi_ops,
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ if (pdev->vendor == PCI_VENDOR_ID_ATI) {
+ int rc = pcim_enable_device(pdev);
+ if (rc < 0)
+ return rc;
+ pcim_pin_device(pdev);
+ }
+ return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL);
+}
+
+static const struct pci_device_id pacpi_pci_tbl[] = {
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
+ { } /* terminate list */
+};
+
+static struct pci_driver pacpi_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = pacpi_pci_tbl,
+ .probe = pacpi_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init pacpi_init(void)
+{
+ return pci_register_driver(&pacpi_pci_driver);
+}
+
+static void __exit pacpi_exit(void)
+{
+ pci_unregister_driver(&pacpi_pci_driver);
+}
+
+module_init(pacpi_init);
+module_exit(pacpi_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for ATA in ACPI mode");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pacpi_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
new file mode 100644
index 0000000..73c466e
--- /dev/null
+++ b/drivers/ata/pata_ali.c
@@ -0,0 +1,610 @@
+/*
+ * pata_ali.c - ALI 15x3 PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ *
+ * based in part upon
+ * linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02
+ *
+ * Copyright (C) 1998-2000 Michel Aubry, Maintainer
+ * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
+ * Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
+ *
+ * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
+ * May be copied or modified under the terms of the GNU General Public License
+ * Copyright (C) 2002 Alan Cox <alan@redhat.com>
+ * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
+ *
+ * Documentation
+ * Chipset documentation available under NDA only
+ *
+ * TODO/CHECK
+ * Cannot have ATAPI on both master & slave for rev < c2 (???) but
+ * otherwise should do atapi DMA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_ali"
+#define DRV_VERSION "0.7.5"
+
+static int ali_atapi_dma = 0;
+module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
+MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");
+
+/*
+ * Cable special cases
+ */
+
+static const struct dmi_system_id cable_dmi_table[] = {
+ {
+ .ident = "HP Pavilion N5430",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
+ },
+ },
+ {
+		.ident = "Toshiba Satellite S1800-814",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
+ },
+ },
+ { }
+};
+
+static int ali_cable_override(struct pci_dev *pdev)
+{
+ /* Fujitsu P2000 */
+ if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
+ return 1;
+ /* Mitac 8317 (Winbook-A) and relatives */
+ if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317)
+ return 1;
+ /* Systems by DMI */
+ if (dmi_check_system(cable_dmi_table))
+ return 1;
+ return 0;
+}
+
+/**
+ * ali_c2_cable_detect - cable detection
+ * @ap: ATA port
+ *
+ * Perform cable detection for C2 and later revisions
+ */
+
+static int ali_c2_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 ata66;
+
+ /* Certain laptops use short but suitable cables and don't
+ implement the detect logic */
+
+ if (ali_cable_override(pdev))
+ return ATA_CBL_PATA40_SHORT;
+
+	/* Host view cable detect: 0x4A bit 0 primary, bit 1 secondary.
+	   Bit set means 40 pin */
+ pci_read_config_byte(pdev, 0x4A, &ata66);
+ if (ata66 & (1 << ap->port_no))
+ return ATA_CBL_PATA40;
+ else
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * ali_20_filter - filter for earlier ALI DMA
+ * @adev: attached device
+ * @mask: mask of valid modes
+ *
+ * Ensure that we do not do DMA on CD devices. We may be able to
+ * fix that later on. Also ensure we do not do UDMA on WDC drives.
+ */
+
+static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
+{
+ char model_num[ATA_ID_PROD_LEN + 1];
+ /* No DMA on anything but a disk for now */
+ if (adev->class != ATA_DEV_ATA)
+ mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+ ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+ if (strstr(model_num, "WDC"))
+		return mask & ~ATA_MASK_UDMA;
+ return ata_bmdma_mode_filter(adev, mask);
+}
+
+/**
+ * ali_fifo_control - FIFO manager
+ * @ap: ALi channel to control
+ * @adev: device for FIFO control
+ * @on: FIFO nibble to program (0 turns the FIFO off)
+ *
+ * Enable or disable the FIFO on a given device. Because of the way the
+ * ALi FIFO works it provides a boost on ATA disk but can be confused by
+ * ATAPI and we must therefore manage it.
+ */
+
+static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int pio_fifo = 0x54 + ap->port_no;
+ u8 fifo;
+ int shift = 4 * adev->devno;
+
+	/* ATA - FIFO on: set nibble to 0x05; ATAPI - FIFO off: set nibble to
+	   0x00. Not all the docs agree but the behaviour we now use is the
+ one stated in the BIOS Programming Guide */
+
+ pci_read_config_byte(pdev, pio_fifo, &fifo);
+ fifo &= ~(0x0F << shift);
+ if (on)
+ fifo |= (on << shift);
+ pci_write_config_byte(pdev, pio_fifo, fifo);
+}
+
+/**
+ * ali_program_modes - load mode registers
+ * @ap: ALi channel to load
+ * @adev: Device the timing is for
+ * @t: ATA timing data, or NULL to leave the command/data timings alone
+ * @ultra: UDMA timing or zero for off
+ *
+ * Loads the timing registers for cmd/data and disables UDMA if
+ * @ultra is zero. If @ultra is set then load and enable the UDMA
+ * timing but do not touch the command/data timing.
+ */
+
+static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	int cas = 0x58 + 4 * ap->port_no;	/* Address setup timing */
+	int cbt = 0x59 + 4 * ap->port_no;	/* 8bit command timing */
+ int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */
+ int udmat = 0x56 + ap->port_no; /* UDMA timing */
+ int shift = 4 * adev->devno;
+ u8 udma;
+
+ if (t != NULL) {
+ t->setup = clamp_val(t->setup, 1, 8) & 7;
+ t->act8b = clamp_val(t->act8b, 1, 8) & 7;
+ t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
+ t->active = clamp_val(t->active, 1, 8) & 7;
+ t->recover = clamp_val(t->recover, 1, 16) & 15;
+
+ pci_write_config_byte(pdev, cas, t->setup);
+ pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
+ pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover);
+ }
+
+ /* Set up the UDMA enable */
+ pci_read_config_byte(pdev, udmat, &udma);
+ udma &= ~(0x0F << shift);
+ udma |= ultra << shift;
+ pci_write_config_byte(pdev, udmat, udma);
+}
+
+/**
+ * ali_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program the ALi registers for PIO mode. FIXME: add timings for
+ * PIO5.
+ */
+
+static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_device *pair = ata_dev_pair(adev);
+ struct ata_timing t;
+ unsigned long T = 1000000000 / 33333; /* PCI clock based */
+
+ ata_timing_compute(adev, adev->pio_mode, &t, T, 1);
+ if (pair) {
+ struct ata_timing p;
+ ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
+ ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+ if (pair->dma_mode) {
+ ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
+ ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+ }
+ }
+
+ /* PIO FIFO is only permitted on ATA disk */
+ if (adev->class != ATA_DEV_ATA)
+ ali_fifo_control(ap, adev, 0x00);
+ ali_program_modes(ap, adev, &t, 0);
+ if (adev->class == ATA_DEV_ATA)
+ ali_fifo_control(ap, adev, 0x05);
+
+}
+
+/**
+ * ali_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * FIXME: MWDMA timings
+ */
+
+static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
+ struct ata_device *pair = ata_dev_pair(adev);
+ struct ata_timing t;
+ unsigned long T = 1000000000 / 33333; /* PCI clock based */
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+
+ if (adev->class == ATA_DEV_ATA)
+ ali_fifo_control(ap, adev, 0x08);
+
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]);
+ if (adev->dma_mode >= XFER_UDMA_3) {
+ u8 reg4b;
+ pci_read_config_byte(pdev, 0x4B, &reg4b);
+ reg4b |= 1;
+ pci_write_config_byte(pdev, 0x4B, reg4b);
+ }
+ } else {
+ ata_timing_compute(adev, adev->dma_mode, &t, T, 1);
+ if (pair) {
+ struct ata_timing p;
+ ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
+ ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+ if (pair->dma_mode) {
+ ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
+ ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
+ }
+ }
+ ali_program_modes(ap, adev, &t, 0);
+ }
+}
+
+/**
+ * ali_warn_atapi_dma - Warn about ATAPI DMA disablement
+ * @adev: Device
+ *
+ * Whine about ATAPI DMA disablement if @adev is an ATAPI device.
+ * Can be used as ->dev_config.
+ */
+
+static void ali_warn_atapi_dma(struct ata_device *adev)
+{
+ struct ata_eh_context *ehc = &adev->link->eh_context;
+ int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
+
+ if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) {
+ ata_dev_printk(adev, KERN_WARNING,
+			       "WARNING: ATAPI DMA disabled for reliability issues. It can be enabled\n");
+ ata_dev_printk(adev, KERN_WARNING,
+ "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n");
+ }
+}
+
+/**
+ * ali_lock_sectors - Keep older devices to 255 sector mode
+ * @adev: Device
+ *
+ * Called during the bus probe for each device that is found. We use
+ * this call to lock the sector count of the device to 255 or less on
+ * older ALi controllers. If we didn't do this then large I/Os would
+ * require LBA48 commands, which the older ALi controllers require to
+ * be issued by slower PIO methods.
+ */
+
+static void ali_lock_sectors(struct ata_device *adev)
+{
+ adev->max_sectors = 255;
+ ali_warn_atapi_dma(adev);
+}
+
+/**
+ * ali_check_atapi_dma - DMA check for most ALi controllers
+ * @qc: Command to check
+ *
+ * Called to decide whether commands should be sent by DMA or PIO
+ */
+
+static int ali_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ if (!ali_atapi_dma) {
+ /* FIXME: pata_ali can't do ATAPI DMA reliably but the
+ * IDE alim15x3 driver can. I tried lots of things
+ * but couldn't find what the actual difference was.
+ * If you got an idea, please write it to
+ * linux-ide@vger.kernel.org and cc htejun@gmail.com.
+ *
+ * Disable ATAPI DMA for now.
+ */
+ return -EOPNOTSUPP;
+ }
+
+	/* If it's not a media command, it's not worth it */
+ if (atapi_cmd_type(qc->cdb[0]) == ATAPI_MISC)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static struct scsi_host_template ali_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ * Port operations for PIO only ALi
+ */
+
+static struct ata_port_operations ali_early_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = ali_set_piomode,
+};
+
+static const struct ata_port_operations ali_dma_base_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .set_piomode = ali_set_piomode,
+ .set_dmamode = ali_set_dmamode,
+};
+
+/*
+ * Port operations for DMA capable ALi without cable
+ * detect
+ */
+static struct ata_port_operations ali_20_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .cable_detect = ata_cable_40wire,
+ .mode_filter = ali_20_filter,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .dev_config = ali_lock_sectors,
+};
+
+/*
+ * Port operations for DMA capable ALi with cable detect
+ */
+static struct ata_port_operations ali_c2_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+ .dev_config = ali_lock_sectors,
+};
+
+/*
+ * Port operations for DMA capable ALi with cable detect and LBA48
+ */
+static struct ata_port_operations ali_c5_port_ops = {
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .dev_config = ali_warn_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+};
+
+
+/**
+ * ali_init_chipset - chip setup function
+ * @pdev: PCI device of ATA controller
+ *
+ * Perform the setup on the device that must be done both at boot
+ * and at resume time.
+ */
+
+static void ali_init_chipset(struct pci_dev *pdev)
+{
+ u8 tmp;
+ struct pci_dev *north, *isa_bridge;
+
+ /*
+ * The chipset revision selects the driver operations and
+ * mode data.
+ */
+
+ if (pdev->revision >= 0x20 && pdev->revision < 0xC2) {
+ /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
+ pci_read_config_byte(pdev, 0x4B, &tmp);
+ /* Clear CD-ROM DMA write bit */
+ tmp &= 0x7F;
+ pci_write_config_byte(pdev, 0x4B, tmp);
+ } else if (pdev->revision >= 0xC2) {
+ /* Enable cable detection logic */
+ pci_read_config_byte(pdev, 0x4B, &tmp);
+ pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
+ }
+ north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+
+ if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) {
+ /* Configure the ALi bridge logic. For non ALi rely on BIOS.
+ Set the south bridge enable bit */
+ pci_read_config_byte(isa_bridge, 0x79, &tmp);
+ if (pdev->revision == 0xC2)
+ pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
+ else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
+ pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
+ }
+ if (pdev->revision >= 0x20) {
+ /*
+ * CD_ROM DMA on (0x53 bit 0). Enable this even if we want
+ * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
+ * via 0x54/55.
+ */
+ pci_read_config_byte(pdev, 0x53, &tmp);
+ if (pdev->revision <= 0x20)
+ tmp &= ~0x02;
+ if (pdev->revision >= 0xc7)
+ tmp |= 0x03;
+ else
+ tmp |= 0x01; /* CD_ROM enable for DMA */
+ pci_write_config_byte(pdev, 0x53, tmp);
+ }
+ pci_dev_put(isa_bridge);
+ pci_dev_put(north);
+ ata_pci_bmdma_clear_simplex(pdev);
+}
+
+/**
+ * ali_init_one - discovery callback
+ * @pdev: PCI device ID
+ * @id: PCI table info
+ *
+ * An ALi IDE interface has been discovered. Figure out what revision
+ * and perform configuration work before handing it to the ATA layer
+ */
+
+static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info_early = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .port_ops = &ali_early_port_ops
+ };
+ /* Revision 0x20 added DMA */
+ static const struct ata_port_info info_20 = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &ali_20_port_ops
+ };
+	/* Revision 0x20 with UDMA capable support logic added */
+ static const struct ata_port_info info_20_udma = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = 0x07, /* UDMA33 */
+ .port_ops = &ali_20_port_ops
+ };
+ /* Revision 0xC2 adds UDMA66 */
+ static const struct ata_port_info info_c2 = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &ali_c2_port_ops
+ };
+ /* Revision 0xC3 is UDMA66 for now */
+ static const struct ata_port_info info_c3 = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &ali_c2_port_ops
+ };
+ /* Revision 0xC4 is UDMA100 */
+ static const struct ata_port_info info_c4 = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &ali_c2_port_ops
+ };
+ /* Revision 0xC5 is UDMA133 with LBA48 DMA */
+ static const struct ata_port_info info_c5 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ali_c5_port_ops
+ };
+
+ const struct ata_port_info *ppi[] = { NULL, NULL };
+ u8 tmp;
+ struct pci_dev *isa_bridge;
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /*
+ * The chipset revision selects the driver operations and
+ * mode data.
+ */
+
+ if (pdev->revision < 0x20) {
+ ppi[0] = &info_early;
+ } else if (pdev->revision < 0xC2) {
+ ppi[0] = &info_20;
+ } else if (pdev->revision == 0xC2) {
+ ppi[0] = &info_c2;
+ } else if (pdev->revision == 0xC3) {
+ ppi[0] = &info_c3;
+ } else if (pdev->revision == 0xC4) {
+ ppi[0] = &info_c4;
+ } else
+ ppi[0] = &info_c5;
+
+ ali_init_chipset(pdev);
+
+ isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+ if (isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
+ /* Are we paired with a UDMA capable chip */
+ pci_read_config_byte(isa_bridge, 0x5E, &tmp);
+ if ((tmp & 0x1E) == 0x12)
+ ppi[0] = &info_20_udma;
+ }
+ pci_dev_put(isa_bridge);
+
+ return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL);
+}
+
+#ifdef CONFIG_PM
+static int ali_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+ ali_init_chipset(pdev);
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id ali[] = {
+ { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
+ { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), },
+
+ { },
+};
+
+static struct pci_driver ali_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = ali,
+ .probe = ali_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ali_reinit_one,
+#endif
+};
+
+static int __init ali_init(void)
+{
+ return pci_register_driver(&ali_pci_driver);
+}
+
+
+static void __exit ali_exit(void)
+{
+ pci_unregister_driver(&ali_pci_driver);
+}
+
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for ALi PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ali);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ali_init);
+module_exit(ali_exit);
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
new file mode 100644
index 0000000..0ec9c7d
--- /dev/null
+++ b/drivers/ata/pata_amd.c
@@ -0,0 +1,609 @@
+/*
+ * pata_amd.c - AMD PATA for new ATA layer
+ * (C) 2005-2006 Red Hat Inc
+ *
+ * Based on pata-sil680. Errata information is taken from data sheets
+ * and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
+ * claimed by sata-nv.c.
+ *
+ * TODO:
+ * Variable system clock when/if it makes sense
+ * Power management on ports
+ *
+ *
+ * Documentation publicly available.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_amd"
+#define DRV_VERSION "0.3.10"
+
+/**
+ * timing_setup - shared timing computation and load
+ * @ap: ATA port being set up
+ * @adev: drive being configured
+ * @offset: port offset
+ * @speed: target speed
+ * @clock: clock multiplier (number of times 33MHz for this part)
+ *
+ * Perform the actual timing set up for Nvidia or AMD PATA devices.
+ * The actual devices vary so they all call into this helper function
+ * providing the clock multiplier and offset (because AMD and Nvidia put
+ * the ports at different locations).
+ */
+
+static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
+{
+ static const unsigned char amd_cyc2udma[] = {
+ 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
+ };
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct ata_device *peer = ata_dev_pair(adev);
+ int dn = ap->port_no * 2 + adev->devno;
+ struct ata_timing at, apeer;
+ int T, UT;
+ const int amd_clock = 33333; /* KHz. */
+ u8 t;
+
+ T = 1000000000 / amd_clock;
+ UT = T;
+ if (clock >= 2)
+ UT = T / 2;
+
+ if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
+ dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
+ return;
+ }
+
+ if (peer) {
+		/* This may be overly conservative */
+ if (peer->dma_mode) {
+ ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
+ ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
+ }
+ ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
+ ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
+ }
+
+ if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
+ if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;
+
+ /*
+ * Now do the setup work
+ */
+
+ /* Configure the address set up timing */
+ pci_read_config_byte(pdev, offset + 0x0C, &t);
+ t = (t & ~(3 << ((3 - dn) << 1))) | ((clamp_val(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
+ pci_write_config_byte(pdev, offset + 0x0C , t);
+
+ /* Configure the 8bit I/O timing */
+ pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
+ ((clamp_val(at.act8b, 1, 16) - 1) << 4) | (clamp_val(at.rec8b, 1, 16) - 1));
+
+ /* Drive timing */
+ pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
+ ((clamp_val(at.active, 1, 16) - 1) << 4) | (clamp_val(at.recover, 1, 16) - 1));
+
+ switch (clock) {
+ case 1:
+ t = at.udma ? (0xc0 | (clamp_val(at.udma, 2, 5) - 2)) : 0x03;
+ break;
+
+ case 2:
+ t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 2, 10)]) : 0x03;
+ break;
+
+ case 3:
+ t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 10)]) : 0x03;
+ break;
+
+ case 4:
+ t = at.udma ? (0xc0 | amd_cyc2udma[clamp_val(at.udma, 1, 15)]) : 0x03;
+ break;
+
+ default:
+ return;
+ }
+
+ /* UDMA timing */
+ if (at.udma)
+ pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
+}
+
+/**
+ * amd_pre_reset - perform reset handling
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Reset sequence checking enable bits to see which ports are
+ * active.
+ */
+
+static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits amd_enable_bits[] = {
+ { 0x40, 1, 0x02, 0x02 },
+ { 0x40, 1, 0x01, 0x01 }
+ };
+
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
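+/**
+ * amd_cable_detect - report cable type
+ * @ap: port to check
+ *
+ * The cable type is read from the bit pairs in config register 0x42,
+ * one pair per channel; a set bit indicates an 80 wire cable.
+ */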
+static int amd_cable_detect(struct ata_port *ap)
+{
+ static const u32 bitmask[2] = {0x03, 0x0C};
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 ata66;
+
+ pci_read_config_byte(pdev, 0x42, &ata66);
+ if (ata66 & bitmask[ap->port_no])
+ return ATA_CBL_PATA80;
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * amd33_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program the AMD registers for PIO mode.
+ */
+
+static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
+}
+
+static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
+}
+
+static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
+}
+
+static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
+}
+
+/**
+ * amd33_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program the MWDMA/UDMA modes for the AMD and Nvidia
+ * chipset.
+ */
+
+static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
+}
+
+static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
+}
+
+static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
+}
+
+static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
+}
+
+/* Both host-side and drive-side detection results are worthless on NV
+ * PATAs. Ignore them and just follow what BIOS configured. Both the
+ * current configuration in PCI config reg and ACPI GTM result are
+ * cached during driver attach and are consulted to select transfer
+ * mode.
+ */
+static unsigned long nv_mode_filter(struct ata_device *dev,
+ unsigned long xfer_mask)
+{
+ static const unsigned int udma_mask_map[] =
+ { ATA_UDMA2, ATA_UDMA1, ATA_UDMA0, 0,
+ ATA_UDMA3, ATA_UDMA4, ATA_UDMA5, ATA_UDMA6 };
+ struct ata_port *ap = dev->link->ap;
+ char acpi_str[32] = "";
+ u32 saved_udma, udma;
+ const struct ata_acpi_gtm *gtm;
+ unsigned long bios_limit = 0, acpi_limit = 0, limit;
+
+ /* find out what BIOS configured */
+ udma = saved_udma = (unsigned long)ap->host->private_data;
+
+ if (ap->port_no == 0)
+ udma >>= 16;
+ if (dev->devno == 0)
+ udma >>= 8;
+
+ if ((udma & 0xc0) == 0xc0)
+ bios_limit = ata_pack_xfermask(0, 0, udma_mask_map[udma & 0x7]);
+
+ /* consult ACPI GTM too */
+ gtm = ata_acpi_init_gtm(ap);
+ if (gtm) {
+ acpi_limit = ata_acpi_gtm_xfermask(dev, gtm);
+
+ snprintf(acpi_str, sizeof(acpi_str), " (%u:%u:0x%x)",
+ gtm->drive[0].dma, gtm->drive[1].dma, gtm->flags);
+ }
+
+ /* be optimistic, EH can take care of things if something goes wrong */
+ limit = bios_limit | acpi_limit;
+
+ /* If PIO or DMA isn't configured at all, don't limit. Let EH
+ * handle it.
+ */
+ if (!(limit & ATA_MASK_PIO))
+ limit |= ATA_MASK_PIO;
+ if (!(limit & (ATA_MASK_MWDMA | ATA_MASK_UDMA)))
+ limit |= ATA_MASK_MWDMA | ATA_MASK_UDMA;
+
+ ata_port_printk(ap, KERN_DEBUG, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
+ "BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
+ xfer_mask, limit, xfer_mask & limit, bios_limit,
+ saved_udma, acpi_limit, acpi_str);
+
+ return xfer_mask & limit;
+}
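+
+/*
+ * Example of the unpacking above: the dword cached from PCI config 0x60
+ * holds one UDMA byte per drive. Port 0 shifts down 16 bits and device
+ * 0 a further 8, so port 0/device 0 occupies bits 31:24. A byte of
+ * 0xC2 carries the 0xC0 "UDMA enabled" signature and its low bits map
+ * through udma_mask_map[2] to ATA_UDMA0.
+ */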
+
+/**
+ *	nv_pre_reset	-	perform reset handling
+ *	@link: ATA link
+ *	@deadline: deadline jiffies for the operation
+ *
+ *	Reset sequence checking enable bits to see which ports are
+ *	active.
+ */
+
+static int nv_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits nv_enable_bits[] = {
+ { 0x50, 1, 0x02, 0x02 },
+ { 0x50, 1, 0x01, 0x01 }
+ };
+
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * nv100_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program the AMD registers for PIO mode.
+ */
+
+static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
+}
+
+static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
+}
+
+/**
+ * nv100_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program the MWDMA/UDMA modes for the AMD and Nvidia
+ * chipset.
+ */
+
+static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
+}
+
+static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
+}
+
+static void nv_host_stop(struct ata_host *host)
+{
+ u32 udma = (unsigned long)host->private_data;
+
+ /* restore PCI config register 0x60 */
+ pci_write_config_dword(to_pci_dev(host->dev), 0x60, udma);
+}
+
+static struct scsi_host_template amd_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations amd_base_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = amd_pre_reset,
+};
+
+static struct ata_port_operations amd33_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = amd33_set_piomode,
+ .set_dmamode = amd33_set_dmamode,
+};
+
+static struct ata_port_operations amd66_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = amd66_set_piomode,
+ .set_dmamode = amd66_set_dmamode,
+};
+
+static struct ata_port_operations amd100_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = amd100_set_piomode,
+ .set_dmamode = amd100_set_dmamode,
+};
+
+static struct ata_port_operations amd133_port_ops = {
+ .inherits = &amd_base_port_ops,
+ .cable_detect = amd_cable_detect,
+ .set_piomode = amd133_set_piomode,
+ .set_dmamode = amd133_set_dmamode,
+};
+
+static const struct ata_port_operations nv_base_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_ignore,
+ .mode_filter = nv_mode_filter,
+ .prereset = nv_pre_reset,
+ .host_stop = nv_host_stop,
+};
+
+static struct ata_port_operations nv100_port_ops = {
+ .inherits = &nv_base_port_ops,
+ .set_piomode = nv100_set_piomode,
+ .set_dmamode = nv100_set_dmamode,
+};
+
+static struct ata_port_operations nv133_port_ops = {
+ .inherits = &nv_base_port_ops,
+ .set_piomode = nv133_set_piomode,
+ .set_dmamode = nv133_set_dmamode,
+};
+
+static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info[10] = {
+ { /* 0: AMD 7401 */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07, /* No SWDMA */
+ .udma_mask = 0x07, /* UDMA 33 */
+ .port_ops = &amd33_port_ops
+ },
+ { /* 1: Early AMD7409 - no swdma */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4, /* UDMA 66 */
+ .port_ops = &amd66_port_ops
+ },
+ { /* 2: AMD 7409, no swdma errata */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4, /* UDMA 66 */
+ .port_ops = &amd66_port_ops
+ },
+ { /* 3: AMD 7411 */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5, /* UDMA 100 */
+ .port_ops = &amd100_port_ops
+ },
+ { /* 4: AMD 7441 */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5, /* UDMA 100 */
+ .port_ops = &amd100_port_ops
+ },
+ { /* 5: AMD 8111*/
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6, /* UDMA 133, no swdma */
+ .port_ops = &amd133_port_ops
+ },
+ { /* 6: AMD 8111 UDMA 100 (Serenade) */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5, /* UDMA 100, no swdma */
+ .port_ops = &amd133_port_ops
+ },
+ { /* 7: Nvidia Nforce */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5, /* UDMA 100 */
+ .port_ops = &nv100_port_ops
+ },
+ { /* 8: Nvidia Nforce2 and later */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6, /* UDMA 133, no swdma */
+ .port_ops = &nv133_port_ops
+ },
+ { /* 9: AMD CS5536 (Geode companion) */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5, /* UDMA 100 */
+ .port_ops = &amd100_port_ops
+ }
+ };
+ const struct ata_port_info *ppi[] = { NULL, NULL };
+ static int printed_version;
+ int type = id->driver_data;
+ void *hpriv = NULL;
+ u8 fifo;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ pci_read_config_byte(pdev, 0x41, &fifo);
+
+ /* Check for AMD7409 without swdma errata and if found adjust type */
+ if (type == 1 && pdev->revision > 0x7)
+ type = 2;
+
+ /* Serenade ? */
+ if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
+ pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
+ type = 6; /* UDMA 100 only */
+
+ /*
+ * Okay, type is determined now. Apply type-specific workarounds.
+ */
+ ppi[0] = &info[type];
+
+ if (type < 3)
+ ata_pci_bmdma_clear_simplex(pdev);
+
+ /* Check for AMD7411 */
+ if (type == 3)
+ /* FIFO is broken */
+ pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
+ else
+ pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
+
+ /* Cable detection on Nvidia chips doesn't work too well,
+ * cache BIOS programmed UDMA mode.
+ */
+ if (type == 7 || type == 8) {
+ u32 udma;
+
+ pci_read_config_dword(pdev, 0x60, &udma);
+ hpriv = (void *)(unsigned long)udma;
+ }
+
+ /* And fire it up */
+ return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv);
+}
+
+#ifdef CONFIG_PM
+static int amd_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->vendor == PCI_VENDOR_ID_AMD) {
+ u8 fifo;
+ pci_read_config_byte(pdev, 0x41, &fifo);
+ if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7411)
+ /* FIFO is broken */
+ pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
+ else
+ pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
+ if (pdev->device == PCI_DEVICE_ID_AMD_VIPER_7409 ||
+ pdev->device == PCI_DEVICE_ID_AMD_COBRA_7401)
+ ata_pci_bmdma_clear_simplex(pdev);
+ }
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id amd[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
+
+ { },
+};
+
+static struct pci_driver amd_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = amd,
+ .probe = amd_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = amd_reinit_one,
+#endif
+};
+
+static int __init amd_init(void)
+{
+ return pci_register_driver(&amd_pci_driver);
+}
+
+static void __exit amd_exit(void)
+{
+ pci_unregister_driver(&amd_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, amd);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(amd_init);
+module_exit(amd_exit);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
new file mode 100644
index 0000000..6b3092c
--- /dev/null
+++ b/drivers/ata/pata_artop.c
@@ -0,0 +1,441 @@
+/*
+ * pata_artop.c - ARTOP ATA controller driver
+ *
+ * (C) 2006 Red Hat
+ * (C) 2007 Bartlomiej Zolnierkiewicz
+ *
+ * Based in part on drivers/ide/pci/aec62xx.c
+ * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
+ * 865/865R fixes for Macintosh card version from a patch to the old
+ * driver by Thibaut VARENE <varenet@parisc-linux.org>
+ * When setting the PCI latency we must set 0x80 or higher for burst
+ *	performance - Alessandro Zummo <alessandro.zummo@towertech.it>
+ *
+ * TODO
+ * 850 serialization once the core supports it
+ * Investigate no_dsc on 850R
+ * Clock detect
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_artop"
+#define DRV_VERSION "0.4.4"
+
+/*
+ * The ARTOP has 33 MHz and "over clocked" timing tables. Until we
+ * get PCI bus speed functionality we leave this as 0. It's a variable
+ * for when we get the functionality and also for folks wanting to
+ * test stuff.
+ */
+
+static int clock = 0;
+
+static int artop6210_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+	static const struct pci_bits artop_enable_bits[] = {
+ { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
+ { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
+ };
+
+ if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * artop6260_pre_reset - check for 40/80 pin
+ * @link: link
+ * @deadline: deadline jiffies for the operation
+ *
+ * The ARTOP hardware reports the cable detect bits in register 0x49.
+ * Nothing complicated needed here.
+ */
+
+static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits artop_enable_bits[] = {
+ { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
+ { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
+ };
+
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ /* Odd numbered device ids are the units with enable bits (the -R cards) */
+	if ((pdev->device & 1) && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * artop6260_cable_detect - identify cable type
+ * @ap: Port
+ *
+ * Identify the cable type for the ARTOP interface in question
+ */
+
+static int artop6260_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 tmp;
+ pci_read_config_byte(pdev, 0x49, &tmp);
+ if (tmp & (1 << ap->port_no))
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * artop6210_load_piomode - Load a set of PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device
+ * @pio: PIO mode
+ *
+ * Set PIO mode for device, in host controller PCI config space. This
+ * is used both to set PIO timings in PIO mode and also to set the
+ * matching PIO clocking for UDMA, as well as the MWDMA timings.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int dn = adev->devno + 2 * ap->port_no;
+ const u16 timing[2][5] = {
+ { 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
+ { 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
+
+ };
+ /* Load the PIO timing active/recovery bits */
+ pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
+}
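+
+/*
+ * For example, dn = 2 * port + devno selects one 16-bit timing word per
+ * drive starting at 0x40: channel 0 master uses 0x40, channel 1 slave
+ * uses 0x46. Row 0 of the table is the 33 MHz clock and row 1 the
+ * "over clocked" variant chosen by the module-level clock flag.
+ */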
+
+/**
+ * artop6210_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device we are configuring
+ *
+ * Set PIO mode for device, in host controller PCI config space. For
+ * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
+ * the event UDMA is used the later call to set_dmamode will set the
+ * bits as required.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int dn = adev->devno + 2 * ap->port_no;
+ u8 ultra;
+
+ artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+
+ /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
+ pci_read_config_byte(pdev, 0x54, &ultra);
+ ultra &= ~(3 << (2 * dn));
+ pci_write_config_byte(pdev, 0x54, ultra);
+}
+
+/**
+ * artop6260_load_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device we are configuring
+ * @pio: PIO mode
+ *
+ * Set PIO mode for device, in host controller PCI config space. The
+ * ARTOP6260 and relatives store the timing data differently.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void artop6260_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int dn = adev->devno + 2 * ap->port_no;
+ const u8 timing[2][5] = {
+ { 0x00, 0x0A, 0x08, 0x33, 0x31 },
+ { 0x70, 0x7A, 0x78, 0x43, 0x41 }
+
+ };
+ /* Load the PIO timing active/recovery bits */
+ pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
+}
+
+/**
+ * artop6260_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device we are configuring
+ *
+ * Set PIO mode for device, in host controller PCI config space. For
+ * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
+ * the event UDMA is used the later call to set_dmamode will set the
+ * bits as required.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 ultra;
+
+ artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+
+ /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
+ pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
+ ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
+ pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
+}
+
+/**
+ * artop6210_set_dmamode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ *
+ * Set DMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void artop6210_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int dn = adev->devno + 2 * ap->port_no;
+ u8 ultra;
+
+ if (adev->dma_mode == XFER_MW_DMA_0)
+ pio = 1;
+ else
+ pio = 4;
+
+ /* Load the PIO timing active/recovery bits */
+ artop6210_load_piomode(ap, adev, pio);
+
+ pci_read_config_byte(pdev, 0x54, &ultra);
+ ultra &= ~(3 << (2 * dn));
+
+ /* Add ultra DMA bits if in UDMA mode */
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
+ if (mode == 0)
+ mode = 1;
+ ultra |= (mode << (2 * dn));
+ }
+ pci_write_config_byte(pdev, 0x54, ultra);
+}
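+
+/*
+ * Example of the UDMA encoding above: each drive owns a two-bit field
+ * in register 0x54 at shift 2 * dn, holding
+ * (dma_mode - XFER_UDMA_0) + 1 - clock. With the 33 MHz table
+ * (clock = 0) UDMA2 stores 3; the clamp to 1 keeps UDMA0 on the
+ * over-clocked table from encoding as zero, i.e. UDMA disabled.
+ */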
+
+/**
+ * artop6260_set_dmamode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device we are configuring
+ *
+ * Set DMA mode for device, in host controller PCI config space. The
+ * ARTOP6260 and relatives store the timing data differently.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void artop6260_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	unsigned int pio;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 ultra;
+
+ if (adev->dma_mode == XFER_MW_DMA_0)
+ pio = 1;
+ else
+ pio = 4;
+
+ /* Load the PIO timing active/recovery bits */
+ artop6260_load_piomode(ap, adev, pio);
+
+ /* Add ultra DMA bits if in UDMA mode */
+ pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
+ ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
+ if (mode == 0)
+ mode = 1;
+ ultra |= (mode << (4 * adev->devno));
+ }
+ pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
+}
+
+static struct scsi_host_template artop_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations artop6210_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = artop6210_set_piomode,
+ .set_dmamode = artop6210_set_dmamode,
+ .prereset = artop6210_pre_reset,
+};
+
+static struct ata_port_operations artop6260_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = artop6260_cable_detect,
+ .set_piomode = artop6260_set_piomode,
+ .set_dmamode = artop6260_set_dmamode,
+ .prereset = artop6260_pre_reset,
+};
+
+
+/**
+ * artop_init_one - Register ARTOP ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in artop_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int artop_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static int printed_version;
+ static const struct ata_port_info info_6210 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &artop6210_ops,
+ };
+ static const struct ata_port_info info_626x = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &artop6260_ops,
+ };
+ static const struct ata_port_info info_628x = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &artop6260_ops,
+ };
+ static const struct ata_port_info info_628x_fast = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &artop6260_ops,
+ };
+ const struct ata_port_info *ppi[] = { NULL, NULL };
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ if (id->driver_data == 0) { /* 6210 variant */
+ ppi[0] = &info_6210;
+ ppi[1] = &ata_dummy_port_info;
+ /* BIOS may have left us in UDMA, clear it before libata probe */
+ pci_write_config_byte(pdev, 0x54, 0);
+ /* For the moment (also lacks dsc) */
+ printk(KERN_WARNING "ARTOP 6210 requires serialize functionality not yet supported by libata.\n");
+ printk(KERN_WARNING "Secondary ATA ports will not be activated.\n");
+ }
+ else if (id->driver_data == 1) /* 6260 */
+ ppi[0] = &info_626x;
+ else if (id->driver_data == 2) { /* 6280 or 6280 + fast */
+ unsigned long io = pci_resource_start(pdev, 4);
+ u8 reg;
+
+ ppi[0] = &info_628x;
+ if (inb(io) & 0x10)
+ ppi[0] = &info_628x_fast;
+		/* Mac systems come up with some registers unset; program
+		   the ones we will need below */
+
+ /* Clear reset & test bits */
+ pci_read_config_byte(pdev, 0x49, &reg);
+ pci_write_config_byte(pdev, 0x49, reg & ~ 0x30);
+
+ /* PCI latency must be > 0x80 for burst mode, tweak it
+ * if required.
+ */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
+ if (reg <= 0x80)
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
+
+ /* Enable IRQ output and burst mode */
+ pci_read_config_byte(pdev, 0x4a, &reg);
+ pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
+
+ }
+
+ BUG_ON(ppi[0] == NULL);
+
+ return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL);
+}
+
+static const struct pci_device_id artop_pci_tbl[] = {
+ { PCI_VDEVICE(ARTOP, 0x0005), 0 },
+ { PCI_VDEVICE(ARTOP, 0x0006), 1 },
+ { PCI_VDEVICE(ARTOP, 0x0007), 1 },
+ { PCI_VDEVICE(ARTOP, 0x0008), 2 },
+ { PCI_VDEVICE(ARTOP, 0x0009), 2 },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver artop_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = artop_pci_tbl,
+ .probe = artop_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static int __init artop_init(void)
+{
+ return pci_register_driver(&artop_pci_driver);
+}
+
+static void __exit artop_exit(void)
+{
+ pci_unregister_driver(&artop_pci_driver);
+}
+
+module_init(artop_init);
+module_exit(artop_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
new file mode 100644
index 0000000..ab61095
--- /dev/null
+++ b/drivers/ata/pata_at32.c
@@ -0,0 +1,409 @@
+/*
+ * AVR32 SMC/CFC PATA Driver
+ *
+ * Copyright (C) 2007 Atmel Norway
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <mach/board.h>
+#include <mach/smc.h>
+
+#define DRV_NAME "pata_at32"
+#define DRV_VERSION "0.0.3"
+
+/*
+ * CompactFlash controller memory layout relative to the base address:
+ *
+ * Attribute memory: 0000 0000 -> 003f ffff
+ * Common memory: 0040 0000 -> 007f ffff
+ * I/O memory: 0080 0000 -> 00bf ffff
+ * True IDE Mode: 00c0 0000 -> 00df ffff
+ * Alt IDE Mode: 00e0 0000 -> 00ff ffff
+ *
+ * Only True IDE and Alt True IDE mode are needed for this driver.
+ *
+ * True IDE mode => CS0 = 0, CS1 = 1 (cmd, error, stat, etc)
+ * Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat)
+ */
+#define CF_IDE_OFFSET 0x00c00000
+#define CF_ALT_IDE_OFFSET 0x00e00000
+#define CF_RES_SIZE 2048
+
+/*
+ * Define DEBUG_BUS if you are doing debugging of your own EBI -> PATA
+ * adaptor with a logic analyzer or similar.
+ */
+#undef DEBUG_BUS
+
+/*
+ * ATA PIO modes
+ *
+ * Name | Mb/s | Min cycle time | Mask
+ * --------+-------+----------------+--------
+ * Mode 0 | 3.3 | 600 ns | 0x01
+ * Mode 1 | 5.2 | 383 ns | 0x03
+ * Mode 2 | 8.3 | 240 ns | 0x07
+ * Mode 3 | 11.1 | 180 ns | 0x0f
+ * Mode 4 | 16.7 | 120 ns | 0x1f
+ *
+ * Alter PIO_MASK below according to table to set maximal PIO mode.
+ */
+#define PIO_MASK (0x1f)
+
+/*
+ * Struct containing private information about device.
+ */
+struct at32_ide_info {
+ unsigned int irq;
+ struct resource res_ide;
+ struct resource res_alt;
+ void __iomem *ide_addr;
+ void __iomem *alt_addr;
+ unsigned int cs;
+ struct smc_config smc;
+};
+
+/*
+ * Setup SMC for the given ATA timing.
+ */
+static int pata_at32_setup_timing(struct device *dev,
+ struct at32_ide_info *info,
+ const struct ata_timing *ata)
+{
+ struct smc_config *smc = &info->smc;
+ struct smc_timing timing;
+
+ int active;
+ int recover;
+
+ memset(&timing, 0, sizeof(struct smc_timing));
+
+ /* Total cycle time */
+ timing.read_cycle = ata->cyc8b;
+
+ /* DIOR <= CFIOR timings */
+ timing.nrd_setup = ata->setup;
+ timing.nrd_pulse = ata->act8b;
+ timing.nrd_recover = ata->rec8b;
+
+ /* Convert nanosecond timing to clock cycles */
+ smc_set_timing(smc, &timing);
+
+	/* Add one extra cycle setup due to signal ringing */
+ smc->nrd_setup = smc->nrd_setup + 1;
+
+ active = smc->nrd_setup + smc->nrd_pulse;
+ recover = smc->read_cycle - active;
+
+ /* Need at least two cycles recovery */
+ if (recover < 2)
+ smc->read_cycle = active + 2;
+
+ /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
+ smc->ncs_read_setup = 1;
+ smc->ncs_read_pulse = smc->read_cycle - 2;
+
+ /* Write timings same as read timings */
+ smc->write_cycle = smc->read_cycle;
+ smc->nwe_setup = smc->nrd_setup;
+ smc->nwe_pulse = smc->nrd_pulse;
+ smc->ncs_write_setup = smc->ncs_read_setup;
+ smc->ncs_write_pulse = smc->ncs_read_pulse;
+
+ /* Do some debugging output of ATA and SMC timings */
+ dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n",
+ ata->cyc8b, ata->setup, ata->act8b, ata->rec8b);
+
+ dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n",
+ smc->read_cycle, smc->nrd_setup, smc->nrd_pulse,
+ smc->ncs_read_setup, smc->ncs_read_pulse);
+
+ /* Finally, configure the SMC */
+ return smc_set_configuration(info->cs, smc);
+}
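+
+/*
+ * A rough example of the conversion above, assuming a 66 MHz SMC clock
+ * (about 15 ns per cycle): a PIO0 access with cyc8b = 600 ns and
+ * act8b = 290 ns converts to read_cycle = 40 and nrd_pulse = 20
+ * cycles; even after the extra setup cycle the recovery time stays
+ * well above the two-cycle floor, so read_cycle is left as-is.
+ */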
+
+/*
+ * Procedures for libATA.
+ */
+static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_timing timing;
+ struct at32_ide_info *info = ap->host->private_data;
+
+ int ret;
+
+ /* Compute ATA timing */
+ ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
+ if (ret) {
+ dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret);
+ return;
+ }
+
+ /* Setup SMC to ATA timing */
+ ret = pata_at32_setup_timing(ap->dev, info, &timing);
+ if (ret) {
+ dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret);
+ return;
+ }
+}
+
+static struct scsi_host_template at32_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations at32_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = pata_at32_set_piomode,
+};
+
+static int __init pata_at32_init_one(struct device *dev,
+ struct at32_ide_info *info)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+
+ host = ata_host_alloc(dev, 1);
+ if (!host)
+ return -ENOMEM;
+
+ ap = host->ports[0];
+
+ /* Setup ATA bindings */
+ ap->ops = &at32_port_ops;
+ ap->pio_mask = PIO_MASK;
+ ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS;
+
+ /*
+	 * Since all 8-bit taskfile transfers have to go on the lower
+ * byte of the data bus and there is a bug in the SMC that
+ * makes it impossible to alter the bus width during runtime,
+ * we need to hardwire the address signals as follows:
+ *
+ * A_IDE(2:0) <= A_EBI(3:1)
+ *
+ * This makes all addresses on the EBI even, thus all data
+ * will be on the lower byte of the data bus. All addresses
+ * used by libATA need to be altered according to this.
+ */
+ ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1);
+ ap->ioaddr.ctl_addr = info->alt_addr + (0x06 << 1);
+
+ ap->ioaddr.data_addr = info->ide_addr + (ATA_REG_DATA << 1);
+ ap->ioaddr.error_addr = info->ide_addr + (ATA_REG_ERR << 1);
+ ap->ioaddr.feature_addr = info->ide_addr + (ATA_REG_FEATURE << 1);
+ ap->ioaddr.nsect_addr = info->ide_addr + (ATA_REG_NSECT << 1);
+ ap->ioaddr.lbal_addr = info->ide_addr + (ATA_REG_LBAL << 1);
+ ap->ioaddr.lbam_addr = info->ide_addr + (ATA_REG_LBAM << 1);
+ ap->ioaddr.lbah_addr = info->ide_addr + (ATA_REG_LBAH << 1);
+ ap->ioaddr.device_addr = info->ide_addr + (ATA_REG_DEVICE << 1);
+ ap->ioaddr.status_addr = info->ide_addr + (ATA_REG_STATUS << 1);
+ ap->ioaddr.command_addr = info->ide_addr + (ATA_REG_CMD << 1);
+
+ /* Set info as private data of ATA host */
+ host->private_data = info;
+
+ /* Register ATA device and return */
+ return ata_host_activate(host, info->irq, ata_sff_interrupt,
+ IRQF_SHARED | IRQF_TRIGGER_RISING,
+ &at32_sht);
+}
+
+/*
+ * This function may come in handy for people analyzing their own
+ * EBI -> PATA adaptors.
+ */
+#ifdef DEBUG_BUS
+
+static void __init pata_at32_debug_bus(struct device *dev,
+ struct at32_ide_info *info)
+{
+ const int d1 = 0xff;
+ const int d2 = 0x00;
+
+ int i;
+
+ /* Write 8-bit values (registers) */
+ iowrite8(d1, info->alt_addr + (0x06 << 1));
+ iowrite8(d2, info->alt_addr + (0x06 << 1));
+
+ for (i = 0; i < 8; i++) {
+ iowrite8(d1, info->ide_addr + (i << 1));
+ iowrite8(d2, info->ide_addr + (i << 1));
+ }
+
+ /* Write 16 bit values (data) */
+ iowrite16(d1, info->ide_addr);
+ iowrite16(d1 << 8, info->ide_addr);
+
+ iowrite16(d1, info->ide_addr);
+ iowrite16(d1 << 8, info->ide_addr);
+}
+
+#endif
+
+static int __init pata_at32_probe(struct platform_device *pdev)
+{
+ const struct ata_timing initial_timing =
+ {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
+
+ struct device *dev = &pdev->dev;
+ struct at32_ide_info *info;
+ struct ide_platform_data *board = pdev->dev.platform_data;
+ struct resource *res;
+
+ int irq;
+ int ret;
+
+ if (!board)
+ return -ENXIO;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+	/* Retrieve IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* Setup struct containing private information */
+ info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->irq = irq;
+ info->cs = board->cs;
+
+ /* Request memory resources */
+ info->res_ide.start = res->start + CF_IDE_OFFSET;
+ info->res_ide.end = info->res_ide.start + CF_RES_SIZE - 1;
+ info->res_ide.name = "ide";
+ info->res_ide.flags = IORESOURCE_MEM;
+
+ ret = request_resource(res, &info->res_ide);
+ if (ret)
+ goto err_req_res_ide;
+
+ info->res_alt.start = res->start + CF_ALT_IDE_OFFSET;
+ info->res_alt.end = info->res_alt.start + CF_RES_SIZE - 1;
+ info->res_alt.name = "alt";
+ info->res_alt.flags = IORESOURCE_MEM;
+
+ ret = request_resource(res, &info->res_alt);
+ if (ret)
+ goto err_req_res_alt;
+
+ /* Setup non-timing elements of SMC */
+ info->smc.bus_width = 2; /* 16 bit data bus */
+ info->smc.nrd_controlled = 1; /* Sample data on rising edge of NRD */
+ info->smc.nwe_controlled = 0; /* Drive data on falling edge of NCS */
+ info->smc.nwait_mode = 3; /* NWAIT is in READY mode */
+ info->smc.byte_write = 0; /* Byte select access type */
+ info->smc.tdf_mode = 0; /* TDF optimization disabled */
+ info->smc.tdf_cycles = 0; /* No TDF wait cycles */
+
+ /* Setup SMC to ATA timing */
+ ret = pata_at32_setup_timing(dev, info, &initial_timing);
+ if (ret)
+ goto err_setup_timing;
+
+ /* Map ATA address space */
+ ret = -ENOMEM;
+ info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16);
+ info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16);
+ if (!info->ide_addr || !info->alt_addr)
+ goto err_ioremap;
+
+#ifdef DEBUG_BUS
+ pata_at32_debug_bus(dev, info);
+#endif
+
+ /* Setup and register ATA device */
+ ret = pata_at32_init_one(dev, info);
+ if (ret)
+ goto err_ata_device;
+
+ return 0;
+
+ err_ata_device:
+ err_ioremap:
+ err_setup_timing:
+ release_resource(&info->res_alt);
+ err_req_res_alt:
+ release_resource(&info->res_ide);
+ err_req_res_ide:
+ kfree(info);
+
+ return ret;
+}
+
+static int __exit pata_at32_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+ struct at32_ide_info *info;
+
+ if (!host)
+ return 0;
+
+ info = host->private_data;
+ ata_host_detach(host);
+
+ if (!info)
+ return 0;
+
+ release_resource(&info->res_ide);
+ release_resource(&info->res_alt);
+
+ kfree(info);
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:at32_ide");
+
+static struct platform_driver pata_at32_driver = {
+ .remove = __exit_p(pata_at32_remove),
+ .driver = {
+ .name = "at32_ide",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pata_at32_init(void)
+{
+ return platform_driver_probe(&pata_at32_driver, pata_at32_probe);
+}
+
+static void __exit pata_at32_exit(void)
+{
+ platform_driver_unregister(&pata_at32_driver);
+}
+
+module_init(pata_at32_init);
+module_exit(pata_at32_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver");
+MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
new file mode 100644
index 0000000..0e2cde8
--- /dev/null
+++ b/drivers/ata/pata_atiixp.c
@@ -0,0 +1,287 @@
+/*
+ * pata_atiixp.c - ATI PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ *
+ * Based on
+ *
+ * linux/drivers/ide/pci/atiixp.c Version 0.01-bart2 Feb. 26, 2004
+ *
+ * Copyright (C) 2003 ATI Inc. <hyu@ati.com>
+ * Copyright (C) 2004 Bartlomiej Zolnierkiewicz
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_atiixp"
+#define DRV_VERSION "0.4.6"
+
+enum {
+ ATIIXP_IDE_PIO_TIMING = 0x40,
+ ATIIXP_IDE_MWDMA_TIMING = 0x44,
+ ATIIXP_IDE_PIO_CONTROL = 0x48,
+ ATIIXP_IDE_PIO_MODE = 0x4a,
+ ATIIXP_IDE_UDMA_CONTROL = 0x54,
+ ATIIXP_IDE_UDMA_MODE = 0x56
+};
+
+static int atiixp_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ static const struct pci_bits atiixp_enable_bits[] = {
+ { 0x48, 1, 0x01, 0x00 },
+ { 0x48, 1, 0x08, 0x00 }
+ };
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+static int atiixp_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 udma;
+
+	/* Hack from drivers/ide/pci. Really we want to know how to do the
+	   raw detection, not play follow-the-BIOS-mode guesswork */
+ pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
+ if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
+ return ATA_CBL_PATA80;
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * atiixp_set_pio_timing - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called by both the pio and dma setup functions to set the controller
+ * timings for PIO transfers. We must load both the mode number and
+ * timing values into the controller.
+ */
+
+static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+ static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int dn = 2 * ap->port_no + adev->devno;
+
+ /* Check this is correct - the order is odd in both drivers */
+ int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
+ u16 pio_mode_data, pio_timing_data;
+
+ pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
+ pio_mode_data &= ~(0x7 << (4 * dn));
+ pio_mode_data |= pio << (4 * dn);
+ pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
+
+ pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
+ pio_timing_data &= ~(0xFF << timing_shift);
+ pio_timing_data |= (pio_timings[pio] << timing_shift);
+ pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
+}
+
+/**
+ * atiixp_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the PIO mode setup. We use a shared helper for this
+ * as the DMA setup must also adjust the PIO timing information.
+ */
+
+static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
+ * atiixp_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the DMA mode setup. We use timing tables for most
+ * modes but must tune an appropriate PIO mode to match.
+ */
+
+static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+	static u8 mwdma_timings[3] = { 0x77, 0x21, 0x20 };
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int dma = adev->dma_mode;
+ int dn = 2 * ap->port_no + adev->devno;
+ int wanted_pio;
+
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ u16 udma_mode_data;
+
+ dma -= XFER_UDMA_0;
+
+ pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
+ udma_mode_data &= ~(0x7 << (4 * dn));
+ udma_mode_data |= dma << (4 * dn);
+ pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
+ } else {
+ u16 mwdma_timing_data;
+ /* Check this is correct - the order is odd in both drivers */
+ int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
+
+ dma -= XFER_MW_DMA_0;
+
+ pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data);
+ mwdma_timing_data &= ~(0xFF << timing_shift);
+ mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
+ pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data);
+ }
+ /*
+ * We must now look at the PIO mode situation. We may need to
+ * adjust the PIO mode to keep the timings acceptable
+ */
+ if (adev->dma_mode >= XFER_MW_DMA_2)
+ wanted_pio = 4;
+ else if (adev->dma_mode == XFER_MW_DMA_1)
+ wanted_pio = 3;
+ else if (adev->dma_mode == XFER_MW_DMA_0)
+ wanted_pio = 0;
+ else BUG();
+
+ if (adev->pio_mode != wanted_pio)
+ atiixp_set_pio_timing(ap, adev, wanted_pio);
+}
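+
+/*
+ * For example, a drive running MWDMA2 is paired with the PIO4 command
+ * timing byte 0x20 from pio_timings[], while MWDMA0 falls back to the
+ * slow PIO0 value 0x5D; UDMA mode numbers compare above XFER_MW_DMA_2,
+ * so UDMA devices also get PIO4 timing.
+ */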
+
+/**
+ * atiixp_bmdma_start - DMA start callback
+ * @qc: Command in progress
+ *
+ * When DMA begins we need to ensure that the UDMA control
+ * register for the channel is correctly set.
+ *
+ * Note: The host lock held by the libata layer protects
+ * us from two channels both trying to set DMA bits at once
+ */
+
+static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int dn = (2 * ap->port_no) + adev->devno;
+ u16 tmp16;
+
+ pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+ if (ata_using_udma(adev))
+ tmp16 |= (1 << dn);
+ else
+ tmp16 &= ~(1 << dn);
+ pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+ ata_bmdma_start(qc);
+}
+
+/**
+ * atiixp_dma_stop - DMA stop callback
+ * @qc: Command in progress
+ *
+ * DMA has completed. Clear the UDMA flag as the next operations will
+ * be PIO ones not UDMA data transfer.
+ *
+ * Note: The host lock held by the libata layer protects
+ * us from two channels both trying to set DMA bits at once
+ */
+
+static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int dn = (2 * ap->port_no) + qc->dev->devno;
+ u16 tmp16;
+
+ pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+ tmp16 &= ~(1 << dn);
+ pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+ ata_bmdma_stop(qc);
+}
+
+static struct scsi_host_template atiixp_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+};
+
+static struct ata_port_operations atiixp_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .qc_prep = ata_sff_dumb_qc_prep,
+ .bmdma_start = atiixp_bmdma_start,
+ .bmdma_stop = atiixp_bmdma_stop,
+
+ .cable_detect = atiixp_cable_detect,
+ .set_piomode = atiixp_set_piomode,
+ .set_dmamode = atiixp_set_dmamode,
+ .prereset = atiixp_pre_reset,
+};
+
+static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x06, /* No MWDMA0 support */
+ .udma_mask = 0x3F,
+ .port_ops = &atiixp_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ return ata_pci_sff_init_one(dev, ppi, &atiixp_sht, NULL);
+}
+
+static const struct pci_device_id atiixp[] = {
+ { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
+ { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
+ { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
+ { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
+ { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
+
+ { },
+};
+
+static struct pci_driver atiixp_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = atiixp,
+ .probe = atiixp_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .resume = ata_pci_device_resume,
+ .suspend = ata_pci_device_suspend,
+#endif
+};
+
+static int __init atiixp_init(void)
+{
+ return pci_register_driver(&atiixp_pci_driver);
+}
+
+
+static void __exit atiixp_exit(void)
+{
+ pci_unregister_driver(&atiixp_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, atiixp);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(atiixp_init);
+module_exit(atiixp_exit);
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
new file mode 100644
index 0000000..1266924
--- /dev/null
+++ b/drivers/ata/pata_bf54x.c
@@ -0,0 +1,1743 @@
+/*
+ * File: drivers/ata/pata_bf54x.c
+ * Author: Sonic Zhang <sonic.zhang@analog.com>
+ *
+ * Created:
+ * Description: PATA Driver for blackfin 54x
+ *
+ * Modified:
+ * Copyright 2007 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <asm/dma.h>
+#include <asm/gpio.h>
+#include <asm/portmux.h>
+
+#define DRV_NAME "pata-bf54x"
+#define DRV_VERSION "0.9"
+
+#define ATA_REG_CTRL 0x0E
+#define ATA_REG_ALTSTATUS ATA_REG_CTRL
+
+/* These are the offset of the controller's registers */
+#define ATAPI_OFFSET_CONTROL 0x00
+#define ATAPI_OFFSET_STATUS 0x04
+#define ATAPI_OFFSET_DEV_ADDR 0x08
+#define ATAPI_OFFSET_DEV_TXBUF 0x0c
+#define ATAPI_OFFSET_DEV_RXBUF 0x10
+#define ATAPI_OFFSET_INT_MASK 0x14
+#define ATAPI_OFFSET_INT_STATUS 0x18
+#define ATAPI_OFFSET_XFER_LEN 0x1c
+#define ATAPI_OFFSET_LINE_STATUS 0x20
+#define ATAPI_OFFSET_SM_STATE 0x24
+#define ATAPI_OFFSET_TERMINATE 0x28
+#define ATAPI_OFFSET_PIO_TFRCNT 0x2c
+#define ATAPI_OFFSET_DMA_TFRCNT 0x30
+#define ATAPI_OFFSET_UMAIN_TFRCNT 0x34
+#define ATAPI_OFFSET_UDMAOUT_TFRCNT 0x38
+#define ATAPI_OFFSET_REG_TIM_0 0x40
+#define ATAPI_OFFSET_PIO_TIM_0 0x44
+#define ATAPI_OFFSET_PIO_TIM_1 0x48
+#define ATAPI_OFFSET_MULTI_TIM_0 0x50
+#define ATAPI_OFFSET_MULTI_TIM_1 0x54
+#define ATAPI_OFFSET_MULTI_TIM_2 0x58
+#define ATAPI_OFFSET_ULTRA_TIM_0 0x60
+#define ATAPI_OFFSET_ULTRA_TIM_1 0x64
+#define ATAPI_OFFSET_ULTRA_TIM_2 0x68
+#define ATAPI_OFFSET_ULTRA_TIM_3 0x6c
+
+
+#define ATAPI_GET_CONTROL(base)\
+ bfin_read16(base + ATAPI_OFFSET_CONTROL)
+#define ATAPI_SET_CONTROL(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_CONTROL, val)
+#define ATAPI_GET_STATUS(base)\
+ bfin_read16(base + ATAPI_OFFSET_STATUS)
+#define ATAPI_GET_DEV_ADDR(base)\
+ bfin_read16(base + ATAPI_OFFSET_DEV_ADDR)
+#define ATAPI_SET_DEV_ADDR(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_DEV_ADDR, val)
+#define ATAPI_GET_DEV_TXBUF(base)\
+ bfin_read16(base + ATAPI_OFFSET_DEV_TXBUF)
+#define ATAPI_SET_DEV_TXBUF(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_DEV_TXBUF, val)
+#define ATAPI_GET_DEV_RXBUF(base)\
+ bfin_read16(base + ATAPI_OFFSET_DEV_RXBUF)
+#define ATAPI_SET_DEV_RXBUF(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_DEV_RXBUF, val)
+#define ATAPI_GET_INT_MASK(base)\
+ bfin_read16(base + ATAPI_OFFSET_INT_MASK)
+#define ATAPI_SET_INT_MASK(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_INT_MASK, val)
+#define ATAPI_GET_INT_STATUS(base)\
+ bfin_read16(base + ATAPI_OFFSET_INT_STATUS)
+#define ATAPI_SET_INT_STATUS(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_INT_STATUS, val)
+#define ATAPI_GET_XFER_LEN(base)\
+ bfin_read16(base + ATAPI_OFFSET_XFER_LEN)
+#define ATAPI_SET_XFER_LEN(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_XFER_LEN, val)
+#define ATAPI_GET_LINE_STATUS(base)\
+ bfin_read16(base + ATAPI_OFFSET_LINE_STATUS)
+#define ATAPI_GET_SM_STATE(base)\
+ bfin_read16(base + ATAPI_OFFSET_SM_STATE)
+#define ATAPI_GET_TERMINATE(base)\
+ bfin_read16(base + ATAPI_OFFSET_TERMINATE)
+#define ATAPI_SET_TERMINATE(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_TERMINATE, val)
+#define ATAPI_GET_PIO_TFRCNT(base)\
+ bfin_read16(base + ATAPI_OFFSET_PIO_TFRCNT)
+#define ATAPI_GET_DMA_TFRCNT(base)\
+ bfin_read16(base + ATAPI_OFFSET_DMA_TFRCNT)
+#define ATAPI_GET_UMAIN_TFRCNT(base)\
+ bfin_read16(base + ATAPI_OFFSET_UMAIN_TFRCNT)
+#define ATAPI_GET_UDMAOUT_TFRCNT(base)\
+ bfin_read16(base + ATAPI_OFFSET_UDMAOUT_TFRCNT)
+#define ATAPI_GET_REG_TIM_0(base)\
+ bfin_read16(base + ATAPI_OFFSET_REG_TIM_0)
+#define ATAPI_SET_REG_TIM_0(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_REG_TIM_0, val)
+#define ATAPI_GET_PIO_TIM_0(base)\
+ bfin_read16(base + ATAPI_OFFSET_PIO_TIM_0)
+#define ATAPI_SET_PIO_TIM_0(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_PIO_TIM_0, val)
+#define ATAPI_GET_PIO_TIM_1(base)\
+ bfin_read16(base + ATAPI_OFFSET_PIO_TIM_1)
+#define ATAPI_SET_PIO_TIM_1(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_PIO_TIM_1, val)
+#define ATAPI_GET_MULTI_TIM_0(base)\
+ bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_0)
+#define ATAPI_SET_MULTI_TIM_0(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_0, val)
+#define ATAPI_GET_MULTI_TIM_1(base)\
+ bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_1)
+#define ATAPI_SET_MULTI_TIM_1(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_1, val)
+#define ATAPI_GET_MULTI_TIM_2(base)\
+ bfin_read16(base + ATAPI_OFFSET_MULTI_TIM_2)
+#define ATAPI_SET_MULTI_TIM_2(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_MULTI_TIM_2, val)
+#define ATAPI_GET_ULTRA_TIM_0(base)\
+ bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_0)
+#define ATAPI_SET_ULTRA_TIM_0(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_0, val)
+#define ATAPI_GET_ULTRA_TIM_1(base)\
+ bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_1)
+#define ATAPI_SET_ULTRA_TIM_1(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_1, val)
+#define ATAPI_GET_ULTRA_TIM_2(base)\
+ bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_2)
+#define ATAPI_SET_ULTRA_TIM_2(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_2, val)
+#define ATAPI_GET_ULTRA_TIM_3(base)\
+ bfin_read16(base + ATAPI_OFFSET_ULTRA_TIM_3)
+#define ATAPI_SET_ULTRA_TIM_3(base, val)\
+ bfin_write16(base + ATAPI_OFFSET_ULTRA_TIM_3, val)
+
+/**
+ * PIO Mode - Frequency compatibility
+ */
+/* mode: 0 1 2 3 4 */
+static const u32 pio_fsclk[] =
+{ 33333333, 33333333, 33333333, 33333333, 33333333 };
+
+/**
+ * MDMA Mode - Frequency compatibility
+ */
+/* mode: 0 1 2 */
+static const u32 mdma_fsclk[] = { 33333333, 33333333, 33333333 };
+
+/**
+ * UDMA Mode - Frequency compatibility
+ *
+ * UDMA5 - 100 MB/s - SCLK = 133 MHz
+ * UDMA4 - 66 MB/s - SCLK >= 80 MHz
+ * UDMA3 - 44.4 MB/s - SCLK >= 50 MHz
+ * UDMA2 - 33 MB/s - SCLK >= 40 MHz
+ */
+/* mode: 0 1 2 3 4 5 */
+static const u32 udma_fsclk[] =
+{ 33333333, 33333333, 40000000, 50000000, 80000000, 133333333 };
+
+/**
+ * Register transfer timing table
+ */
+/* mode: 0 1 2 3 4 */
+/* Cycle Time */
+static const u32 reg_t0min[] = { 600, 383, 330, 180, 120 };
+/* DIOR/DIOW to end cycle */
+static const u32 reg_t2min[] = { 290, 290, 290, 70, 25 };
+/* DIOR/DIOW asserted pulse width */
+static const u32 reg_teocmin[] = { 290, 290, 290, 80, 70 };
+
+/**
+ * PIO timing table
+ */
+/* mode: 0 1 2 3 4 */
+/* Cycle Time */
+static const u32 pio_t0min[] = { 600, 383, 240, 180, 120 };
+/* Address valid to DIOR/DIORW */
+static const u32 pio_t1min[] = { 70, 50, 30, 30, 25 };
+/* DIOR/DIOW to end cycle */
+static const u32 pio_t2min[] = { 165, 125, 100, 80, 70 };
+/* DIOR/DIOW asserted pulse width */
+static const u32 pio_teocmin[] = { 165, 125, 100, 70, 25 };
+/* DIOW data hold */
+static const u32 pio_t4min[] = { 30, 20, 15, 10, 10 };
+
+/* ******************************************************************
+ * Multiword DMA timing table
+ * ******************************************************************
+ */
+/* mode: 0 1 2 */
+/* Cycle Time */
+static const u32 mdma_t0min[] = { 480, 150, 120 };
+/* DIOR/DIOW asserted pulse width */
+static const u32 mdma_tdmin[] = { 215, 80, 70 };
+/* DMACK to read data released */
+static const u32 mdma_thmin[] = { 20, 15, 10 };
+/* DIOR/DIOW to DMACK hold */
+static const u32 mdma_tjmin[] = { 20, 5, 5 };
+/* DIOR negated pulse width */
+static const u32 mdma_tkrmin[] = { 50, 50, 25 };
+/* DIOR negated pulse width */
+static const u32 mdma_tkwmin[] = { 215, 50, 25 };
+/* CS[1:0] valid to DIOR/DIOW */
+static const u32 mdma_tmmin[] = { 50, 30, 25 };
+/* DMACK to read data released */
+static const u32 mdma_tzmax[] = { 20, 25, 25 };
+
+/**
+ * Ultra DMA timing table
+ */
+/* mode: 0 1 2 3 4 5 */
+static const u32 udma_tcycmin[] = { 112, 73, 54, 39, 25, 17 };
+static const u32 udma_tdvsmin[] = { 70, 48, 31, 20, 7, 5 };
+static const u32 udma_tenvmax[] = { 70, 70, 70, 55, 55, 50 };
+static const u32 udma_trpmin[] = { 160, 125, 100, 100, 100, 85 };
+static const u32 udma_tmin[] = { 5, 5, 5, 5, 3, 3 };
+
+
+static const u32 udma_tmlimin = 20;
+static const u32 udma_tzahmin = 20;
+static const u32 udma_tenvmin = 20;
+static const u32 udma_tackmin = 20;
+static const u32 udma_tssmin = 50;
+
+/**
+ *
+ * Function: num_clocks_min
+ *
+ * Description:
+ * calculate number of SCLK cycles to meet minimum timing
+ */
+static unsigned short num_clocks_min(unsigned long tmin,
+ unsigned long fsclk)
+{
+	unsigned long tmp;
+ unsigned short result;
+
+ tmp = tmin * (fsclk/1000/1000) / 1000;
+ result = (unsigned short)tmp;
+ if ((tmp*1000*1000) < (tmin*(fsclk/1000))) {
+ result++;
+ }
+
+ return result;
+}
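+
+/*
+ * num_clocks_min() is a ceiling division: e.g. tmin = 290 ns at
+ * fsclk = 133 MHz gives 290 * 133 / 1000 = 38 with a remainder, and
+ * the follow-up comparison bumps the result to 39 SCLK cycles, so a
+ * minimum timing constraint is never rounded down.
+ */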
+
+/**
+ * bfin_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device we are configuring
+ *
+ * Set PIO mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void bfin_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ int mode = adev->pio_mode - XFER_PIO_0;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ unsigned int fsclk = get_sclk();
+ unsigned short teoc_reg, t2_reg, teoc_pio;
+ unsigned short t4_reg, t2_pio, t1_reg;
+ unsigned short n0, n6, t6min = 5;
+
+	/* the most restrictive timing values are t6 and tc, the DIOW data
+	 * hold. If one SCLK pulse is longer than this minimum value then
+	 * register transfers cannot be supported at this frequency.
+ */
+ n6 = num_clocks_min(t6min, fsclk);
+ if (mode >= 0 && mode <= 4 && n6 >= 1) {
+ dev_dbg(adev->link->ap->dev, "set piomode: mode=%d, fsclk=%ud\n", mode, fsclk);
+ /* calculate the timing values for register transfers. */
+ while (mode > 0 && pio_fsclk[mode] > fsclk)
+ mode--;
+
+ /* DIOR/DIOW to end cycle time */
+ t2_reg = num_clocks_min(reg_t2min[mode], fsclk);
+ /* DIOR/DIOW asserted pulse width */
+ teoc_reg = num_clocks_min(reg_teocmin[mode], fsclk);
+ /* Cycle Time */
+ n0 = num_clocks_min(reg_t0min[mode], fsclk);
+
+		/* increase t2 until we meet the minimum cycle length */
+ if (t2_reg + teoc_reg < n0)
+ t2_reg = n0 - teoc_reg;
+
+ /* calculate the timing values for pio transfers. */
+
+ /* DIOR/DIOW to end cycle time */
+ t2_pio = num_clocks_min(pio_t2min[mode], fsclk);
+ /* DIOR/DIOW asserted pulse width */
+ teoc_pio = num_clocks_min(pio_teocmin[mode], fsclk);
+ /* Cycle Time */
+ n0 = num_clocks_min(pio_t0min[mode], fsclk);
+
+		/* increase t2 until we meet the minimum cycle length */
+ if (t2_pio + teoc_pio < n0)
+ t2_pio = n0 - teoc_pio;
+
+ /* Address valid to DIOR/DIORW */
+ t1_reg = num_clocks_min(pio_t1min[mode], fsclk);
+
+ /* DIOW data hold */
+ t4_reg = num_clocks_min(pio_t4min[mode], fsclk);
+
+ ATAPI_SET_REG_TIM_0(base, (teoc_reg<<8 | t2_reg));
+ ATAPI_SET_PIO_TIM_0(base, (t4_reg<<12 | t2_pio<<4 | t1_reg));
+ ATAPI_SET_PIO_TIM_1(base, teoc_pio);
+ if (mode > 2) {
+ ATAPI_SET_CONTROL(base,
+ ATAPI_GET_CONTROL(base) | IORDY_EN);
+ } else {
+ ATAPI_SET_CONTROL(base,
+ ATAPI_GET_CONTROL(base) & ~IORDY_EN);
+ }
+
+ /* Disable host ATAPI PIO interrupts */
+ ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
+ & ~(PIO_DONE_MASK | HOST_TERM_XFER_MASK));
+ SSYNC();
+ }
+}
+
+/**
+ * bfin_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device we are configuring
+ *
+ * Set UDMA or MWDMA mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ int mode;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ unsigned long fsclk = get_sclk();
+ unsigned short tenv, tack, tcyc_tdvs, tdvs, tmli, tss, trp, tzah;
+ unsigned short tm, td, tkr, tkw, teoc, th;
+ unsigned short n0, nf, tfmin = 5;
+ unsigned short nmin, tcyc;
+
+ mode = adev->dma_mode - XFER_UDMA_0;
+ if (mode >= 0 && mode <= 5) {
+ dev_dbg(adev->link->ap->dev, "set udmamode: mode=%d\n", mode);
+		/* the most restrictive timing values are t6 and tc,
+		 * the DIOW data hold. If one SCLK pulse is longer
+ * than this minimum value then register
+ * transfers cannot be supported at this frequency.
+ */
+ while (mode > 0 && udma_fsclk[mode] > fsclk)
+ mode--;
+
+ nmin = num_clocks_min(udma_tmin[mode], fsclk);
+ if (nmin >= 1) {
+ /* calculate the timing values for Ultra DMA. */
+ tdvs = num_clocks_min(udma_tdvsmin[mode], fsclk);
+ tcyc = num_clocks_min(udma_tcycmin[mode], fsclk);
+ tcyc_tdvs = 2;
+
+			/* increase tcyc - tdvs (tcyc_tdvs) until we meet
+ * the minimum cycle length
+ */
+ if (tdvs + tcyc_tdvs < tcyc)
+ tcyc_tdvs = tcyc - tdvs;
+
+			/* Now assign the values required for the timing
+ * registers
+ */
+ if (tcyc_tdvs < 2)
+ tcyc_tdvs = 2;
+
+ if (tdvs < 2)
+ tdvs = 2;
+
+ tack = num_clocks_min(udma_tackmin, fsclk);
+ tss = num_clocks_min(udma_tssmin, fsclk);
+ tmli = num_clocks_min(udma_tmlimin, fsclk);
+ tzah = num_clocks_min(udma_tzahmin, fsclk);
+ trp = num_clocks_min(udma_trpmin[mode], fsclk);
+ tenv = num_clocks_min(udma_tenvmin, fsclk);
+ if (tenv <= udma_tenvmax[mode]) {
+ ATAPI_SET_ULTRA_TIM_0(base, (tenv<<8 | tack));
+ ATAPI_SET_ULTRA_TIM_1(base,
+ (tcyc_tdvs<<8 | tdvs));
+ ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
+ ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
+
+				/* Enable host ATAPI Ultra DMA interrupts */
+ ATAPI_SET_INT_MASK(base,
+ ATAPI_GET_INT_MASK(base)
+ | UDMAIN_DONE_MASK
+ | UDMAOUT_DONE_MASK
+ | UDMAIN_TERM_MASK
+ | UDMAOUT_TERM_MASK);
+ }
+ }
+ }
+
+ mode = adev->dma_mode - XFER_MW_DMA_0;
+ if (mode >= 0 && mode <= 2) {
+ dev_dbg(adev->link->ap->dev, "set mdmamode: mode=%d\n", mode);
+ /* the most restrictive timing value is tf, the DMACK to
+ * read data released. If one SCLK pulse is longer than
+ * this maximum value then the MDMA mode
+ * cannot be supported at this frequency.
+ */
+ while (mode > 0 && mdma_fsclk[mode] > fsclk)
+ mode--;
+
+ nf = num_clocks_min(tfmin, fsclk);
+ if (nf >= 1) {
+ /* calculate the timing values for Multi-word DMA. */
+
+ /* DIOR/DIOW asserted pulse width */
+ td = num_clocks_min(mdma_tdmin[mode], fsclk);
+
+			/* DIOW negated pulse width */
+ tkw = num_clocks_min(mdma_tkwmin[mode], fsclk);
+
+ /* Cycle Time */
+ n0 = num_clocks_min(mdma_t0min[mode], fsclk);
+
+			/* increase tk until we meet the minimum cycle length */
+ if (tkw + td < n0)
+ tkw = n0 - td;
+
+ /* DIOR negated pulse width - read */
+ tkr = num_clocks_min(mdma_tkrmin[mode], fsclk);
+			/* CS[1:0] valid to DIOR/DIOW */
+ tm = num_clocks_min(mdma_tmmin[mode], fsclk);
+ /* DIOR/DIOW to DMACK hold */
+ teoc = num_clocks_min(mdma_tjmin[mode], fsclk);
+ /* DIOW Data hold */
+ th = num_clocks_min(mdma_thmin[mode], fsclk);
+
+ ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
+ ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
+ ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
+
+ /* Enable host ATAPI Multi DMA interrupts */
+ ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
+ | MULTI_DONE_MASK | MULTI_TERM_MASK);
+ SSYNC();
+ }
+ }
+ return;
+}
+
+/**
+ *
+ * Function: wait_complete
+ *
+ * Description: Waits for an interrupt from the device
+ *
+ */
+static inline void wait_complete(void __iomem *base, unsigned short mask)
+{
+ unsigned short status;
+ unsigned int i = 0;
+
+#define PATA_BF54X_WAIT_TIMEOUT 10000
+
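+	/* Bounded busy-wait: the loop below polls the masked interrupt
+	 * status for up to PATA_BF54X_WAIT_TIMEOUT register reads (a
+	 * read count, not a wall-clock timeout) and then acknowledges
+	 * the status bits either way.
+	 */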
+ for (i = 0; i < PATA_BF54X_WAIT_TIMEOUT; i++) {
+ status = ATAPI_GET_INT_STATUS(base) & mask;
+ if (status)
+ break;
+ }
+
+ ATAPI_SET_INT_STATUS(base, mask);
+}
+
+/**
+ *
+ * Function: write_atapi_register
+ *
+ * Description: Writes to an ATA Device Register
+ *
+ */
+
+static void write_atapi_register(void __iomem *base,
+ unsigned long ata_reg, unsigned short value)
+{
+ /* Program the ATA_DEV_TXBUF register with write data (to be
+ * written into the device).
+ */
+ ATAPI_SET_DEV_TXBUF(base, value);
+
+ /* Program the ATA_DEV_ADDR register with address of the
+ * device register (0x01 to 0x0F).
+ */
+ ATAPI_SET_DEV_ADDR(base, ata_reg);
+
+ /* Program the ATA_CTRL register with dir set to write (1)
+ */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
+
+ /* ensure PIO DMA is not set */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
+
+ /* and start the transfer */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
+
+ /* Wait for the interrupt to indicate the end of the transfer.
+	 * (We need to wait on and clear the ATA_DEV_INT interrupt status)
+ */
+ wait_complete(base, PIO_DONE_INT);
+}
+
+/**
+ *
+ * Function: read_atapi_register
+ *
+ * Description: Reads from an ATA Device Register
+ *
+ */
+
+static unsigned short read_atapi_register(void __iomem *base,
+ unsigned long ata_reg)
+{
+ /* Program the ATA_DEV_ADDR register with address of the
+ * device register (0x01 to 0x0F).
+ */
+ ATAPI_SET_DEV_ADDR(base, ata_reg);
+
+	/* Program the ATA_CTRL register with dir set to read (0)
+	 */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
+
+ /* ensure PIO DMA is not set */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
+
+ /* and start the transfer */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
+
+ /* Wait for the interrupt to indicate the end of the transfer.
+ * (PIO_DONE interrupt is set and it doesn't seem to matter
+ * that we don't clear it)
+ */
+ wait_complete(base, PIO_DONE_INT);
+
+	/* Read the data received from the device out of the
+	 * ATA_DEV_RXBUF register.
+	 */
+ return ATAPI_GET_DEV_RXBUF(base);
+}
+
+/**
+ *
+ * Function: write_atapi_data
+ *
+ * Description: Writes PIO data words to the ATA device data register
+ *
+ */
+
+static void write_atapi_data(void __iomem *base,
+ int len, unsigned short *buf)
+{
+ int i;
+
+ /* Set transfer length to 1 */
+ ATAPI_SET_XFER_LEN(base, 1);
+
+ /* Program the ATA_DEV_ADDR register with address of the
+ * ATA_REG_DATA
+ */
+ ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
+
+ /* Program the ATA_CTRL register with dir set to write (1)
+ */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR));
+
+ /* ensure PIO DMA is not set */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
+
+ for (i = 0; i < len; i++) {
+ /* Program the ATA_DEV_TXBUF register with write data (to be
+ * written into the device).
+ */
+ ATAPI_SET_DEV_TXBUF(base, buf[i]);
+
+ /* and start the transfer */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
+
+ /* Wait for the interrupt to indicate the end of the transfer.
+		 * (We need to wait on and clear the ATA_DEV_INT
+ * interrupt status)
+ */
+ wait_complete(base, PIO_DONE_INT);
+ }
+}
+
+/**
+ *
+ * Function: read_atapi_data
+ *
+ * Description: Reads PIO data words from the ATA device data register
+ *
+ */
+
+static void read_atapi_data(void __iomem *base,
+ int len, unsigned short *buf)
+{
+ int i;
+
+ /* Set transfer length to 1 */
+ ATAPI_SET_XFER_LEN(base, 1);
+
+ /* Program the ATA_DEV_ADDR register with address of the
+ * ATA_REG_DATA
+ */
+ ATAPI_SET_DEV_ADDR(base, ATA_REG_DATA);
+
+	/* Program the ATA_CTRL register with dir set to read (0)
+	 */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR));
+
+ /* ensure PIO DMA is not set */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~PIO_USE_DMA));
+
+ for (i = 0; i < len; i++) {
+ /* and start the transfer */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | PIO_START));
+
+ /* Wait for the interrupt to indicate the end of the transfer.
+ * (PIO_DONE interrupt is set and it doesn't seem to matter
+ * that we don't clear it)
+ */
+ wait_complete(base, PIO_DONE_INT);
+
+		/* Read the data received from the device out of the
+		 * ATA_DEV_RXBUF register.
+		 */
+ buf[i] = ATAPI_GET_DEV_RXBUF(base);
+ }
+}
+
+/**
+ * bfin_tf_load - send taskfile registers to host controller
+ * @ap: Port to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Note: Original code is ata_sff_tf_load().
+ */
+
+static void bfin_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ write_atapi_register(base, ATA_REG_CTRL, tf->ctl);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (is_addr) {
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ write_atapi_register(base, ATA_REG_FEATURE,
+ tf->hob_feature);
+ write_atapi_register(base, ATA_REG_NSECT,
+ tf->hob_nsect);
+ write_atapi_register(base, ATA_REG_LBAL, tf->hob_lbal);
+ write_atapi_register(base, ATA_REG_LBAM, tf->hob_lbam);
+ write_atapi_register(base, ATA_REG_LBAH, tf->hob_lbah);
+ dev_dbg(ap->dev, "hob: feat 0x%X nsect 0x%X, lba 0x%X "
+ "0x%X 0x%X\n",
+ tf->hob_feature,
+ tf->hob_nsect,
+ tf->hob_lbal,
+ tf->hob_lbam,
+ tf->hob_lbah);
+ }
+
+ write_atapi_register(base, ATA_REG_FEATURE, tf->feature);
+ write_atapi_register(base, ATA_REG_NSECT, tf->nsect);
+ write_atapi_register(base, ATA_REG_LBAL, tf->lbal);
+ write_atapi_register(base, ATA_REG_LBAM, tf->lbam);
+ write_atapi_register(base, ATA_REG_LBAH, tf->lbah);
+ dev_dbg(ap->dev, "feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+ tf->feature,
+ tf->nsect,
+ tf->lbal,
+ tf->lbam,
+ tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ write_atapi_register(base, ATA_REG_DEVICE, tf->device);
+ dev_dbg(ap->dev, "device 0x%X\n", tf->device);
+ }
+
+ ata_wait_idle(ap);
+}
+
+/**
+ * bfin_check_status - Read device status reg & clear interrupt
+ * @ap: port where the device is
+ *
+ * Note: Original code is ata_check_status().
+ */
+
+static u8 bfin_check_status(struct ata_port *ap)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ return read_atapi_register(base, ATA_REG_STATUS);
+}
+
+/**
+ * bfin_tf_read - input device's ATA taskfile shadow registers
+ * @ap: Port from which input is read
+ * @tf: ATA taskfile register set for storing input
+ *
+ * Note: Original code is ata_sff_tf_read().
+ */
+
+static void bfin_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+ tf->command = bfin_check_status(ap);
+ tf->feature = read_atapi_register(base, ATA_REG_ERR);
+ tf->nsect = read_atapi_register(base, ATA_REG_NSECT);
+ tf->lbal = read_atapi_register(base, ATA_REG_LBAL);
+ tf->lbam = read_atapi_register(base, ATA_REG_LBAM);
+ tf->lbah = read_atapi_register(base, ATA_REG_LBAH);
+ tf->device = read_atapi_register(base, ATA_REG_DEVICE);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ write_atapi_register(base, ATA_REG_CTRL, tf->ctl | ATA_HOB);
+ tf->hob_feature = read_atapi_register(base, ATA_REG_ERR);
+ tf->hob_nsect = read_atapi_register(base, ATA_REG_NSECT);
+ tf->hob_lbal = read_atapi_register(base, ATA_REG_LBAL);
+ tf->hob_lbam = read_atapi_register(base, ATA_REG_LBAM);
+ tf->hob_lbah = read_atapi_register(base, ATA_REG_LBAH);
+ }
+}
+
+/**
+ * bfin_exec_command - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Note: Original code is ata_sff_exec_command().
+ */
+
+static void bfin_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ dev_dbg(ap->dev, "ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+ write_atapi_register(base, ATA_REG_CMD, tf->command);
+ ata_sff_pause(ap);
+}
+
+/**
+ * bfin_check_altstatus - Read device alternate status reg
+ * @ap: port where the device is
+ */
+
+static u8 bfin_check_altstatus(struct ata_port *ap)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ return read_atapi_register(base, ATA_REG_ALTSTATUS);
+}
+
+/**
+ * bfin_dev_select - Select device 0/1 on ATA bus
+ * @ap: ATA channel to manipulate
+ * @device: ATA device (numbered from zero) to select
+ *
+ * Note: Original code is ata_sff_dev_select().
+ */
+
+static void bfin_dev_select(struct ata_port *ap, unsigned int device)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ u8 tmp;
+
+ if (device == 0)
+ tmp = ATA_DEVICE_OBS;
+ else
+ tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+ write_atapi_register(base, ATA_REG_DEVICE, tmp);
+ ata_sff_pause(ap);
+}
+
+/**
+ * bfin_bmdma_setup - Set up IDE DMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_setup().
+ */
+
+static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ unsigned short config = WDSIZE_16;
+ struct scatterlist *sg;
+ unsigned int si;
+
+ dev_dbg(qc->ap->dev, "in atapi dma setup\n");
+ /* Program the ATA_CTRL register with dir */
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ /* fill the ATAPI DMA controller */
+ set_dma_config(CH_ATAPI_TX, config);
+ set_dma_x_modify(CH_ATAPI_TX, 2);
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
+ set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
+ }
+ } else {
+ config |= WNR;
+ /* fill the ATAPI DMA controller */
+ set_dma_config(CH_ATAPI_RX, config);
+ set_dma_x_modify(CH_ATAPI_RX, 2);
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
+ set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
+ }
+ }
+}
+
+/**
+ * bfin_bmdma_start - Start an IDE DMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_start().
+ */
+
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ struct scatterlist *sg;
+ unsigned int si;
+
+ dev_dbg(qc->ap->dev, "in atapi dma start\n");
+ if (!(ap->udma_mask || ap->mwdma_mask))
+ return;
+
+ /* start ATAPI DMA controller*/
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ /*
+ * On blackfin arch, uncacheable memory is not
+ * allocated with flag GFP_DMA. DMA buffer from
+		 * common kernel code should be flushed if WB
+ * data cache is enabled. Otherwise, this loop
+ * is an empty loop and optimized out.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ flush_dcache_range(sg_dma_address(sg),
+ sg_dma_address(sg) + sg_dma_len(sg));
+ }
+ enable_dma(CH_ATAPI_TX);
+ dev_dbg(qc->ap->dev, "enable udma write\n");
+
+ /* Send ATA DMA write command */
+ bfin_exec_command(ap, &qc->tf);
+
+ /* set ATA DMA write direction */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
+ | XFER_DIR));
+ } else {
+ enable_dma(CH_ATAPI_RX);
+ dev_dbg(qc->ap->dev, "enable udma read\n");
+
+ /* Send ATA DMA read command */
+ bfin_exec_command(ap, &qc->tf);
+
+ /* set ATA DMA read direction */
+ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
+ & ~XFER_DIR));
+ }
+
+ /* Reset all transfer count */
+ ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | TFRCNT_RST);
+
+	/* Set ATAPI state machine control in terminate sequence */
+ ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
+
+ /* Set transfer length to buffer len */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
+ }
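+	/* Note: with sg_tablesize set to SG_NONE in the sht below, the
+	 * qc carries a single segment, so this loop (and the per-sg
+	 * loops above) run exactly once and XFER_LEN ends up holding
+	 * the word count of that sole segment.
+	 */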
+
+ /* Enable ATA DMA operation*/
+ if (ap->udma_mask)
+ ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
+ | ULTRA_START);
+ else
+ ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
+ | MULTI_START);
+}
+
+/**
+ * bfin_bmdma_stop - Stop IDE DMA transfer
+ * @qc: Command we are ending DMA for
+ */
+
+static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scatterlist *sg;
+ unsigned int si;
+
+ dev_dbg(qc->ap->dev, "in atapi dma stop\n");
+ if (!(ap->udma_mask || ap->mwdma_mask))
+ return;
+
+ /* stop ATAPI DMA controller*/
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ disable_dma(CH_ATAPI_TX);
+ else {
+ disable_dma(CH_ATAPI_RX);
+ if (ap->hsm_task_state & HSM_ST_LAST) {
+ /*
+ * On blackfin arch, uncacheable memory is not
+ * allocated with flag GFP_DMA. DMA buffer from
+			 * common kernel code should be invalidated if
+ * data cache is enabled. Otherwise, this loop
+ * is an empty loop and optimized out.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ invalidate_dcache_range(
+ sg_dma_address(sg),
+ sg_dma_address(sg)
+ + sg_dma_len(sg));
+ }
+ }
+ }
+}
+
+/**
+ * bfin_devchk - PATA device presence detection
+ * @ap: ATA channel to examine
+ * @device: Device to examine (starting at zero)
+ *
+ * Note: Original code is ata_devchk().
+ */
+
+static unsigned int bfin_devchk(struct ata_port *ap,
+ unsigned int device)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ u8 nsect, lbal;
+
+ bfin_dev_select(ap, device);
+
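+	/* Classic ATA presence probe: write complementary patterns to
+	 * the sector count and LBA low registers, then read the last
+	 * pair back; a device that latches the writes is present.
+	 */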
+ write_atapi_register(base, ATA_REG_NSECT, 0x55);
+ write_atapi_register(base, ATA_REG_LBAL, 0xaa);
+
+ write_atapi_register(base, ATA_REG_NSECT, 0xaa);
+ write_atapi_register(base, ATA_REG_LBAL, 0x55);
+
+ write_atapi_register(base, ATA_REG_NSECT, 0x55);
+ write_atapi_register(base, ATA_REG_LBAL, 0xaa);
+
+ nsect = read_atapi_register(base, ATA_REG_NSECT);
+ lbal = read_atapi_register(base, ATA_REG_LBAL);
+
+ if ((nsect == 0x55) && (lbal == 0xaa))
+ return 1; /* we found a device */
+
+ return 0; /* nothing found */
+}
+
+/**
+ * bfin_bus_post_reset - PATA device post reset
+ * @ap: port whose devices were reset
+ * @devmask: mask of devices found during reset
+ *
+ * Note: Original code is ata_bus_post_reset().
+ */
+
+static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ unsigned int dev0 = devmask & (1 << 0);
+ unsigned int dev1 = devmask & (1 << 1);
+ unsigned long deadline;
+
+ /* if device 0 was found in ata_devchk, wait for its
+ * BSY bit to clear
+ */
+ if (dev0)
+ ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+ /* if device 1 was found in ata_devchk, wait for
+ * register access, then wait for BSY to clear
+ */
+ deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
+ while (dev1) {
+ u8 nsect, lbal;
+
+ bfin_dev_select(ap, 1);
+ nsect = read_atapi_register(base, ATA_REG_NSECT);
+ lbal = read_atapi_register(base, ATA_REG_LBAL);
+ if ((nsect == 1) && (lbal == 1))
+ break;
+ if (time_after(jiffies, deadline)) {
+ dev1 = 0;
+ break;
+ }
+ msleep(50); /* give drive a breather */
+ }
+ if (dev1)
+ ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+ /* is all this really necessary? */
+ bfin_dev_select(ap, 0);
+ if (dev1)
+ bfin_dev_select(ap, 1);
+ if (dev0)
+ bfin_dev_select(ap, 0);
+}
+
+/**
+ * bfin_bus_softreset - PATA device software reset
+ * @ap: port to reset
+ * @devmask: mask of devices expected on the port
+ *
+ * Note: Original code is ata_bus_softreset().
+ */
+
+static unsigned int bfin_bus_softreset(struct ata_port *ap,
+ unsigned int devmask)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+ /* software reset. causes dev0 to be selected */
+ write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+ udelay(20);
+ write_atapi_register(base, ATA_REG_CTRL, ap->ctl | ATA_SRST);
+ udelay(20);
+ write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+
+ /* spec mandates ">= 2ms" before checking status.
+ * We wait 150ms, because that was the magic delay used for
+ * ATAPI devices in Hale Landis's ATADRVR, for the period of time
+ * between when the ATA command register is written, and then
+ * status is checked. Because waiting for "a while" before
+ * checking status is fine, post SRST, we perform this magic
+ * delay here as well.
+ *
+	 * Old drivers/ide uses the 2ms rule and then waits for ready
+ */
+ msleep(150);
+
+ /* Before we perform post reset processing we want to see if
+ * the bus shows 0xFF because the odd clown forgets the D7
+ * pulldown resistor.
+ */
+ if (bfin_check_status(ap) == 0xFF)
+ return 0;
+
+ bfin_bus_post_reset(ap, devmask);
+
+ return 0;
+}
+
+/**
+ * bfin_softreset - reset host port via ATA SRST
+ * @link: ATA link to reset
+ * @classes: resulting classes of attached devices
+ * @deadline: deadline jiffies for the operation
+ *
+ * Note: Original code is ata_sff_softreset().
+ */
+
+static int bfin_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+ unsigned int devmask = 0, err_mask;
+ u8 err;
+
+ /* determine if device 0/1 are present */
+ if (bfin_devchk(ap, 0))
+ devmask |= (1 << 0);
+ if (slave_possible && bfin_devchk(ap, 1))
+ devmask |= (1 << 1);
+
+ /* select device 0 again */
+ bfin_dev_select(ap, 0);
+
+ /* issue bus reset */
+ err_mask = bfin_bus_softreset(ap, devmask);
+ if (err_mask) {
+ ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
+ err_mask);
+ return -EIO;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&ap->link.device[0],
+ devmask & (1 << 0), &err);
+ if (slave_possible && err != 0x81)
+ classes[1] = ata_sff_dev_classify(&ap->link.device[1],
+ devmask & (1 << 1), &err);
+
+ return 0;
+}
+
+/**
+ * bfin_bmdma_status - Read IDE DMA status
+ * @ap: Port associated with this ATA transaction.
+ */
+
+static unsigned char bfin_bmdma_status(struct ata_port *ap)
+{
+ unsigned char host_stat = 0;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ unsigned short int_status = ATAPI_GET_INT_STATUS(base);
+
+ if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
+ host_stat |= ATA_DMA_ACTIVE;
+ if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
+ ATAPI_DEV_INT))
+ host_stat |= ATA_DMA_INTR;
+ if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
+ host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;
+
+ dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
+
+ return host_stat;
+}
+
+/**
+ * bfin_data_xfer - Transfer data by PIO
+ * @dev: device for this I/O
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @rw: read/write
+ *
+ * Note: Original code is ata_sff_data_xfer().
+ */
+
+static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ unsigned int words = buflen >> 1;
+ unsigned short *buf16 = (u16 *)buf;
+
+ /* Transfer multiple of 2 bytes */
+ if (rw == READ)
+ read_atapi_data(base, words, buf16);
+ else
+ write_atapi_data(base, words, buf16);
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ unsigned short align_buf[1] = { 0 };
+ unsigned char *trailing_buf = buf + buflen - 1;
+
+ if (rw == READ) {
+ read_atapi_data(base, 1, align_buf);
+ memcpy(trailing_buf, align_buf, 1);
+ } else {
+ memcpy(align_buf, trailing_buf, 1);
+ write_atapi_data(base, 1, align_buf);
+ }
+ words++;
+ }
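+	/* Example: an 11-byte buffer moves five 16-bit words plus one
+	 * padded word for the trailing byte, so the function reports 12
+	 * bytes transferred (buflen rounded up to an even count).
+	 */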
+
+ return words << 1;
+}
+
+/**
+ * bfin_irq_clear - Clear ATAPI interrupt.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Note: Original code is ata_sff_irq_clear().
+ */
+
+static void bfin_irq_clear(struct ata_port *ap)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+ dev_dbg(ap->dev, "in atapi irq clear\n");
+ ATAPI_SET_INT_STATUS(base, ATAPI_GET_INT_STATUS(base)|ATAPI_DEV_INT
+ | MULTI_DONE_INT | UDMAIN_DONE_INT | UDMAOUT_DONE_INT
+ | MULTI_TERM_INT | UDMAIN_TERM_INT | UDMAOUT_TERM_INT);
+}
+
+/**
+ * bfin_irq_on - Enable interrupts on a port.
+ * @ap: Port on which interrupts are enabled.
+ *
+ * Note: Original code is ata_sff_irq_on().
+ */
+
+static unsigned char bfin_irq_on(struct ata_port *ap)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ u8 tmp;
+
+ dev_dbg(ap->dev, "in atapi irq on\n");
+ ap->ctl &= ~ATA_NIEN;
+ ap->last_ctl = ap->ctl;
+
+ write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+ tmp = ata_wait_idle(ap);
+
+ bfin_irq_clear(ap);
+
+ return tmp;
+}
+
+/**
+ * bfin_freeze - Freeze DMA controller port
+ * @ap: port to freeze
+ *
+ * Note: Original code is ata_sff_freeze().
+ */
+
+static void bfin_freeze(struct ata_port *ap)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+ dev_dbg(ap->dev, "in atapi dma freeze\n");
+ ap->ctl |= ATA_NIEN;
+ ap->last_ctl = ap->ctl;
+
+ write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+
+ /* Under certain circumstances, some controllers raise IRQ on
+ * ATA_NIEN manipulation. Also, many controllers fail to mask
+ * previously pending IRQ on ATA_NIEN assertion. Clear it.
+ */
+ ap->ops->sff_check_status(ap);
+
+ bfin_irq_clear(ap);
+}
+
+/**
+ * bfin_thaw - Thaw DMA controller port
+ * @ap: port to thaw
+ *
+ * Note: Original code is ata_sff_thaw().
+ */
+
+static void bfin_thaw(struct ata_port *ap)
+{
+ dev_dbg(ap->dev, "in atapi dma thaw\n");
+ bfin_check_status(ap);
+ bfin_irq_on(ap);
+}
+
+/**
+ * bfin_postreset - standard postreset callback
+ * @link: the target ata_link
+ * @classes: classes of attached devices
+ *
+ * Note: Original code is ata_sff_postreset().
+ */
+
+static void bfin_postreset(struct ata_link *link, unsigned int *classes)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+ /* re-enable interrupts */
+ bfin_irq_on(ap);
+
+ /* is double-select really necessary? */
+ if (classes[0] != ATA_DEV_NONE)
+ bfin_dev_select(ap, 1);
+ if (classes[1] != ATA_DEV_NONE)
+ bfin_dev_select(ap, 0);
+
+ /* bail out if no device is present */
+ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+ return;
+ }
+
+ /* set up device control */
+ write_atapi_register(base, ATA_REG_CTRL, ap->ctl);
+}
+
+static void bfin_port_stop(struct ata_port *ap)
+{
+ dev_dbg(ap->dev, "in atapi port stop\n");
+ if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
+ free_dma(CH_ATAPI_RX);
+ free_dma(CH_ATAPI_TX);
+ }
+}
+
+static int bfin_port_start(struct ata_port *ap)
+{
+ dev_dbg(ap->dev, "in atapi port start\n");
+ if (!(ap->udma_mask || ap->mwdma_mask))
+ return 0;
+
+ if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
+ if (request_dma(CH_ATAPI_TX,
+ "BFIN ATAPI TX DMA") >= 0)
+ return 0;
+
+ free_dma(CH_ATAPI_RX);
+ }
+
+ ap->udma_mask = 0;
+ ap->mwdma_mask = 0;
+	dev_err(ap->dev, "Unable to request ATAPI DMA!"
+		" Continuing in PIO mode.\n");
+
+ return 0;
+}
+
+static unsigned int bfin_ata_host_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u8 status, host_stat = 0;
+
+ VPRINTK("ata%u: protocol %d task_state %d\n",
+ ap->print_id, qc->tf.protocol, ap->hsm_task_state);
+
+ /* Check whether we are expecting interrupt in this state */
+ switch (ap->hsm_task_state) {
+ case HSM_ST_FIRST:
+ /* Some pre-ATAPI-4 devices assert INTRQ
+ * at this state when ready to receive CDB.
+ */
+
+		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
+ * The flag was turned on only for atapi devices.
+ * No need to check is_atapi_taskfile(&qc->tf) again.
+ */
+ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ goto idle_irq;
+ break;
+ case HSM_ST_LAST:
+ if (qc->tf.protocol == ATA_PROT_DMA ||
+ qc->tf.protocol == ATAPI_PROT_DMA) {
+ /* check status of DMA engine */
+ host_stat = ap->ops->bmdma_status(ap);
+ VPRINTK("ata%u: host_stat 0x%X\n",
+ ap->print_id, host_stat);
+
+ /* if it's not our irq... */
+ if (!(host_stat & ATA_DMA_INTR))
+ goto idle_irq;
+
+ /* before we do anything else, clear DMA-Start bit */
+ ap->ops->bmdma_stop(qc);
+
+ if (unlikely(host_stat & ATA_DMA_ERR)) {
+				/* error when transferring data to/from memory */
+ qc->err_mask |= AC_ERR_HOST_BUS;
+ ap->hsm_task_state = HSM_ST_ERR;
+ }
+ }
+ break;
+ case HSM_ST:
+ break;
+ default:
+ goto idle_irq;
+ }
+
+ /* check altstatus */
+ status = ap->ops->sff_check_altstatus(ap);
+ if (status & ATA_BUSY)
+ goto busy_ata;
+
+ /* check main status, clearing INTRQ */
+ status = ap->ops->sff_check_status(ap);
+ if (unlikely(status & ATA_BUSY))
+ goto busy_ata;
+
+ /* ack bmdma irq events */
+ ap->ops->sff_irq_clear(ap);
+
+ ata_sff_hsm_move(ap, qc, status, 0);
+
+ if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+ qc->tf.protocol == ATAPI_PROT_DMA))
+ ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
+busy_ata:
+ return 1; /* irq handled */
+
+idle_irq:
+ ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+ if ((ap->stats.idle_irq % 1000) == 0) {
+ ap->ops->irq_ack(ap, 0); /* debug trap */
+ ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+ return 1;
+ }
+#endif
+ return 0; /* irq not handled */
+}
+
+static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ unsigned int i;
+ unsigned int handled = 0;
+ unsigned long flags;
+
+ /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
+ spin_lock_irqsave(&host->lock, flags);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ ap = host->ports[i];
+ if (ap &&
+ !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
+ (qc->flags & ATA_QCFLAG_ACTIVE))
+ handled |= bfin_ata_host_intr(ap, qc);
+ }
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+
+static struct scsi_host_template bfin_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = SG_NONE,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations bfin_pata_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .set_piomode = bfin_set_piomode,
+ .set_dmamode = bfin_set_dmamode,
+
+ .sff_tf_load = bfin_tf_load,
+ .sff_tf_read = bfin_tf_read,
+ .sff_exec_command = bfin_exec_command,
+ .sff_check_status = bfin_check_status,
+ .sff_check_altstatus = bfin_check_altstatus,
+ .sff_dev_select = bfin_dev_select,
+
+ .bmdma_setup = bfin_bmdma_setup,
+ .bmdma_start = bfin_bmdma_start,
+ .bmdma_stop = bfin_bmdma_stop,
+ .bmdma_status = bfin_bmdma_status,
+ .sff_data_xfer = bfin_data_xfer,
+
+ .qc_prep = ata_noop_qc_prep,
+
+ .freeze = bfin_freeze,
+ .thaw = bfin_thaw,
+ .softreset = bfin_softreset,
+ .postreset = bfin_postreset,
+
+ .sff_irq_clear = bfin_irq_clear,
+ .sff_irq_on = bfin_irq_on,
+
+ .port_start = bfin_port_start,
+ .port_stop = bfin_port_stop,
+};
+
+static struct ata_port_info bfin_port_info[] = {
+ {
+ .flags = ATA_FLAG_SLAVE_POSS
+ | ATA_FLAG_MMIO
+ | ATA_FLAG_NO_LEGACY,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0,
+ .udma_mask = 0,
+ .port_ops = &bfin_pata_ops,
+ },
+};
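+/* Note: the DMA masks above start out zero; bfin_atapi_init() below
+ * fills in mwdma_mask or udma_mask from the bfin_atapi_mode module
+ * parameter, and bfin_atapi_probe() then trims udma_mask down to what
+ * the system clock can sustain.
+ */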
+
+/**
+ * bfin_reset_controller - initialize BF54x ATAPI controller.
+ */
+
+static int bfin_reset_controller(struct ata_host *host)
+{
+ void __iomem *base = (void __iomem *)host->ports[0]->ioaddr.ctl_addr;
+ int count;
+ unsigned short status;
+
+ /* Disable all ATAPI interrupts */
+ ATAPI_SET_INT_MASK(base, 0);
+ SSYNC();
+
+	/* Assert the RESET signal for at least 25us */
+ ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | DEV_RST);
+ udelay(30);
+
+	/* Negate the RESET signal for 2ms */
+ ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) & ~DEV_RST);
+ msleep(2);
+
+ /* Wait on Busy flag to clear */
+ count = 10000000;
+ do {
+ status = read_atapi_register(base, ATA_REG_STATUS);
+ } while (--count && (status & ATA_BUSY));
+
+ /* Enable only ATAPI Device interrupt */
+ ATAPI_SET_INT_MASK(base, 1);
+ SSYNC();
+
+ return (!count);
+}
+
+/**
+ * atapi_io_port - define atapi peripheral port pins.
+ */
+static unsigned short atapi_io_port[] = {
+ P_ATAPI_RESET,
+ P_ATAPI_DIOR,
+ P_ATAPI_DIOW,
+ P_ATAPI_CS0,
+ P_ATAPI_CS1,
+ P_ATAPI_DMACK,
+ P_ATAPI_DMARQ,
+ P_ATAPI_INTRQ,
+ P_ATAPI_IORDY,
+ 0
+};
+
+/**
+ * bfin_atapi_probe - attach a bfin atapi interface
+ * @pdev: platform device
+ *
+ * Register a bfin atapi interface.
+ *
+ * Platform devices are expected to contain 2 resources per port:
+ *
+ * - Register base (IORESOURCE_MEM)
+ * - IRQ (IORESOURCE_IRQ)
+ *
+ */
+static int __devinit bfin_atapi_probe(struct platform_device *pdev)
+{
+ int board_idx = 0;
+ struct resource *res;
+ struct ata_host *host;
+ unsigned int fsclk = get_sclk();
+ int udma_mode = 5;
+ const struct ata_port_info *ppi[] =
+ { &bfin_port_info[board_idx], NULL };
+
+ /*
+ * Simple resource validation ..
+ */
+ if (unlikely(pdev->num_resources != 2)) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the register base first
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+
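+	/*
+	 * Trim UDMA modes the system clock cannot honour: shift the mask
+	 * right until the clock required by the highest remaining mode,
+	 * udma_fsclk[udma_mode], fits within fsclk.
+	 */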
+ while (bfin_port_info[board_idx].udma_mask > 0 &&
+ udma_fsclk[udma_mode] > fsclk) {
+ udma_mode--;
+ bfin_port_info[board_idx].udma_mask >>= 1;
+ }
+
+ /*
+ * Now that that's out of the way, wire up the port..
+ */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
+ if (!host)
+ return -ENOMEM;
+
+ host->ports[0]->ioaddr.ctl_addr = (void *)res->start;
+
+ if (peripheral_request_list(atapi_io_port, "atapi-io-port")) {
+		dev_err(&pdev->dev, "Requesting peripherals failed\n");
+ return -EFAULT;
+ }
+
+ if (bfin_reset_controller(host)) {
+ peripheral_free_list(atapi_io_port);
+		dev_err(&pdev->dev, "Failed to reset ATAPI device\n");
+ return -EFAULT;
+ }
+
+ if (ata_host_activate(host, platform_get_irq(pdev, 0),
+ bfin_ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
+ peripheral_free_list(atapi_io_port);
+		dev_err(&pdev->dev, "Failed to attach ATAPI device\n");
+ return -ENODEV;
+ }
+
+ dev_set_drvdata(&pdev->dev, host);
+
+ return 0;
+}
+
+/**
+ * bfin_atapi_remove - unplug a bfin atapi interface
+ * @pdev: platform device
+ *
+ * A bfin atapi device has been unplugged. Perform the needed
+ * cleanup. Also called on module unload for any active devices.
+ */
+static int __devexit bfin_atapi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ peripheral_free_list(atapi_io_port);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ if (host)
+ return ata_host_suspend(host, state);
+ else
+ return 0;
+}
+
+static int bfin_atapi_resume(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ if (host) {
+ ret = bfin_reset_controller(host);
+ if (ret) {
+ printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ return ret;
+ }
+ ata_host_resume(host);
+ }
+
+ return 0;
+}
+#else
+#define bfin_atapi_suspend NULL
+#define bfin_atapi_resume NULL
+#endif
+
+static struct platform_driver bfin_atapi_driver = {
+ .probe = bfin_atapi_probe,
+ .remove = __devexit_p(bfin_atapi_remove),
+ .suspend = bfin_atapi_suspend,
+ .resume = bfin_atapi_resume,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+#define ATAPI_MODE_SIZE 10
+static char bfin_atapi_mode[ATAPI_MODE_SIZE];
+
+static int __init bfin_atapi_init(void)
+{
+ pr_info("register bfin atapi driver\n");
+
+	switch (bfin_atapi_mode[0]) {
+ case 'p':
+ case 'P':
+ break;
+ case 'm':
+ case 'M':
+ bfin_port_info[0].mwdma_mask = ATA_MWDMA2;
+ break;
+ default:
+ bfin_port_info[0].udma_mask = ATA_UDMA5;
+	}
+
+ return platform_driver_register(&bfin_atapi_driver);
+}
+
+static void __exit bfin_atapi_exit(void)
+{
+ platform_driver_unregister(&bfin_atapi_driver);
+}
+
+module_init(bfin_atapi_init);
+module_exit(bfin_atapi_exit);
+/*
+ * ATAPI mode:
+ * pio/PIO
+ * udma/UDMA (default)
+ * mwdma/MWDMA
+ */
+module_param_string(bfin_atapi_mode, bfin_atapi_mode, ATAPI_MODE_SIZE, 0);
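+/*
+ * Usage sketch (module name assumed to match DRV_NAME, e.g.
+ * "pata-bf54x"):
+ *   insmod pata-bf54x.ko bfin_atapi_mode=mwdma
+ * or via the kernel command line:
+ *   pata-bf54x.bfin_atapi_mode=pio
+ */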
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("PATA driver for blackfin 54x ATAPI controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
new file mode 100644
index 0000000..34a3942
--- /dev/null
+++ b/drivers/ata/pata_cmd640.c
@@ -0,0 +1,277 @@
+/*
+ * pata_cmd640.c - CMD640 PCI PATA for new ATA layer
+ * (C) 2007 Red Hat Inc
+ *
+ * Based upon
+ * linux/drivers/ide/pci/cmd640.c Version 1.02 Sep 01, 1996
+ *
+ * Copyright (C) 1995-1996 Linus Torvalds & authors (see driver)
+ *
+ * This drives only the PCI version of the controller. If you have a
+ * VLB one then we have enough docs to support it but you can write
+ * your own code.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cmd640"
+#define DRV_VERSION "0.0.5"
+
+struct cmd640_reg {
+ int last;
+ u8 reg58[ATA_MAX_DEVICES];
+};
+
+enum {
+ CFR = 0x50,
+ CNTRL = 0x51,
+ CMDTIM = 0x52,
+ ARTIM0 = 0x53,
+ DRWTIM0 = 0x54,
+ ARTIM23 = 0x57,
+ DRWTIM23 = 0x58,
+ BRST = 0x59
+};
+
+/**
+ * cmd640_set_piomode - set initial PIO mode data
+ * @ap: ATA port
+ * @adev: ATA device
+ *
+ * Called to do the PIO mode setup.
+ */
+
+static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct cmd640_reg *timing = ap->private_data;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct ata_timing t;
+ const unsigned long T = 1000000 / 33;
+ const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
+ u8 reg;
+ int arttim = ARTIM0 + 2 * adev->devno;
+ struct ata_device *pair = ata_dev_pair(adev);
+
+ if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
+ printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+ return;
+ }
+
+	/* The second channel has shared timings. The setup timing is
+	   messy to switch on the fly, so merge with the pair and use
+	   the worst case */
+ if (ap->port_no && pair) {
+ struct ata_timing p;
+ ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
+ ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP);
+ }
+
+ /* Make the timings fit */
+ if (t.recover > 16) {
+ t.active += t.recover - 16;
+ t.recover = 16;
+ }
+ if (t.active > 16)
+ t.active = 16;
+
+ /* Now convert the clocks into values we can actually stuff into
+ the chip */
+
+ if (t.recover > 1)
+ t.recover--; /* 640B only */
+ else
+ t.recover = 15;
+
+ if (t.setup > 4)
+ t.setup = 0xC0;
+ else
+ t.setup = setup_data[t.setup];
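+	/* Worked example: t.recover = 20 clocks was folded above into
+	 * t.active += 4, t.recover = 16; it is then encoded as 15 here,
+	 * and on channel 0 an active count of 16 is stored below as the
+	 * nibble value 0.
+	 */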
+
+ if (ap->port_no == 0) {
+ t.active &= 0x0F; /* 0 = 16 */
+
+ /* Load setup timing */
+ pci_read_config_byte(pdev, arttim, &reg);
+ reg &= 0x3F;
+ reg |= t.setup;
+ pci_write_config_byte(pdev, arttim, reg);
+
+ /* Load active/recovery */
+ pci_write_config_byte(pdev, arttim + 1, (t.active << 4) | t.recover);
+ } else {
+ /* Save the shared timings for channel, they will be loaded
+ by qc_issue. Reloading the setup time is expensive so we
+ keep a merged one loaded */
+ pci_read_config_byte(pdev, ARTIM23, &reg);
+ reg &= 0x3F;
+ reg |= t.setup;
+ pci_write_config_byte(pdev, ARTIM23, reg);
+ timing->reg58[adev->devno] = (t.active << 4) | t.recover;
+ }
+}
+
+
+/**
+ * cmd640_qc_issue - command issue hook
+ * @qc: Command to be issued
+ *
+ * Channel 1 has shared timings. We must reprogram the
+ * clock each time we switch between its two drives.
+ */
+
+static unsigned int cmd640_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct cmd640_reg *timing = ap->private_data;
+
+ if (ap->port_no != 0 && adev->devno != timing->last) {
+ pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]);
+ timing->last = adev->devno;
+ }
+ return ata_sff_qc_issue(qc);
+}
+
+/**
+ * cmd640_port_start - port setup
+ * @ap: ATA port being set up
+ *
+ * The CMD640 needs to maintain private data structures so we
+ * allocate space here.
+ */
+
+static int cmd640_port_start(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct cmd640_reg *timing;
+
+ int ret = ata_sff_port_start(ap);
+ if (ret < 0)
+ return ret;
+
+ timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL);
+ if (timing == NULL)
+ return -ENOMEM;
+ timing->last = -1; /* Force a load */
+ ap->private_data = timing;
+ return ret;
+}
+
+static struct scsi_host_template cmd640_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations cmd640_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ /* In theory xfer_noirq is not needed once we kill the prefetcher */
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .qc_issue = cmd640_qc_issue,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = cmd640_set_piomode,
+ .port_start = cmd640_port_start,
+};
+
+static void cmd640_hardware_init(struct pci_dev *pdev)
+{
+ u8 r;
+ u8 ctrl;
+
+ /* CMD640 detected, commiserations */
+ pci_write_config_byte(pdev, 0x5B, 0x00);
+ /* Get version info */
+ pci_read_config_byte(pdev, CFR, &r);
+ /* PIO0 command cycles */
+ pci_write_config_byte(pdev, CMDTIM, 0);
+ /* 512 byte bursts (sector) */
+ pci_write_config_byte(pdev, BRST, 0x40);
+ /*
+ * A reporter a long time ago
+ * Had problems with the data fifo
+ * So don't run the risk
+ * Of putting crap on the disk
+	 * For it's better just to go slow
+ */
+ /* Do channel 0 */
+ pci_read_config_byte(pdev, CNTRL, &ctrl);
+ pci_write_config_byte(pdev, CNTRL, ctrl | 0xC0);
+ /* Ditto for channel 1 */
+ pci_read_config_byte(pdev, ARTIM23, &ctrl);
+ ctrl |= 0x0C;
+ pci_write_config_byte(pdev, ARTIM23, ctrl);
+}
+
+static int cmd640_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .port_ops = &cmd640_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ cmd640_hardware_init(pdev);
+
+ return ata_pci_sff_init_one(pdev, ppi, &cmd640_sht, NULL);
+}
+
+#ifdef CONFIG_PM
+static int cmd640_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+ cmd640_hardware_init(pdev);
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id cmd640[] = {
+ { PCI_VDEVICE(CMD, 0x640), 0 },
+ { },
+};
+
+static struct pci_driver cmd640_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cmd640,
+ .probe = cmd640_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = cmd640_reinit_one,
+#endif
+};
+
+static int __init cmd640_init(void)
+{
+ return pci_register_driver(&cmd640_pci_driver);
+}
+
+static void __exit cmd640_exit(void)
+{
+ pci_unregister_driver(&cmd640_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for CMD640 PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cmd640);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cmd640_init);
+module_exit(cmd640_exit);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
new file mode 100644
index 0000000..3167d8f
--- /dev/null
+++ b/drivers/ata/pata_cmd64x.c
@@ -0,0 +1,440 @@
+/*
+ * pata_cmd64x.c - CMD64x PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Based upon
+ * linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002
+ *
+ * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
+ * Note, this driver is not used at all on other systems because
+ * there the "BIOS" has done all of the following already.
+ * Due to massive hardware bugs, UltraDMA is only supported
+ * on the 646U2 and not on the 646U.
+ *
+ * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ *
+ * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
+ *
+ * TODO
+ * Testing work
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cmd64x"
+#define DRV_VERSION "0.2.5"
+
+/*
+ * CMD64x specific registers definition.
+ */
+
+enum {
+ CFR = 0x50,
+ CFR_INTR_CH0 = 0x02,
+ CNTRL = 0x51,
+ CNTRL_DIS_RA0 = 0x40,
+ CNTRL_DIS_RA1 = 0x80,
+ CNTRL_ENA_2ND = 0x08,
+ CMDTIM = 0x52,
+ ARTTIM0 = 0x53,
+ DRWTIM0 = 0x54,
+ ARTTIM1 = 0x55,
+ DRWTIM1 = 0x56,
+ ARTTIM23 = 0x57,
+ ARTTIM23_DIS_RA2 = 0x04,
+ ARTTIM23_DIS_RA3 = 0x08,
+ ARTTIM23_INTR_CH1 = 0x10,
+ ARTTIM2 = 0x57,
+ ARTTIM3 = 0x57,
+ DRWTIM23 = 0x58,
+ DRWTIM2 = 0x58,
+ BRST = 0x59,
+ DRWTIM3 = 0x5b,
+ BMIDECR0 = 0x70,
+ MRDMODE = 0x71,
+ MRDMODE_INTR_CH0 = 0x04,
+ MRDMODE_INTR_CH1 = 0x08,
+ MRDMODE_BLK_CH0 = 0x10,
+ MRDMODE_BLK_CH1 = 0x20,
+ BMIDESR0 = 0x72,
+ UDIDETCR0 = 0x73,
+ DTPR0 = 0x74,
+ BMIDECR1 = 0x78,
+ BMIDECSR = 0x79,
+ BMIDESR1 = 0x7A,
+ UDIDETCR1 = 0x7B,
+ DTPR1 = 0x7C
+};
+
+static int cmd648_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 r;
+
+ /* Check cable detect bits */
+ pci_read_config_byte(pdev, BMIDECSR, &r);
+ if (r & (1 << ap->port_no))
+ return ATA_CBL_PATA80;
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * cmd64x_set_timing - set PIO and MWDMA timing
+ * @ap: ATA interface
+ * @adev: ATA device
+ * @mode: transfer mode to program
+ *
+ * Called to do the PIO and MWDMA mode setup.
+ */
+
+static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct ata_timing t;
+ const unsigned long T = 1000000 / 33;
+ const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
+
+ u8 reg;
+
+ /* Port layout is not logical so use a table */
+ const u8 arttim_port[2][2] = {
+ { ARTTIM0, ARTTIM1 },
+ { ARTTIM23, ARTTIM23 }
+ };
+ const u8 drwtim_port[2][2] = {
+ { DRWTIM0, DRWTIM1 },
+ { DRWTIM2, DRWTIM3 }
+ };
+
+ int arttim = arttim_port[ap->port_no][adev->devno];
+ int drwtim = drwtim_port[ap->port_no][adev->devno];
+
+	/* ata_timing_compute is smart and will produce timings for MWDMA
+	   that don't violate the drive's PIO capabilities. */
+ if (ata_timing_compute(adev, mode, &t, T, 0) < 0) {
+ printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+ return;
+ }
+ if (ap->port_no) {
+ /* Slave has shared address setup */
+ struct ata_device *pair = ata_dev_pair(adev);
+
+ if (pair) {
+ struct ata_timing tp;
+ ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
+ ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+ }
+ }
+
+ printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n",
+ t.active, t.recover, t.setup);
+ if (t.recover > 16) {
+ t.active += t.recover - 16;
+ t.recover = 16;
+ }
+ if (t.active > 16)
+ t.active = 16;
+
+ /* Now convert the clocks into values we can actually stuff into
+ the chip */
+
+ if (t.recover > 1)
+ t.recover--;
+ else
+ t.recover = 15;
+
+ if (t.setup > 4)
+ t.setup = 0xC0;
+ else
+ t.setup = setup_data[t.setup];
+
+ t.active &= 0x0F; /* 0 = 16 */
+
+ /* Load setup timing */
+ pci_read_config_byte(pdev, arttim, &reg);
+ reg &= 0x3F;
+ reg |= t.setup;
+ pci_write_config_byte(pdev, arttim, reg);
+
+ /* Load active/recovery */
+ pci_write_config_byte(pdev, drwtim, (t.active << 4) | t.recover);
+}
+
+/**
+ * cmd64x_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Used when configuring the device to set the PIO timings. All the
+ * actual work is done by the PIO/MWDMA setting helper
+ */
+
+static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ cmd64x_set_timing(ap, adev, adev->pio_mode);
+}
+
+/**
+ * cmd64x_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the DMA mode setup.
+ */
+
+static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u8 udma_data[] = {
+ 0x30, 0x20, 0x10, 0x20, 0x10, 0x00
+ };
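+	/* udma_data holds the two-bit UDMA cycle-time field for modes
+	 * 0-5, pre-positioned for drive 0 (bits 4-5 of UDIDETCR0/1);
+	 * the shift below moves it up two bits for drive 1.
+	 */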
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 regU, regD;
+
+ int pciU = UDIDETCR0 + 8 * ap->port_no;
+ int pciD = BMIDESR0 + 8 * ap->port_no;
+ int shift = 2 * adev->devno;
+
+ pci_read_config_byte(pdev, pciD, &regD);
+ pci_read_config_byte(pdev, pciU, &regU);
+
+ /* DMA bits off */
+ regD &= ~(0x20 << adev->devno);
+ /* DMA control bits */
+ regU &= ~(0x30 << shift);
+ /* DMA timing bits */
+ regU &= ~(0x05 << adev->devno);
+
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ /* Merge the timing value */
+ regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
+ /* Merge the control bits */
+ regU |= 1 << adev->devno; /* UDMA on */
+ if (adev->dma_mode > 2) /* 15nS timing */
+ regU |= 4 << adev->devno;
+ } else {
+ regU &= ~ (1 << adev->devno); /* UDMA off */
+ cmd64x_set_timing(ap, adev, adev->dma_mode);
+ }
+
+ regD |= 0x20 << adev->devno;
+
+ pci_write_config_byte(pdev, pciU, regU);
+ pci_write_config_byte(pdev, pciD, regD);
+}
+
+/**
+ * cmd648_bmdma_stop - DMA stop callback
+ * @qc: Command in progress
+ *
+ * DMA has completed.
+ */
+
+static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 dma_intr;
+ int dma_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
+ int dma_reg = ap->port_no ? ARTTIM2 : CFR;
+
+ ata_bmdma_stop(qc);
+
+ pci_read_config_byte(pdev, dma_reg, &dma_intr);
+ pci_write_config_byte(pdev, dma_reg, dma_intr | dma_mask);
+}
+
+/**
+ * cmd646r1_bmdma_stop - DMA stop callback
+ * @qc: Command in progress
+ *
+ * Stub for now while investigating the r1 quirk in the old driver.
+ */
+
+static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ ata_bmdma_stop(qc);
+}
+
+static struct scsi_host_template cmd64x_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations cmd64x_base_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .set_piomode = cmd64x_set_piomode,
+ .set_dmamode = cmd64x_set_dmamode,
+};
+
+static struct ata_port_operations cmd64x_port_ops = {
+ .inherits = &cmd64x_base_ops,
+ .cable_detect = ata_cable_40wire,
+};
+
+static struct ata_port_operations cmd646r1_port_ops = {
+ .inherits = &cmd64x_base_ops,
+ .bmdma_stop = cmd646r1_bmdma_stop,
+ .cable_detect = ata_cable_40wire,
+};
+
+static struct ata_port_operations cmd648_port_ops = {
+ .inherits = &cmd64x_base_ops,
+ .bmdma_stop = cmd648_bmdma_stop,
+ .cable_detect = cmd648_cable_detect,
+};
+
+static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ u32 class_rev;
+
+ static const struct ata_port_info cmd_info[6] = {
+ { /* CMD 643 - no UDMA */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &cmd64x_port_ops
+ },
+ { /* CMD 646 with broken UDMA */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &cmd64x_port_ops
+ },
+ { /* CMD 646 with working UDMA */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &cmd64x_port_ops
+ },
+ { /* CMD 646 rev 1 */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &cmd646r1_port_ops
+ },
+ { /* CMD 648 */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &cmd648_port_ops
+ },
+ { /* CMD 649 */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &cmd648_port_ops
+ }
+ };
+ const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
+ u8 mrdmode;
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xFF;
+
+ if (id->driver_data == 0) /* 643 */
+ ata_pci_bmdma_clear_simplex(pdev);
+
+ if (pdev->device == PCI_DEVICE_ID_CMD_646) {
+ /* Does UDMA work ? */
+ if (class_rev > 4)
+ ppi[0] = &cmd_info[2];
+ /* Early rev with other problems ? */
+ else if (class_rev == 1)
+ ppi[0] = &cmd_info[3];
+ }
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+ pci_read_config_byte(pdev, MRDMODE, &mrdmode);
+ mrdmode &= ~ 0x30; /* IRQ set up */
+ mrdmode |= 0x02; /* Memory read line enable */
+ pci_write_config_byte(pdev, MRDMODE, mrdmode);
+
+ /* Force PIO 0 here.. */
+
+ /* PPC specific fixup copied from old driver */
+#ifdef CONFIG_PPC
+ pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
+#endif
+
+ return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL);
+}
+
+#ifdef CONFIG_PM
+static int cmd64x_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ u8 mrdmode;
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+ pci_read_config_byte(pdev, MRDMODE, &mrdmode);
+ mrdmode &= ~ 0x30; /* IRQ set up */
+ mrdmode |= 0x02; /* Memory read line enable */
+ pci_write_config_byte(pdev, MRDMODE, mrdmode);
+#ifdef CONFIG_PPC
+ pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
+#endif
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id cmd64x[] = {
+ { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
+ { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
+ { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 4 },
+ { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 5 },
+
+ { },
+};
+
+static struct pci_driver cmd64x_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cmd64x,
+ .probe = cmd64x_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = cmd64x_reinit_one,
+#endif
+};
+
+static int __init cmd64x_init(void)
+{
+ return pci_register_driver(&cmd64x_pci_driver);
+}
+
+static void __exit cmd64x_exit(void)
+{
+ pci_unregister_driver(&cmd64x_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cmd64x);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cmd64x_init);
+module_exit(cmd64x_exit);
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
new file mode 100644
index 0000000..1186bcd
--- /dev/null
+++ b/drivers/ata/pata_cs5520.c
@@ -0,0 +1,361 @@
+/*
+ * IDE tuning and bus mastering support for the CS5510/CS5520
+ * chipsets
+ *
+ * The CS5510/CS5520 are slightly unusual devices. Unlike the
+ * typical IDE controllers they do bus mastering with the drive in
+ * PIO mode and smarter silicon.
+ *
+ * The practical upshot of this is that we must always tune the
+ * drive for the right PIO mode. We must also ignore all the blacklists
+ * and the drive bus mastering DMA information. Also, to confuse matters
+ * further, we can do DMA on PIO-only drives.
+ *
+ * DMA on the 5510 also requires we disable_hlt() during DMA on early
+ * revisions.
+ *
+ * *** This driver is strictly experimental ***
+ *
+ * (c) Copyright Red Hat Inc 2002
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Documentation:
+ * Not publicly available.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cs5520"
+#define DRV_VERSION "0.6.6"
+
+struct pio_clocks
+{
+ int address;
+ int assert;
+ int recovery;
+};
+
+static const struct pio_clocks cs5520_pio_clocks[]={
+ {3, 6, 11},
+ {2, 5, 6},
+ {1, 4, 3},
+ {1, 3, 2},
+ {1, 2, 1}
+};
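+/* These counts appear to be in 33MHz PCI clocks (~30ns each): PIO
+ * mode 0 above spends 3 + 6 + 11 = 20 clocks, roughly the 600ns ATA
+ * PIO-0 cycle time.
+ */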
+
+/**
+ * cs5520_set_timings - program PIO timings
+ * @ap: ATA port
+ * @adev: ATA device
+ *
+ * Program the PIO mode timings for the controller according to the pio
+ * clocking table.
+ */
+
+static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int slave = adev->devno;
+
+ pio -= XFER_PIO_0;
+
+ /* Channel command timing */
+ pci_write_config_byte(pdev, 0x62 + ap->port_no,
+ (cs5520_pio_clocks[pio].recovery << 4) |
+ (cs5520_pio_clocks[pio].assert));
+ /* FIXME: should these use address ? */
+ /* Read command timing */
+ pci_write_config_byte(pdev, 0x64 + 4*ap->port_no + slave,
+ (cs5520_pio_clocks[pio].recovery << 4) |
+ (cs5520_pio_clocks[pio].assert));
+ /* Write command timing */
+ pci_write_config_byte(pdev, 0x66 + 4*ap->port_no + slave,
+ (cs5520_pio_clocks[pio].recovery << 4) |
+ (cs5520_pio_clocks[pio].assert));
+}
+
+/**
+ * cs5520_enable_dma - turn on DMA bits
+ *
+ * Turn on the DMA bits for this disk. Needed because the BIOS probably
+ * has not done the work for us. Belongs in the core SATA code.
+ */
+
+static void cs5520_enable_dma(struct ata_port *ap, struct ata_device *adev)
+{
+ /* Set the DMA enable/disable flag */
+ u8 reg = ioread8(ap->ioaddr.bmdma_addr + 0x02);
+ reg |= 1<<(adev->devno + 5);
+ iowrite8(reg, ap->ioaddr.bmdma_addr + 0x02);
+}
+
+/**
+ * cs5520_set_dmamode - program DMA timings
+ * @ap: ATA port
+ * @adev: ATA device
+ *
+ * Program the DMA mode timings for the controller according to the pio
+ * clocking table. Note that this device sets the DMA timings to PIO
+ * mode values. This may seem bizarre but the 5520 architecture talks
+ * PIO mode to the disk and DMA mode to the controller so the underlying
+ * transfers are PIO timed.
+ */
+
+static void cs5520_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const int dma_xlate[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 };
+ cs5520_set_timings(ap, adev, dma_xlate[adev->dma_mode]);
+ cs5520_enable_dma(ap, adev);
+}
+
+/**
+ * cs5520_set_piomode - program PIO timings
+ * @ap: ATA port
+ * @adev: ATA device
+ *
+ * Program the PIO mode timings for the controller according to the pio
+ * clocking table. Because of the CS5520 architecture, pio_mode will
+ * equal dma_mode at least once DMA has been turned on and a DMA mode
+ * setter has run.
+ */
+
+static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ cs5520_set_timings(ap, adev, adev->pio_mode);
+}
+
+static struct scsi_host_template cs5520_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+};
+
+static struct ata_port_operations cs5520_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_prep = ata_sff_dumb_qc_prep,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = cs5520_set_piomode,
+ .set_dmamode = cs5520_set_dmamode,
+};
+
+static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const unsigned int cmd_port[] = { 0x1F0, 0x170 };
+ static const unsigned int ctl_port[] = { 0x3F6, 0x376 };
+ struct ata_port_info pi = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .port_ops = &cs5520_port_ops,
+ };
+ const struct ata_port_info *ppi[2];
+ u8 pcicfg;
+ void __iomem *iomap[5];
+ struct ata_host *host;
+ struct ata_ioports *ioaddr;
+ int i, rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* IDE port enable bits */
+ pci_read_config_byte(pdev, 0x60, &pcicfg);
+
+ /* Check if the ATA ports are enabled */
+ if ((pcicfg & 3) == 0)
+ return -ENODEV;
+
+ ppi[0] = ppi[1] = &ata_dummy_port_info;
+ if (pcicfg & 1)
+ ppi[0] = &pi;
+ if (pcicfg & 2)
+ ppi[1] = &pi;
+
+ if ((pcicfg & 0x40) == 0) {
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "DMA mode disabled. Enabling.\n");
+ pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
+ }
+
+ pi.mwdma_mask = id->driver_data;
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+ if (!host)
+ return -ENOMEM;
+
+ /* Perform set up for DMA */
+ if (pci_enable_device_io(pdev)) {
+ printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
+ return -ENODEV;
+ }
+
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+ printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
+ return -ENODEV;
+ }
+ if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+ printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
+ return -ENODEV;
+ }
+
+ /* Map IO ports and initialize host accordingly */
+ iomap[0] = devm_ioport_map(&pdev->dev, cmd_port[0], 8);
+ iomap[1] = devm_ioport_map(&pdev->dev, ctl_port[0], 1);
+ iomap[2] = devm_ioport_map(&pdev->dev, cmd_port[1], 8);
+ iomap[3] = devm_ioport_map(&pdev->dev, ctl_port[1], 1);
+ iomap[4] = pcim_iomap(pdev, 2, 0);
+
+ if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
+ return -ENOMEM;
+
+ ioaddr = &host->ports[0]->ioaddr;
+ ioaddr->cmd_addr = iomap[0];
+ ioaddr->ctl_addr = iomap[1];
+ ioaddr->altstatus_addr = iomap[1];
+ ioaddr->bmdma_addr = iomap[4];
+ ata_sff_std_ports(ioaddr);
+
+ ata_port_desc(host->ports[0],
+ "cmd 0x%x ctl 0x%x", cmd_port[0], ctl_port[0]);
+ ata_port_pbar_desc(host->ports[0], 4, 0, "bmdma");
+
+ ioaddr = &host->ports[1]->ioaddr;
+ ioaddr->cmd_addr = iomap[2];
+ ioaddr->ctl_addr = iomap[3];
+ ioaddr->altstatus_addr = iomap[3];
+ ioaddr->bmdma_addr = iomap[4] + 8;
+ ata_sff_std_ports(ioaddr);
+
+ ata_port_desc(host->ports[1],
+ "cmd 0x%x ctl 0x%x", cmd_port[1], ctl_port[1]);
+ ata_port_pbar_desc(host->ports[1], 4, 8, "bmdma");
+
+ /* activate the host */
+ pci_set_master(pdev);
+ rc = ata_host_start(host);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < 2; i++) {
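+ /* The chip routes the channels to the legacy IDE interrupts:
+ IRQ 14 for the primary and IRQ 15 for the secondary */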
+ static const int irq[] = { 14, 15 };
+ struct ata_port *ap = host->ports[i];
+
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
+ ata_sff_interrupt, 0, DRV_NAME, host);
+ if (rc)
+ return rc;
+
+ ata_port_desc(ap, "irq %d", irq[i]);
+ }
+
+ return ata_host_register(host, &cs5520_sht);
+}
+
+#ifdef CONFIG_PM
+/**
+ * cs5520_reinit_one - device resume
+ * @pdev: PCI device
+ *
+ * Do any reconfiguration work needed by a resume from RAM. We need
+ * to restore DMA mode support on BIOSes which disabled it.
+ */
+
+static int cs5520_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ u8 pcicfg;
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ pci_read_config_byte(pdev, 0x60, &pcicfg);
+ if ((pcicfg & 0x40) == 0)
+ pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
+
+ ata_host_resume(host);
+ return 0;
+}
+
+/**
+ * cs5520_pci_device_suspend - device suspend
+ * @pdev: PCI device
+ *
+ * We have to cut and paste bits from the standard method because
+ * the 5520 is a bit odd and not just a pure ATA device. As a result
+ * we must not disable it. The needed code is short and this avoids
+ * chip specific mess in the core code.
+ */
+
+static int cs5520_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc = 0;
+
+ rc = ata_host_suspend(host, mesg);
+ if (rc)
+ return rc;
+
+ pci_save_state(pdev);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/* For now keep DMA off. We can set it for all but A rev CS5510 once the
+ core ATA code can handle it */
+
+static const struct pci_device_id pata_cs5520[] = {
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
+
+ { },
+};
+
+static struct pci_driver cs5520_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = pata_cs5520,
+ .probe = cs5520_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = cs5520_pci_device_suspend,
+ .resume = cs5520_reinit_one,
+#endif
+};
+
+static int __init cs5520_init(void)
+{
+ return pci_register_driver(&cs5520_pci_driver);
+}
+
+static void __exit cs5520_exit(void)
+{
+ pci_unregister_driver(&cs5520_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pata_cs5520);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cs5520_init);
+module_exit(cs5520_exit);
+
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
new file mode 100644
index 0000000..bba4533
--- /dev/null
+++ b/drivers/ata/pata_cs5530.c
@@ -0,0 +1,383 @@
+/*
+ * pata-cs5530.c - CS5530 PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ *
+ * based upon cs5530.c by Mark Lord.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Loosely based on the piix & svwks drivers.
+ *
+ * Documentation:
+ * Available from AMD web site.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_cs5530"
+#define DRV_VERSION "0.7.4"
+
+static void __iomem *cs5530_port_base(struct ata_port *ap)
+{
+ unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr;
+
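+ /* The per-channel timing registers live at I/O base + 0x20 (port 0)
+ and + 0x30 (port 1); masking the low bits of the BMDMA address
+ recovers the base for either port */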
+ return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no);
+}
+
+/**
+ * cs5530_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Set our PIO requirements. This is fairly simple on the CS5530
+ * chips.
+ */
+
+static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const unsigned int cs5530_pio_timings[2][5] = {
+ {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
+ {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
+ };
+ void __iomem *base = cs5530_port_base(ap);
+ u32 tuning;
+ int format;
+
+ /* Find out which table to use */
+ tuning = ioread32(base + 0x04);
+ format = (tuning & 0x80000000UL) ? 1 : 0;
+
+ /* Now load the right timing register */
+ if (adev->devno)
+ base += 0x08;
+
+ iowrite32(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
+}
+
+/**
+ * cs5530_set_dmamode - DMA timing setup
+ * @ap: ATA interface
+ * @adev: Device being configured
+ *
+ * We cannot mix MWDMA and UDMA without reloading the timings on each
+ * switch between master and slave. We track the last DMA setup in
+ * order to minimise reloads.
+ */
+
+static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ void __iomem *base = cs5530_port_base(ap);
+ u32 tuning, timing = 0;
+ u8 reg;
+
+ /* Find out which table to use */
+ tuning = ioread32(base + 0x04);
+
+ switch (adev->dma_mode) {
+ case XFER_UDMA_0:
+ timing = 0x00921250;
+ break;
+ case XFER_UDMA_1:
+ timing = 0x00911140;
+ break;
+ case XFER_UDMA_2:
+ timing = 0x00911030;
+ break;
+ case XFER_MW_DMA_0:
+ timing = 0x00077771;
+ break;
+ case XFER_MW_DMA_1:
+ timing = 0x00012121;
+ break;
+ case XFER_MW_DMA_2:
+ timing = 0x00002020;
+ break;
+ default:
+ BUG();
+ }
+ /* Merge in the PIO format bit */
+ timing |= (tuning & 0x80000000UL);
+ if (adev->devno == 0) /* Master */
+ iowrite32(timing, base + 0x04);
+ else {
+ if (timing & 0x00100000)
+ tuning |= 0x00100000; /* UDMA for both */
+ else
+ tuning &= ~0x00100000; /* MWDMA for both */
+ iowrite32(tuning, base + 0x04);
+ iowrite32(timing, base + 0x0C);
+ }
+
+ /* Set the DMA capable bit in the BMDMA area */
+ reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ reg |= (1 << (5 + adev->devno));
+ iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+
+ /* Remember the last DMA setup we did */
+ ap->private_data = adev;
+}
+
+/**
+ * cs5530_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings if
+ * necessary. Specifically we have a problem that there is only
+ * one MWDMA/UDMA bit.
+ */
+
+static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct ata_device *prev = ap->private_data;
+
+ /* See if the DMA settings could be wrong */
+ if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
+ /* Maybe, but do the channels match MWDMA/UDMA ? */
+ if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
+ (ata_using_udma(prev) && !ata_using_udma(adev)))
+ /* Switch the mode bits */
+ cs5530_set_dmamode(ap, adev);
+ }
+
+ return ata_sff_qc_issue(qc);
+}
+
+static struct scsi_host_template cs5530_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+};
+
+static struct ata_port_operations cs5530_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_issue = cs5530_qc_issue,
+
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = cs5530_set_piomode,
+ .set_dmamode = cs5530_set_dmamode,
+};
+
+static const struct dmi_system_id palmax_dmi_table[] = {
+ {
+ .ident = "Palmax PD1100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Cyrix"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Caddis"),
+ },
+ },
+ { }
+};
+
+static int cs5530_is_palmax(void)
+{
+ if (dmi_check_system(palmax_dmi_table)) {
+ printk(KERN_INFO "Palmax PD1100: Disabling DMA on docking port.\n");
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * cs5530_init_chip - Chipset init
+ *
+ * Perform the chip initialisation work that is shared between both
+ * setup and resume paths
+ */
+
+static int cs5530_init_chip(void)
+{
+ struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;
+
+ while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
+ switch (dev->device) {
+ case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
+ master_0 = pci_dev_get(dev);
+ break;
+ case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
+ cs5530_0 = pci_dev_get(dev);
+ break;
+ }
+ }
+ if (!master_0) {
+ printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
+ goto fail_put;
+ }
+ if (!cs5530_0) {
+ printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
+ goto fail_put;
+ }
+
+ pci_set_master(cs5530_0);
+ pci_try_set_mwi(cs5530_0);
+
+ /*
+ * Set PCI CacheLineSize to 16-bytes:
+ * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
+ *
+ * Note: This value is constant because the 5530 is only a Geode companion
+ */
+
+ pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);
+
+ /*
+ * Disable trapping of UDMA register accesses (Win98 hack):
+ * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
+ */
+
+ pci_write_config_word(cs5530_0, 0xd0, 0x5006);
+
+ /*
+ * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
+ * The other settings are what is necessary to get the register
+ * into a sane state for IDE DMA operation.
+ */
+
+ pci_write_config_byte(master_0, 0x40, 0x1e);
+
+ /*
+ * Set max PCI burst size (16-bytes seems to work best):
+ * 16bytes: set bit-1 at 0x41 (reg value of 0x16)
+ * all others: clear bit-1 at 0x41, and do:
+ * 128bytes: OR 0x00 at 0x41
+ * 256bytes: OR 0x04 at 0x41
+ * 512bytes: OR 0x08 at 0x41
+ * 1024bytes: OR 0x0c at 0x41
+ */
+
+ pci_write_config_byte(master_0, 0x41, 0x14);
+
+ /*
+ * These settings are necessary to get the chip
+ * into a sane state for IDE DMA operation.
+ */
+
+ pci_write_config_byte(master_0, 0x42, 0x00);
+ pci_write_config_byte(master_0, 0x43, 0xc1);
+
+ pci_dev_put(master_0);
+ pci_dev_put(cs5530_0);
+ return 0;
+fail_put:
+ if (master_0)
+ pci_dev_put(master_0);
+ if (cs5530_0)
+ pci_dev_put(cs5530_0);
+ return -ENODEV;
+}
+
+/**
+ * cs5530_init_one - Initialise a CS5530
+ * @dev: PCI device
+ * @id: Entry in match table
+ *
+ * Install a driver for the newly found CS5530 companion chip. Most of
+ * this is just housekeeping. We have to set the chip up correctly and
+ * turn off various bits of emulation magic.
+ */
+
+static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = 0x07,
+ .port_ops = &cs5530_port_ops
+ };
+ /* The docking connector doesn't do UDMA, and it seems not MWDMA */
+ static const struct ata_port_info info_palmax_secondary = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .port_ops = &cs5530_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* Chip initialisation */
+ if (cs5530_init_chip())
+ return -ENODEV;
+
+ if (cs5530_is_palmax())
+ ppi[1] = &info_palmax_secondary;
+
+ /* Now kick off ATA set up */
+ return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL);
+}
+
+#ifdef CONFIG_PM
+static int cs5530_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ /* If we fail on resume we are doomed */
+ if (cs5530_init_chip())
+ return -EIO;
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct pci_device_id cs5530[] = {
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
+
+ { },
+};
+
+static struct pci_driver cs5530_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cs5530,
+ .probe = cs5530_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = cs5530_reinit_one,
+#endif
+};
+
+static int __init cs5530_init(void)
+{
+ return pci_register_driver(&cs5530_pci_driver);
+}
+
+static void __exit cs5530_exit(void)
+{
+ pci_unregister_driver(&cs5530_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cs5530);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cs5530_init);
+module_exit(cs5530_exit);
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
new file mode 100644
index 0000000..8b236af
--- /dev/null
+++ b/drivers/ata/pata_cs5535.c
@@ -0,0 +1,238 @@
+/*
+ * pata-cs5535.c - CS5535 PATA for new ATA layer
+ * (C) 2005-2006 Red Hat Inc
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * based upon cs5535.c from AMD <Jens.Altmann@amd.com> as cleaned up and
+ * made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
+ * and Alexander Kiausch <alex.kiausch@t-online.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Loosely based on the piix & svwks drivers.
+ *
+ * Documentation:
+ * Available from AMD web site.
+ * TODO
+ * Review errata to see if serializing is necessary
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <asm/msr.h>
+
+#define DRV_NAME "cs5535"
+#define DRV_VERSION "0.2.12"
+
+/*
+ * The Geode (aka Athlon GX now) uses an internal MSR based
+ * bus system for control. Demented but there you go.
+ */
+
+#define MSR_ATAC_BASE 0x51300000
+#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
+#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
+#define ATAC_GLD_MSR_SMI (MSR_ATAC_BASE+0x02)
+#define ATAC_GLD_MSR_ERROR (MSR_ATAC_BASE+0x03)
+#define ATAC_GLD_MSR_PM (MSR_ATAC_BASE+0x04)
+#define ATAC_GLD_MSR_DIAG (MSR_ATAC_BASE+0x05)
+#define ATAC_IO_BAR (MSR_ATAC_BASE+0x08)
+#define ATAC_RESET (MSR_ATAC_BASE+0x10)
+#define ATAC_CH0D0_PIO (MSR_ATAC_BASE+0x20)
+#define ATAC_CH0D0_DMA (MSR_ATAC_BASE+0x21)
+#define ATAC_CH0D1_PIO (MSR_ATAC_BASE+0x22)
+#define ATAC_CH0D1_DMA (MSR_ATAC_BASE+0x23)
+#define ATAC_PCI_ABRTERR (MSR_ATAC_BASE+0x24)
+
+#define ATAC_BM0_CMD_PRIM 0x00
+#define ATAC_BM0_STS_PRIM 0x02
+#define ATAC_BM0_PRD 0x04
+
+#define CS5535_CABLE_DETECT 0x48
+
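+/* 0x00009172 (ignoring the format bit) appears to be the power-on
+ default, i.e. the BIOS never programmed this channel's PIO timings */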
+#define CS5535_BAD_PIO(timings) (((timings) & ~0x80000000UL) == 0x00009172)
+
+/**
+ * cs5535_cable_detect - detect cable type
+ * @ap: Port to detect on
+ *
+ * Perform cable detection for ATA66 capable cable. Return a libata
+ * cable type.
+ */
+
+static int cs5535_cable_detect(struct ata_port *ap)
+{
+ u8 cable;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ pci_read_config_byte(pdev, CS5535_CABLE_DETECT, &cable);
+ if (cable & 1)
+ return ATA_CBL_PATA80;
+ else
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * cs5535_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Set our PIO requirements. The CS5535 is pretty clean about all this.
+ */
+
+static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u16 pio_timings[5] = {
+ 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
+ };
+ static const u16 pio_cmd_timings[5] = {
+ 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
+ };
+ u32 reg, dummy;
+ struct ata_device *pair = ata_dev_pair(adev);
+
+ int mode = adev->pio_mode - XFER_PIO_0;
+ int cmdmode = mode;
+
+ /* Command timing has to be for the lowest of the pair of devices */
+ if (pair) {
+ int pairmode = pair->pio_mode - XFER_PIO_0;
+ cmdmode = min(mode, pairmode);
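+ /* e.g. a PIO4 master paired with a PIO1 slave runs command
+ cycles at the PIO1 rate for both drives */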
+ /* Write the other drive timing register if it changed */
+ if (cmdmode < pairmode)
+ wrmsr(ATAC_CH0D0_PIO + 2 * pair->devno,
+ pio_cmd_timings[cmdmode] << 16 | pio_timings[pairmode], 0);
+ }
+ /* Write the drive timing register */
+ wrmsr(ATAC_CH0D0_PIO + 2 * adev->devno,
+ pio_cmd_timings[cmdmode] << 16 | pio_timings[mode], 0);
+
+ /* Set the PIO "format 1" bit in the DMA timing register */
+ rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
+ wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg | 0x80000000UL, 0);
+}
+
+/**
+ * cs5535_set_dmamode - DMA timing setup
+ * @ap: ATA interface
+ * @adev: Device being configured
+ *
+ * Program the UDMA or MWDMA timing registers for the device via the
+ * Geode MSRs.
+ */
+
+static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u32 udma_timings[5] = {
+ 0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061
+ };
+ static const u32 mwdma_timings[3] = {
+ 0x7F0FFFF3, 0x7F035352, 0x7F024241
+ };
+ u32 reg, dummy;
+ int mode = adev->dma_mode;
+
+ rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
+ reg &= 0x80000000UL;
+ if (mode >= XFER_UDMA_0)
+ reg |= udma_timings[mode - XFER_UDMA_0];
+ else
+ reg |= mwdma_timings[mode - XFER_MW_DMA_0];
+ wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, 0);
+}
+
+static struct scsi_host_template cs5535_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations cs5535_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = cs5535_cable_detect,
+ .set_piomode = cs5535_set_piomode,
+ .set_dmamode = cs5535_set_dmamode,
+};
+
+/**
+ * cs5535_init_one - Initialise a CS5535
+ * @dev: PCI device
+ * @id: Entry in match table
+ *
+ * Install a driver for the newly found CS5535 companion chip. Most of
+ * this is just housekeeping. We have to set the chip up correctly and
+ * turn off various bits of emulation magic.
+ */
+
+static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &cs5535_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+ u32 timings, dummy;
+
+ /* Check whether the BIOS set the initial timing clocks; if not,
+ set the timings for PIO0 */
+ rdmsr(ATAC_CH0D0_PIO, timings, dummy);
+ if (CS5535_BAD_PIO(timings))
+ wrmsr(ATAC_CH0D0_PIO, 0xF7F4F7F4UL, 0);
+ rdmsr(ATAC_CH0D1_PIO, timings, dummy);
+ if (CS5535_BAD_PIO(timings))
+ wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
+ return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL);
+}
+
+static const struct pci_device_id cs5535[] = {
+ { PCI_VDEVICE(NS, 0x002D), },
+
+ { },
+};
+
+static struct pci_driver cs5535_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cs5535,
+ .probe = cs5535_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init cs5535_init(void)
+{
+ return pci_register_driver(&cs5535_pci_driver);
+}
+
+static void __exit cs5535_exit(void)
+{
+ pci_unregister_driver(&cs5535_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch");
+MODULE_DESCRIPTION("low-level driver for the NS/AMD 5530");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cs5535);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cs5535_init);
+module_exit(cs5535_exit);
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
new file mode 100644
index 0000000..afed929
--- /dev/null
+++ b/drivers/ata/pata_cs5536.c
@@ -0,0 +1,301 @@
+/*
+ * pata_cs5536.c - CS5536 PATA for new ATA layer
+ * (C) 2007 Martin K. Petersen <mkp@mkp.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Documentation:
+ * Available from AMD web site.
+ *
+ * The IDE timing registers for the CS5536 live in the Geode Machine
+ * Specific Register file and not PCI config space. Most BIOSes
+ * virtualize the PCI registers so the chip looks like a standard IDE
+ * controller. Unfortunately not all implementations get this right.
+ * In particular some have problems with unaligned accesses to the
+ * virtualized PCI registers. This driver always does full dword
+ * writes to work around the issue. Also, in case of a bad BIOS this
+ * driver can be loaded with the "msr=1" parameter which forces using
+ * the Machine Specific Registers to configure the device.
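+ * A typical invocation would be "modprobe pata_cs5536 msr=1".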
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/libata.h>
+#include <scsi/scsi_host.h>
+#include <asm/msr.h>
+
+#define DRV_NAME "pata_cs5536"
+#define DRV_VERSION "0.0.7"
+
+enum {
+ CFG = 0,
+ DTC = 1,
+ CAST = 2,
+ ETC = 3,
+
+ MSR_IDE_BASE = 0x51300000,
+ MSR_IDE_CFG = (MSR_IDE_BASE + 0x10),
+ MSR_IDE_DTC = (MSR_IDE_BASE + 0x12),
+ MSR_IDE_CAST = (MSR_IDE_BASE + 0x13),
+ MSR_IDE_ETC = (MSR_IDE_BASE + 0x14),
+
+ PCI_IDE_CFG = 0x40,
+ PCI_IDE_DTC = 0x48,
+ PCI_IDE_CAST = 0x4c,
+ PCI_IDE_ETC = 0x50,
+
+ IDE_CFG_CHANEN = 0x2,
+ IDE_CFG_CABLE = 0x10000,
+
+ IDE_D0_SHIFT = 24,
+ IDE_D1_SHIFT = 16,
+ IDE_DRV_MASK = 0xff,
+
+ IDE_CAST_D0_SHIFT = 6,
+ IDE_CAST_D1_SHIFT = 4,
+ IDE_CAST_DRV_MASK = 0x3,
+ IDE_CAST_CMD_MASK = 0xff,
+ IDE_CAST_CMD_SHIFT = 24,
+
+ IDE_ETC_NODMA = 0x03,
+};
+
+static int use_msr;
+
+static const u32 msr_reg[4] = {
+ MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC,
+};
+
+static const u8 pci_reg[4] = {
+ PCI_IDE_CFG, PCI_IDE_DTC, PCI_IDE_CAST, PCI_IDE_ETC,
+};
+
+static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
+{
+ if (unlikely(use_msr)) {
+ u32 dummy;
+
+ rdmsr(msr_reg[reg], *val, dummy);
+ return 0;
+ }
+
+ return pci_read_config_dword(pdev, pci_reg[reg], val);
+}
+
+static inline int cs5536_write(struct pci_dev *pdev, int reg, int val)
+{
+ if (unlikely(use_msr)) {
+ wrmsr(msr_reg[reg], val, 0);
+ return 0;
+ }
+
+ return pci_write_config_dword(pdev, pci_reg[reg], val);
+}
+
+/**
+ * cs5536_cable_detect - detect cable type
+ * @ap: Port to detect on
+ *
+ * Perform cable detection for ATA66 capable cable. Return a libata
+ * cable type.
+ */
+
+static int cs5536_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 cfg;
+
+ cs5536_read(pdev, CFG, &cfg);
+
+ if (cfg & (IDE_CFG_CABLE << ap->port_no))
+ return ATA_CBL_PATA80;
+ else
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * cs5536_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ */
+
+static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u8 drv_timings[5] = {
+ 0x98, 0x55, 0x32, 0x21, 0x20,
+ };
+
+ static const u8 addr_timings[5] = {
+ 0x2, 0x1, 0x0, 0x0, 0x0,
+ };
+
+ static const u8 cmd_timings[5] = {
+ 0x99, 0x92, 0x90, 0x22, 0x20,
+ };
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct ata_device *pair = ata_dev_pair(adev);
+ int mode = adev->pio_mode - XFER_PIO_0;
+ int cmdmode = mode;
+ int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+ int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
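+ /* e.g. device 1 timings occupy bits 16-23 of DTC/ETC and bits 4-5 of CAST */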
+ u32 dtc, cast, etc;
+
+ if (pair)
+ cmdmode = min(mode, pair->pio_mode - XFER_PIO_0);
+
+ cs5536_read(pdev, DTC, &dtc);
+ cs5536_read(pdev, CAST, &cast);
+ cs5536_read(pdev, ETC, &etc);
+
+ dtc &= ~(IDE_DRV_MASK << dshift);
+ dtc |= drv_timings[mode] << dshift;
+
+ cast &= ~(IDE_CAST_DRV_MASK << cshift);
+ cast |= addr_timings[mode] << cshift;
+
+ cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
+ cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT;
+
+ etc &= ~(IDE_DRV_MASK << dshift);
+ etc |= IDE_ETC_NODMA << dshift;
+
+ cs5536_write(pdev, DTC, dtc);
+ cs5536_write(pdev, CAST, cast);
+ cs5536_write(pdev, ETC, etc);
+}
+
+/**
+ * cs5536_set_dmamode - DMA timing setup
+ * @ap: ATA interface
+ * @adev: Device being configured
+ *
+ */
+
+static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u8 udma_timings[6] = {
+ 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
+ };
+
+ static const u8 mwdma_timings[3] = {
+ 0x67, 0x21, 0x20,
+ };
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 dtc, etc;
+ int mode = adev->dma_mode;
+ int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+
+ if (mode >= XFER_UDMA_0) {
+ cs5536_read(pdev, ETC, &etc);
+
+ etc &= ~(IDE_DRV_MASK << dshift);
+ etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
+
+ cs5536_write(pdev, ETC, etc);
+ } else { /* MWDMA */
+ cs5536_read(pdev, DTC, &dtc);
+
+ dtc &= ~(IDE_DRV_MASK << dshift);
+ dtc |= mwdma_timings[mode - XFER_MW_DMA_0] << dshift;
+
+ cs5536_write(pdev, DTC, dtc);
+ }
+}
+
+static struct scsi_host_template cs5536_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations cs5536_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = cs5536_cable_detect,
+ .set_piomode = cs5536_set_piomode,
+ .set_dmamode = cs5536_set_dmamode,
+};
+
+/**
+ * cs5536_init_one - Initialise a CS5536
+ * @dev: PCI device
+ * @id: Entry in match table
+ *
+ * Register the CS5536 IDE function with libata unless the BIOS has
+ * disabled the channel.
+ */
+
+static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &cs5536_port_ops,
+ };
+
+ const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+ u32 cfg;
+
+ if (use_msr)
+ printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n");
+
+ cs5536_read(dev, CFG, &cfg);
+
+ if ((cfg & IDE_CFG_CHANEN) == 0) {
+ printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
+ return -ENODEV;
+ }
+
+ return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL);
+}
+
+static const struct pci_device_id cs5536[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+ { },
+};
+
+static struct pci_driver cs5536_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cs5536,
+ .probe = cs5536_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init cs5536_init(void)
+{
+ return pci_register_driver(&cs5536_pci_driver);
+}
+
+static void __exit cs5536_exit(void)
+{
+ pci_unregister_driver(&cs5536_pci_driver);
+}
+
+MODULE_AUTHOR("Martin K. Petersen");
+MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cs5536);
+MODULE_VERSION(DRV_VERSION);
+module_param_named(msr, use_msr, int, 0644);
+MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
+
+module_init(cs5536_init);
+module_exit(cs5536_exit);
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
new file mode 100644
index 0000000..d546425
--- /dev/null
+++ b/drivers/ata/pata_cypress.c
@@ -0,0 +1,178 @@
+/*
+ * pata_cypress.c - Cypress PATA for new ATA layer
+ * (C) 2006 Red Hat Inc
+ * Alan Cox
+ *
+ * Based heavily on
+ * linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_cypress"
+#define DRV_VERSION "0.1.5"
+
+/* here are the offset definitions for the registers */
+
+enum {
+ CY82_IDE_CMDREG = 0x04,
+ CY82_IDE_ADDRSETUP = 0x48,
+ CY82_IDE_MASTER_IOR = 0x4C,
+ CY82_IDE_MASTER_IOW = 0x4D,
+ CY82_IDE_SLAVE_IOR = 0x4E,
+ CY82_IDE_SLAVE_IOW = 0x4F,
+ CY82_IDE_MASTER_8BIT = 0x50,
+ CY82_IDE_SLAVE_8BIT = 0x51,
+
+ CY82_INDEX_PORT = 0x22,
+ CY82_DATA_PORT = 0x23,
+
+ CY82_INDEX_CTRLREG1 = 0x01,
+ CY82_INDEX_CHANNEL0 = 0x30,
+ CY82_INDEX_CHANNEL1 = 0x31,
+ CY82_INDEX_TIMEOUT = 0x32
+};
+
+/**
+ * cy82c693_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the PIO mode setup.
+ */
+
+static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct ata_timing t;
+ const unsigned long T = 1000000 / 33;
+ short time_16, time_8;
+ u32 addr;
+
+ if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
+ printk(KERN_ERR DRV_NAME ": mome computation failed.\n");
+ return;
+ }
+
+ time_16 = clamp_val(t.recover, 0, 15) | (clamp_val(t.active, 0, 15) << 4);
+ time_8 = clamp_val(t.act8b, 0, 15) | (clamp_val(t.rec8b, 0, 15) << 4);
+
+ if (adev->devno == 0) {
+ pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
+
+ addr &= ~0x0F; /* Mask bits */
+ addr |= clamp_val(t.setup, 0, 15);
+
+ pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
+ pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16);
+ pci_write_config_byte(pdev, CY82_IDE_MASTER_IOW, time_16);
+ pci_write_config_byte(pdev, CY82_IDE_MASTER_8BIT, time_8);
+ } else {
+ pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
+
+ addr &= ~0xF0; /* Mask bits */
+ addr |= (clamp_val(t.setup, 0, 15) << 4);
+
+ pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
+ pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16);
+ pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOW, time_16);
+ pci_write_config_byte(pdev, CY82_IDE_SLAVE_8BIT, time_8);
+ }
+}
+
+/**
+ * cy82c693_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the DMA mode setup.
+ */
+
+static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ int reg = CY82_INDEX_CHANNEL0 + ap->port_no;
+
+ /* Be afraid, be very afraid. Magic registers in low I/O space */
+ outb(reg, CY82_INDEX_PORT);
+ outb(adev->dma_mode - XFER_MW_DMA_0, CY82_DATA_PORT);
+
+ /* 0x50 gives the best behaviour on the Alphas using this chip */
+ outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT);
+ outb(0x50, CY82_DATA_PORT);
+}
+
+static struct scsi_host_template cy82c693_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations cy82c693_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = cy82c693_set_piomode,
+ .set_dmamode = cy82c693_set_dmamode,
+};
+
+static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &cy82c693_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+ /* Devfn 1 is the ATA primary. The secondary is magic and lives on
+ devfn 2. For the moment we don't handle the secondary. FIXME */
+
+ if (PCI_FUNC(pdev->devfn) != 1)
+ return -ENODEV;
+
+ return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL);
+}
+
+static const struct pci_device_id cy82c693[] = {
+ { PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), },
+
+ { },
+};
+
+static struct pci_driver cy82c693_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = cy82c693,
+ .probe = cy82c693_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init cy82c693_init(void)
+{
+ return pci_register_driver(&cy82c693_pci_driver);
+}
+
+static void __exit cy82c693_exit(void)
+{
+ pci_unregister_driver(&cy82c693_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, cy82c693);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(cy82c693_init);
+module_exit(cy82c693_exit);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
new file mode 100644
index 0000000..ac6392e
--- /dev/null
+++ b/drivers/ata/pata_efar.c
@@ -0,0 +1,303 @@
+/*
+ * pata_efar.c - EFAR PIIX clone controller driver
+ *
+ * (C) 2005 Red Hat
+ *
+ * Some parts based on ata_piix.c by Jeff Garzik and others.
+ *
+ * The EFAR is a PIIX4 clone with UDMA66 support. Unlike the later
+ * Intel ICH controllers the EFAR widened the UDMA mode register bits
+ * and doesn't require the funky clock selection.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_efar"
+#define DRV_VERSION "0.4.4"
+
+/**
+ * efar_pre_reset - Enable bits
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Check the port enable bits for the EFAR ATA interface. This is
+ * different to the PIIX arrangement.
+ */
+
+static int efar_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits efar_enable_bits[] = {
+ { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
+ { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
+ };
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * efar_cable_detect - check for 40/80 pin
+ * @ap: Port
+ *
+ * Perform cable detection for the EFAR ATA interface. This is
+ * different to the PIIX arrangement
+ */
+
+static int efar_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 tmp;
+
+ pci_read_config_byte(pdev, 0x47, &tmp);
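+ /* (2 >> port_no): bit 1 flags a 40-wire cable on port 0, bit 0 on port 1 */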
+ if (tmp & (2 >> ap->port_no))
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * efar_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned int idetm_port = ap->port_no ? 0x42 : 0x40;
+ u16 idetm_data;
+ int control = 0;
+
+ /*
+ * See Intel Document 298600-004 for the timing programming rules
+ * for PIIX/ICH. The EFAR is a clone so very similar
+ */
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ if (pio > 2)
+ control |= 1; /* TIME1 enable */
+ if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
+ control |= 2; /* IE enable */
+ /* Intel specifies that the PPE functionality is for disk only */
+ if (adev->class == ATA_DEV_ATA)
+ control |= 4; /* PPE enable */
+
+ pci_read_config_word(dev, idetm_port, &idetm_data);
+
+ /* Enable PPE, IE and TIME as appropriate */
+
+ if (adev->devno == 0) {
+ idetm_data &= 0xCCF0;
+ idetm_data |= control;
+ idetm_data |= (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ } else {
+ int shift = 4 * ap->port_no;
+ u8 slave_data;
+
+ idetm_data &= 0xCC0F;
+ idetm_data |= (control << 4);
+
+ /* Slave timing in separate register */
+ pci_read_config_byte(dev, 0x44, &slave_data);
+ slave_data &= 0x0F << shift;
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
+ pci_write_config_byte(dev, 0x44, slave_data);
+ }
+
+ idetm_data |= 0x4000; /* Ensure SITRE is enabled */
+ pci_write_config_word(dev, idetm_port, idetm_data);
+}
+
+/**
+ * efar_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ u8 master_port = ap->port_no ? 0x42 : 0x40;
+ u16 master_data;
+ u8 speed = adev->dma_mode;
+ int devid = adev->devno + 2 * ap->port_no;
+ u8 udma_enable;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ pci_read_config_word(dev, master_port, &master_data);
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+
+ if (speed >= XFER_UDMA_0) {
+ unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+ u16 udma_timing;
+
+ udma_enable |= (1 << devid);
+
+ /* Load the UDMA mode number */
+ pci_read_config_word(dev, 0x4A, &udma_timing);
+ udma_timing &= ~(7 << (4 * devid));
+ udma_timing |= udma << (4 * devid);
+ pci_write_config_word(dev, 0x4A, udma_timing);
+ } else {
+ /*
+ * MWDMA is driven by the PIO timings. We must also enable
+ * IORDY unconditionally along with TIME1. PPE has already
+ * been set when the PIO timing was set.
+ */
+ unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
+ unsigned int control;
+ u8 slave_data;
+ const unsigned int needed_pio[3] = {
+ XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+ };
+ int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+ control = 3; /* IORDY|TIME1 */
+
+ /* If the drive MWDMA is faster than it can do PIO then
+ we must force PIO into PIO0 */
+
+ if (adev->pio_mode < needed_pio[mwdma])
+ /* Enable DMA timing only */
+ control |= 8; /* PIO cycles in PIO0 */
+
+ if (adev->devno) { /* Slave */
+ master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
+ master_data |= control << 4;
+ pci_read_config_byte(dev, 0x44, &slave_data);
+ slave_data &= (0x0F + 0xE1 * ap->port_no);
+ /* Load the matching timing */
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+ pci_write_config_byte(dev, 0x44, slave_data);
+ } else { /* Master */
+ master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
+ and master timing bits */
+ master_data |= control;
+ master_data |=
+ (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ }
+ udma_enable &= ~(1 << devid);
+ pci_write_config_word(dev, master_port, master_data);
+ }
+ pci_write_config_byte(dev, 0x48, udma_enable);
+}
+
+static struct scsi_host_template efar_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations efar_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = efar_cable_detect,
+ .set_piomode = efar_set_piomode,
+ .set_dmamode = efar_set_dmamode,
+ .prereset = efar_pre_reset,
+};
+
+
+/**
+ * efar_init_one - Register EFAR ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in efar_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = 0x0f, /* UDMA 66 */
+ .port_ops = &efar_ops,
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL);
+}
+
+static const struct pci_device_id efar_pci_tbl[] = {
+ { PCI_VDEVICE(EFAR, 0x9130), },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver efar_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = efar_pci_tbl,
+ .probe = efar_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init efar_init(void)
+{
+ return pci_register_driver(&efar_pci_driver);
+}
+
+static void __exit efar_exit(void)
+{
+ pci_unregister_driver(&efar_pci_driver);
+}
+
+module_init(efar_init);
+module_exit(efar_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
new file mode 100644
index 0000000..e0c4f05
--- /dev/null
+++ b/drivers/ata/pata_hpt366.c
@@ -0,0 +1,453 @@
+/*
+ * Libata driver for the HighPoint 366 and 368 UDMA66 ATA controllers.
+ *
+ * This driver is heavily based upon:
+ *
+ * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
+ *
+ * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001 Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003 Red Hat Inc
+ *
+ *
+ * TODO
+ * Maybe PLL mode
+ * Look into engine reset on timeout errors. Should not be
+ * required.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_hpt366"
+#define DRV_VERSION "0.6.2"
+
+struct hpt_clock {
+ u8 xfer_speed;
+ u32 timing;
+};
+
+/* key for bus clock timings
+ * bit
+ * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
+ * DMA. cycles = value + 1
+ * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
+ * DMA. cycles = value + 1
+ * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
+ * register access.
+ * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
+ * register access.
+ * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
+ * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
+ * xfer.
+ * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
+ * register access.
+ * 28 UDMA enable
+ * 29 DMA enable
+ * 30 PIO_MST enable. if set, the chip is in bus master mode during
+ * PIO.
+ * 31 FIFO enable.
+ */
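+
+/*
+ * A worked decode against the key above: the 40MHz PIO 0 entry
+ * 0xc018d9d9 has bits 31-30 set (FIFO and PIO_MST enabled) and bits
+ * 29-28 clear (UDMA and DMA disabled); the remaining bits carry the
+ * command and data cycle counts.
+ */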
+
+static const struct hpt_clock hpt366_40[] = {
+ { XFER_UDMA_4, 0x900fd943 },
+ { XFER_UDMA_3, 0x900ad943 },
+ { XFER_UDMA_2, 0x900bd943 },
+ { XFER_UDMA_1, 0x9008d943 },
+ { XFER_UDMA_0, 0x9008d943 },
+
+ { XFER_MW_DMA_2, 0xa008d943 },
+ { XFER_MW_DMA_1, 0xa010d955 },
+ { XFER_MW_DMA_0, 0xa010d9fc },
+
+ { XFER_PIO_4, 0xc008d963 },
+ { XFER_PIO_3, 0xc010d974 },
+ { XFER_PIO_2, 0xc010d997 },
+ { XFER_PIO_1, 0xc010d9c7 },
+ { XFER_PIO_0, 0xc018d9d9 },
+ { 0, 0x0120d9d9 }
+};
+
+static const struct hpt_clock hpt366_33[] = {
+ { XFER_UDMA_4, 0x90c9a731 },
+ { XFER_UDMA_3, 0x90cfa731 },
+ { XFER_UDMA_2, 0x90caa731 },
+ { XFER_UDMA_1, 0x90cba731 },
+ { XFER_UDMA_0, 0x90c8a731 },
+
+ { XFER_MW_DMA_2, 0xa0c8a731 },
+ { XFER_MW_DMA_1, 0xa0c8a732 }, /* 0xa0c8a733 */
+ { XFER_MW_DMA_0, 0xa0c8a797 },
+
+ { XFER_PIO_4, 0xc0c8a731 },
+ { XFER_PIO_3, 0xc0c8a742 },
+ { XFER_PIO_2, 0xc0d0a753 },
+ { XFER_PIO_1, 0xc0d0a7a3 }, /* 0xc0d0a793 */
+ { XFER_PIO_0, 0xc0d0a7aa }, /* 0xc0d0a7a7 */
+ { 0, 0x0120a7a7 }
+};
+
+static const struct hpt_clock hpt366_25[] = {
+ { XFER_UDMA_4, 0x90c98521 },
+ { XFER_UDMA_3, 0x90cf8521 },
+ { XFER_UDMA_2, 0x90cf8521 },
+ { XFER_UDMA_1, 0x90cb8521 },
+ { XFER_UDMA_0, 0x90cb8521 },
+
+ { XFER_MW_DMA_2, 0xa0ca8521 },
+ { XFER_MW_DMA_1, 0xa0ca8532 },
+ { XFER_MW_DMA_0, 0xa0ca8575 },
+
+ { XFER_PIO_4, 0xc0ca8521 },
+ { XFER_PIO_3, 0xc0ca8532 },
+ { XFER_PIO_2, 0xc0ca8542 },
+ { XFER_PIO_1, 0xc0d08572 },
+ { XFER_PIO_0, 0xc0d08585 },
+ { 0, 0x01208585 }
+};
+
+static const char *bad_ata33[] = {
+ "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
+ "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+ "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+ "Maxtor 90510D4",
+ "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
+ "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+ "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+ NULL
+};
+
+static const char *bad_ata66_4[] = {
+ "IBM-DTLA-307075",
+ "IBM-DTLA-307060",
+ "IBM-DTLA-307045",
+ "IBM-DTLA-307030",
+ "IBM-DTLA-307020",
+ "IBM-DTLA-307015",
+ "IBM-DTLA-305040",
+ "IBM-DTLA-305030",
+ "IBM-DTLA-305020",
+ "IC35L010AVER07-0",
+ "IC35L020AVER07-0",
+ "IC35L030AVER07-0",
+ "IC35L040AVER07-0",
+ "IC35L060AVER07-0",
+ "WDC AC310200R",
+ NULL
+};
+
+static const char *bad_ata66_3[] = {
+ "WDC AC310200R",
+ NULL
+};
+
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
+{
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+ int i = 0;
+
+ ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+ while (list[i] != NULL) {
+ if (!strcmp(list[i], model_num)) {
+ printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
+ modestr, list[i]);
+ return 1;
+ }
+ i++;
+ }
+ return 0;
+}
+
+/**
+ * hpt366_filter - mode selection filter
+ * @adev: ATA device
+ * @mask: Mask of valid transfer modes
+ *
+ * Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
+{
+ if (adev->class == ATA_DEV_ATA) {
+ if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+ mask &= ~ATA_MASK_UDMA;
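+ /* 0xF8 strips UDMA3 and above; 0xF0 strips UDMA4 and above */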
+ if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
+ mask &= ~(0xF8 << ATA_SHIFT_UDMA);
+ if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
+ mask &= ~(0xF0 << ATA_SHIFT_UDMA);
+ } else if (adev->class == ATA_DEV_ATAPI)
+ mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+
+ return ata_bmdma_mode_filter(adev, mask);
+}
+
+/**
+ * hpt36x_find_mode - find the timing entry for a mode
+ * @ap: ATA port
+ * @speed: transfer mode
+ *
+ * Return the 32bit register programming information for this channel
+ * that matches the speed provided.
+ */
+
+static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
+{
+ struct hpt_clock *clocks = ap->host->private_data;
+
+ while (clocks->xfer_speed) {
+ if (clocks->xfer_speed == speed)
+ return clocks->timing;
+ clocks++;
+ }
+ BUG();
+ return 0xffffffffU; /* silence compiler warning */
+}
+
+static int hpt36x_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 ata66;
+
+ /*
+ * Each channel of pata_hpt366 occupies separate PCI function
+ * as the primary channel and bit1 indicates the cable type.
+ */
+ pci_read_config_byte(pdev, 0x5A, &ata66);
+ if (ata66 & 2)
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * hpt366_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Perform PIO mode setup.
+ */
+
+static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 addr1, addr2;
+ u32 reg;
+ u32 mode;
+ u8 fast;
+
+ addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+ addr2 = 0x51 + 4 * ap->port_no;
+
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, addr2, &fast);
+ if (fast & 0x80) {
+ fast &= ~0x80;
+ pci_write_config_byte(pdev, addr2, fast);
+ }
+
+ pci_read_config_dword(pdev, addr1, &reg);
+ mode = hpt36x_find_mode(ap, adev->pio_mode);
+ mode &= ~0x8000000; /* No FIFO in PIO */
+ mode &= ~0x30070000; /* Leave config bits alone */
+ reg &= 0x30070000; /* Strip timing bits */
+ pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ * hpt366_set_dmamode - DMA timing setup
+ * @ap: ATA interface
+ * @adev: Device being configured
+ *
+ * Set up the channel for MWDMA or UDMA modes. Much the same as with
+ * PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 addr1, addr2;
+ u32 reg;
+ u32 mode;
+ u8 fast;
+
+ addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+ addr2 = 0x51 + 4 * ap->port_no;
+
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, addr2, &fast);
+ if (fast & 0x80) {
+ fast &= ~0x80;
+ pci_write_config_byte(pdev, addr2, fast);
+ }
+
+ pci_read_config_dword(pdev, addr1, &reg);
+ mode = hpt36x_find_mode(ap, adev->dma_mode);
+ mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
+ mode &= ~0xC0000000; /* Leave config bits alone */
+ reg &= 0xC0000000; /* Strip timing bits */
+ pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+static struct scsi_host_template hpt36x_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ * Configuration for HPT366/68
+ */
+
+static struct ata_port_operations hpt366_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = hpt36x_cable_detect,
+ .mode_filter = hpt366_filter,
+ .set_piomode = hpt366_set_piomode,
+ .set_dmamode = hpt366_set_dmamode,
+};
+
+/**
+ * hpt36x_init_chipset - common chip setup
+ * @dev: PCI device
+ *
+ * Perform the chip setup work that must be done at both init and
+ * resume time
+ */
+
+static void hpt36x_init_chipset(struct pci_dev *dev)
+{
+ u8 drive_fast;
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+ pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+ pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+ pci_read_config_byte(dev, 0x51, &drive_fast);
+ if (drive_fast & 0x80)
+ pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
+}
+
+/**
+ * hpt36x_init_one - Initialise an HPT366/368
+ * @dev: PCI device
+ * @id: Entry in match table
+ *
+ * Initialise an HPT36x device. There are some interesting complications
+ * here. Firstly the chip may report 366 and be one of several variants.
+ * Secondly all the timings depend on the clock for the chip which we must
+ * detect and look up
+ *
+ * These are the known chip mappings. They may be missing a couple of
+ * later releases.
+ *
+ * Chip version PCI Rev Notes
+ * HPT366 4 (HPT366) 0 UDMA66
+ * HPT366 4 (HPT366) 1 UDMA66
+ * HPT368 4 (HPT366) 2 UDMA66
+ * HPT37x/30x 4 (HPT366) 3+ Other driver
+ *
+ */
+
+static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info_hpt366 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &hpt366_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info_hpt366, NULL };
+
+ void *hpriv = NULL;
+ u32 class_rev;
+ u32 reg1;
+ int rc;
+
+ rc = pcim_enable_device(dev);
+ if (rc)
+ return rc;
+
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xFF;
+
+ /* May be a later chip in disguise. Check */
+ /* Newer chips are not in the HPT36x driver. Ignore them */
+ if (class_rev > 2)
+ return -ENODEV;
+
+ hpt36x_init_chipset(dev);
+
+ pci_read_config_dword(dev, 0x40, &reg1);
+
+ /* PCI clocking determines the ATA timing values to use */
+ /* info_hpt366 is safe against re-entry so we can scribble on it */
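+ /* Bits 8-10 of config register 0x40 report the bus clock; pick the
+ matching timing table (40, 25 or, by default, 33MHz) */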
+ switch ((reg1 & 0x700) >> 8) {
+ case 9:
+ hpriv = &hpt366_40;
+ break;
+ case 5:
+ hpriv = &hpt366_25;
+ break;
+ default:
+ hpriv = &hpt366_33;
+ break;
+ }
+ /* Now kick off ATA set up */
+ return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv);
+}
+
+#ifdef CONFIG_PM
+static int hpt36x_reinit_one(struct pci_dev *dev)
+{
+ struct ata_host *host = dev_get_drvdata(&dev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(dev);
+ if (rc)
+ return rc;
+ hpt36x_init_chipset(dev);
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id hpt36x[] = {
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
+ { },
+};
+
+static struct pci_driver hpt36x_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = hpt36x,
+ .probe = hpt36x_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = hpt36x_reinit_one,
+#endif
+};
+
+static int __init hpt36x_init(void)
+{
+ return pci_register_driver(&hpt36x_pci_driver);
+}
+
+static void __exit hpt36x_exit(void)
+{
+ pci_unregister_driver(&hpt36x_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt36x);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(hpt36x_init);
+module_exit(hpt36x_exit);
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
new file mode 100644
index 0000000..4216399
--- /dev/null
+++ b/drivers/ata/pata_hpt37x.c
@@ -0,0 +1,1059 @@
+/*
+ * Libata driver for the HighPoint 37x and 30x UDMA66 ATA controllers.
+ *
+ * This driver is heavily based upon:
+ *
+ * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
+ *
+ * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001 Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003 Red Hat Inc
+ * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
+ *
+ * TODO
+ * Look into engine reset on timeout errors. Should not be required.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_hpt37x"
+#define DRV_VERSION "0.6.11"
+
+struct hpt_clock {
+ u8 xfer_speed;
+ u32 timing;
+};
+
+struct hpt_chip {
+ const char *name;
+ unsigned int base;
+ struct hpt_clock const *clocks[4];
+};
+
+/* key for bus clock timings
+ * bit
+ * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
+ * DMA. cycles = value + 1
+ * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
+ * DMA. cycles = value + 1
+ * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
+ * register access.
+ * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
+ * register access.
+ * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
+ * during task file register access.
+ * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
+ * xfer.
+ * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
+ * register access.
+ * 28 UDMA enable
+ * 29 DMA enable
+ * 30 PIO_MST enable. if set, the chip is in bus master mode during
+ * PIO.
+ * 31 FIFO enable.
+ */
+
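+/*
+ * Purely illustrative sketch, not used by the driver: how the fields
+ * described in the key above could be unpacked from a timing word.
+ * The helper names are ours, not HighPoint's.
+ */
+static inline u8 hpt_timing_data_high(u32 t) { return t & 0x0f; } /* bits 0:3 */
+static inline u8 hpt_timing_data_low(u32 t) { return (t >> 4) & 0x1f; } /* bits 4:8 */
+static inline u8 hpt_timing_udma_cycle(u32 t) { return (t >> 18) & 0x0f; } /* bits 18:21 */
+static inline int hpt_timing_udma_en(u32 t) { return (t >> 28) & 1; } /* bit 28 */
+static inline int hpt_timing_fifo_en(u32 t) { return (t >> 31) & 1; } /* bit 31 */
+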
+static struct hpt_clock hpt37x_timings_33[] = {
+ { XFER_UDMA_6, 0x12446231 }, /* 0x12646231 ?? */
+ { XFER_UDMA_5, 0x12446231 },
+ { XFER_UDMA_4, 0x12446231 },
+ { XFER_UDMA_3, 0x126c6231 },
+ { XFER_UDMA_2, 0x12486231 },
+ { XFER_UDMA_1, 0x124c6233 },
+ { XFER_UDMA_0, 0x12506297 },
+
+ { XFER_MW_DMA_2, 0x22406c31 },
+ { XFER_MW_DMA_1, 0x22406c33 },
+ { XFER_MW_DMA_0, 0x22406c97 },
+
+ { XFER_PIO_4, 0x06414e31 },
+ { XFER_PIO_3, 0x06414e42 },
+ { XFER_PIO_2, 0x06414e53 },
+ { XFER_PIO_1, 0x06814e93 },
+ { XFER_PIO_0, 0x06814ea7 }
+};
+
+static struct hpt_clock hpt37x_timings_50[] = {
+ { XFER_UDMA_6, 0x12848242 },
+ { XFER_UDMA_5, 0x12848242 },
+ { XFER_UDMA_4, 0x12ac8242 },
+ { XFER_UDMA_3, 0x128c8242 },
+ { XFER_UDMA_2, 0x120c8242 },
+ { XFER_UDMA_1, 0x12148254 },
+ { XFER_UDMA_0, 0x121882ea },
+
+ { XFER_MW_DMA_2, 0x22808242 },
+ { XFER_MW_DMA_1, 0x22808254 },
+ { XFER_MW_DMA_0, 0x228082ea },
+
+ { XFER_PIO_4, 0x0a81f442 },
+ { XFER_PIO_3, 0x0a81f443 },
+ { XFER_PIO_2, 0x0a81f454 },
+ { XFER_PIO_1, 0x0ac1f465 },
+ { XFER_PIO_0, 0x0ac1f48a }
+};
+
+static struct hpt_clock hpt37x_timings_66[] = {
+ { XFER_UDMA_6, 0x1c869c62 },
+ { XFER_UDMA_5, 0x1cae9c62 }, /* 0x1c8a9c62 */
+ { XFER_UDMA_4, 0x1c8a9c62 },
+ { XFER_UDMA_3, 0x1c8e9c62 },
+ { XFER_UDMA_2, 0x1c929c62 },
+ { XFER_UDMA_1, 0x1c9a9c62 },
+ { XFER_UDMA_0, 0x1c829c62 },
+
+ { XFER_MW_DMA_2, 0x2c829c62 },
+ { XFER_MW_DMA_1, 0x2c829c66 },
+ { XFER_MW_DMA_0, 0x2c829d2e },
+
+ { XFER_PIO_4, 0x0c829c62 },
+ { XFER_PIO_3, 0x0c829c84 },
+ { XFER_PIO_2, 0x0c829ca6 },
+ { XFER_PIO_1, 0x0d029d26 },
+ { XFER_PIO_0, 0x0d029d5e }
+};
+
+
+static const struct hpt_chip hpt370 = {
+ "HPT370",
+ 48,
+ {
+ hpt37x_timings_33,
+ NULL,
+ NULL,
+ NULL
+ }
+};
+
+static const struct hpt_chip hpt370a = {
+ "HPT370A",
+ 48,
+ {
+ hpt37x_timings_33,
+ NULL,
+ hpt37x_timings_50,
+ NULL
+ }
+};
+
+static const struct hpt_chip hpt372 = {
+ "HPT372",
+ 55,
+ {
+ hpt37x_timings_33,
+ NULL,
+ hpt37x_timings_50,
+ hpt37x_timings_66
+ }
+};
+
+static const struct hpt_chip hpt302 = {
+ "HPT302",
+ 66,
+ {
+ hpt37x_timings_33,
+ NULL,
+ hpt37x_timings_50,
+ hpt37x_timings_66
+ }
+};
+
+static const struct hpt_chip hpt371 = {
+ "HPT371",
+ 66,
+ {
+ hpt37x_timings_33,
+ NULL,
+ hpt37x_timings_50,
+ hpt37x_timings_66
+ }
+};
+
+static const struct hpt_chip hpt372a = {
+ "HPT372A",
+ 66,
+ {
+ hpt37x_timings_33,
+ NULL,
+ hpt37x_timings_50,
+ hpt37x_timings_66
+ }
+};
+
+static const struct hpt_chip hpt374 = {
+ "HPT374",
+ 48,
+ {
+ hpt37x_timings_33,
+ NULL,
+ NULL,
+ NULL
+ }
+};
+
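+/*
+ * Note: the clocks[] arrays above are indexed by the clock slot that
+ * hpt37x_clock_slot() computes below (0 = 33MHz, 1 = 40MHz, 2 = 50MHz,
+ * 3 = 66MHz). A NULL entry means the chip has no native timing table
+ * for that bus clock, and the DPLL path is taken instead.
+ */
+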
+/**
+ * hpt37x_find_mode - find the timing data for a transfer mode
+ * @ap: ATA port
+ * @speed: transfer mode
+ *
+ * Return the 32bit register programming information for this channel
+ * that matches the speed provided.
+ */
+
+static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
+{
+ struct hpt_clock *clocks = ap->host->private_data;
+
+ while(clocks->xfer_speed) {
+ if (clocks->xfer_speed == speed)
+ return clocks->timing;
+ clocks++;
+ }
+ BUG();
+ return 0xffffffffU; /* silence compiler warning */
+}
+
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
+{
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+ int i = 0;
+
+ ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+ while (list[i] != NULL) {
+ if (!strcmp(list[i], model_num)) {
+ printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
+ modestr, list[i]);
+ return 1;
+ }
+ i++;
+ }
+ return 0;
+}
+
+static const char *bad_ata33[] = {
+ "Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
+ "Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+ "Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+ "Maxtor 90510D4",
+ "Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
+ "Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+ "Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+ NULL
+};
+
+static const char *bad_ata100_5[] = {
+ "IBM-DTLA-307075",
+ "IBM-DTLA-307060",
+ "IBM-DTLA-307045",
+ "IBM-DTLA-307030",
+ "IBM-DTLA-307020",
+ "IBM-DTLA-307015",
+ "IBM-DTLA-305040",
+ "IBM-DTLA-305030",
+ "IBM-DTLA-305020",
+ "IC35L010AVER07-0",
+ "IC35L020AVER07-0",
+ "IC35L030AVER07-0",
+ "IC35L040AVER07-0",
+ "IC35L060AVER07-0",
+ "WDC AC310200R",
+ NULL
+};
+
+/**
+ * hpt370_filter - mode selection filter
+ * @adev: ATA device
+ * @mask: proposed transfer mode mask
+ *
+ * Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
+{
+ if (adev->class == ATA_DEV_ATA) {
+ if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
+ mask &= ~ATA_MASK_UDMA;
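+ /* 0xE0 << ATA_SHIFT_UDMA covers UDMA5..UDMA7, so this caps the
+ drive at UDMA4 (ATA66) while keeping lower UDMA modes */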
+ if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+ mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+ }
+ return ata_bmdma_mode_filter(adev, mask);
+}
+
+/**
+ * hpt370a_filter - mode selection filter
+ * @adev: ATA device
+ * @mask: proposed transfer mode mask
+ *
+ * Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
+{
+ if (adev->class == ATA_DEV_ATA) {
+ if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
+ mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+ }
+ return ata_bmdma_mode_filter(adev, mask);
+}
+
+/**
+ * hpt37x_pre_reset - reset the hpt37x bus
+ * @link: ATA link to reset
+ * @deadline: deadline jiffies for the operation
+ *
+ * Perform the initial reset handling for the 370/372 and 374 func 0
+ */
+
+static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ u8 scr2, ata66;
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const struct pci_bits hpt37x_enable_bits[] = {
+ { 0x50, 1, 0x04, 0x04 },
+ { 0x54, 1, 0x04, 0x04 }
+ };
+ if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ pci_read_config_byte(pdev, 0x5B, &scr2);
+ pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
+ /* Cable register now active */
+ pci_read_config_byte(pdev, 0x5A, &ata66);
+ /* Restore state */
+ pci_write_config_byte(pdev, 0x5B, scr2);
+
+ if (ata66 & (2 >> ap->port_no))
+ ap->cbl = ATA_CBL_PATA40;
+ else
+ ap->cbl = ATA_CBL_PATA80;
+
+ /* Reset the state machine */
+ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+ udelay(100);
+
+ return ata_sff_prereset(link, deadline);
+}
+
+static int hpt374_fn1_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits hpt37x_enable_bits[] = {
+ { 0x50, 1, 0x04, 0x04 },
+ { 0x54, 1, 0x04, 0x04 }
+ };
+ u16 mcr3;
+ u8 ata66;
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ unsigned int mcrbase = 0x50 + 4 * ap->port_no;
+
+ if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ /* Do the extra channel work */
+ pci_read_config_word(pdev, mcrbase + 2, &mcr3);
+ /* Set bit 15 of 0x52 to enable TCBLID as input */
+ pci_write_config_word(pdev, mcrbase + 2, mcr3 | 0x8000);
+ pci_read_config_byte(pdev, 0x5A, &ata66);
+ /* Reset TCBLID/FCBLID to output */
+ pci_write_config_word(pdev, mcrbase + 2, mcr3);
+
+ if (ata66 & (2 >> ap->port_no))
+ ap->cbl = ATA_CBL_PATA40;
+ else
+ ap->cbl = ATA_CBL_PATA80;
+
+ /* Reset the state machine */
+ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+ udelay(100);
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * hpt370_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Perform PIO mode setup.
+ */
+
+static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 addr1, addr2;
+ u32 reg;
+ u32 mode;
+ u8 fast;
+
+ addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+ addr2 = 0x51 + 4 * ap->port_no;
+
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, addr2, &fast);
+ fast &= ~0x02;
+ fast |= 0x01;
+ pci_write_config_byte(pdev, addr2, fast);
+
+ pci_read_config_dword(pdev, addr1, &reg);
+ mode = hpt37x_find_mode(ap, adev->pio_mode);
+ mode &= ~0x80000000; /* No FIFO in PIO - FIFO enable is bit 31 per the key above */
+ mode &= ~0x30070000; /* Leave config bits alone */
+ reg &= 0x30070000; /* Strip timing bits */
+ pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ * hpt370_set_dmamode - DMA timing setup
+ * @ap: ATA interface
+ * @adev: Device being configured
+ *
+ * Set up the channel for MWDMA or UDMA modes. Much the same as with
+ * PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 addr1, addr2;
+ u32 reg;
+ u32 mode;
+ u8 fast;
+
+ addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+ addr2 = 0x51 + 4 * ap->port_no;
+
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, addr2, &fast);
+ fast &= ~0x02;
+ fast |= 0x01;
+ pci_write_config_byte(pdev, addr2, fast);
+
+ pci_read_config_dword(pdev, addr1, &reg);
+ mode = hpt37x_find_mode(ap, adev->dma_mode);
+ mode &= ~0xC0000000; /* Leave config bits alone */
+ mode |= 0x80000000; /* FIFO in MWDMA or UDMA - FIFO enable is bit 31 */
+ reg &= 0xC0000000; /* Strip timing bits */
+ pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ * hpt370_bmdma_start - DMA engine begin
+ * @qc: ATA command
+ *
+ * The 370 and 370A want us to reset the DMA engine each time we
+ * use it. The 372 and later are fine.
+ */
+
+static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+ udelay(10);
+ ata_bmdma_start(qc);
+}
+
+/**
+ * hpt370_bmdma_stop - DMA engine stop
+ * @qc: ATA command
+ *
+ * Work around the HPT370 DMA engine.
+ */
+
+static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 dma_stat = ioread8(ap->ioaddr.bmdma_addr + 2);
+ u8 dma_cmd;
+ void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+
+ if (dma_stat & 0x01) {
+ udelay(20);
+ dma_stat = ioread8(bmdma + 2);
+ }
+ if (dma_stat & 0x01) {
+ /* Clear the engine */
+ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+ udelay(10);
+ /* Stop DMA */
+ dma_cmd = ioread8(bmdma);
+ iowrite8(dma_cmd & 0xFE, bmdma);
+ /* Clear Error */
+ dma_stat = ioread8(bmdma + 2);
+ iowrite8(dma_stat | 0x06, bmdma + 2);
+ /* Clear the engine */
+ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+ udelay(10);
+ }
+ ata_bmdma_stop(qc);
+}
+
+/**
+ * hpt372_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Perform PIO mode setup.
+ */
+
+static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 addr1, addr2;
+ u32 reg;
+ u32 mode;
+ u8 fast;
+
+ addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+ addr2 = 0x51 + 4 * ap->port_no;
+
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, addr2, &fast);
+ fast &= ~0x07;
+ pci_write_config_byte(pdev, addr2, fast);
+
+ pci_read_config_dword(pdev, addr1, &reg);
+ mode = hpt37x_find_mode(ap, adev->pio_mode);
+
+ printk("Find mode for %d reports %X\n", adev->pio_mode, mode);
+ mode &= ~0x80000000; /* No FIFO in PIO */
+ mode &= ~0x30070000; /* Leave config bits alone */
+ reg &= 0x30070000; /* Strip timing bits */
+ pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ * hpt372_set_dmamode - DMA timing setup
+ * @ap: ATA interface
+ * @adev: Device being configured
+ *
+ * Set up the channel for MWDMA or UDMA modes. Much the same as with
+ * PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 addr1, addr2;
+ u32 reg;
+ u32 mode;
+ u8 fast;
+
+ addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+ addr2 = 0x51 + 4 * ap->port_no;
+
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, addr2, &fast);
+ fast &= ~0x07;
+ pci_write_config_byte(pdev, addr2, fast);
+
+ pci_read_config_dword(pdev, addr1, &reg);
+ mode = hpt37x_find_mode(ap, adev->dma_mode);
+ printk("Find mode for DMA %d reports %X\n", adev->dma_mode, mode);
+ mode &= ~0xC0000000; /* Leave config bits alone */
+ mode |= 0x80000000; /* FIFO in MWDMA or UDMA */
+ reg &= 0xC0000000; /* Strip timing bits */
+ pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ * hpt37x_bmdma_stop - DMA engine stop
+ * @qc: ATA command
+ *
+ * Clean up after the HPT372 and later DMA engine
+ */
+
+static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int mscreg = 0x50 + 4 * ap->port_no;
+ u8 bwsr_stat, msc_stat;
+
+ pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
+ pci_read_config_byte(pdev, mscreg, &msc_stat);
+ if (bwsr_stat & (1 << ap->port_no))
+ pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
+ ata_bmdma_stop(qc);
+}
+
+
+static struct scsi_host_template hpt37x_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ * Configuration for HPT370
+ */
+
+static struct ata_port_operations hpt370_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_start = hpt370_bmdma_start,
+ .bmdma_stop = hpt370_bmdma_stop,
+
+ .mode_filter = hpt370_filter,
+ .set_piomode = hpt370_set_piomode,
+ .set_dmamode = hpt370_set_dmamode,
+ .prereset = hpt37x_pre_reset,
+};
+
+/*
+ * Configuration for HPT370A. Close to the 370 but with fewer mode filters
+ */
+
+static struct ata_port_operations hpt370a_port_ops = {
+ .inherits = &hpt370_port_ops,
+ .mode_filter = hpt370a_filter,
+};
+
+/*
+ * Configuration for HPT372, HPT371, HPT302. Slightly different PIO
+ * and DMA mode setting functionality.
+ */
+
+static struct ata_port_operations hpt372_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_stop = hpt37x_bmdma_stop,
+
+ .set_piomode = hpt372_set_piomode,
+ .set_dmamode = hpt372_set_dmamode,
+ .prereset = hpt37x_pre_reset,
+};
+
+/*
+ * Configuration for HPT374. Mode setting works like 372 and friends
+ * but we have a different cable detection procedure for function 1.
+ */
+
+static struct ata_port_operations hpt374_fn1_port_ops = {
+ .inherits = &hpt372_port_ops,
+ .prereset = hpt374_fn1_pre_reset,
+};
+
+/**
+ * hpt37x_clock_slot - Turn timing data into a PCI clock slot
+ * @freq: Reported frequency timing
+ * @base: Base timing
+ *
+ * Turn the timing data into a clock slot (0 for 33MHz, 1 for 40MHz,
+ * 2 for 50MHz and 3 for 66MHz)
+ */
+
+static int hpt37x_clock_slot(unsigned int freq, unsigned int base)
+{
+ unsigned int f = (base * freq) / 192; /* MHz */
+ if (f < 40)
+ return 0; /* 33MHz slot */
+ if (f < 45)
+ return 1; /* 40MHz slot */
+ if (f < 55)
+ return 2; /* 50MHz slot */
+ return 3; /* 66MHz slot */
+}
+
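+/*
+ * Worked example (illustrative): an HPT370 (base 48) whose frequency
+ * counter reads 132 gives f = (48 * 132) / 192 = 33, i.e. slot 0 (33MHz).
+ */
+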
+/**
+ * hpt37x_calibrate_dpll - Calibrate the DPLL loop
+ * @dev: PCI device
+ *
+ * Perform a calibration cycle on the HPT37x DPLL. Returns 1 if this
+ * succeeds
+ */
+
+static int hpt37x_calibrate_dpll(struct pci_dev *dev)
+{
+ u8 reg5b;
+ u32 reg5c;
+ int tries;
+
+ for(tries = 0; tries < 0x5000; tries++) {
+ udelay(50);
+ pci_read_config_byte(dev, 0x5b, &reg5b);
+ if (reg5b & 0x80) {
+ /* See if it stays set */
+ for(tries = 0; tries < 0x1000; tries ++) {
+ pci_read_config_byte(dev, 0x5b, &reg5b);
+ /* Failed ? */
+ if ((reg5b & 0x80) == 0)
+ return 0;
+ }
+ /* Turn off tuning, we have the DPLL set */
+ pci_read_config_dword(dev, 0x5c, &reg5c);
+ pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
+ return 1;
+ }
+ }
+ /* Never went stable */
+ return 0;
+}
+
+static u32 hpt374_read_freq(struct pci_dev *pdev)
+{
+ u32 freq;
+ unsigned long io_base = pci_resource_start(pdev, 4);
+ if (PCI_FUNC(pdev->devfn) & 1) {
+ struct pci_dev *pdev_0;
+
+ pdev_0 = pci_get_slot(pdev->bus, pdev->devfn - 1);
+ /* Someone hot plugged the controller on us ? */
+ if (pdev_0 == NULL)
+ return 0;
+ io_base = pci_resource_start(pdev_0, 4);
+ freq = inl(io_base + 0x90);
+ pci_dev_put(pdev_0);
+ } else
+ freq = inl(io_base + 0x90);
+ return freq;
+}
+
+/**
+ * hpt37x_init_one - Initialise an HPT37X/302
+ * @dev: PCI device
+ * @id: Entry in match table
+ *
+ * Initialise an HPT37x device. There are some interesting complications
+ * here. Firstly the chip may report 366 and be one of several variants.
+ * Secondly all the timings depend on the clock for the chip which we must
+ * detect and look up
+ *
+ * These are the known chip mappings. The list may be missing a couple of later
+ * releases.
+ *
+ * Chip version PCI Rev Notes
+ * HPT366 4 (HPT366) 0 Other driver
+ * HPT366 4 (HPT366) 1 Other driver
+ * HPT368 4 (HPT366) 2 Other driver
+ * HPT370 4 (HPT366) 3 UDMA100
+ * HPT370A 4 (HPT366) 4 UDMA100
+ * HPT372 4 (HPT366) 5 UDMA133 (1)
+ * HPT372N 4 (HPT366) 6 Other driver
+ * HPT372A 5 (HPT372) 1 UDMA133 (1)
+ * HPT372N 5 (HPT372) 2 Other driver
+ * HPT302 6 (HPT302) 1 UDMA133
+ * HPT302N 6 (HPT302) 2 Other driver
+ * HPT371 7 (HPT371) * UDMA133
+ * HPT374 8 (HPT374) * UDMA133 4 channel
+ * HPT372N 9 (HPT372N) * Other driver
+ *
+ * (1) UDMA133 support depends on the bus clock
+ */
+
+static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ /* HPT370 - UDMA100 */
+ static const struct ata_port_info info_hpt370 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &hpt370_port_ops
+ };
+ /* HPT370A - UDMA100 */
+ static const struct ata_port_info info_hpt370a = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &hpt370a_port_ops
+ };
+ /* HPT370 - UDMA100, variant used when the bus clock is below 50MHz */
+ static const struct ata_port_info info_hpt370_33 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &hpt370_port_ops
+ };
+ /* HPT370A - UDMA100, variant used when the bus clock is below 50MHz */
+ static const struct ata_port_info info_hpt370a_33 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &hpt370a_port_ops
+ };
+ /* HPT371, 372 and friends - UDMA133 */
+ static const struct ata_port_info info_hpt372 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &hpt372_port_ops
+ };
+ /* HPT374 - UDMA100, function 1 uses different prereset method */
+ static const struct ata_port_info info_hpt374_fn0 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &hpt372_port_ops
+ };
+ static const struct ata_port_info info_hpt374_fn1 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &hpt374_fn1_port_ops
+ };
+
+ static const int MHz[4] = { 33, 40, 50, 66 };
+ void *private_data = NULL;
+ const struct ata_port_info *ppi[] = { NULL, NULL };
+
+ u8 irqmask;
+ u32 class_rev;
+ u8 mcr1;
+ u32 freq;
+ int prefer_dpll = 1;
+
+ unsigned long iobase = pci_resource_start(dev, 4);
+
+ const struct hpt_chip *chip_table;
+ int clock_slot;
+ int rc;
+
+ rc = pcim_enable_device(dev);
+ if (rc)
+ return rc;
+
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xFF;
+
+ if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
+ /* May be a later chip in disguise. Check */
+ /* Older chips are in the HPT366 driver. Ignore them */
+ if (class_rev < 3)
+ return -ENODEV;
+ /* N series chips have their own driver. Ignore */
+ if (class_rev == 6)
+ return -ENODEV;
+
+ switch(class_rev) {
+ case 3:
+ ppi[0] = &info_hpt370;
+ chip_table = &hpt370;
+ prefer_dpll = 0;
+ break;
+ case 4:
+ ppi[0] = &info_hpt370a;
+ chip_table = &hpt370a;
+ prefer_dpll = 0;
+ break;
+ case 5:
+ ppi[0] = &info_hpt372;
+ chip_table = &hpt372;
+ break;
+ default:
+ printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype please report (%d).\n", class_rev);
+ return -ENODEV;
+ }
+ } else {
+ switch(dev->device) {
+ case PCI_DEVICE_ID_TTI_HPT372:
+ /* 372N if rev >= 2 */
+ if (class_rev >= 2)
+ return -ENODEV;
+ ppi[0] = &info_hpt372;
+ chip_table = &hpt372a;
+ break;
+ case PCI_DEVICE_ID_TTI_HPT302:
+ /* 302N if rev > 1 */
+ if (class_rev > 1)
+ return -ENODEV;
+ ppi[0] = &info_hpt372;
+ /* Check this */
+ chip_table = &hpt302;
+ break;
+ case PCI_DEVICE_ID_TTI_HPT371:
+ if (class_rev > 1)
+ return -ENODEV;
+ ppi[0] = &info_hpt372;
+ chip_table = &hpt371;
+ /* Single channel device, master is not present
+ but the BIOS (or this driver on non-x86) must
+ mark it absent */
+ pci_read_config_byte(dev, 0x50, &mcr1);
+ mcr1 &= ~0x04;
+ pci_write_config_byte(dev, 0x50, mcr1);
+ break;
+ case PCI_DEVICE_ID_TTI_HPT374:
+ chip_table = &hpt374;
+ if (!(PCI_FUNC(dev->devfn) & 1))
+ *ppi = &info_hpt374_fn0;
+ else
+ *ppi = &info_hpt374_fn1;
+ break;
+ default:
+ printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device);
+ return -ENODEV;
+ }
+ }
+ /* Ok so this is a chip we support */
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+ pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+ pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+ pci_read_config_byte(dev, 0x5A, &irqmask);
+ irqmask &= ~0x10;
+ pci_write_config_byte(dev, 0x5a, irqmask);
+
+ /*
+ * Default to the PCI clock. Make sure MA15/16 are set to output
+ * to prevent drives having problems with 40-pin cables. Needed
+ * for some drives such as IBM-DTLA which will not enter ready
+ * state on reset when PDIAG is an input.
+ */
+
+ pci_write_config_byte(dev, 0x5b, 0x23);
+
+ /*
+ * HighPoint does this for HPT372A.
+ * NOTE: This register is only writeable via I/O space.
+ */
+ if (chip_table == &hpt372a)
+ outb(0x0e, iobase + 0x9c);
+
+ /* Some devices do not let this value be accessed via PCI space
+ according to the old driver. In addition we must use the value
+ from FN 0 on the HPT374 */
+
+ if (chip_table == &hpt374) {
+ freq = hpt374_read_freq(dev);
+ if (freq == 0)
+ return -ENODEV;
+ } else
+ freq = inl(iobase + 0x90);
+
+ if ((freq >> 12) != 0xABCDE) {
+ int i;
+ u16 sr;
+ u32 total = 0;
+
+ printk(KERN_WARNING "pata_hpt37x: BIOS has not set timing clocks.\n");
+
+ /* This is the process the HPT371 BIOS is reported to use */
+ for(i = 0; i < 128; i++) {
+ pci_read_config_word(dev, 0x78, &sr); /* 9-bit counter, hence the word read and 0x1FF mask */
+ total += sr & 0x1FF;
+ udelay(15);
+ }
+ freq = total / 128;
+ }
+ freq &= 0x1FF;
+
+ /*
+ * Turn the frequency check into a band and then find a timing
+ * table to match it.
+ */
+
+ clock_slot = hpt37x_clock_slot(freq, chip_table->base);
+ if (chip_table->clocks[clock_slot] == NULL || prefer_dpll) {
+ /*
+ * We need to try PLL mode instead
+ *
+ * For non UDMA133 capable devices we should
+ * use a 50MHz DPLL by choice
+ */
+ unsigned int f_low, f_high;
+ int dpll, adjust;
+
+ /* Compute DPLL: UDMA133-capable chips (UDMA6/7 mask bits set)
+ want the 66MHz slot, the rest the 50MHz slot */
+ dpll = (ppi[0]->udma_mask & 0xC0) ? 3 : 2;
+
+ f_low = (MHz[clock_slot] * 48) / MHz[dpll];
+ f_high = f_low + 2;
+ if (clock_slot > 1)
+ f_high += 2;
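+ /* Illustrative numbers: a 33MHz bus aiming at the 66MHz DPLL gives
+ f_low = (33 * 48) / 66 = 24, so the window programmed into
+ register 0x5C below is 24..26 */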
+
+ /* Select the DPLL clock. */
+ pci_write_config_byte(dev, 0x5b, 0x21);
+ pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
+
+ for(adjust = 0; adjust < 8; adjust++) {
+ if (hpt37x_calibrate_dpll(dev))
+ break;
+ /* See if it'll settle at a fractionally different clock */
+ if (adjust & 1)
+ f_low -= adjust >> 1;
+ else
+ f_high += adjust >> 1;
+ pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
+ }
+ if (adjust == 8) {
+ printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n");
+ return -ENODEV;
+ }
+ if (dpll == 3)
+ private_data = (void *)hpt37x_timings_66;
+ else
+ private_data = (void *)hpt37x_timings_50;
+
+ printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using %dMHz DPLL.\n",
+ MHz[clock_slot], MHz[dpll]);
+ } else {
+ private_data = (void *)chip_table->clocks[clock_slot];
+ /*
+ * Perform a final fixup. Note that we will have used the
+ * DPLL on the HPT372 which means we don't have to worry
+ * about lack of UDMA133 support on lower clocks
+ */
+
+ if (clock_slot < 2 && ppi[0] == &info_hpt370)
+ ppi[0] = &info_hpt370_33;
+ if (clock_slot < 2 && ppi[0] == &info_hpt370a)
+ ppi[0] = &info_hpt370a_33;
+ printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n",
+ chip_table->name, MHz[clock_slot]);
+ }
+
+ /* Now kick off ATA set up */
+ return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data);
+}
+
+static const struct pci_device_id hpt37x[] = {
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), },
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), },
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
+
+ { },
+};
+
+static struct pci_driver hpt37x_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = hpt37x,
+ .probe = hpt37x_init_one,
+ .remove = ata_pci_remove_one
+};
+
+static int __init hpt37x_init(void)
+{
+ return pci_register_driver(&hpt37x_pci_driver);
+}
+
+static void __exit hpt37x_exit(void)
+{
+ pci_unregister_driver(&hpt37x_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt37x);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(hpt37x_init);
+module_exit(hpt37x_exit);
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
new file mode 100644
index 0000000..d5c9fd7
--- /dev/null
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -0,0 +1,594 @@
+/*
+ * Libata driver for the highpoint 372N and 302N UDMA66 ATA controllers.
+ *
+ * This driver is heavily based upon:
+ *
+ * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
+ *
+ * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 2001 Sun Microsystems, Inc.
+ * Portions Copyright (C) 2003 Red Hat Inc
+ * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
+ *
+ *
+ * TODO
+ * Work out best PLL policy
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_hpt3x2n"
+#define DRV_VERSION "0.3.4"
+
+enum {
+ HPT_PCI_FAST = (1 << 31),
+ PCI66 = (1 << 1),
+ USE_DPLL = (1 << 0)
+};
+
+struct hpt_clock {
+ u8 xfer_speed;
+ u32 timing;
+};
+
+struct hpt_chip {
+ const char *name;
+ struct hpt_clock *clocks[3];
+};
+
+/* key for bus clock timings
+ * bit
+ * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
+ * DMA. cycles = value + 1
+ * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
+ * DMA. cycles = value + 1
+ * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
+ * register access.
+ * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
+ * register access.
+ * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
+ * during task file register access.
+ * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
+ * xfer.
+ * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
+ * register access.
+ * 28 UDMA enable
+ * 29 DMA enable
+ * 30 PIO_MST enable. if set, the chip is in bus master mode during
+ * PIO.
+ * 31 FIFO enable.
+ */
+
+/* 66MHz DPLL clocks */
+
+static struct hpt_clock hpt3x2n_clocks[] = {
+ { XFER_UDMA_7, 0x1c869c62 },
+ { XFER_UDMA_6, 0x1c869c62 },
+ { XFER_UDMA_5, 0x1c8a9c62 },
+ { XFER_UDMA_4, 0x1c8a9c62 },
+ { XFER_UDMA_3, 0x1c8e9c62 },
+ { XFER_UDMA_2, 0x1c929c62 },
+ { XFER_UDMA_1, 0x1c9a9c62 },
+ { XFER_UDMA_0, 0x1c829c62 },
+
+ { XFER_MW_DMA_2, 0x2c829c62 },
+ { XFER_MW_DMA_1, 0x2c829c66 },
+ { XFER_MW_DMA_0, 0x2c829d2c },
+
+ { XFER_PIO_4, 0x0c829c62 },
+ { XFER_PIO_3, 0x0c829c84 },
+ { XFER_PIO_2, 0x0c829ca6 },
+ { XFER_PIO_1, 0x0d029d26 },
+ { XFER_PIO_0, 0x0d029d5e },
+ { 0, 0x0d029d5e }
+};
+
+/**
+ * hpt3x2n_find_mode - find the timing data for a transfer mode
+ * @ap: ATA port
+ * @speed: transfer mode
+ *
+ * Return the 32bit register programming information for this channel
+ * that matches the speed provided. For the moment the clocks table
+ * is hard coded but easy to change. This will be needed if we use
+ * different DPLLs
+ */
+
+static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
+{
+ struct hpt_clock *clocks = hpt3x2n_clocks;
+
+ while(clocks->xfer_speed) {
+ if (clocks->xfer_speed == speed)
+ return clocks->timing;
+ clocks++;
+ }
+ BUG();
+ return 0xffffffffU; /* silence compiler warning */
+}
+
+/**
+ * hpt3x2n_cable_detect - Detect the cable type
+ * @ap: ATA port to detect on
+ *
+ * Return the cable type attached to this port
+ */
+
+static int hpt3x2n_cable_detect(struct ata_port *ap)
+{
+ u8 scr2, ata66;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ pci_read_config_byte(pdev, 0x5B, &scr2);
+ pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
+ /* Cable register now active */
+ pci_read_config_byte(pdev, 0x5A, &ata66);
+ /* Restore state */
+ pci_write_config_byte(pdev, 0x5B, scr2);
+
+ if (ata66 & (1 << ap->port_no))
+ return ATA_CBL_PATA40;
+ else
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * hpt3x2n_pre_reset - reset the hpt3x2n bus
+ * @link: ATA link to reset
+ * @deadline: deadline jiffies for the operation
+ *
+ * Perform the initial reset handling for the 3x2n series controllers:
+ * reset the hardware and state machine.
+ */
+
+static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ /* Reset the state machine */
+ pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+ udelay(100);
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * hpt3x2n_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Perform PIO mode setup.
+ */
+
+static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 addr1, addr2;
+ u32 reg;
+ u32 mode;
+ u8 fast;
+
+ addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+ addr2 = 0x51 + 4 * ap->port_no;
+
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, addr2, &fast);
+ fast &= ~0x07;
+ pci_write_config_byte(pdev, addr2, fast);
+
+ pci_read_config_dword(pdev, addr1, &reg);
+ mode = hpt3x2n_find_mode(ap, adev->pio_mode);
+ mode &= ~0x80000000; /* No FIFO in PIO - FIFO enable is bit 31 per the key above */
+ mode &= ~0x30070000; /* Leave config bits alone */
+ reg &= 0x30070000; /* Strip timing bits */
+ pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ * hpt3x2n_set_dmamode - DMA timing setup
+ * @ap: ATA interface
+ * @adev: Device being configured
+ *
+ * Set up the channel for MWDMA or UDMA modes. Much the same as with
+ * PIO, load the mode number and then set MWDMA or UDMA flag.
+ */
+
+static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 addr1, addr2;
+ u32 reg;
+ u32 mode;
+ u8 fast;
+
+ addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
+ addr2 = 0x51 + 4 * ap->port_no;
+
+ /* Fast interrupt prediction disable, hold off interrupt disable */
+ pci_read_config_byte(pdev, addr2, &fast);
+ fast &= ~0x07;
+ pci_write_config_byte(pdev, addr2, fast);
+
+ pci_read_config_dword(pdev, addr1, &reg);
+ mode = hpt3x2n_find_mode(ap, adev->dma_mode);
+ mode &= ~0xC0000000; /* Leave config bits alone */
+ mode |= 0x80000000; /* FIFO in MWDMA or UDMA - FIFO enable is bit 31 */
+ reg &= 0xC0000000; /* Strip timing bits */
+ pci_write_config_dword(pdev, addr1, reg | mode);
+}
+
+/**
+ * hpt3x2n_bmdma_stop - DMA engine stop
+ * @qc: ATA command
+ *
+ * Clean up after the HPT3x2n DMA engine
+ */
+
+static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int mscreg = 0x50 + 2 * ap->port_no;
+ u8 bwsr_stat, msc_stat;
+
+ pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
+ pci_read_config_byte(pdev, mscreg, &msc_stat);
+ if (bwsr_stat & (1 << ap->port_no))
+ pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
+ ata_bmdma_stop(qc);
+}
+
+/**
+ * hpt3x2n_set_clock - clock control
+ * @ap: ATA port
+ * @source: 0x21 or 0x23 for PLL or PCI sourced clock
+ *
+ * Switch the ATA bus clock between the PLL and PCI clock sources
+ * while correctly isolating the bus and resetting internal logic
+ *
+ * We must use the DPLL for
+ * - writing
+ * - second channel UDMA7 (SATA ports) or higher
+ * - 66MHz PCI
+ *
+ * or we will underclock the device and get reduced performance.
+ */
+
+static void hpt3x2n_set_clock(struct ata_port *ap, int source)
+{
+ void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+
+ /* Tristate the bus */
+ iowrite8(0x80, bmdma+0x73);
+ iowrite8(0x80, bmdma+0x77);
+
+ /* Switch clock and reset channels */
+ iowrite8(source, bmdma+0x7B);
+ iowrite8(0xC0, bmdma+0x79);
+
+ /* Reset state machines */
+ iowrite8(0x37, bmdma+0x70);
+ iowrite8(0x37, bmdma+0x74);
+
+ /* Complete reset */
+ iowrite8(0x00, bmdma+0x79);
+
+ /* Reconnect channels to bus */
+ iowrite8(0x00, bmdma+0x73);
+ iowrite8(0x00, bmdma+0x77);
+}
+
+/* Check if our partner interface is busy */
+
+static int hpt3x2n_pair_idle(struct ata_port *ap)
+{
+ struct ata_host *host = ap->host;
+ struct ata_port *pair = host->ports[ap->port_no ^ 1];
+
+ if (pair->hsm_task_state == HSM_ST_IDLE)
+ return 1;
+ return 0;
+}
+
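+/*
+ * DPLL policy sketch: writes always need the DPLL-derived clock, and a
+ * 66MHz PCI bus (the PCI66 flag stashed in host->private_data) needs it
+ * for reads as well; otherwise we stay on the PCI clock and avoid the
+ * cost of a clock switch in hpt3x2n_set_clock().
+ */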
+static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
+{
+ long flags = (long)ap->host->private_data;
+ /* See if we should use the DPLL */
+ if (writing)
+ return USE_DPLL; /* Needed for write */
+ if (flags & PCI66)
+ return USE_DPLL; /* Needed at 66MHz */
+ return 0;
+}
+
+static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_taskfile *tf = &qc->tf;
+ struct ata_port *ap = qc->ap;
+ int flags = (long)ap->host->private_data;
+
+ if (hpt3x2n_pair_idle(ap)) {
+ int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
+ if ((flags & USE_DPLL) != dpll) {
+ if (dpll == 1)
+ hpt3x2n_set_clock(ap, 0x21);
+ else
+ hpt3x2n_set_clock(ap, 0x23);
+ }
+ }
+ return ata_sff_qc_issue(qc);
+}
+
+static struct scsi_host_template hpt3x2n_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ * Configuration for HPT3x2n.
+ */
+
+static struct ata_port_operations hpt3x2n_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .bmdma_stop = hpt3x2n_bmdma_stop,
+ .qc_issue = hpt3x2n_qc_issue,
+
+ .cable_detect = hpt3x2n_cable_detect,
+ .set_piomode = hpt3x2n_set_piomode,
+ .set_dmamode = hpt3x2n_set_dmamode,
+ .prereset = hpt3x2n_pre_reset,
+};
+
+/**
+ * hpt3xn_calibrate_dpll - Calibrate the DPLL loop
+ * @dev: PCI device
+ *
+ * Perform a calibration cycle on the HPT3xN DPLL. Returns 1 if this
+ * succeeds
+ */
+
+static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
+{
+ u8 reg5b;
+ u32 reg5c;
+ int tries;
+
+ for(tries = 0; tries < 0x5000; tries++) {
+ udelay(50);
+ pci_read_config_byte(dev, 0x5b, &reg5b);
+ if (reg5b & 0x80) {
+ /* See if it stays set */
+ for(tries = 0; tries < 0x1000; tries ++) {
+ pci_read_config_byte(dev, 0x5b, &reg5b);
+ /* Failed ? */
+ if ((reg5b & 0x80) == 0)
+ return 0;
+ }
+ /* Turn off tuning, we have the DPLL set */
+ pci_read_config_dword(dev, 0x5c, &reg5c);
+ pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
+ return 1;
+ }
+ }
+ /* Never went stable */
+ return 0;
+}
+
+static int hpt3x2n_pci_clock(struct pci_dev *pdev)
+{
+ unsigned long freq;
+ u32 fcnt;
+ unsigned long iobase = pci_resource_start(pdev, 4);
+
+ fcnt = inl(iobase + 0x90); /* Not PCI readable for some chips */
+ if ((fcnt >> 12) != 0xABCDE) {
+ printk(KERN_WARNING "hpt3xn: BIOS clock data not set.\n");
+ return 33; /* Not BIOS set */
+ }
+ fcnt &= 0x1FF;
+
+ freq = (fcnt * 77) / 192;
+
+ /* Clamp to bands */
+ if (freq < 40)
+ return 33;
+ if (freq < 45)
+ return 40;
+ if (freq < 55)
+ return 50;
+ return 66;
+}
+
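+/*
+ * Worked example (illustrative): fcnt = 132 gives (132 * 77) / 192 = 52,
+ * which falls in the 45..54 band and is reported as a 50MHz PCI clock.
+ */
+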
+/**
+ * hpt3x2n_init_one - Initialise an HPT3x2n series chip
+ * @dev: PCI device
+ * @id: Entry in match table
+ *
+ * Initialise an HPT3x2n device. There are some interesting complications
+ * here. Firstly the chip may report 366 and be one of several variants.
+ * Secondly all the timings depend on the clock for the chip which we must
+ * detect and look up
+ *
+ * These are the known chip mappings. The list may be missing a couple of later
+ * releases.
+ *
+ * Chip version PCI Rev Notes
+ * HPT372 4 (HPT366) 5 Other driver
+ * HPT372N 4 (HPT366) 6 UDMA133
+ * HPT372 5 (HPT372) 1 Other driver
+ * HPT372N 5 (HPT372) 2 UDMA133
+ * HPT302 6 (HPT302) * Other driver
+ * HPT302N 6 (HPT302) > 1 UDMA133
+ * HPT371 7 (HPT371) * Other driver
+ * HPT371N 7 (HPT371) > 1 UDMA133
+ * HPT374 8 (HPT374) * Other driver
+ * HPT372N 9 (HPT372N) * UDMA133
+ *
+ * TODO: the HPT371N identification still needs to be pinned down.
+ */
+
+static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ /* HPT372N and friends - UDMA133 */
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &hpt3x2n_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+
+ u8 irqmask;
+ u32 class_rev;
+
+ unsigned int pci_mhz;
+ unsigned int f_low, f_high;
+ int adjust;
+ unsigned long iobase = pci_resource_start(dev, 4);
+ void *hpriv = NULL;
+ int rc;
+
+ rc = pcim_enable_device(dev);
+ if (rc)
+ return rc;
+
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xFF;
+
+ switch(dev->device) {
+ case PCI_DEVICE_ID_TTI_HPT366:
+ if (class_rev < 6)
+ return -ENODEV;
+ break;
+ case PCI_DEVICE_ID_TTI_HPT371:
+ if (class_rev < 2)
+ return -ENODEV;
+ /* 371N if rev > 1 */
+ break;
+ case PCI_DEVICE_ID_TTI_HPT372:
+ /* 372N if rev >= 2 */
+ if (class_rev < 2)
+ return -ENODEV;
+ break;
+ case PCI_DEVICE_ID_TTI_HPT302:
+ if (class_rev < 2)
+ return -ENODEV;
+ break;
+ case PCI_DEVICE_ID_TTI_HPT372N:
+ break;
+ default:
+ printk(KERN_ERR "pata_hpt3x2n: PCI table is bogus please report (%d).\n", dev->device);
+ return -ENODEV;
+ }
+
+ /* Ok so this is a chip we support */
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
+ pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
+ pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
+
+ pci_read_config_byte(dev, 0x5A, &irqmask);
+ irqmask &= ~0x10;
+ pci_write_config_byte(dev, 0x5a, irqmask);
+
+ /*
+ * HPT371 chips physically have only one channel, the secondary one,
+ * but the primary channel registers do exist! Go figure...
+ * So, we manually disable the non-existing channel here
+ * (if the BIOS hasn't done this already).
+ */
+ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
+ u8 mcr1;
+ pci_read_config_byte(dev, 0x50, &mcr1);
+ mcr1 &= ~0x04;
+ pci_write_config_byte(dev, 0x50, mcr1);
+ }
+
+ /* Tune the PLL. HPT recommends using 75MHz for SATA, 66MHz for
+ UDMA133 and 50MHz for UDMA100. Right now we always use 66MHz */
+
+ pci_mhz = hpt3x2n_pci_clock(dev);
+
+ f_low = (pci_mhz * 48) / 66; /* PCI MHz for 66MHz DPLL */
+ f_high = f_low + 2; /* Tolerance */
+
+ pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
+ /* PLL clock */
+ pci_write_config_byte(dev, 0x5B, 0x21);
+
+ /* Unlike the 37x we don't try jiggling the frequency */
+ for(adjust = 0; adjust < 8; adjust++) {
+ if (hpt3xn_calibrate_dpll(dev))
+ break;
+ pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
+ }
+ if (adjust == 8) {
+ printk(KERN_ERR "pata_hpt3x2n: DPLL did not stabilize!\n");
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n",
+ pci_mhz);
+ /* Set our private data up. We only need a few flags so we use
+ it directly */
+ if (pci_mhz > 60) {
+ hpriv = (void *)PCI66;
+ /*
+ * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
+ * the MISC. register to stretch the UltraDMA Tss timing.
+ * NOTE: This register is only writeable via I/O space.
+ */
+ if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
+ outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
+ }
+
+ /* Now kick off ATA set up */
+ return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv);
+}
+
+static const struct pci_device_id hpt3x2n[] = {
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), },
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), },
+
+ { },
+};
+
+static struct pci_driver hpt3x2n_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = hpt3x2n,
+ .probe = hpt3x2n_init_one,
+ .remove = ata_pci_remove_one
+};
+
+static int __init hpt3x2n_init(void)
+{
+ return pci_register_driver(&hpt3x2n_pci_driver);
+}
+
+static void __exit hpt3x2n_exit(void)
+{
+ pci_unregister_driver(&hpt3x2n_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt3x2n);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(hpt3x2n_init);
+module_exit(hpt3x2n_exit);
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
new file mode 100644
index 0000000..f11a320
--- /dev/null
+++ b/drivers/ata/pata_hpt3x3.c
@@ -0,0 +1,260 @@
+/*
+ * pata_hpt3x3 - HPT3x3 driver
+ * (c) Copyright 2005-2006 Red Hat
+ *
+ * Was pata_hpt34x but the naming was confusing as it supported the
+ * 343 and 363 so it has been renamed.
+ *
+ * Based on:
+ * linux/drivers/ide/pci/hpt34x.c Version 0.40 Sept 10, 2002
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_hpt3x3"
+#define DRV_VERSION "0.5.3"
+
+/**
+ * hpt3x3_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Set our PIO requirements. This is fairly simple on the HPT3x3 as
+ * all we have to do is clear the MWDMA and UDMA bits then load the
+ * mode number.
+ */
+
+static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 r1, r2;
+ int dn = 2 * ap->port_no + adev->devno;
+
+ pci_read_config_dword(pdev, 0x44, &r1);
+ pci_read_config_dword(pdev, 0x48, &r2);
+ /* Load the PIO timing number */
+ r1 &= ~(7 << (3 * dn));
+ r1 |= (adev->pio_mode - XFER_PIO_0) << (3 * dn);
+ r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
+
+ pci_write_config_dword(pdev, 0x44, r1);
+ pci_write_config_dword(pdev, 0x48, r2);
+}
+
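+/*
+ * Worked example (illustrative): for port 1, device 0 we get dn = 2, so
+ * the 3-bit timing number sits in bits 6:8 of register 0x44, while
+ * (0x11 << 2) = 0x44 clears the MWDMA bit (bit 2) and the UDMA bit
+ * (bit 6) in register 0x48.
+ */
+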
+#if defined(CONFIG_PATA_HPT3X3_DMA)
+/**
+ * hpt3x3_set_dmamode - DMA timing setup
+ * @ap: ATA interface
+ * @adev: Device being configured
+ *
+ * Set up the channel for MWDMA or UDMA modes. Much the same as with
+ * PIO, load the mode number and then set MWDMA or UDMA flag.
+ *
+ * 0x44 : bit 0-2 master mode, 3-5 slave mode, etc
+ * 0x48 : bit 4/0 DMA/UDMA bit 5/1 for slave etc
+ */
+
+static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 r1, r2;
+ int dn = 2 * ap->port_no + adev->devno;
+ int mode_num = adev->dma_mode & 0x0F;
+
+ pci_read_config_dword(pdev, 0x44, &r1);
+ pci_read_config_dword(pdev, 0x48, &r2);
+ /* Load the timing number */
+ r1 &= ~(7 << (3 * dn));
+ r1 |= (mode_num << (3 * dn));
+ r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
+
+ if (adev->dma_mode >= XFER_UDMA_0)
+ r2 |= (0x10 << dn); /* Ultra mode */
+ else
+ r2 |= (0x01 << dn); /* MWDMA */
+
+ pci_write_config_dword(pdev, 0x44, r1);
+ pci_write_config_dword(pdev, 0x48, r2);
+}
+#endif /* CONFIG_PATA_HPT3X3_DMA */
+
+/**
+ * hpt3x3_atapi_dma - ATAPI DMA check
+ * @qc: Queued command
+ *
+ * Just say no - we don't do ATAPI DMA
+ */
+
+static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc)
+{
+ return 1; /* Non-zero: ATAPI DMA is not permitted */
+}
+
+static struct scsi_host_template hpt3x3_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations hpt3x3_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .check_atapi_dma = hpt3x3_atapi_dma,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = hpt3x3_set_piomode,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
+ .set_dmamode = hpt3x3_set_dmamode,
+#endif
+};
+
+/**
+ * hpt3x3_init_chipset - chip setup
+ * @dev: PCI device
+ *
+ * Perform the setup required at boot and on resume.
+ */
+
+static void hpt3x3_init_chipset(struct pci_dev *dev)
+{
+ u16 cmd;
+ /* Initialize the board */
+ pci_write_config_word(dev, 0x80, 0x00);
+ /* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_MEMORY)
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
+ else
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
+}
+
+/**
+ * hpt3x3_init_one - Initialise an HPT343/363
+ * @pdev: PCI device
+ * @id: Entry in match table
+ *
+ * Perform basic initialisation. We set the device up so we access all
+ * ports via BAR4. This is necessary to work around errata.
+ */
+
+static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static int printed_version;
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
+ /* Further debug needed */
+ .mwdma_mask = 0x07,
+ .udma_mask = 0x07,
+#endif
+ .port_ops = &hpt3x3_port_ops
+ };
+ /* Register offsets of taskfiles in BAR4 area */
+ static const u8 offset_cmd[2] = { 0x20, 0x28 };
+ static const u8 offset_ctl[2] = { 0x36, 0x3E };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ struct ata_host *host;
+ int i, rc;
+ void __iomem *base;
+
+ hpt3x3_init_chipset(pdev);
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+ if (!host)
+ return -ENOMEM;
+ /* acquire resources and fill host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* Everything is relative to BAR4 if we set up this way */
+ rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ base = host->iomap[4]; /* Bus mastering base */
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ ioaddr->cmd_addr = base + offset_cmd[i];
+ ioaddr->altstatus_addr =
+ ioaddr->ctl_addr = base + offset_ctl[i];
+ ioaddr->scr_addr = NULL;
+ ata_sff_std_ports(ioaddr);
+ ioaddr->bmdma_addr = base + 8 * i;
+
+ ata_port_pbar_desc(ap, 4, -1, "ioport");
+ ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
+ }
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ IRQF_SHARED, &hpt3x3_sht);
+}
+
+#ifdef CONFIG_PM
+static int hpt3x3_reinit_one(struct pci_dev *dev)
+{
+ hpt3x3_init_chipset(dev);
+ return ata_pci_device_resume(dev);
+}
+#endif
+
+static const struct pci_device_id hpt3x3[] = {
+ { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
+
+ { },
+};
+
+static struct pci_driver hpt3x3_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = hpt3x3,
+ .probe = hpt3x3_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = hpt3x3_reinit_one,
+#endif
+};
+
+static int __init hpt3x3_init(void)
+{
+ return pci_register_driver(&hpt3x3_pci_driver);
+}
+
+
+static void __exit hpt3x3_exit(void)
+{
+ pci_unregister_driver(&hpt3x3_pci_driver);
+}
+
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, hpt3x3);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(hpt3x3_init);
+module_exit(hpt3x3_exit);
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
new file mode 100644
index 0000000..cf9e984
--- /dev/null
+++ b/drivers/ata/pata_icside.c
@@ -0,0 +1,639 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+
+#include <asm/dma.h>
+#include <asm/ecard.h>
+
+#define DRV_NAME "pata_icside"
+
+#define ICS_IDENT_OFFSET 0x2280
+
+#define ICS_ARCIN_V5_INTRSTAT 0x0000
+#define ICS_ARCIN_V5_INTROFFSET 0x0004
+
+#define ICS_ARCIN_V6_INTROFFSET_1 0x2200
+#define ICS_ARCIN_V6_INTRSTAT_1 0x2290
+#define ICS_ARCIN_V6_INTROFFSET_2 0x3200
+#define ICS_ARCIN_V6_INTRSTAT_2 0x3290
+
+struct portinfo {
+ unsigned int dataoffset;
+ unsigned int ctrloffset;
+ unsigned int stepping;
+};
+
+static const struct portinfo pata_icside_portinfo_v5 = {
+ .dataoffset = 0x2800,
+ .ctrloffset = 0x2b80,
+ .stepping = 6,
+};
+
+static const struct portinfo pata_icside_portinfo_v6_1 = {
+ .dataoffset = 0x2000,
+ .ctrloffset = 0x2380,
+ .stepping = 6,
+};
+
+static const struct portinfo pata_icside_portinfo_v6_2 = {
+ .dataoffset = 0x3000,
+ .ctrloffset = 0x3380,
+ .stepping = 6,
+};
+
+#define PATA_ICSIDE_MAX_SG 128
+
+struct pata_icside_state {
+ void __iomem *irq_port;
+ void __iomem *ioc_base;
+ unsigned int type;
+ unsigned int dma;
+ struct {
+ u8 port_sel;
+ u8 disabled;
+ unsigned int speed[ATA_MAX_DEVICES];
+ } port[2];
+ struct scatterlist sg[PATA_ICSIDE_MAX_SG];
+};
+
+struct pata_icside_info {
+ struct pata_icside_state *state;
+ struct expansion_card *ec;
+ void __iomem *base;
+ void __iomem *irqaddr;
+ unsigned int irqmask;
+ const expansioncard_ops_t *irqops;
+ unsigned int mwdma_mask;
+ unsigned int nr_ports;
+ const struct portinfo *port[2];
+ unsigned long raw_base;
+ unsigned long raw_ioc_base;
+};
+
+#define ICS_TYPE_A3IN 0
+#define ICS_TYPE_A3USER 1
+#define ICS_TYPE_V6 3
+#define ICS_TYPE_V5 15
+#define ICS_TYPE_NOTYPE ((unsigned int)-1)
+
+/* ---------------- Version 5 PCB Support Functions --------------------- */
+/* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
+ * Purpose : enable interrupts from card
+ */
+static void pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
+{
+ struct pata_icside_state *state = ec->irq_data;
+
+ writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
+}
+
+/* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
+ * Purpose : disable interrupts from card
+ */
+static void pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
+{
+ struct pata_icside_state *state = ec->irq_data;
+
+ readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
+}
+
+static const expansioncard_ops_t pata_icside_ops_arcin_v5 = {
+ .irqenable = pata_icside_irqenable_arcin_v5,
+ .irqdisable = pata_icside_irqdisable_arcin_v5,
+};
+
+
+/* ---------------- Version 6 PCB Support Functions --------------------- */
+/* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
+ * Purpose : enable interrupts from card
+ */
+static void pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
+{
+ struct pata_icside_state *state = ec->irq_data;
+ void __iomem *base = state->irq_port;
+
+ if (!state->port[0].disabled)
+ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
+ if (!state->port[1].disabled)
+ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
+}
+
+/* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
+ * Purpose : disable interrupts from card
+ */
+static void pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
+{
+ struct pata_icside_state *state = ec->irq_data;
+
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
+}
+
+/* Prototype: pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
+ * Purpose : detect an active interrupt from card
+ */
+static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec)
+{
+ struct pata_icside_state *state = ec->irq_data;
+
+ return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
+ readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
+}
+
+static const expansioncard_ops_t pata_icside_ops_arcin_v6 = {
+ .irqenable = pata_icside_irqenable_arcin_v6,
+ .irqdisable = pata_icside_irqdisable_arcin_v6,
+ .irqpending = pata_icside_irqpending_arcin_v6,
+};
+
+
+/*
+ * SG-DMA support.
+ *
+ * Similar to BM-DMA, but we use the RiscPC's IOMD DMA controllers.
+ * There is only one DMA controller per card, which means that only
+ * one drive can be accessed at one time. NOTE! We do not enforce that
+ * here, but we rely on the main IDE driver spotting that both
+ * interfaces use the same IRQ, which should guarantee this.
+ */
+
+/*
+ * Configure the IOMD to give the appropriate timings for the transfer
+ * mode being requested. We take the advice of the ATA standards, and
+ * calculate the cycle time based on the transfer mode, and the EIDE
+ * MW DMA specs that the drive provides in the IDENTIFY command.
+ *
+ * We have the following IOMD DMA modes to choose from:
+ *
+ * Type Active Recovery Cycle
+ * A 250 (250) 312 (550) 562 (800)
+ * B 187 (200) 250 (550) 437 (750)
+ * C 125 (125) 125 (375) 250 (500)
+ * D 62 (50) 125 (375) 187 (425)
+ *
+ * (figures in brackets are actual measured timings on DIOR/DIOW)
+ *
+ * However, we also need to take care of the read/write active and
+ * recovery timings:
+ *
+ * Read Write
+ * Mode Active -- Recovery -- Cycle IOMD type
+ * MW0 215 50 215 480 A
+ * MW1 80 50 50 150 C
+ * MW2 70 25 25 120 C
+ */
+static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pata_icside_state *state = ap->host->private_data;
+ struct ata_timing t;
+ unsigned int cycle;
+ char iomd_type;
+
+ /*
+ * DMA is based on a 16MHz clock
+ */
+ if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1))
+ return;
+
+ /*
+ * Choose the IOMD cycle timing which ensures that the interface
+ * satisfies the measured active, recovery and cycle times.
+ */
+ if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425)
+ iomd_type = 'D', cycle = 187;
+ else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500)
+ iomd_type = 'C', cycle = 250;
+ else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750)
+ iomd_type = 'B', cycle = 437;
+ else
+ iomd_type = 'A', cycle = 562;
+
+ ata_dev_printk(adev, KERN_INFO, "timings: act %dns rec %dns cyc %dns (%c)\n",
+ t.active, t.recover, t.cycle, iomd_type);
+
+ state->port[ap->port_no].speed[adev->devno] = cycle;
+}
+
+static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pata_icside_state *state = ap->host->private_data;
+ struct scatterlist *sg, *rsg = state->sg;
+ unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
+ unsigned int si;
+
+ /*
+ * We are simplex; BUG if we try to fiddle with DMA
+ * while it's active.
+ */
+ BUG_ON(dma_channel_active(state->dma));
+
+ /*
+ * Copy ATA's scattered sg list into a contiguous array of sg entries
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ memcpy(rsg, sg, sizeof(*sg));
+ rsg++;
+ }
+
+ /*
+ * Route the DMA signals to the correct interface
+ */
+ writeb(state->port[ap->port_no].port_sel, state->ioc_base);
+
+ set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
+ set_dma_sg(state->dma, state->sg, rsg - state->sg);
+ set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);
+
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void pata_icside_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pata_icside_state *state = ap->host->private_data;
+
+ BUG_ON(dma_channel_active(state->dma));
+ enable_dma(state->dma);
+}
+
+static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pata_icside_state *state = ap->host->private_data;
+
+ disable_dma(state->dma);
+
+ /* see ata_bmdma_stop */
+ ata_sff_dma_pause(ap);
+}
+
+static u8 pata_icside_bmdma_status(struct ata_port *ap)
+{
+ struct pata_icside_state *state = ap->host->private_data;
+ void __iomem *irq_port;
+
+ irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 :
+ ICS_ARCIN_V6_INTRSTAT_1);
+
+ return readb(irq_port) & 1 ? ATA_DMA_INTR : 0;
+}
+
+static int icside_dma_init(struct pata_icside_info *info)
+{
+ struct pata_icside_state *state = info->state;
+ struct expansion_card *ec = info->ec;
+ int i;
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ state->port[0].speed[i] = 480;
+ state->port[1].speed[i] = 480;
+ }
+
+ if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
+ state->dma = ec->dma;
+ info->mwdma_mask = 0x07; /* MW0..2 */
+ }
+
+ return 0;
+}
+
+
+static struct scsi_host_template pata_icside_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = PATA_ICSIDE_MAX_SG,
+ .dma_boundary = ~0, /* no dma boundaries */
+};
+
+static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
+{
+ struct ata_port *ap = link->ap;
+ struct pata_icside_state *state = ap->host->private_data;
+
+ if (classes[0] != ATA_DEV_NONE || classes[1] != ATA_DEV_NONE)
+ return ata_sff_postreset(link, classes);
+
+ state->port[ap->port_no].disabled = 1;
+
+ if (state->type == ICS_TYPE_V6) {
+ /*
+ * Disable interrupts from this port, otherwise we
+ * receive spurious interrupts from the floating
+ * interrupt line.
+ */
+ void __iomem *irq_port = state->irq_port +
+ (ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 : ICS_ARCIN_V6_INTROFFSET_1);
+ readb(irq_port);
+ }
+}
+
+static struct ata_port_operations pata_icside_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ /* no need to build any PRD tables for DMA */
+ .qc_prep = ata_noop_qc_prep,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .bmdma_setup = pata_icside_bmdma_setup,
+ .bmdma_start = pata_icside_bmdma_start,
+ .bmdma_stop = pata_icside_bmdma_stop,
+ .bmdma_status = pata_icside_bmdma_status,
+
+ .cable_detect = ata_cable_40wire,
+ .set_dmamode = pata_icside_set_dmamode,
+ .postreset = pata_icside_postreset,
+ .post_internal_cmd = pata_icside_bmdma_stop,
+};
+
+static void __devinit
+pata_icside_setup_ioaddr(struct ata_port *ap, void __iomem *base,
+ struct pata_icside_info *info,
+ const struct portinfo *port)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ void __iomem *cmd = base + port->dataoffset;
+
+ ioaddr->cmd_addr = cmd;
+ ioaddr->data_addr = cmd + (ATA_REG_DATA << port->stepping);
+ ioaddr->error_addr = cmd + (ATA_REG_ERR << port->stepping);
+ ioaddr->feature_addr = cmd + (ATA_REG_FEATURE << port->stepping);
+ ioaddr->nsect_addr = cmd + (ATA_REG_NSECT << port->stepping);
+ ioaddr->lbal_addr = cmd + (ATA_REG_LBAL << port->stepping);
+ ioaddr->lbam_addr = cmd + (ATA_REG_LBAM << port->stepping);
+ ioaddr->lbah_addr = cmd + (ATA_REG_LBAH << port->stepping);
+ ioaddr->device_addr = cmd + (ATA_REG_DEVICE << port->stepping);
+ ioaddr->status_addr = cmd + (ATA_REG_STATUS << port->stepping);
+ ioaddr->command_addr = cmd + (ATA_REG_CMD << port->stepping);
+
+ ioaddr->ctl_addr = base + port->ctrloffset;
+ ioaddr->altstatus_addr = ioaddr->ctl_addr;
+
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+ info->raw_base + port->dataoffset,
+ info->raw_base + port->ctrloffset);
+
+ if (info->raw_ioc_base)
+ ata_port_desc(ap, "iocbase 0x%lx", info->raw_ioc_base);
+}
+
+static int __devinit pata_icside_register_v5(struct pata_icside_info *info)
+{
+ struct pata_icside_state *state = info->state;
+ void __iomem *base;
+
+ base = ecardm_iomap(info->ec, ECARD_RES_MEMC, 0, 0);
+ if (!base)
+ return -ENOMEM;
+
+ state->irq_port = base;
+
+ info->base = base;
+ info->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
+ info->irqmask = 1;
+ info->irqops = &pata_icside_ops_arcin_v5;
+ info->nr_ports = 1;
+ info->port[0] = &pata_icside_portinfo_v5;
+
+ info->raw_base = ecard_resource_start(info->ec, ECARD_RES_MEMC);
+
+ return 0;
+}
+
+static int __devinit pata_icside_register_v6(struct pata_icside_info *info)
+{
+ struct pata_icside_state *state = info->state;
+ struct expansion_card *ec = info->ec;
+ void __iomem *ioc_base, *easi_base;
+ unsigned int sel = 0;
+
+ ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+ if (!ioc_base)
+ return -ENOMEM;
+
+ easi_base = ioc_base;
+
+ if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
+ easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
+ if (!easi_base)
+ return -ENOMEM;
+
+ /*
+ * Enable access to the EASI region.
+ */
+ sel = 1 << 5;
+ }
+
+ writeb(sel, ioc_base);
+
+ state->irq_port = easi_base;
+ state->ioc_base = ioc_base;
+ state->port[0].port_sel = sel;
+ state->port[1].port_sel = sel | 1;
+
+ info->base = easi_base;
+ info->irqops = &pata_icside_ops_arcin_v6;
+ info->nr_ports = 2;
+ info->port[0] = &pata_icside_portinfo_v6_1;
+ info->port[1] = &pata_icside_portinfo_v6_2;
+
+ info->raw_base = ecard_resource_start(ec, ECARD_RES_EASI);
+ info->raw_ioc_base = ecard_resource_start(ec, ECARD_RES_IOCFAST);
+
+ return icside_dma_init(info);
+}
+
+static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
+{
+ struct expansion_card *ec = info->ec;
+ struct ata_host *host;
+ int i;
+
+ if (info->irqaddr) {
+ ec->irqaddr = info->irqaddr;
+ ec->irqmask = info->irqmask;
+ }
+ if (info->irqops)
+ ecard_setirq(ec, info->irqops, info->state);
+
+ /*
+ * Be on the safe side - disable interrupts
+ */
+ ec->ops->irqdisable(ec, ec->irq);
+
+ host = ata_host_alloc(&ec->dev, info->nr_ports);
+ if (!host)
+ return -ENOMEM;
+
+ host->private_data = info->state;
+ host->flags = ATA_HOST_SIMPLEX;
+
+ for (i = 0; i < info->nr_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ap->pio_mask = 0x1f;
+ ap->mwdma_mask = info->mwdma_mask;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ ap->ops = &pata_icside_port_ops;
+
+ pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
+ }
+
+ return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0,
+ &pata_icside_sht);
+}
+
+static int __devinit
+pata_icside_probe(struct expansion_card *ec, const struct ecard_id *id)
+{
+ struct pata_icside_state *state;
+ struct pata_icside_info info;
+ void __iomem *idmem;
+ int ret;
+
+ ret = ecard_request_resources(ec);
+ if (ret)
+ goto out;
+
+ state = devm_kzalloc(&ec->dev, sizeof(*state), GFP_KERNEL);
+ if (!state) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ state->type = ICS_TYPE_NOTYPE;
+ state->dma = NO_DMA;
+
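+ /*
+ * The card identity is presented one bit per location: bit 0 of four
+ * bytes spaced 4 apart is gathered below into a 4-bit type code that
+ * selects the register layout.
+ */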
+ idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+ if (idmem) {
+ unsigned int type;
+
+ type = readb(idmem + ICS_IDENT_OFFSET) & 1;
+ type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
+ type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
+ type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
+ ecardm_iounmap(ec, idmem);
+
+ state->type = type;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.state = state;
+ info.ec = ec;
+
+ switch (state->type) {
+ case ICS_TYPE_A3IN:
+ dev_warn(&ec->dev, "A3IN unsupported\n");
+ ret = -ENODEV;
+ break;
+
+ case ICS_TYPE_A3USER:
+ dev_warn(&ec->dev, "A3USER unsupported\n");
+ ret = -ENODEV;
+ break;
+
+ case ICS_TYPE_V5:
+ ret = pata_icside_register_v5(&info);
+ break;
+
+ case ICS_TYPE_V6:
+ ret = pata_icside_register_v6(&info);
+ break;
+
+ default:
+ dev_warn(&ec->dev, "unknown interface type\n");
+ ret = -ENODEV;
+ break;
+ }
+
+ if (ret == 0)
+ ret = pata_icside_add_ports(&info);
+
+ if (ret == 0)
+ goto out;
+
+ release:
+ ecard_release_resources(ec);
+ out:
+ return ret;
+}
+
+static void pata_icside_shutdown(struct expansion_card *ec)
+{
+ struct ata_host *host = ecard_get_drvdata(ec);
+ unsigned long flags;
+
+ /*
+ * Disable interrupts from this card. We need to do
+ * this before disabling EASI since we may be accessing
+ * this register via that region.
+ */
+ local_irq_save(flags);
+ ec->ops->irqdisable(ec, ec->irq);
+ local_irq_restore(flags);
+
+ /*
+ * Reset the ROM pointer so that we can read the ROM
+ * after a soft reboot. This also disables access to
+ * the IDE taskfile via the EASI region.
+ */
+ if (host) {
+ struct pata_icside_state *state = host->private_data;
+ if (state->ioc_base)
+ writeb(0, state->ioc_base);
+ }
+}
+
+static void __devexit pata_icside_remove(struct expansion_card *ec)
+{
+ struct ata_host *host = ecard_get_drvdata(ec);
+ struct pata_icside_state *state = host->private_data;
+
+ ata_host_detach(host);
+
+ pata_icside_shutdown(ec);
+
+ /*
+ * don't NULL out the drvdata - devres/libata wants it
+ * to free the ata_host structure.
+ */
+ if (state->dma != NO_DMA)
+ free_dma(state->dma);
+
+ ecard_release_resources(ec);
+}
+
+static const struct ecard_id pata_icside_ids[] = {
+ { MANU_ICS, PROD_ICS_IDE },
+ { MANU_ICS2, PROD_ICS2_IDE },
+ { 0xffff, 0xffff }
+};
+
+static struct ecard_driver pata_icside_driver = {
+ .probe = pata_icside_probe,
+ .remove = __devexit_p(pata_icside_remove),
+ .shutdown = pata_icside_shutdown,
+ .id_table = pata_icside_ids,
+ .drv = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init pata_icside_init(void)
+{
+ return ecard_register_driver(&pata_icside_driver);
+}
+
+static void __exit pata_icside_exit(void)
+{
+ ecard_remove_driver(&pata_icside_driver);
+}
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ICS PATA driver");
+
+module_init(pata_icside_init);
+module_exit(pata_icside_exit);
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
new file mode 100644
index 0000000..15cdb91
--- /dev/null
+++ b/drivers/ata/pata_isapnp.c
@@ -0,0 +1,139 @@
+
+/*
+ * pata-isapnp.c - ISA PnP PATA controller driver.
+ * Copyright 2005/2006 Red Hat Inc, all rights reserved.
+ *
+ * Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/isapnp.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_isapnp"
+#define DRV_VERSION "0.2.2"
+
+static struct scsi_host_template isapnp_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations isapnp_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+};
+
+/**
+ * isapnp_init_one - attach an isapnp interface
+ * @idev: PnP device
+ * @dev_id: matching detect line
+ *
+ * Register an ISA bus IDE interface. Such interfaces are PIO 0 and
+ * use a non-shared IRQ.
+ */
+
+static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ void __iomem *cmd_addr, *ctl_addr;
+ int irq = 0;
+ irq_handler_t handler = NULL;
+
+ if (pnp_port_valid(idev, 0) == 0)
+ return -ENODEV;
+
+ if (pnp_irq_valid(idev, 0)) {
+ irq = pnp_irq(idev, 0);
+ handler = ata_sff_interrupt;
+ }
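+
+ /*
+ * With no valid IRQ we leave irq as 0 and handler NULL;
+ * ata_host_activate() treats a zero IRQ as a request for polled
+ * operation rather than requesting an interrupt line.
+ */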
+
+ /* allocate host */
+ host = ata_host_alloc(&idev->dev, 1);
+ if (!host)
+ return -ENOMEM;
+
+ /* acquire resources and fill host */
+ cmd_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8);
+ if (!cmd_addr)
+ return -ENOMEM;
+
+ ap = host->ports[0];
+
+ ap->ops = &isapnp_port_ops;
+ ap->pio_mask = 1;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+ ap->ioaddr.cmd_addr = cmd_addr;
+
+ /* an alt status/control port is optional; map it only when valid */
+ if (pnp_port_valid(idev, 1)) {
+ ctl_addr = devm_ioport_map(&idev->dev,
+ pnp_port_start(idev, 1), 1);
+ ap->ioaddr.altstatus_addr = ctl_addr;
+ ap->ioaddr.ctl_addr = ctl_addr;
+ }
+
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+ (unsigned long long)pnp_port_start(idev, 0),
+ (unsigned long long)pnp_port_start(idev, 1));
+
+ /* activate */
+ return ata_host_activate(host, irq, handler, 0,
+ &isapnp_sht);
+}
+
+/**
+ * isapnp_remove_one - unplug an isapnp interface
+ * @idev: PnP device
+ *
+ * Remove a previously configured PnP ATA port. Called only on module
+ * unload events as the core does not currently deal with ISAPnP docking.
+ */
+
+static void isapnp_remove_one(struct pnp_dev *idev)
+{
+ struct device *dev = &idev->dev;
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+}
+
+static struct pnp_device_id isapnp_devices[] = {
+ /* Generic ESDI/IDE/ATA compatible hard disk controller */
+ {.id = "PNP0600", .driver_data = 0},
+ {.id = ""}
+};
+
+MODULE_DEVICE_TABLE(pnp, isapnp_devices);
+
+static struct pnp_driver isapnp_driver = {
+ .name = DRV_NAME,
+ .id_table = isapnp_devices,
+ .probe = isapnp_init_one,
+ .remove = isapnp_remove_one,
+};
+
+static int __init isapnp_init(void)
+{
+ return pnp_register_driver(&isapnp_driver);
+}
+
+static void __exit isapnp_exit(void)
+{
+ pnp_unregister_driver(&isapnp_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for ISA PnP ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(isapnp_init);
+module_exit(isapnp_exit);
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
new file mode 100644
index 0000000..c113d7c
--- /dev/null
+++ b/drivers/ata/pata_it8213.c
@@ -0,0 +1,314 @@
+/*
+ * pata_it8213.c - iTE Tech. Inc. IT8213 PATA driver
+ *
+ * The IT8213 is a very Intel-ICH-like device for timing purposes, having
+ * a similar register layout and the same split clock arrangement. Cable
+ * detection is different, and it does not have slave channels or all the
+ * clutter of later ICH/SATA setups.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_it8213"
+#define DRV_VERSION "0.0.3"
+
+/**
+ * it8213_pre_reset - check for 40/80 pin
+ * @link: link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Filter out ports by the enable bits before doing the normal reset
+ * and probe.
+ */
+
+static int it8213_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits it8213_enable_bits[] = {
+ { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
+ };
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * it8213_cable_detect - check for 40/80 pin
+ * @ap: Port
+ *
+ * Perform cable detection for the 8213 ATA interface. This is
+ * different to the PIIX arrangement.
+ */
+
+static int it8213_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 tmp;
+ pci_read_config_byte(pdev, 0x42, &tmp);
+ if (tmp & 2) /* The initial docs are incorrect */
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * it8213_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned int idetm_port = ap->port_no ? 0x42 : 0x40;
+ u16 idetm_data;
+ int control = 0;
+
+ /*
+ * See Intel Document 298600-004 for the timing programming rules
+ * for PIIX/ICH. The 8213 is a clone, so very similar.
+ */
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
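+
+ /*
+ * Illustration: PIO3 selects ISP=2, RTC=1 from the table above; for
+ * the master these land in bits 13:12 and 9:8 of the IDETIM word
+ * written below.
+ */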
+
+ if (pio > 2)
+ control |= 1; /* TIME1 enable */
+ if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
+ control |= 2; /* IORDY enable */
+ /* Bit 2 is set for ATAPI on the IT8213 - reverse of ICH/PIIX */
+ if (adev->class != ATA_DEV_ATA)
+ control |= 4;
+
+ pci_read_config_word(dev, idetm_port, &idetm_data);
+
+ /* Enable PPE, IE and TIME as appropriate */
+
+ if (adev->devno == 0) {
+ idetm_data &= 0xCCF0;
+ idetm_data |= control;
+ idetm_data |= (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ } else {
+ u8 slave_data;
+
+ idetm_data &= 0xCC0F;
+ idetm_data |= (control << 4);
+
+ /* Slave timing in separate register */
+ pci_read_config_byte(dev, 0x44, &slave_data);
+ slave_data &= 0xF0;
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << 4;
+ pci_write_config_byte(dev, 0x44, slave_data);
+ }
+
+ idetm_data |= 0x4000; /* Ensure SITRE is enabled */
+ pci_write_config_word(dev, idetm_port, idetm_data);
+}
+
+/**
+ * it8213_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ * This device is basically an ICH alike.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ u16 master_data;
+ u8 speed = adev->dma_mode;
+ int devid = adev->devno;
+ u8 udma_enable;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ pci_read_config_word(dev, 0x40, &master_data);
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+
+ if (speed >= XFER_UDMA_0) {
+ unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+ u16 udma_timing;
+ u16 ideconf;
+ int u_clock, u_speed;
+
+ /* Clocks follow the PIIX style */
+ u_speed = min(2 - (udma & 1), udma);
+ if (udma == 5)
+ u_clock = 0x1000; /* 100MHz */
+ else if (udma > 2)
+ u_clock = 1; /* 66MHz */
+ else
+ u_clock = 0; /* 33MHz */
+
+ udma_enable |= (1 << devid);
+
+ /* Load the UDMA mode number */
+ pci_read_config_word(dev, 0x4A, &udma_timing);
+ udma_timing &= ~(3 << (4 * devid));
+ udma_timing |= (udma & 3) << (4 * devid);
+ pci_write_config_word(dev, 0x4A, udma_timing);
+
+ /* Load the clock selection */
+ pci_read_config_word(dev, 0x54, &ideconf);
+ ideconf &= ~(0x1001 << devid);
+ ideconf |= u_clock << devid;
+ pci_write_config_word(dev, 0x54, ideconf);
+ } else {
+ /*
+ * MWDMA is driven by the PIO timings. We must also enable
+ * IORDY unconditionally along with TIME1. PPE has already
+ * been set when the PIO timing was set.
+ */
+ unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
+ unsigned int control;
+ u8 slave_data;
+ static const unsigned int needed_pio[3] = {
+ XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+ };
+ int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+ control = 3; /* IORDY|TIME1 */
+
+ /* If the drive MWDMA is faster than it can do PIO then
+ we must force PIO into PIO0 */
+
+ if (adev->pio_mode < needed_pio[mwdma])
+ /* Enable DMA timing only */
+ control |= 8; /* PIO cycles in PIO0 */
+
+ if (devid) { /* Slave */
+ master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
+ master_data |= control << 4;
+ pci_read_config_byte(dev, 0x44, &slave_data);
+ slave_data &= (0x0F + 0xE1 * ap->port_no);
+ /* Load the matching timing */
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+ pci_write_config_byte(dev, 0x44, slave_data);
+ } else { /* Master */
+ master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
+ and master timing bits */
+ master_data |= control;
+ master_data |=
+ (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ }
+ udma_enable &= ~(1 << devid);
+ pci_write_config_word(dev, 0x40, master_data);
+ }
+ pci_write_config_byte(dev, 0x48, udma_enable);
+}
+
+static struct scsi_host_template it8213_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+
+static struct ata_port_operations it8213_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = it8213_cable_detect,
+ .set_piomode = it8213_set_piomode,
+ .set_dmamode = it8213_set_dmamode,
+ .prereset = it8213_pre_reset,
+};
+
+
+/**
+ * it8213_init_one - Register 8213 ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in it8213_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA4, /* FIXME: want UDMA 100? */
+ .port_ops = &it8213_ops,
+ };
+ /* Current IT8213 stuff is single port */
+ const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL);
+}
+
+static const struct pci_device_id it8213_pci_tbl[] = {
+ { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver it8213_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = it8213_pci_tbl,
+ .probe = it8213_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init it8213_init(void)
+{
+ return pci_register_driver(&it8213_pci_driver);
+}
+
+static void __exit it8213_exit(void)
+{
+ pci_unregister_driver(&it8213_pci_driver);
+}
+
+module_init(it8213_init);
+module_exit(it8213_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for the ITE 8213");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
new file mode 100644
index 0000000..860ede5
--- /dev/null
+++ b/drivers/ata/pata_it821x.c
@@ -0,0 +1,985 @@
+/*
+ * pata_it821x.c - IT821x PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ * (C) 2007 Bartlomiej Zolnierkiewicz
+ *
+ * based upon
+ *
+ * it821x.c
+ *
+ * linux/drivers/ide/pci/it821x.c Version 0.09 December 2004
+ *
+ * Copyright (C) 2004 Red Hat
+ *
+ * May be copied or modified under the terms of the GNU General Public License.
+ * Based in part on the ITE vendor provided SCSI driver.
+ *
+ * Documentation available from
+ * http://www.ite.com.tw/pc/IT8212F_V04.pdf
+ * Some other documents are under NDA.
+ *
+ * The ITE8212 isn't exactly a standard IDE controller. It has two
+ * modes. In pass through mode it is an IDE controller. In its smart
+ * mode it's actually quite a capable hardware RAID controller disguised
+ * as an IDE controller. Smart mode only understands DMA read/write and
+ * identify, none of the fancier commands apply. The IT8211 is identical
+ * in other respects but lacks the raid mode.
+ *
+ * Errata:
+ * o Rev 0x10 also requires master/slave hold the same DMA timings and
+ * cannot do ATAPI MWDMA.
+ * o The identify data for raid volumes lacks CHS info (technically ok)
+ * but also fails to set the LBA28 and other bits. We fix these in
+ * the IDE probe quirk code.
+ * o If you write LBA48 sized I/Os (i.e. > 256 sectors) in smart mode
+ * RAID then the controller firmware dies
+ * o Smart mode without RAID doesn't clear all the necessary identify
+ * bits to reduce the command set to the one used
+ *
+ * This has a few impacts on the driver
+ * - In pass through mode we do all the work you would expect
+ * - In smart mode the clocking set up is done by the controller generally
+ * but we must watch the other limits and filter.
+ * - There are a few extra vendor commands that actually talk to the
+ * controller but only work PIO with no IRQ.
+ *
+ * Vendor areas of the identify block in smart mode are used for the
+ * timing and policy set up. Each HDD in raid mode also has a serial
+ * block on the disk. The hardware extra commands are get/set chip status,
+ * rebuild, get rebuild status.
+ *
+ * In Linux the driver supports pass through mode as if the device was
+ * just another IDE controller. If the smart mode is running then
+ * volumes are managed by the controller firmware and each IDE "disk"
+ * is a raid volume. Even more cute - the controller can do automated
+ * hotplug and rebuild.
+ *
+ * The pass through controller itself is a little demented. It has a
+ * flaw in that it has a single set of PIO/MWDMA timings per channel, so
+ * non-UDMA devices restrict each other's performance. It also has a
+ * single clock source per channel so mixed UDMA100/133 performance
+ * isn't perfect and we have to pick a clock. Thankfully none of this
+ * matters in smart mode. ATAPI DMA is not currently supported.
+ *
+ * It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
+ *
+ * TODO
+ * - ATAPI and other speed filtering
+ * - RAID configuration ioctls
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+
+#define DRV_NAME "pata_it821x"
+#define DRV_VERSION "0.4.0"
+
+struct it821x_dev
+{
+ unsigned int smart:1, /* Are we in smart raid mode */
+ timing10:1; /* Rev 0x10 */
+ u8 clock_mode; /* 0, ATA_50 or ATA_66 */
+ u8 want[2][2]; /* Mode/priority log for master and slave */
+ /* We need these for switching the clock when DMA goes on/off.
+ The high byte is the 66MHz timing */
+ u16 pio[2]; /* Cached PIO values */
+ u16 mwdma[2]; /* Cached MWDMA values */
+ u16 udma[2]; /* Cached UDMA values (per drive) */
+ u16 last_device; /* Master or slave loaded ? */
+};
+
+#define ATA_66 0
+#define ATA_50 1
+#define ATA_ANY 2
+
+#define UDMA_OFF 0
+#define MWDMA_OFF 0
+
+/*
+ * We allow users to force the card into non raid mode without
+ * flashing the alternative BIOS. This is also necessary right now
+ * for embedded platforms that cannot run a PC BIOS but are using this
+ * device.
+ */
+
+static int it8212_noraid;
+
+/**
+ * it821x_program - program the PIO/MWDMA registers
+ * @ap: ATA port
+ * @adev: Device to program
+ * @timing: Timing value (66MHz in the top 8 bits, 50MHz in the low 8)
+ *
+ * Program the PIO/MWDMA timing for this channel according to the
+ * current clock. These share the same register so are managed by
+ * the DMA start/stop sequence as with the old driver.
+ */
+
+static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct it821x_dev *itdev = ap->private_data;
+ int channel = ap->port_no;
+ u8 conf;
+
+ /* Program PIO/MWDMA timing bits */
+ if (itdev->clock_mode == ATA_66)
+ conf = timing >> 8;
+ else
+ conf = timing & 0xFF;
+ pci_write_config_byte(pdev, 0x54 + 4 * channel, conf);
+}
+
+
+/**
+ * it821x_program_udma - program the UDMA registers
+ * @ap: ATA port
+ * @adev: ATA device to update
+ * @timing: Timing bits. Top 8 are for 66MHz, bottom for 50MHz
+ *
+ * Program the UDMA timing for this drive according to the
+ * current clock. Handles the dual clocks and also knows about
+ * the errata on the 0x10 revision. The UDMA errata is partly handled
+ * here and partly in start_dma.
+ */
+
+static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing)
+{
+ struct it821x_dev *itdev = ap->private_data;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int channel = ap->port_no;
+ int unit = adev->devno;
+ u8 conf;
+
+ /* Program UDMA timing bits */
+ if (itdev->clock_mode == ATA_66)
+ conf = timing >> 8;
+ else
+ conf = timing & 0xFF;
+ if (itdev->timing10 == 0)
+ pci_write_config_byte(pdev, 0x56 + 4 * channel + unit, conf);
+ else {
+ /* Early revision must be programmed for both together */
+ pci_write_config_byte(pdev, 0x56 + 4 * channel, conf);
+ pci_write_config_byte(pdev, 0x56 + 4 * channel + 1, conf);
+ }
+}
+
+/**
+ * it821x_clock_strategy
+ * @ap: ATA interface
+ * @adev: ATA device being updated
+ *
+ * Select between the 50 and 66MHz base clocks to get the best
+ * results for this interface.
+ */
+
+static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct it821x_dev *itdev = ap->private_data;
+ u8 unit = adev->devno;
+ struct ata_device *pair = ata_dev_pair(adev);
+
+ int clock, altclock;
+ u8 v;
+ int sel = 0;
+
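+ /*
+ * want[unit][0] holds a priority (1 = PIO, 2 = MWDMA, 3 = UDMA, as
+ * loaded by the set_*mode methods) and want[unit][1] the clock that
+ * mode prefers; the higher priority request wins the clock below.
+ */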
+ /* Look for the most wanted clocking */
+ if (itdev->want[0][0] > itdev->want[1][0]) {
+ clock = itdev->want[0][1];
+ altclock = itdev->want[1][1];
+ } else {
+ clock = itdev->want[1][1];
+ altclock = itdev->want[0][1];
+ }
+
+ /* The master doesn't care; does the slave? */
+ if (clock == ATA_ANY)
+ clock = altclock;
+
+ /* Nobody cares - keep the same clock */
+ if (clock == ATA_ANY)
+ return;
+ /* No change */
+ if (clock == itdev->clock_mode)
+ return;
+
+ /* Load this into the controller */
+ if (clock == ATA_66)
+ itdev->clock_mode = ATA_66;
+ else {
+ itdev->clock_mode = ATA_50;
+ sel = 1;
+ }
+ pci_read_config_byte(pdev, 0x50, &v);
+ v &= ~(1 << (1 + ap->port_no));
+ v |= sel << (1 + ap->port_no);
+ pci_write_config_byte(pdev, 0x50, v);
+
+ /*
+ * Reprogram the UDMA/PIO of the pair drive for the switch.
+ * MWDMA will be dealt with by the dma switcher
+ */
+ if (pair && itdev->udma[1-unit] != UDMA_OFF) {
+ it821x_program_udma(ap, pair, itdev->udma[1-unit]);
+ it821x_program(ap, pair, itdev->pio[1-unit]);
+ }
+ /*
+ * Reprogram the UDMA/PIO of our drive for the switch.
+ * MWDMA will be dealt with by the dma switcher
+ */
+ if (itdev->udma[unit] != UDMA_OFF) {
+ it821x_program_udma(ap, adev, itdev->udma[unit]);
+ it821x_program(ap, adev, itdev->pio[unit]);
+ }
+}
+
+/**
+ * it821x_passthru_set_piomode - set PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Configure for PIO mode. This is complicated as the register is
+ * shared by PIO and MWDMA and for both channels.
+ */
+
+static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ /* Spec says 89; the reference driver uses 88 */
+ static const u16 pio[] = { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
+ static const u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
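+
+ /*
+ * Each pio[] entry packs both clock encodings: the high byte is used
+ * on the 66MHz clock, the low byte on the 50MHz clock (see
+ * it821x_program). E.g. PIO0 = 0xAA88: 0xAA at 66MHz, 0x88 at 50MHz.
+ */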
+
+ struct it821x_dev *itdev = ap->private_data;
+ int unit = adev->devno;
+ int mode_wanted = adev->pio_mode - XFER_PIO_0;
+
+ /* We prefer the 66MHz clock for PIO 0-3, don't care for PIO4 */
+ itdev->want[unit][1] = pio_want[mode_wanted];
+ itdev->want[unit][0] = 1; /* PIO is lowest priority */
+ itdev->pio[unit] = pio[mode_wanted];
+ it821x_clock_strategy(ap, adev);
+ it821x_program(ap, adev, itdev->pio[unit]);
+}
+
+/**
+ * it821x_passthru_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Set up the DMA modes. The actions taken depend heavily on the mode
+ * to use. If UDMA is used, as is hopefully the usual case, then the
+ * timing register is private and we need only consider the clock. If
+ * we are using MWDMA then we have to manage the setting ourselves as
+ * we switch devices and modes.
+ */
+
+static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u16 dma[] = { 0x8866, 0x3222, 0x3121 };
+ static const u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY };
+ static const u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
+ static const u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct it821x_dev *itdev = ap->private_data;
+ int channel = ap->port_no;
+ int unit = adev->devno;
+ u8 conf;
+
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ int mode_wanted = adev->dma_mode - XFER_UDMA_0;
+
+ itdev->want[unit][1] = udma_want[mode_wanted];
+ itdev->want[unit][0] = 3; /* UDMA is high priority */
+ itdev->mwdma[unit] = MWDMA_OFF;
+ itdev->udma[unit] = udma[mode_wanted];
+ if (mode_wanted >= 5)
+ itdev->udma[unit] |= 0x8080; /* UDMA 5/6 select on */
+
+ /* UDMA on. Again revision 0x10 must do the pair */
+ pci_read_config_byte(pdev, 0x50, &conf);
+ if (itdev->timing10)
+ conf &= channel ? 0x9F: 0xE7;
+ else
+ conf &= ~ (1 << (3 + 2 * channel + unit));
+ pci_write_config_byte(pdev, 0x50, conf);
+ it821x_clock_strategy(ap, adev);
+ it821x_program_udma(ap, adev, itdev->udma[unit]);
+ } else {
+ int mode_wanted = adev->dma_mode - XFER_MW_DMA_0;
+
+ itdev->want[unit][1] = mwdma_want[mode_wanted];
+ itdev->want[unit][0] = 2; /* MWDMA is low priority */
+ itdev->mwdma[unit] = dma[mode_wanted];
+ itdev->udma[unit] = UDMA_OFF;
+
+ /* UDMA bits off - Revision 0x10 do them in pairs */
+ pci_read_config_byte(pdev, 0x50, &conf);
+ if (itdev->timing10)
+ conf |= channel ? 0x60: 0x18;
+ else
+ conf |= 1 << (3 + 2 * channel + unit);
+ pci_write_config_byte(pdev, 0x50, conf);
+ it821x_clock_strategy(ap, adev);
+ }
+}
+
+/**
+ * it821x_passthru_dma_start - DMA start callback
+ * @qc: Command in progress
+ *
+ * Usually drivers set the DMA timing at the point the set_dmamode call
+ * is made. IT821x however requires we load new timings on the
+ * transitions in some cases.
+ */
+
+static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct it821x_dev *itdev = ap->private_data;
+ int unit = adev->devno;
+
+ if (itdev->mwdma[unit] != MWDMA_OFF)
+ it821x_program(ap, adev, itdev->mwdma[unit]);
+ else if (itdev->udma[unit] != UDMA_OFF && itdev->timing10)
+ it821x_program_udma(ap, adev, itdev->udma[unit]);
+ ata_bmdma_start(qc);
+}
+
+/**
+ * it821x_passthru_dma_stop - DMA stop callback
+ * @qc: ATA command
+ *
+ * We loaded new timings in dma_start, as a result we need to restore
+ * the PIO timings in dma_stop so that the next command issue gets the
+ * right clock values.
+ */
+
+static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct it821x_dev *itdev = ap->private_data;
+ int unit = adev->devno;
+
+ ata_bmdma_stop(qc);
+ if (itdev->mwdma[unit] != MWDMA_OFF)
+ it821x_program(ap, adev, itdev->pio[unit]);
+}
+
+
+/**
+ * it821x_passthru_dev_select - Select master/slave
+ * @ap: ATA port
+ * @device: Device number (not pointer)
+ *
+ * Device selection hook. If necessary perform clock switching
+ */
+
+static void it821x_passthru_dev_select(struct ata_port *ap,
+ unsigned int device)
+{
+ struct it821x_dev *itdev = ap->private_data;
+ if (itdev && device != itdev->last_device) {
+ struct ata_device *adev = &ap->link.device[device];
+ it821x_program(ap, adev, itdev->pio[adev->devno]);
+ itdev->last_device = device;
+ }
+ ata_sff_dev_select(ap, device);
+}
+
+/**
+ * it821x_smart_qc_issue - wrap qc issue prot
+ * @qc: command
+ *
+ * Wrap the command issue sequence for the IT821x. We need to
+ * perform our own device selection timing loads before the
+ * usual happenings kick off
+ */
+
+static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
+{
+ switch(qc->tf.command)
+ {
+ /* Commands the firmware supports */
+ case ATA_CMD_READ:
+ case ATA_CMD_READ_EXT:
+ case ATA_CMD_WRITE:
+ case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_PIO_READ:
+ case ATA_CMD_PIO_READ_EXT:
+ case ATA_CMD_PIO_WRITE:
+ case ATA_CMD_PIO_WRITE_EXT:
+ case ATA_CMD_READ_MULTI:
+ case ATA_CMD_READ_MULTI_EXT:
+ case ATA_CMD_WRITE_MULTI:
+ case ATA_CMD_WRITE_MULTI_EXT:
+ case ATA_CMD_ID_ATA:
+ case ATA_CMD_INIT_DEV_PARAMS:
+ case 0xFC: /* Internal 'report rebuild state' */
+ /* Arguably should just no-op this one */
+ case ATA_CMD_SET_FEATURES:
+ return ata_sff_qc_issue(qc);
+ }
+ printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
+ return AC_ERR_DEV;
+}
+
+/**
+ * it821x_passthru_qc_issue - wrap qc issue prot
+ * @qc: command
+ *
+ * Wrap the command issue sequence for the IT821x. We need to
+ * perform our own device selection timing loads before the
+ * usual happenings kick off
+ */
+
+static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc)
+{
+ it821x_passthru_dev_select(qc->ap, qc->dev->devno);
+ return ata_sff_qc_issue(qc);
+}
+
+/**
+ * it821x_smart_set_mode - mode setting
+ * @link: interface to set up
+ * @unused: device that failed (error only)
+ *
+ * Use a non standard set_mode function. We don't want to be tuned.
+ * The BIOS configured everything. Our job is not to fiddle. We
+ * read the dma enabled bits from the PCI configuration of the device
+ * and respect them.
+ */
+
+static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+ struct ata_device *dev;
+
+ ata_link_for_each_dev(dev, link) {
+ if (ata_dev_enabled(dev)) {
+ /* We don't really care */
+ dev->pio_mode = XFER_PIO_0;
+ dev->dma_mode = XFER_MW_DMA_0;
+ /* We do need the right mode information for DMA or PIO
+ and this comes from the current configuration flags */
+ if (ata_id_has_dma(dev->id)) {
+ ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
+ dev->xfer_mode = XFER_MW_DMA_0;
+ dev->xfer_shift = ATA_SHIFT_MWDMA;
+ dev->flags &= ~ATA_DFLAG_PIO;
+ } else {
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ dev->xfer_mode = XFER_PIO_0;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ dev->flags |= ATA_DFLAG_PIO;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * it821x_dev_config - Called each device identify
+ * @adev: Device that has just been identified
+ *
+ * Perform the initial setup needed for each device that is chip
+ * special. In our case we need to lock the sector count to avoid
+ * blowing the brains out of the firmware with large LBA48 requests
+ *
+ * FIXME: When FUA appears we need to block FUA too. And SMART and
+ * basically we need to filter commands for this chip.
+ */
+
+static void it821x_dev_config(struct ata_device *adev)
+{
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+ ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+ if (adev->max_sectors > 255)
+ adev->max_sectors = 255;
+
+ if (strstr(model_num, "Integrated Technology Express")) {
+ /* RAID mode */
+ ata_dev_printk(adev, KERN_INFO, "%sRAID%d volume",
+ adev->id[147]?"Bootable ":"",
+ adev->id[129]);
+ if (adev->id[129] != 1)
+ printk("(%dK stripe)", adev->id[146]);
+ printk(".\n");
+ }
+ /* This is a controller firmware triggered funny, don't
+ report the drive faulty! */
+ adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC;
+ /* No HPA in 'smart' mode */
+ adev->horkage |= ATA_HORKAGE_BROKEN_HPA;
+}
+
+/**
+ * it821x_read_id - Hack identify data up
+ * @adev: device to read
+ * @tf: proposed taskfile
+ * @id: buffer for returned ident data
+ *
+ * Query the devices on this firmware driven port and slightly
+ * mash the identify data to stop us and common tools trying to
+ * use features not firmware supported. The firmware itself does
+ * some masking (eg SMART) but not enough.
+ */
+
+static unsigned int it821x_read_id(struct ata_device *adev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ unsigned int err_mask;
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+ err_mask = ata_do_dev_read_id(adev, tf, id);
+ if (err_mask)
+ return err_mask;
+ ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+ id[83] &= ~(1 << 12); /* Cache flush is firmware handled */
+ id[83] &= ~(1 << 13); /* Ditto for LBA48 flushes */
+ id[84] &= ~(1 << 6); /* No FUA */
+ id[85] &= ~(1 << 10); /* No HPA */
+ id[76] = 0; /* No NCQ/AN etc */
+
+ if (strstr(model_num, "Integrated Technology Express")) {
+ /* Set feature bits the firmware neglects */
+ id[49] |= 0x0300; /* LBA, DMA */
+ id[83] &= 0x7FFF;
+ id[83] |= 0x4400; /* Word 83 is valid and LBA48 */
+ id[86] |= 0x0400; /* LBA48 on */
+ id[ATA_ID_MAJOR_VER] |= 0x1F;
+ }
+ return err_mask;
+}
+
+/**
+ * it821x_check_atapi_dma - ATAPI DMA handler
+ * @qc: Command we are about to issue
+ *
+ * Decide if this ATAPI command can be issued by DMA on this
+ * controller. Return 0 if it can be.
+ */
+
+static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct it821x_dev *itdev = ap->private_data;
+
+ /* Only use dma for transfers to/from the media. */
+ if (ata_qc_raw_nbytes(qc) < 2048)
+ return -EOPNOTSUPP;
+
+ /* No ATAPI DMA in smart mode */
+ if (itdev->smart)
+ return -EOPNOTSUPP;
+ /* No ATAPI DMA on rev 10 */
+ if (itdev->timing10)
+ return -EOPNOTSUPP;
+ /* Cool */
+ return 0;
+}
+
+/**
+ * it821x_display_disk - display disk setup
+ * @n: Device number
+ * @buf: Buffer block from firmware
+ *
+ * Produce a nice informative display of the device setup as provided
+ * by the firmware.
+ */
+
+static void it821x_display_disk(int n, u8 *buf)
+{
+ unsigned char id[41];
+ int mode = 0;
+ char *mtype = "";
+ char mbuf[8];
+ char *cbl = "(40 wire cable)";
+
+ static const char *types[5] = {
+ "RAID0", "RAID1" "RAID 0+1", "JBOD", "DISK"
+ };
+
+ if (buf[52] > 4) /* No Disk */
+ return;
+
+ ata_id_c_string((u16 *)buf, id, 0, 41);
+
+ if (buf[51]) {
+ mode = ffs(buf[51]);
+ mtype = "UDMA";
+ } else if (buf[49]) {
+ mode = ffs(buf[49]);
+ mtype = "MWDMA";
+ }
+
+ if (buf[76])
+ cbl = "";
+
+ if (mode)
+ snprintf(mbuf, 8, "%5s%d", mtype, mode - 1);
+ else
+ strcpy(mbuf, "PIO");
+ if (buf[52] == 4)
+ printk(KERN_INFO "%d: %-6s %-8s %s %s\n",
+ n, mbuf, types[buf[52]], id, cbl);
+ else
+ printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n",
+ n, mbuf, types[buf[52]], buf[53], id, cbl);
+ if (buf[125] < 100)
+ printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]);
+}
+
+/**
+ * it821x_firmware_command - issue firmware command
+ * @ap: IT821x port to interrogate
+ * @cmd: command
+ * @len: length
+ *
+ * Issue firmware commands expecting data back from the controller. We
+ * use this to issue commands that do not go via the normal paths. Other
+ * commands such as 0xFC can be issued normally.
+ */
+
+static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
+{
+ u8 status;
+ int n = 0;
+ u16 *buf = kmalloc(len, GFP_KERNEL);
+ if (buf == NULL) {
+ printk(KERN_ERR "it821x_firmware_command: Out of memory\n");
+ return NULL;
+ }
+ /* This isn't quite a normal ATA command as we are talking to the
+ firmware not the drives */
+ ap->ctl |= ATA_NIEN;
+ iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+ ata_wait_idle(ap);
+ iowrite8(ATA_DEVICE_OBS, ap->ioaddr.device_addr);
+ iowrite8(cmd, ap->ioaddr.command_addr);
+ udelay(1);
+ /* This should be almost immediate but a little paranoia goes a long
+ way. */
+ while(n++ < 10) {
+ status = ioread8(ap->ioaddr.status_addr);
+ if (status & ATA_ERR) {
+ kfree(buf);
+ printk(KERN_ERR "it821x_firmware_command: rejected\n");
+ return NULL;
+ }
+ if (status & ATA_DRQ) {
+ ioread16_rep(ap->ioaddr.data_addr, buf, len/2);
+ return (u8 *)buf;
+ }
+ mdelay(1);
+ }
+ kfree(buf);
+ printk(KERN_ERR "it821x_firmware_command: timeout\n");
+ return NULL;
+}
+
+/**
+ * it821x_probe_firmware - firmware reporting/setup
+ * @ap: IT821x port being probed
+ *
+ * Probe the firmware of the controller by issuing firmware command
+ * 0xFA and analysing the returned data.
+ */
+
+static void it821x_probe_firmware(struct ata_port *ap)
+{
+ u8 *buf;
+ int i;
+
+ /* This is a bit ugly as we can't just issue a task file to a device
+ as this is controller magic */
+
+ buf = it821x_firmware_command(ap, 0xFA, 512);
+
+ if (buf != NULL) {
+ printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n",
+ buf[505],
+ buf[506],
+ buf[507],
+ buf[508]);
+ for (i = 0; i < 4; i++)
+ it821x_display_disk(i, buf + 128 * i);
+ kfree(buf);
+ }
+}
+
+
+
+/**
+ * it821x_port_start - port setup
+ * @ap: ATA port being set up
+ *
+ * The it821x needs to maintain private data structures and also to
+ * use the standard PCI interface which lacks support for this
+ * functionality. We instead set up the private data on the port
+ * start hook, and tear it down on port stop
+ */
+
+static int it821x_port_start(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct it821x_dev *itdev;
+ u8 conf;
+
+ int ret = ata_sff_port_start(ap);
+ if (ret < 0)
+ return ret;
+
+ itdev = devm_kzalloc(&pdev->dev, sizeof(struct it821x_dev), GFP_KERNEL);
+ if (itdev == NULL)
+ return -ENOMEM;
+ ap->private_data = itdev;
+
+ pci_read_config_byte(pdev, 0x50, &conf);
+
+ if (conf & 1) {
+ itdev->smart = 1;
+ /* Long I/Os, although allowed in LBA48 space, cause the
+ onboard firmware to enter the twilight zone */
+ /* No ATAPI DMA in this mode either */
+ if (ap->port_no == 0)
+ it821x_probe_firmware(ap);
+ }
+ /* Pull the current clocks from 0x50 */
+ if (conf & (1 << (1 + ap->port_no)))
+ itdev->clock_mode = ATA_50;
+ else
+ itdev->clock_mode = ATA_66;
+
+ itdev->want[0][1] = ATA_ANY;
+ itdev->want[1][1] = ATA_ANY;
+ itdev->last_device = -1;
+
+ if (pdev->revision == 0x10) {
+ itdev->timing10 = 1;
+ /* Need to disable ATAPI DMA for this case */
+ if (!itdev->smart)
+ printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n");
+ }
+
+ return 0;
+}
+
+/**
+ * it821x_rdc_cable - Cable detect for RDC1010
+ * @ap: port we are checking
+ *
+ * Return the RDC1010 cable type. Unlike the IT821x we know how to do
+ * this and can do host side cable detect
+ */
+
+static int it821x_rdc_cable(struct ata_port *ap)
+{
+ u16 r40;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ pci_read_config_word(pdev, 0x40, &r40);
+ if (r40 & (1 << (2 + ap->port_no)))
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+static struct scsi_host_template it821x_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations it821x_smart_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma= it821x_check_atapi_dma,
+ .qc_issue = it821x_smart_qc_issue,
+
+ .cable_detect = ata_cable_80wire,
+ .set_mode = it821x_smart_set_mode,
+ .dev_config = it821x_dev_config,
+ .read_id = it821x_read_id,
+
+ .port_start = it821x_port_start,
+};
+
+static struct ata_port_operations it821x_passthru_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma= it821x_check_atapi_dma,
+ .sff_dev_select = it821x_passthru_dev_select,
+ .bmdma_start = it821x_passthru_bmdma_start,
+ .bmdma_stop = it821x_passthru_bmdma_stop,
+ .qc_issue = it821x_passthru_qc_issue,
+
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = it821x_passthru_set_piomode,
+ .set_dmamode = it821x_passthru_set_dmamode,
+
+ .port_start = it821x_port_start,
+};
+
+static struct ata_port_operations it821x_rdc_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma= it821x_check_atapi_dma,
+ .sff_dev_select = it821x_passthru_dev_select,
+ .bmdma_start = it821x_passthru_bmdma_start,
+ .bmdma_stop = it821x_passthru_bmdma_stop,
+ .qc_issue = it821x_passthru_qc_issue,
+
+ .cable_detect = it821x_rdc_cable,
+ .set_piomode = it821x_passthru_set_piomode,
+ .set_dmamode = it821x_passthru_set_dmamode,
+
+ .port_start = it821x_port_start,
+};
+
+static void it821x_disable_raid(struct pci_dev *pdev)
+{
+ /* Neither the RDC nor the IT8211 */
+ if (pdev->vendor != PCI_VENDOR_ID_ITE ||
+ pdev->device != PCI_DEVICE_ID_ITE_8212)
+ return;
+
+ /* Reset local CPU, and set BIOS not ready */
+ pci_write_config_byte(pdev, 0x5E, 0x01);
+
+ /* Set to bypass mode, and reset PCI bus */
+ pci_write_config_byte(pdev, 0x50, 0x00);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ PCI_COMMAND_PARITY | PCI_COMMAND_IO |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ pci_write_config_word(pdev, 0x40, 0xA0F3);
+
+ pci_write_config_dword(pdev, 0x4C, 0x02040204);
+ pci_write_config_byte(pdev, 0x42, 0x36);
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20);
+}
+
+
+static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ u8 conf;
+
+ static const struct ata_port_info info_smart = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &it821x_smart_port_ops
+ };
+ static const struct ata_port_info info_passthru = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &it821x_passthru_port_ops
+ };
+ static const struct ata_port_info info_rdc = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ /* No UDMA */
+ .port_ops = &it821x_rdc_port_ops
+ };
+
+ const struct ata_port_info *ppi[] = { NULL, NULL };
+ static char *mode[2] = { "pass through", "smart" };
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->vendor == PCI_VENDOR_ID_RDC) {
+ ppi[0] = &info_rdc;
+ } else {
+ /* Force the card into bypass mode if so requested */
+ if (it8212_noraid) {
+ printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
+ it821x_disable_raid(pdev);
+ }
+ pci_read_config_byte(pdev, 0x50, &conf);
+ conf &= 1;
+
+ printk(KERN_INFO DRV_NAME": controller in %s mode.\n",
+ mode[conf]);
+ if (conf == 0)
+ ppi[0] = &info_passthru;
+ else
+ ppi[0] = &info_smart;
+ }
+ return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL);
+}
+
+#ifdef CONFIG_PM
+static int it821x_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+ /* Resume - turn raid back off if need be */
+ if (it8212_noraid)
+ it821x_disable_raid(pdev);
+ ata_host_resume(host);
+ return rc;
+}
+#endif
+
+static const struct pci_device_id it821x[] = {
+ { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
+ { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
+ { PCI_VDEVICE(RDC, 0x1010), },
+
+ { },
+};
+
+static struct pci_driver it821x_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = it821x,
+ .probe = it821x_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = it821x_reinit_one,
+#endif
+};
+
+static int __init it821x_init(void)
+{
+ return pci_register_driver(&it821x_pci_driver);
+}
+
+static void __exit it821x_exit(void)
+{
+ pci_unregister_driver(&it821x_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, it821x);
+MODULE_VERSION(DRV_VERSION);
+
+
+module_param_named(noraid, it8212_noraid, int, S_IRUGO);
+MODULE_PARM_DESC(noraid, "Force card into bypass mode");
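+/* e.g. "modprobe pata_it821x noraid=1" forces bypass mode at load time */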
+
+module_init(it821x_init);
+module_exit(it821x_exit);
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
new file mode 100644
index 0000000..2014253
--- /dev/null
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -0,0 +1,227 @@
+/*
+ * ixp4xx PATA/Compact Flash driver
+ * Copyright (C) 2006-07 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * An ATA driver to handle a Compact Flash connected
+ * to the ixp4xx expansion bus in TrueIDE mode. The CF
+ * must have it chip selects connected to two CS lines
+ * on the ixp4xx. In the irq is not available, you might
+ * want to modify both this driver and libata to run in
+ * polling mode.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <scsi/scsi_host.h>
+
+#define DRV_NAME "pata_ixp4xx_cf"
+#define DRV_VERSION "0.2"
+
+static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
+{
+ struct ata_device *dev;
+
+ ata_link_for_each_dev(dev, link) {
+ if (ata_dev_enabled(dev)) {
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
+ dev->pio_mode = XFER_PIO_0;
+ dev->xfer_mode = XFER_PIO_0;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ dev->flags |= ATA_DFLAG_PIO;
+ }
+ }
+ return 0;
+}
+
+static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ unsigned int i;
+ unsigned int words = buflen >> 1;
+ u16 *buf16 = (u16 *) buf;
+ struct ata_port *ap = dev->link->ap;
+ void __iomem *mmio = ap->ioaddr.data_addr;
+ struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
+
+ /* set the expansion bus in 16bit mode and restore
+ * 8 bit mode after the transaction.
+ */
+ *data->cs0_cfg &= ~(0x01);
+ udelay(100);
+
+ /* Transfer multiple of 2 bytes */
+ if (rw == READ)
+ for (i = 0; i < words; i++)
+ buf16[i] = readw(mmio);
+ else
+ for (i = 0; i < words; i++)
+ writew(buf16[i], mmio);
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ u16 align_buf[1] = { 0 };
+ unsigned char *trailing_buf = buf + buflen - 1;
+
+ if (rw == READ) {
+ align_buf[0] = readw(mmio);
+ memcpy(trailing_buf, align_buf, 1);
+ } else {
+ memcpy(align_buf, trailing_buf, 1);
+ writew(align_buf[0], mmio);
+ }
+ words++;
+ }
+
+ udelay(100);
+ *data->cs0_cfg |= 0x01;
+
+ return words << 1;
+}
+
+static struct scsi_host_template ixp4xx_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations ixp4xx_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ixp4xx_mmio_data_xfer,
+ .cable_detect = ata_cable_40wire,
+ .set_mode = ixp4xx_set_mode,
+};
+
+static void ixp4xx_setup_port(struct ata_port *ap,
+ struct ixp4xx_pata_data *data,
+ unsigned long raw_cs0, unsigned long raw_cs1)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned long raw_cmd = raw_cs0;
+ unsigned long raw_ctl = raw_cs1 + 0x06;
+
+ ioaddr->cmd_addr = data->cs0;
+ ioaddr->altstatus_addr = data->cs1 + 0x06;
+ ioaddr->ctl_addr = data->cs1 + 0x06;
+
+ ata_sff_std_ports(ioaddr);
+
+#ifndef __ARMEB__
+
+ /* adjust the addresses to handle the address swizzling of the
+ * ixp4xx in little endian mode.
+ */
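+ /*
+ * Concretely: the 16-bit data register flips bit 1 of its address,
+ * while each byte-wide taskfile register flips bits 1:0 (address ^ 3)
+ * to land on the correct byte lane.
+ */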
+
+ *(unsigned long *)&ioaddr->data_addr ^= 0x02;
+ *(unsigned long *)&ioaddr->cmd_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->altstatus_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->ctl_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->error_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->feature_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->nsect_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->lbal_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->lbam_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->lbah_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->device_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->status_addr ^= 0x03;
+ *(unsigned long *)&ioaddr->command_addr ^= 0x03;
+
+ raw_cmd ^= 0x03;
+ raw_ctl ^= 0x03;
+#endif
+
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
+}
+
+static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
+{
+ unsigned int irq;
+ struct resource *cs0, *cs1;
+ struct ata_host *host;
+ struct ata_port *ap;
+ struct ixp4xx_pata_data *data = pdev->dev.platform_data;
+
+ cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ if (!cs0 || !cs1)
+ return -EINVAL;
+
+ /* allocate host */
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ return -ENOMEM;
+
+ /* acquire resources and fill host */
+ pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
+
+ data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
+ data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
+
+ if (!data->cs0 || !data->cs1)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq)
+ set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+
+ /* Setup expansion bus chip selects */
+ *data->cs0_cfg = data->cs0_bits;
+ *data->cs1_cfg = data->cs1_bits;
+
+ ap = host->ports[0];
+
+ ap->ops = &ixp4xx_port_ops;
+ ap->pio_mask = 0x1f; /* PIO4 */
+ ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI;
+
+ ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
+
+ dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* activate host */
+ return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
+}
+
+static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
+{
+ struct ata_host *host = platform_get_drvdata(dev);
+
+ ata_host_detach(host);
+
+ return 0;
+}
+
+static struct platform_driver ixp4xx_pata_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ixp4xx_pata_probe,
+ .remove = __devexit_p(ixp4xx_pata_remove),
+};
+
+static int __init ixp4xx_pata_init(void)
+{
+ return platform_driver_register(&ixp4xx_pata_platform_driver);
+}
+
+static void __exit ixp4xx_pata_exit(void)
+{
+ platform_driver_unregister(&ixp4xx_pata_platform_driver);
+}
+
+MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
+MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
+
+module_init(ixp4xx_pata_init);
+module_exit(ixp4xx_pata_exit);
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
new file mode 100644
index 0000000..38cf1ab
--- /dev/null
+++ b/drivers/ata/pata_jmicron.c
@@ -0,0 +1,185 @@
+/*
+ * pata_jmicron.c - JMicron ATA driver for non AHCI mode. This drives the
+ * PATA port of the controller. The SATA ports are
+ * driven by AHCI in the usual configuration although
+ * this driver can handle other setups if we need it.
+ *
+ * (c) 2006 Red Hat
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_jmicron"
+#define DRV_VERSION "0.1.5"
+
+typedef enum {
+ PORT_PATA0 = 0,
+ PORT_PATA1 = 1,
+ PORT_SATA = 2,
+} port_type;
+
+/**
+ * jmicron_pre_reset - check for 40/80 pin
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Perform the PATA port setup we need.
+ *
+ * On the JMicron 361/363 there is a single PATA port that can be mapped
+ * either as primary or secondary (or neither). We don't do any policy
+ * and setup here. We assume that has been done by init_one and the
+ * BIOS.
+ */
+static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 control;
+ u32 control5;
+ int port_mask = 1 << (4 * ap->port_no);
+ int port = ap->port_no;
+ port_type port_map[2];
+
+ /* Check if our port is enabled */
+ pci_read_config_dword(pdev, 0x40, &control);
+ if ((control & port_mask) == 0)
+ return -ENOENT;
+
+ /* There are two basic mappings. One has the two SATA ports merged
+ as master/slave and the secondary as PATA; the other has only the
+ SATA ports mapped. */
+ if (control & (1 << 23)) {
+ port_map[0] = PORT_SATA;
+ port_map[1] = PORT_PATA0;
+ } else {
+ port_map[0] = PORT_SATA;
+ port_map[1] = PORT_SATA;
+ }
+
+ /* The 365/366 may have this bit set to map the second PATA port
+ as the internal primary channel */
+ pci_read_config_dword(pdev, 0x80, &control5);
+ if (control5 & (1 << 24))
+ port_map[0] = PORT_PATA1;
+
+ /* The two ports may then be logically swapped by the firmware */
+ if (control & (1 << 22))
+ port = port ^ 1;
+
+ /*
+ * Now we know which physical port we are talking about we can
+ * actually do our cable checking etc. Thankfully we don't need
+ * to do the plumbing for other cases.
+ */
+ switch (port_map[port]) {
+ case PORT_PATA0:
+ if ((control & (1 << 5)) == 0)
+ return -ENOENT;
+ if (control & (1 << 3)) /* 40/80 pin primary */
+ ap->cbl = ATA_CBL_PATA40;
+ else
+ ap->cbl = ATA_CBL_PATA80;
+ break;
+ case PORT_PATA1:
+ /* Bit 21 is set if the port is enabled */
+ if ((control5 & (1 << 21)) == 0)
+ return -ENOENT;
+ if (control5 & (1 << 19)) /* 40/80 pin secondary */
+ ap->cbl = ATA_CBL_PATA40;
+ else
+ ap->cbl = ATA_CBL_PATA80;
+ break;
+ case PORT_SATA:
+ ap->cbl = ATA_CBL_SATA;
+ break;
+ }
+ return ata_sff_prereset(link, deadline);
+}
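+
+/*
+ * Decode sketch for the mapping logic above (illustrative register
+ * values): with bit 23 of the 0x40 control word set and bit 24 of
+ * control5 clear, port_map is { PORT_SATA, PORT_PATA0 }, so ATA port 1
+ * goes on to test the PATA enable (bit 5) and cable (bit 3) bits while
+ * port 0 is simply reported as SATA.
+ */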
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template jmicron_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations jmicron_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = jmicron_pre_reset,
+};
+
+
+/**
+ * jmicron_init_one - Register JMicron ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @id: Entry in jmicron_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int jmicron_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+
+ .port_ops = &jmicron_ops,
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+
+ return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL);
+}
+
+static const struct pci_device_id jmicron_pci_tbl[] = {
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
+ { } /* terminate list */
+};
+
+static struct pci_driver jmicron_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = jmicron_pci_tbl,
+ .probe = jmicron_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init jmicron_init(void)
+{
+ return pci_register_driver(&jmicron_pci_driver);
+}
+
+static void __exit jmicron_exit(void)
+{
+ pci_unregister_driver(&jmicron_pci_driver);
+}
+
+module_init(jmicron_init);
+module_exit(jmicron_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
new file mode 100644
index 0000000..930c220
--- /dev/null
+++ b/drivers/ata/pata_legacy.c
@@ -0,0 +1,1296 @@
+/*
+ * pata_legacy.c - Legacy port PATA/SATA controller driver.
+ * Copyright 2005/2006 Red Hat, all rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * An ATA driver for the legacy ATA ports.
+ *
+ * Data Sources:
+ * Opti 82C465/82C611 support: Data sheets at opti-inc.com
+ * HT6560 series:
+ * Promise 20230/20620:
+ * http://www.ryston.cz/petr/vlb/pdc20230b.html
+ * http://www.ryston.cz/petr/vlb/pdc20230c.html
+ * http://www.ryston.cz/petr/vlb/pdc20630.html
+ *
+ * Unsupported but docs exist:
+ * Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
+ *
+ * This driver handles legacy (that is "ISA/VLB side") IDE ports found
+ * on PC class systems. There are three hybrid devices that are exceptions:
+ * the Cyrix 5510/5520, where a pre-SFF ATA device is on the bridge, and
+ * the MPIIX, where the tuning is PCI side but the IDE is "ISA side".
+ *
+ * Specific support is included for the ht6560a/ht6560b/opti82c611a/
+ * opti82c465mv/promise 20230c/20630/winbond83759A
+ *
+ * Use the autospeed and pio_mask options with:
+ * Appian ADI/2 aka CLPD7220 or AIC25VL01.
+ * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
+ * Goldstar GM82C711, PIC-1288A-125, UMC 82C871F, Winbond W83759,
+ * Winbond W83759A, Promise PDC20230-B
+ *
+ * For now use autospeed and pio_mask as above with the W83759A. This may
+ * change.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_legacy"
+#define DRV_VERSION "0.6.5"
+
+#define NR_HOST 6
+
+static int all;
+module_param(all, int, 0444);
+MODULE_PARM_DESC(all, "Grab all legacy port devices, even if PCI (0=off, 1=on)");
+
+struct legacy_data {
+ unsigned long timing;
+ u8 clock[2];
+ u8 last;
+ int fast;
+ struct platform_device *platform_dev;
+
+};
+
+enum controller {
+ BIOS = 0,
+ SNOOP = 1,
+ PDC20230 = 2,
+ HT6560A = 3,
+ HT6560B = 4,
+ OPTI611A = 5,
+ OPTI46X = 6,
+ QDI6500 = 7,
+ QDI6580 = 8,
+ QDI6580DP = 9, /* Dual channel mode is different */
+ W83759A = 10,
+
+ UNKNOWN = -1
+};
+
+
+struct legacy_probe {
+ unsigned char *name;
+ unsigned long port;
+ unsigned int irq;
+ unsigned int slot;
+ enum controller type;
+ unsigned long private;
+};
+
+struct legacy_controller {
+ const char *name;
+ struct ata_port_operations *ops;
+ unsigned int pio_mask;
+ unsigned int flags;
+ int (*setup)(struct platform_device *, struct legacy_probe *probe,
+ struct legacy_data *data);
+};
+
+static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+
+static struct legacy_probe probe_list[NR_HOST];
+static struct legacy_data legacy_data[NR_HOST];
+static struct ata_host *legacy_host[NR_HOST];
+static int nr_legacy_host;
+
+
+static int probe_all; /* Set to check all ISA port ranges */
+static int ht6560a; /* HT 6560A on primary 1, secondary 2, both 3 */
+static int ht6560b; /* HT 6560B on primary 1, secondary 2, both 3 */
+static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
+static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
+static int qdi; /* Set to probe QDI controllers */
+static int winbond; /* Set to probe Winbond controllers,
+ give I/O port if non-standard */
+static int autospeed; /* Chip present which snoops speed changes */
+static int pio_mask = 0x1F; /* PIO range for autospeed devices */
+static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
+
+/**
+ * legacy_probe_add - Add interface to probe list
+ * @port: Controller port
+ * @irq: IRQ number
+ * @type: Controller type
+ * @private: Controller specific info
+ *
+ * Add an entry into the probe list for ATA controllers. This is used
+ * to add the default ISA slots and then to build up the table
+ * further according to other ISA/VLB/Weird device scans
+ *
+ * An I/O port list is used to keep ordering stable and sane, as we
+ * don't have any good way to talk about ordering otherwise
+ */
+
+static int legacy_probe_add(unsigned long port, unsigned int irq,
+ enum controller type, unsigned long private)
+{
+ struct legacy_probe *lp = &probe_list[0];
+ int i;
+ struct legacy_probe *free = NULL;
+
+ for (i = 0; i < NR_HOST; i++) {
+ if (lp->port == 0 && free == NULL)
+ free = lp;
+ /* Matching port, or the correct slot for ordering */
+ if (lp->port == port || legacy_port[i] == port) {
+ free = lp;
+ break;
+ }
+ lp++;
+ }
+ if (free == NULL) {
+ printk(KERN_ERR "pata_legacy: Too many interfaces.\n");
+ return -1;
+ }
+ /* Fill in the entry for later probing */
+ free->port = port;
+ free->irq = irq;
+ free->type = type;
+ free->private = private;
+ return 0;
+}
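+
+/*
+ * Usage sketch (hypothetical call): legacy_probe_add(0x1F0, 14,
+ * HT6560A, 0) lands in slot 0 because 0x1F0 is legacy_port[0]; a
+ * second call for the same port overwrites that slot rather than
+ * consuming a new one, which is what keeps the ordering stable.
+ */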
+
+
+/**
+ * legacy_set_mode - mode setting
+ * @link: IDE link
+ * @unused: Device that failed when error is returned
+ *
+ * Use a non standard set_mode function. We don't want to be tuned.
+ *
+ * The BIOS configured everything. Our job is not to fiddle. Just use
+ * whatever PIO the hardware is using and leave it at that. When we
+ * get some kind of nice user driven API for control then we can
+ * expand on this as per hdparm in the base kernel.
+ */
+
+static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+ struct ata_device *dev;
+
+ ata_link_for_each_dev(dev, link) {
+ if (ata_dev_enabled(dev)) {
+ ata_dev_printk(dev, KERN_INFO,
+ "configured for PIO\n");
+ dev->pio_mode = XFER_PIO_0;
+ dev->xfer_mode = XFER_PIO_0;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ dev->flags |= ATA_DFLAG_PIO;
+ }
+ }
+ return 0;
+}
+
+static struct scsi_host_template legacy_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static const struct ata_port_operations legacy_base_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+};
+
+/*
+ * These ops are used if the user indicates the hardware
+ * snoops the commands to decide on the mode and handles the
+ * mode selection "magically" itself. Several legacy controllers
+ * do this. The mode range can be set if it is not 0x1F by setting
+ * pio_mask as well.
+ */
+
+static struct ata_port_operations simple_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+};
+
+static struct ata_port_operations legacy_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .set_mode = legacy_set_mode,
+};
+
+/*
+ * Promise 20230C and 20620 support
+ *
+ * This controller supports PIO0 to PIO2. We set PIO timings
+ * conservatively to allow for 50MHz VESA Local Bus. The 20620 DMA
+ * support is weird: data is DMA'd to the controller but PIO'd to the
+ * host, so it is not supported.
+ */
+
+static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ int tries = 5;
+ int pio = adev->pio_mode - XFER_PIO_0;
+ u8 rt;
+ unsigned long flags;
+
+ /* Safe as UP only. Force I/Os to occur together */
+
+ local_irq_save(flags);
+
+ /* Unlock the control interface */
+ do {
+ inb(0x1F5);
+ outb(inb(0x1F2) | 0x80, 0x1F2);
+ inb(0x1F2);
+ inb(0x3F6);
+ inb(0x3F6);
+ inb(0x1F2);
+ inb(0x1F2);
+ } while ((inb(0x1F2) & 0x80) && --tries);
+
+ local_irq_restore(flags);
+
+ outb(inb(0x1F4) & 0x07, 0x1F4);
+
+ rt = inb(0x1F3);
+ rt &= 0x07 << (3 * adev->devno);
+ if (pio)
+ rt |= (1 + 3 * pio) << (3 * adev->devno);
+
+ udelay(100);
+ outb(inb(0x1F2) | 0x01, 0x1F2);
+ udelay(100);
+ inb(0x1F5);
+
+}
+
+static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ if (ata_id_has_dword_io(dev->id)) {
+ struct ata_port *ap = dev->link->ap;
+ int slop = buflen & 3;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* Perform the 32bit I/O synchronization sequence */
+ ioread8(ap->ioaddr.nsect_addr);
+ ioread8(ap->ioaddr.nsect_addr);
+ ioread8(ap->ioaddr.nsect_addr);
+
+ /* Now the data */
+ if (rw == READ)
+ ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+ else
+ iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+ if (unlikely(slop)) {
+ __le32 pad;
+ if (rw == READ) {
+ pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+ memcpy(buf + buflen - slop, &pad, slop);
+ } else {
+ memcpy(&pad, buf + buflen - slop, slop);
+ iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+ }
+ buflen += 4 - slop;
+ }
+ local_irq_restore(flags);
+ } else
+ buflen = ata_sff_data_xfer_noirq(dev, buf, buflen, rw);
+
+ return buflen;
+}
+
+static struct ata_port_operations pdc20230_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = pdc20230_set_piomode,
+ .sff_data_xfer = pdc_data_xfer_vlb,
+};
+
+/*
+ * Holtek 6560A support
+ *
+ * This controller supports PIO0 to PIO2 (no IORDY even though higher
+ * timings can be loaded).
+ */
+
+static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ u8 active, recover;
+ struct ata_timing t;
+
+ /* Get the timing data in cycles. For now play safe at 50MHz */
+ ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+
+ active = clamp_val(t.active, 2, 15);
+ recover = clamp_val(t.recover, 4, 15);
+
+ inb(0x3E6);
+ inb(0x3E6);
+ inb(0x3E6);
+ inb(0x3E6);
+
+ iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
+ ioread8(ap->ioaddr.status_addr);
+}
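+
+/*
+ * Worked example of the packing above (cycle counts assumed): if
+ * ata_timing_compute() yields t.active = 6 and t.recover = 10 at the
+ * 20000 ps (50 MHz) clock, the byte pushed through device_addr is
+ * (10 << 4) | 6 == 0xA6.
+ */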
+
+static struct ata_port_operations ht6560a_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = ht6560a_set_piomode,
+};
+
+/*
+ * Holtek 6560B support
+ *
+ * This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO
+ * setting unless we see an ATAPI device in which case we force it off.
+ *
+ * FIXME: need to implement 2nd channel support.
+ */
+
+static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ u8 active, recover;
+ struct ata_timing t;
+
+ /* Get the timing data in cycles. For now play safe at 50MHz */
+ ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+
+ active = clamp_val(t.active, 2, 15);
+ recover = clamp_val(t.recover, 2, 16);
+ recover &= 0x15;
+
+ inb(0x3E6);
+ inb(0x3E6);
+ inb(0x3E6);
+ inb(0x3E6);
+
+ iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
+
+ if (adev->class != ATA_DEV_ATA) {
+ u8 rconf = inb(0x3E6);
+ if (rconf & 0x24) {
+ rconf &= ~0x24;
+ outb(rconf, 0x3E6);
+ }
+ }
+ ioread8(ap->ioaddr.status_addr);
+}
+
+static struct ata_port_operations ht6560b_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = ht6560b_set_piomode,
+};
+
+/*
+ * Opti core chipset helpers
+ */
+
+/**
+ * opti_syscfg - read OPTI chipset configuration
+ * @reg: Configuration register to read
+ *
+ * Returns the value of an OPTI system board configuration register.
+ */
+
+static u8 opti_syscfg(u8 reg)
+{
+ unsigned long flags;
+ u8 r;
+
+ /* Uniprocessor chipset and must force cycles adjacent */
+ local_irq_save(flags);
+ outb(reg, 0x22);
+ r = inb(0x24);
+ local_irq_restore(flags);
+ return r;
+}
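+
+/*
+ * The OPTI system registers live behind an index/data pair at
+ * 0x22/0x24, so for example opti_syscfg(0xAC) latches index 0xAC and
+ * hands back the byte read from 0x24.
+ */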
+
+/*
+ * Opti 82C611A
+ *
+ * This controller supports PIO0 to PIO3.
+ */
+
+static void opti82c611a_set_piomode(struct ata_port *ap,
+ struct ata_device *adev)
+{
+ u8 active, recover, setup;
+ struct ata_timing t;
+ struct ata_device *pair = ata_dev_pair(adev);
+ int clock;
+ int khz[4] = { 50000, 40000, 33000, 25000 };
+ u8 rc;
+
+ /* Enter configuration mode */
+ ioread16(ap->ioaddr.error_addr);
+ ioread16(ap->ioaddr.error_addr);
+ iowrite8(3, ap->ioaddr.nsect_addr);
+
+ /* Read VLB clock strapping */
+ clock = 1000000000 / khz[ioread8(ap->ioaddr.lbah_addr) & 0x03];
+
+ /* Get the timing data in cycles */
+ ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
+
+ /* Setup timing is shared */
+ if (pair) {
+ struct ata_timing tp;
+ ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
+
+ ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+ }
+
+ active = clamp_val(t.active, 2, 17) - 2;
+ recover = clamp_val(t.recover, 1, 16) - 1;
+ setup = clamp_val(t.setup, 1, 4) - 1;
+
+ /* Select the right timing bank for write timing */
+ rc = ioread8(ap->ioaddr.lbal_addr);
+ rc &= 0x7F;
+ rc |= (adev->devno << 7);
+ iowrite8(rc, ap->ioaddr.lbal_addr);
+
+ /* Write the timings */
+ iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
+
+ /* Select the right bank for read timings, also
+ load the shared timings for address */
+ rc = ioread8(ap->ioaddr.device_addr);
+ rc &= 0xC0;
+ rc |= adev->devno; /* Index select */
+ rc |= (setup << 4) | 0x04;
+ iowrite8(rc, ap->ioaddr.device_addr);
+
+ /* Load the read timings */
+ iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
+
+ /* Ensure the timing register mode is right */
+ rc = ioread8(ap->ioaddr.lbal_addr);
+ rc &= 0x73;
+ rc |= 0x84;
+ iowrite8(rc, ap->ioaddr.lbal_addr);
+
+ /* Exit command mode */
+ iowrite8(0x83, ap->ioaddr.nsect_addr);
+}
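+
+/*
+ * Worked example (timings assumed): t.active = 10, t.recover = 5 and
+ * t.setup = 2 clamp to active = 8, recover = 4, setup = 1, so the
+ * write timing byte is (8 << 4) | 4 == 0x84 and setup occupies bits
+ * 4-5 of the read-bank select.
+ */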
+
+
+static struct ata_port_operations opti82c611a_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = opti82c611a_set_piomode,
+};
+
+/*
+ * Opti 82C465MV
+ *
+ * This controller supports PIO0 to PIO3. Unlike the 611A the MVB
+ * version is dual channel but doesn't have a lot of unique registers.
+ */
+
+static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ u8 active, recover, setup;
+ struct ata_timing t;
+ struct ata_device *pair = ata_dev_pair(adev);
+ int clock;
+ int khz[4] = { 50000, 40000, 33000, 25000 };
+ u8 rc;
+ u8 sysclk;
+
+ /* Get the clock */
+ sysclk = opti_syscfg(0xAC) & 0xC0; /* BIOS set */
+
+ /* Enter configuration mode */
+ ioread16(ap->ioaddr.error_addr);
+ ioread16(ap->ioaddr.error_addr);
+ iowrite8(3, ap->ioaddr.nsect_addr);
+
+ /* Read VLB clock strapping */
+ clock = 1000000000 / khz[sysclk];
+
+ /* Get the timing data in cycles */
+ ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
+
+ /* Setup timing is shared */
+ if (pair) {
+ struct ata_timing tp;
+ ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
+
+ ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
+ }
+
+ active = clamp_val(t.active, 2, 17) - 2;
+ recover = clamp_val(t.recover, 1, 16) - 1;
+ setup = clamp_val(t.setup, 1, 4) - 1;
+
+ /* Select the right timing bank for write timing */
+ rc = ioread8(ap->ioaddr.lbal_addr);
+ rc &= 0x7F;
+ rc |= (adev->devno << 7);
+ iowrite8(rc, ap->ioaddr.lbal_addr);
+
+ /* Write the timings */
+ iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
+
+ /* Select the right bank for read timings, also
+ load the shared timings for address */
+ rc = ioread8(ap->ioaddr.device_addr);
+ rc &= 0xC0;
+ rc |= adev->devno; /* Index select */
+ rc |= (setup << 4) | 0x04;
+ iowrite8(rc, ap->ioaddr.device_addr);
+
+ /* Load the read timings */
+ iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
+
+ /* Ensure the timing register mode is right */
+ rc = ioread8(ap->ioaddr.lbal_addr);
+ rc &= 0x73;
+ rc |= 0x84;
+ iowrite8(rc, ap->ioaddr.lbal_addr);
+
+ /* Exit command mode */
+ iowrite8(0x83, ap->ioaddr.nsect_addr);
+
+ /* We need to know this for the quad-device setup on the MVB */
+ ap->host->private_data = ap;
+}
+
+/**
+ * opti82c46x_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings. The
+ * MVB has a single set of timing registers and these are shared
+ * across channels. As there are two registers we really ought to
+ * track the last two used values as a sort of register window. For
+ * now we just reload on a channel switch. On the single channel
+ * setup this condition never fires so we do nothing extra.
+ *
+ * FIXME: dual channel needs ->serialize support
+ */
+
+static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+
+ /* If timings are set and for the wrong channel (the second test is
+ due to a libata shortcoming and will eventually go, I hope) */
+ if (ap->host->private_data != ap->host
+ && ap->host->private_data != NULL)
+ opti82c46x_set_piomode(ap, adev);
+
+ return ata_sff_qc_issue(qc);
+}
+
+static struct ata_port_operations opti82c46x_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = opti82c46x_set_piomode,
+ .qc_issue = opti82c46x_qc_issue,
+};
+
+static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_timing t;
+ struct legacy_data *ld_qdi = ap->host->private_data;
+ int active, recovery;
+ u8 timing;
+
+ /* Get the timing data in cycles */
+ ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+ if (ld_qdi->fast) {
+ active = 8 - clamp_val(t.active, 1, 8);
+ recovery = 18 - clamp_val(t.recover, 3, 18);
+ } else {
+ active = 9 - clamp_val(t.active, 2, 9);
+ recovery = 15 - clamp_val(t.recover, 0, 15);
+ }
+ timing = (recovery << 4) | active | 0x08;
+
+ ld_qdi->clock[adev->devno] = timing;
+
+ outb(timing, ld_qdi->timing);
+}
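+
+/*
+ * Worked example (slow mode, cycle counts assumed): t.active = 5 and
+ * t.recover = 8 give active = 4 and recovery = 7, so the byte written
+ * to the timing port is (7 << 4) | 4 | 0x08 == 0x7C.
+ */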
+
+/**
+ * qdi6580dp_set_piomode - PIO setup for dual channel
+ * @ap: Port
+ * @adev: Device
+ *
+ * In dual channel mode the 6580 has one clock per channel and we have
+ * to software clockswitch in qc_issue.
+ */
+
+static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_timing t;
+ struct legacy_data *ld_qdi = ap->host->private_data;
+ int active, recovery;
+ u8 timing;
+
+ /* Get the timing data in cycles */
+ ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+ if (ld_qdi->fast) {
+ active = 8 - clamp_val(t.active, 1, 8);
+ recovery = 18 - clamp_val(t.recover, 3, 18);
+ } else {
+ active = 9 - clamp_val(t.active, 2, 9);
+ recovery = 15 - clamp_val(t.recover, 0, 15);
+ }
+ timing = (recovery << 4) | active | 0x08;
+
+ ld_qdi->clock[adev->devno] = timing;
+
+ outb(timing, ld_qdi->timing + 2 * ap->port_no);
+ /* Clear the FIFO */
+ if (adev->class != ATA_DEV_ATA)
+ outb(0x5F, ld_qdi->timing + 3);
+}
+
+/**
+ * qdi6580_set_piomode - PIO setup for single channel
+ * @ap: Port
+ * @adev: Device
+ *
+ * In single channel mode the 6580 has one clock per device and we can
+ * avoid the requirement to clock switch. We also have to load the timing
+ * into the right clock according to whether we are master or slave.
+ */
+
+static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_timing t;
+ struct legacy_data *ld_qdi = ap->host->private_data;
+ int active, recovery;
+ u8 timing;
+
+ /* Get the timing data in cycles */
+ ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+ if (ld_qdi->fast) {
+ active = 8 - clamp_val(t.active, 1, 8);
+ recovery = 18 - clamp_val(t.recover, 3, 18);
+ } else {
+ active = 9 - clamp_val(t.active, 2, 9);
+ recovery = 15 - clamp_val(t.recover, 0, 15);
+ }
+ timing = (recovery << 4) | active | 0x08;
+ ld_qdi->clock[adev->devno] = timing;
+ outb(timing, ld_qdi->timing + 2 * adev->devno);
+ /* Clear the FIFO */
+ if (adev->class != ATA_DEV_ATA)
+ outb(0x5F, ld_qdi->timing + 3);
+}
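+
+/*
+ * Address sketch: the 6500 has a single clock register at the base
+ * timing port, the dual-channel 6580 keys it off the channel (timing +
+ * 2 * port_no), the single-channel 6580 off the drive (timing +
+ * 2 * devno), and the 6580 variants park the ATAPI FIFO disable at
+ * timing + 3.
+ */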
+
+/**
+ * qdi_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings.
+ */
+
+static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct legacy_data *ld_qdi = ap->host->private_data;
+
+ if (ld_qdi->clock[adev->devno] != ld_qdi->last) {
+ if (adev->pio_mode) {
+ ld_qdi->last = ld_qdi->clock[adev->devno];
+ outb(ld_qdi->clock[adev->devno], ld_qdi->timing +
+ 2 * ap->port_no);
+ }
+ }
+ return ata_sff_qc_issue(qc);
+}
+
+static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_port *ap = adev->link->ap;
+ int slop = buflen & 3;
+
+ if (ata_id_has_dword_io(adev->id)) {
+ if (rw == WRITE)
+ iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+ else
+ ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+ if (unlikely(slop)) {
+ __le32 pad;
+ if (rw == WRITE) {
+ memcpy(&pad, buf + buflen - slop, slop);
+ iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+ } else {
+ pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+ memcpy(buf + buflen - slop, &pad, slop);
+ }
+ }
+ return (buflen + 3) & ~3;
+ } else
+ return ata_sff_data_xfer(adev, buf, buflen, rw);
+}
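+
+/*
+ * Worked example for the slop handling above: buflen = 511 gives
+ * slop = 3, so 127 whole dwords move via the string I/O and the last
+ * three bytes travel in one padded 32-bit transfer; the length
+ * returned is rounded up to 512.
+ */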
+
+static int qdi_port(struct platform_device *dev,
+ struct legacy_probe *lp, struct legacy_data *ld)
+{
+ if (devm_request_region(&dev->dev, lp->private, 4, "qdi") == NULL)
+ return -EBUSY;
+ ld->timing = lp->private;
+ return 0;
+}
+
+static struct ata_port_operations qdi6500_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = qdi6500_set_piomode,
+ .qc_issue = qdi_qc_issue,
+ .sff_data_xfer = vlb32_data_xfer,
+};
+
+static struct ata_port_operations qdi6580_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = qdi6580_set_piomode,
+ .sff_data_xfer = vlb32_data_xfer,
+};
+
+static struct ata_port_operations qdi6580dp_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = qdi6580dp_set_piomode,
+ .sff_data_xfer = vlb32_data_xfer,
+};
+
+static DEFINE_SPINLOCK(winbond_lock);
+
+static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&winbond_lock, flags);
+ outb(reg, port + 0x01);
+ outb(val, port + 0x02);
+ spin_unlock_irqrestore(&winbond_lock, flags);
+}
+
+static u8 winbond_readcfg(unsigned long port, u8 reg)
+{
+ u8 val;
+
+ unsigned long flags;
+ spin_lock_irqsave(&winbond_lock, flags);
+ outb(reg, port + 0x01);
+ val = inb(port + 0x02);
+ spin_unlock_irqrestore(&winbond_lock, flags);
+
+ return val;
+}
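+
+/*
+ * The W83759A configuration space is another index/data pair: the
+ * register number latches at base + 1 and the value moves at base + 2,
+ * e.g. winbond_writecfg(0x130, 0x81, reg | 0x80) as probe_chip_type()
+ * below uses to switch jumpered mode off (0x130 being the default base).
+ */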
+
+static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_timing t;
+ struct legacy_data *ld_winbond = ap->host->private_data;
+ int active, recovery;
+ u8 reg;
+ int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
+
+ reg = winbond_readcfg(ld_winbond->timing, 0x81);
+
+ /* Get the timing data in cycles */
+ if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
+ ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+ else
+ ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+ active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
+ recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
+ timing = (active << 4) | recovery;
+ winbond_writecfg(ld_winbond->timing, timing, reg);
+
+ /* Load the setup timing */
+
+ reg = 0x35;
+ if (adev->class != ATA_DEV_ATA)
+ reg |= 0x08; /* FIFO off */
+ if (!ata_pio_need_iordy(adev))
+ reg |= 0x02; /* IORDY off */
+ reg |= (clamp_val(t.setup, 0, 3) << 6);
+ winbond_writecfg(ld_winbond->timing, timing + 1, reg);
+}
+
+static int winbond_port(struct platform_device *dev,
+ struct legacy_probe *lp, struct legacy_data *ld)
+{
+ if (devm_request_region(&dev->dev, lp->private, 4, "winbond") == NULL)
+ return -EBUSY;
+ ld->timing = lp->private;
+ return 0;
+}
+
+static struct ata_port_operations winbond_port_ops = {
+ .inherits = &legacy_base_port_ops,
+ .set_piomode = winbond_set_piomode,
+ .sff_data_xfer = vlb32_data_xfer,
+};
+
+static struct legacy_controller controllers[] = {
+ {"BIOS", &legacy_port_ops, 0x1F,
+ ATA_FLAG_NO_IORDY, NULL },
+ {"Snooping", &simple_port_ops, 0x1F,
+ 0 , NULL },
+ {"PDC20230", &pdc20230_port_ops, 0x7,
+ ATA_FLAG_NO_IORDY, NULL },
+ {"HT6560A", &ht6560a_port_ops, 0x07,
+ ATA_FLAG_NO_IORDY, NULL },
+ {"HT6560B", &ht6560b_port_ops, 0x1F,
+ ATA_FLAG_NO_IORDY, NULL },
+ {"OPTI82C611A", &opti82c611a_port_ops, 0x0F,
+ 0 , NULL },
+ {"OPTI82C46X", &opti82c46x_port_ops, 0x0F,
+ 0 , NULL },
+ {"QDI6500", &qdi6500_port_ops, 0x07,
+ ATA_FLAG_NO_IORDY, qdi_port },
+ {"QDI6580", &qdi6580_port_ops, 0x1F,
+ 0 , qdi_port },
+ {"QDI6580DP", &qdi6580dp_port_ops, 0x1F,
+ 0 , qdi_port },
+ {"W83759A", &winbond_port_ops, 0x1F,
+ 0 , winbond_port }
+};
+
+/**
+ * probe_chip_type - Discover controller
+ * @probe: Probe entry to check
+ *
+ * Probe an ATA port and identify the type of controller. We don't
+ * check if the controller appears to be driveless at this point.
+ */
+
+static __init int probe_chip_type(struct legacy_probe *probe)
+{
+ int mask = 1 << probe->slot;
+
+ if (winbond && (probe->port == 0x1F0 || probe->port == 0x170)) {
+ u8 reg = winbond_readcfg(winbond, 0x81);
+ reg |= 0x80; /* jumpered mode off */
+ winbond_writecfg(winbond, 0x81, reg);
+ reg = winbond_readcfg(winbond, 0x83);
+ reg |= 0xF0; /* local control */
+ winbond_writecfg(winbond, 0x83, reg);
+ reg = winbond_readcfg(winbond, 0x85);
+ reg |= 0xF0; /* programmable timing */
+ winbond_writecfg(winbond, 0x85, reg);
+
+ reg = winbond_readcfg(winbond, 0x81);
+
+ if (reg & mask)
+ return W83759A;
+ }
+ if (probe->port == 0x1F0) {
+ unsigned long flags;
+ local_irq_save(flags);
+ /* Probes */
+ outb(inb(0x1F2) | 0x80, 0x1F2);
+ inb(0x1F5);
+ inb(0x1F2);
+ inb(0x3F6);
+ inb(0x3F6);
+ inb(0x1F2);
+ inb(0x1F2);
+
+ if ((inb(0x1F2) & 0x80) == 0) {
+ /* PDC20230c or 20630 ? */
+ printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller"
+ " detected.\n");
+ udelay(100);
+ inb(0x1F5);
+ local_irq_restore(flags);
+ return PDC20230;
+ } else {
+ outb(0x55, 0x1F2);
+ inb(0x1F2);
+ inb(0x1F2);
+ if (inb(0x1F2) == 0x00)
+ printk(KERN_INFO "PDC20230-B VLB ATA "
+ "controller detected.\n");
+ local_irq_restore(flags);
+ return BIOS;
+ }
+ }
+
+ if (ht6560a & mask)
+ return HT6560A;
+ if (ht6560b & mask)
+ return HT6560B;
+ if (opti82c611a & mask)
+ return OPTI611A;
+ if (opti82c46x & mask)
+ return OPTI46X;
+ if (autospeed & mask)
+ return SNOOP;
+ return BIOS;
+}
+
+
+/**
+ * legacy_init_one - attach a legacy interface
+ * @probe: probe record
+ *
+ * Register an ISA bus IDE interface. Such interfaces are PIO and we
+ * assume they do not support IRQ sharing.
+ */
+
+static __init int legacy_init_one(struct legacy_probe *probe)
+{
+ struct legacy_controller *controller = &controllers[probe->type];
+ int pio_modes = controller->pio_mask;
+ unsigned long io = probe->port;
+ u32 mask = (1 << probe->slot);
+ struct ata_port_operations *ops = controller->ops;
+ struct legacy_data *ld = &legacy_data[probe->slot];
+ struct ata_host *host = NULL;
+ struct ata_port *ap;
+ struct platform_device *pdev;
+ struct ata_device *dev;
+ void __iomem *io_addr, *ctrl_addr;
+ u32 iordy = (iordy_mask & mask) ? 0 : ATA_FLAG_NO_IORDY;
+ int ret;
+
+ iordy |= controller->flags;
+
+ pdev = platform_device_register_simple(DRV_NAME, probe->slot, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ ret = -EBUSY;
+ if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
+ devm_request_region(&pdev->dev, io + 0x0206, 1,
+ "pata_legacy") == NULL)
+ goto fail;
+
+ ret = -ENOMEM;
+ io_addr = devm_ioport_map(&pdev->dev, io, 8);
+ ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
+ if (!io_addr || !ctrl_addr)
+ goto fail;
+ if (controller->setup)
+ if (controller->setup(pdev, probe, ld) < 0)
+ goto fail;
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ goto fail;
+ ap = host->ports[0];
+
+ ap->ops = ops;
+ ap->pio_mask = pio_modes;
+ ap->flags |= ATA_FLAG_SLAVE_POSS | iordy;
+ ap->ioaddr.cmd_addr = io_addr;
+ ap->ioaddr.altstatus_addr = ctrl_addr;
+ ap->ioaddr.ctl_addr = ctrl_addr;
+ ata_sff_std_ports(&ap->ioaddr);
+ ap->host->private_data = ld;
+
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206);
+
+ ret = ata_host_activate(host, probe->irq, ata_sff_interrupt, 0,
+ &legacy_sht);
+ if (ret)
+ goto fail;
+ ld->platform_dev = pdev;
+
+ /* Nothing found means we drop the port as it's probably not there */
+
+ ret = -ENODEV;
+ ata_link_for_each_dev(dev, &ap->link) {
+ if (!ata_dev_absent(dev)) {
+ legacy_host[probe->slot] = host;
+ ld->platform_dev = pdev;
+ return 0;
+ }
+ }
+fail:
+ platform_device_unregister(pdev);
+ return ret;
+}
+
+/**
+ * legacy_check_special_cases - ATA special cases
+ * @p: PCI device to check
+ * @primary: set this if we find an ATA primary
+ * @secondary: set this if we find an ATA secondary
+ *
+ * A small number of vendors implemented early PCI ATA interfaces
+ * on bridge logic without the ATA interface being PCI visible.
+ * Where we have a matching PCI driver we must skip the relevant
+ * device here. If we don't know about it then the legacy driver
+ * is the right driver anyway.
+ */
+
+static void __init legacy_check_special_cases(struct pci_dev *p, int *primary,
+ int *secondary)
+{
+ /* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
+ if (p->vendor == 0x1078 && p->device == 0x0000) {
+ *primary = *secondary = 1;
+ return;
+ }
+ /* Cyrix CS5520 pre SFF MWDMA ATA on the bridge */
+ if (p->vendor == 0x1078 && p->device == 0x0002) {
+ *primary = *secondary = 1;
+ return;
+ }
+ /* Intel MPIIX - PIO ATA on non PCI side of bridge */
+ if (p->vendor == 0x8086 && p->device == 0x1234) {
+ u16 r;
+ pci_read_config_word(p, 0x6C, &r);
+ if (r & 0x8000) {
+ /* ATA port enabled */
+ if (r & 0x4000)
+ *secondary = 1;
+ else
+ *primary = 1;
+ }
+ return;
+ }
+}
+
+static __init void probe_opti_vlb(void)
+{
+ /* If an OPTI 82C46X is present find out where the channels are */
+ static const char *optis[4] = {
+ "3/463MV", "5MV",
+ "5MVA", "5MVB"
+ };
+ u8 chans = 1;
+ u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
+
+ opti82c46x = 3; /* Assume master and slave first */
+ printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n",
+ optis[ctrl]);
+ if (ctrl == 3)
+ chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
+ ctrl = opti_syscfg(0xAC);
+ /* Check the port is enabled and that this is the 465MV port. On
+ the MVB we may have two channels */
+ if (ctrl & 8) {
+ if (chans == 2) {
+ legacy_probe_add(0x1F0, 14, OPTI46X, 0);
+ legacy_probe_add(0x170, 15, OPTI46X, 0);
+ }
+ if (ctrl & 4)
+ legacy_probe_add(0x170, 15, OPTI46X, 0);
+ else
+ legacy_probe_add(0x1F0, 14, OPTI46X, 0);
+ } else
+ legacy_probe_add(0x1F0, 14, OPTI46X, 0);
+}
+
+static __init void qdi65_identify_port(u8 r, u8 res, unsigned long port)
+{
+ static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
+ /* Check card type */
+ if ((r & 0xF0) == 0xC0) {
+ /* QD6500: single channel */
+ if (r & 8)
+ /* Disabled ? */
+ return;
+ legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
+ QDI6500, port);
+ }
+ if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
+ /* QD6580: dual channel */
+ /* The caller still holds port..port + 1 and will release it */
+ if (!request_region(port + 2, 2, "pata_qdi"))
+ return;
+ res = inb(port + 3);
+ /* Single channel mode ? */
+ if (res & 1)
+ legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
+ QDI6580, port);
+ else { /* Dual channel mode */
+ legacy_probe_add(0x1F0, 14, QDI6580DP, port);
+ /* port + 0x02, r & 0x04 */
+ legacy_probe_add(0x170, 15, QDI6580DP, port + 2);
+ }
+ release_region(port + 2, 2);
+ }
+}
+
+static __init void probe_qdi_vlb(void)
+{
+ unsigned long flags;
+ static const unsigned long qd_port[2] = { 0x30, 0xB0 };
+ int i;
+
+ /*
+ * Check each possible QD65xx base address
+ */
+
+ for (i = 0; i < 2; i++) {
+ unsigned long port = qd_port[i];
+ u8 r, res;
+
+
+ if (request_region(port, 2, "pata_qdi")) {
+ /* Check for a card */
+ local_irq_save(flags);
+ /* I have no h/w that needs this delay but it
+ is present in the historic code */
+ r = inb(port);
+ udelay(1);
+ outb(0x19, port);
+ udelay(1);
+ res = inb(port);
+ udelay(1);
+ outb(r, port);
+ udelay(1);
+ local_irq_restore(flags);
+
+ /* Fail */
+ if (res == 0x19) {
+ release_region(port, 2);
+ continue;
+ }
+ /* Passes the presence test */
+ r = inb(port + 1);
+ udelay(1);
+ /* Check port agrees with port set */
+ if ((r & 2) >> 1 == i)
+ qdi65_identify_port(r, res, port);
+ release_region(port, 2);
+ }
+ }
+}
+
+/**
+ * legacy_init - attach legacy interfaces
+ *
+ * Attach legacy IDE interfaces by scanning the usual IRQ/port suspects.
+ * Right now we do not scan the ide0 and ide1 addresses but should do so
+ * for non-PCI systems or systems with no PCI IDE legacy-mode devices.
+ * If you fix that note there are special cases to consider like VLB
+ * drivers and CS5510/20.
+ */
+
+static __init int legacy_init(void)
+{
+ int i;
+ int ct = 0;
+ int primary = 0;
+ int secondary = 0;
+ int pci_present = 0;
+ struct legacy_probe *pl = &probe_list[0];
+ int slot = 0;
+
+ struct pci_dev *p = NULL;
+
+ for_each_pci_dev(p) {
+ int r;
+ /* Check for any overlap of the system ATA mappings. Native
+ mode controllers stuck on these addresses or some devices
+ in 'raid' mode won't be found by the storage class test */
+ for (r = 0; r < 6; r++) {
+ if (pci_resource_start(p, r) == 0x1f0)
+ primary = 1;
+ if (pci_resource_start(p, r) == 0x170)
+ secondary = 1;
+ }
+ /* Check for special cases */
+ legacy_check_special_cases(p, &primary, &secondary);
+
+ /* If PCI bus is present then don't probe for tertiary
+ legacy ports */
+ pci_present = 1;
+ }
+
+ if (winbond == 1)
+ winbond = 0x130; /* Default port, alt is 1B0 */
+
+ if (primary == 0 || all)
+ legacy_probe_add(0x1F0, 14, UNKNOWN, 0);
+ if (secondary == 0 || all)
+ legacy_probe_add(0x170, 15, UNKNOWN, 0);
+
+ if (probe_all || !pci_present) {
+ /* ISA/VLB extra ports */
+ legacy_probe_add(0x1E8, 11, UNKNOWN, 0);
+ legacy_probe_add(0x168, 10, UNKNOWN, 0);
+ legacy_probe_add(0x1E0, 8, UNKNOWN, 0);
+ legacy_probe_add(0x160, 12, UNKNOWN, 0);
+ }
+
+ if (opti82c46x)
+ probe_opti_vlb();
+ if (qdi)
+ probe_qdi_vlb();
+
+ for (i = 0; i < NR_HOST; i++, pl++) {
+ if (pl->port == 0)
+ continue;
+ if (pl->type == UNKNOWN)
+ pl->type = probe_chip_type(pl);
+ pl->slot = slot++;
+ if (legacy_init_one(pl) == 0)
+ ct++;
+ }
+ if (ct != 0)
+ return 0;
+ return -ENODEV;
+}
+
+static __exit void legacy_exit(void)
+{
+ int i;
+
+ for (i = 0; i < nr_legacy_host; i++) {
+ struct legacy_data *ld = &legacy_data[i];
+ ata_host_detach(legacy_host[i]);
+ platform_device_unregister(ld->platform_dev);
+ }
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for legacy ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(probe_all, int, 0);
+module_param(autospeed, int, 0);
+module_param(ht6560a, int, 0);
+module_param(ht6560b, int, 0);
+module_param(opti82c611a, int, 0);
+module_param(opti82c46x, int, 0);
+module_param(qdi, int, 0);
+module_param(pio_mask, int, 0);
+module_param(iordy_mask, int, 0);
+
+module_init(legacy_init);
+module_exit(legacy_exit);
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
new file mode 100644
index 0000000..76e399b
--- /dev/null
+++ b/drivers/ata/pata_marvell.c
@@ -0,0 +1,196 @@
+/*
+ * Marvell PATA driver.
+ *
+ * For the moment we drive the PATA port in legacy mode. That
+ * isn't making full use of the device functionality but it is
+ * easy to get working.
+ *
+ * (c) 2006 Red Hat
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_marvell"
+#define DRV_VERSION "0.1.6"
+
+/**
+ * marvell_pata_active - check if PATA is active
+ * @pdev: PCI device
+ *
+ * Returns 1 if the PATA port may be active. We know how to check this
+ * for the 6145 but not the other devices.
+ */
+
+static int marvell_pata_active(struct pci_dev *pdev)
+{
+ int i;
+ u32 devices;
+ void __iomem *barp;
+
+ /* We don't yet know how to do this for other devices */
+ if (pdev->device != 0x6145)
+ return 1;
+
+ barp = pci_iomap(pdev, 5, 0x10);
+ if (barp == NULL)
+ return -ENOMEM;
+
+ printk("BAR5:");
+ for (i = 0; i <= 0x0F; i++)
+ printk("%02X:%02X ", i, ioread8(barp + i));
+ printk("\n");
+
+ devices = ioread32(barp + 0x0C);
+ pci_iounmap(pdev, barp);
+
+ if (devices & 0x10)
+ return 1;
+ return 0;
+}
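+
+/*
+ * Decode sketch (register value assumed): on the 6145, BAR5 offset
+ * 0x0C holds the device map, so a readback of 0x00000010 reports the
+ * PATA port present while 0x00000000 leaves the port to the AHCI side.
+ */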
+
+/**
+ * marvell_pre_reset - check for 40/80 pin
+ * @link: link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Perform the PATA port setup we need.
+ */
+
+static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (pdev->device == 0x6145 && ap->port_no == 0 &&
+ !marvell_pata_active(pdev)) /* PATA enable ? */
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+static int marvell_cable_detect(struct ata_port *ap)
+{
+ /* Cable type */
+ switch (ap->port_no) {
+ case 0:
+ if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+ case 1: /* Legacy SATA port */
+ return ATA_CBL_SATA;
+ }
+
+ BUG();
+ return 0; /* Unreachable; our BUG() macro is not marked noreturn */
+}
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template marvell_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations marvell_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = marvell_cable_detect,
+ .prereset = marvell_pre_reset,
+};
+
+
+/**
+ * marvell_init_one - Register Marvell ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @id: Entry in marvell_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int marvell_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+
+ .port_ops = &marvell_ops,
+ };
+ static const struct ata_port_info info_sata = {
+ /* Slave possible as it's magically mapped, not real */
+ .flags = ATA_FLAG_SLAVE_POSS,
+
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+
+ .port_ops = &marvell_ops,
+ };
+ const struct ata_port_info *ppi[] = { &info, &info_sata };
+
+ if (pdev->device == 0x6101)
+ ppi[1] = &ata_dummy_port_info;
+
+#if defined(CONFIG_AHCI) || defined(CONFIG_AHCI_MODULE)
+ if (!marvell_pata_active(pdev)) {
+ printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
+ return -ENODEV;
+ }
+#endif
+ return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL);
+}
+
+static const struct pci_device_id marvell_pci_tbl[] = {
+ { PCI_DEVICE(0x11AB, 0x6101), },
+ { PCI_DEVICE(0x11AB, 0x6121), },
+ { PCI_DEVICE(0x11AB, 0x6123), },
+ { PCI_DEVICE(0x11AB, 0x6145), },
+ { } /* terminate list */
+};
+
+static struct pci_driver marvell_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = marvell_pci_tbl,
+ .probe = marvell_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init marvell_init(void)
+{
+ return pci_register_driver(&marvell_pci_driver);
+}
+
+static void __exit marvell_exit(void)
+{
+ pci_unregister_driver(&marvell_pci_driver);
+}
+
+module_init(marvell_init);
+module_exit(marvell_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
new file mode 100644
index 0000000..a9e8273
--- /dev/null
+++ b/drivers/ata/pata_mpc52xx.c
@@ -0,0 +1,511 @@
+/*
+ * drivers/ata/pata_mpc52xx.c
+ *
+ * libata driver for the Freescale MPC52xx on-chip IDE interface
+ *
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Mipsys - Benjamin Herrenschmidt
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/libata.h>
+#include <linux/of_platform.h>
+
+#include <asm/types.h>
+#include <asm/prom.h>
+#include <asm/mpc52xx.h>
+
+
+#define DRV_NAME "mpc52xx_ata"
+#define DRV_VERSION "0.1.2"
+
+
+/* Private structures used by the driver */
+struct mpc52xx_ata_timings {
+ u32 pio1;
+ u32 pio2;
+};
+
+struct mpc52xx_ata_priv {
+ unsigned int ipb_period;
+ struct mpc52xx_ata __iomem *ata_regs;
+ int ata_irq;
+ struct mpc52xx_ata_timings timings[2];
+ int csel;
+};
+
+
+/* ATAPI-4 PIO specs (in ns) */
+static const int ataspec_t0[5] = {600, 383, 240, 180, 120};
+static const int ataspec_t1[5] = { 70, 50, 30, 30, 25};
+static const int ataspec_t2_8[5] = {290, 290, 290, 80, 70};
+static const int ataspec_t2_16[5] = {165, 125, 100, 80, 70};
+static const int ataspec_t2i[5] = { 0, 0, 0, 70, 25};
+static const int ataspec_t4[5] = { 30, 20, 15, 10, 10};
+static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
+
+#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
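+
+/* e.g. a 132 MHz IPB clock gives an ipb_period of 7575 ps, so the
+   PIO4 t0 of 120 ns costs CALC_CLKCYC(7575, 120000) == 16 cycles */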
+
+
+/* Bit definitions inside the registers */
+#define MPC52xx_ATA_HOSTCONF_SMR 0x80000000UL /* State machine reset */
+#define MPC52xx_ATA_HOSTCONF_FR 0x40000000UL /* FIFO Reset */
+#define MPC52xx_ATA_HOSTCONF_IE 0x02000000UL /* Enable interrupt in PIO */
+#define MPC52xx_ATA_HOSTCONF_IORDY 0x01000000UL /* Drive supports IORDY protocol */
+
+#define MPC52xx_ATA_HOSTSTAT_TIP 0x80000000UL /* Transaction in progress */
+#define MPC52xx_ATA_HOSTSTAT_UREP 0x40000000UL /* UDMA Read Extended Pause */
+#define MPC52xx_ATA_HOSTSTAT_RERR 0x02000000UL /* Read Error */
+#define MPC52xx_ATA_HOSTSTAT_WERR 0x01000000UL /* Write Error */
+
+#define MPC52xx_ATA_FIFOSTAT_EMPTY 0x01 /* FIFO Empty */
+
+#define MPC52xx_ATA_DMAMODE_WRITE 0x01 /* Write DMA */
+#define MPC52xx_ATA_DMAMODE_READ 0x02 /* Read DMA */
+#define MPC52xx_ATA_DMAMODE_UDMA 0x04 /* UDMA enabled */
+#define MPC52xx_ATA_DMAMODE_IE 0x08 /* Enable drive interrupt to CPU in DMA mode */
+#define MPC52xx_ATA_DMAMODE_FE 0x10 /* FIFO Flush enable in Rx mode */
+#define MPC52xx_ATA_DMAMODE_FR 0x20 /* FIFO Reset */
+#define MPC52xx_ATA_DMAMODE_HUT 0x40 /* Host UDMA burst terminate */
+
+
+/* Structure of the hardware registers */
+struct mpc52xx_ata {
+
+ /* Host interface registers */
+ u32 config; /* ATA + 0x00 Host configuration */
+ u32 host_status; /* ATA + 0x04 Host controller status */
+ u32 pio1; /* ATA + 0x08 PIO Timing 1 */
+ u32 pio2; /* ATA + 0x0c PIO Timing 2 */
+ u32 mdma1; /* ATA + 0x10 MDMA Timing 1 */
+ u32 mdma2; /* ATA + 0x14 MDMA Timing 2 */
+ u32 udma1; /* ATA + 0x18 UDMA Timing 1 */
+ u32 udma2; /* ATA + 0x1c UDMA Timing 2 */
+ u32 udma3; /* ATA + 0x20 UDMA Timing 3 */
+ u32 udma4; /* ATA + 0x24 UDMA Timing 4 */
+ u32 udma5; /* ATA + 0x28 UDMA Timing 5 */
+ u32 share_cnt; /* ATA + 0x2c ATA share counter */
+ u32 reserved0[3];
+
+ /* FIFO registers */
+ u32 fifo_data; /* ATA + 0x3c */
+ u8 fifo_status_frame; /* ATA + 0x40 */
+ u8 fifo_status; /* ATA + 0x41 */
+ u16 reserved7[1];
+ u8 fifo_control; /* ATA + 0x44 */
+ u8 reserved8[5];
+ u16 fifo_alarm; /* ATA + 0x4a */
+ u16 reserved9;
+ u16 fifo_rdp; /* ATA + 0x4e */
+ u16 reserved10;
+ u16 fifo_wrp; /* ATA + 0x52 */
+ u16 reserved11;
+ u16 fifo_lfrdp; /* ATA + 0x56 */
+ u16 reserved12;
+ u16 fifo_lfwrp; /* ATA + 0x5a */
+
+ /* Drive TaskFile registers */
+ u8 tf_control; /* ATA + 0x5c TASKFILE Control/Alt Status */
+ u8 reserved13[3];
+ u16 tf_data; /* ATA + 0x60 TASKFILE Data */
+ u16 reserved14;
+ u8 tf_features; /* ATA + 0x64 TASKFILE Features/Error */
+ u8 reserved15[3];
+ u8 tf_sec_count; /* ATA + 0x68 TASKFILE Sector Count */
+ u8 reserved16[3];
+ u8 tf_sec_num; /* ATA + 0x6c TASKFILE Sector Number */
+ u8 reserved17[3];
+ u8 tf_cyl_low; /* ATA + 0x70 TASKFILE Cylinder Low */
+ u8 reserved18[3];
+ u8 tf_cyl_high; /* ATA + 0x74 TASKFILE Cylinder High */
+ u8 reserved19[3];
+ u8 tf_dev_head; /* ATA + 0x78 TASKFILE Device/Head */
+ u8 reserved20[3];
+ u8 tf_command; /* ATA + 0x7c TASKFILE Command/Status */
+ u8 dma_mode; /* ATA + 0x7d ATA Host DMA Mode configuration */
+ u8 reserved21[2];
+};
+
+
+/* ======================================================================== */
+/* Aux fns */
+/* ======================================================================== */
+
+
+/* MPC52xx low level hw control */
+
+static int
+mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
+{
+ struct mpc52xx_ata_timings *timing = &priv->timings[dev];
+ unsigned int ipb_period = priv->ipb_period;
+ unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta;
+
+ if ((pio < 0) || (pio > 4))
+ return -EINVAL;
+
+ t0 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t0[pio]);
+ t1 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t1[pio]);
+ t2_8 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_8[pio]);
+ t2_16 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_16[pio]);
+ t2i = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2i[pio]);
+ t4 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t4[pio]);
+ ta = CALC_CLKCYC(ipb_period, 1000 * ataspec_ta[pio]);
+
+ timing->pio1 = (t0 << 24) | (t2_8 << 16) | (t2_16 << 8) | (t2i);
+ timing->pio2 = (t4 << 24) | (t1 << 16) | (ta << 8);
+
+ return 0;
+}
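+
+/*
+ * Packing sketch (cycle counts assumed): t0 = 16, t2_8 = 10,
+ * t2_16 = 10 and t2i = 4 pack pio1 as 0x100a0a04, while t4 = 2,
+ * t1 = 9 and ta = 5 pack pio2 as 0x02090500.
+ */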
+
+static void
+mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device)
+{
+ struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+ struct mpc52xx_ata_timings *timing = &priv->timings[device];
+
+ out_be32(&regs->pio1, timing->pio1);
+ out_be32(&regs->pio2, timing->pio2);
+ out_be32(&regs->mdma1, 0);
+ out_be32(&regs->mdma2, 0);
+ out_be32(&regs->udma1, 0);
+ out_be32(&regs->udma2, 0);
+ out_be32(&regs->udma3, 0);
+ out_be32(&regs->udma4, 0);
+ out_be32(&regs->udma5, 0);
+
+ priv->csel = device;
+}
+
+static int
+mpc52xx_ata_hw_init(struct mpc52xx_ata_priv *priv)
+{
+ struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+ int tslot;
+
+ /* Clear share_cnt (all sample code does this ...) */
+ out_be32(&regs->share_cnt, 0);
+
+ /* Configure and reset host */
+ out_be32(&regs->config,
+ MPC52xx_ATA_HOSTCONF_IE |
+ MPC52xx_ATA_HOSTCONF_IORDY |
+ MPC52xx_ATA_HOSTCONF_SMR |
+ MPC52xx_ATA_HOSTCONF_FR);
+
+ udelay(10);
+
+ out_be32(&regs->config,
+ MPC52xx_ATA_HOSTCONF_IE |
+ MPC52xx_ATA_HOSTCONF_IORDY);
+
+ /* Set the time slot to 1us */
+ tslot = CALC_CLKCYC(priv->ipb_period, 1000000);
+ out_be32(&regs->share_cnt, tslot << 16);
+
+ /* Init timings to PIO0 */
+ memset(priv->timings, 0x00, 2 * sizeof(struct mpc52xx_ata_timings));
+
+ mpc52xx_ata_compute_pio_timings(priv, 0, 0);
+ mpc52xx_ata_compute_pio_timings(priv, 1, 0);
+
+ mpc52xx_ata_apply_timings(priv, 0);
+
+ return 0;
+}
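+
+/* e.g. at the 7575 ps IPB period assumed earlier, the 1 us time slot
+   is CALC_CLKCYC(7575, 1000000) == 133 clock cycles */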
+
+
+/* ======================================================================== */
+/* libata driver */
+/* ======================================================================== */
+
+static void
+mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct mpc52xx_ata_priv *priv = ap->host->private_data;
+ int pio, rv;
+
+ pio = adev->pio_mode - XFER_PIO_0;
+
+ rv = mpc52xx_ata_compute_pio_timings(priv, adev->devno, pio);
+
+ if (rv) {
+ printk(KERN_ERR DRV_NAME
+ ": Trying to select invalid PIO mode %d\n", pio);
+ return;
+ }
+
+ mpc52xx_ata_apply_timings(priv, adev->devno);
+}
+
+static void
+mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device)
+{
+ struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+ if (device != priv->csel)
+ mpc52xx_ata_apply_timings(priv, device);
+
+ ata_sff_dev_select(ap, device);
+}
+
+static struct scsi_host_template mpc52xx_ata_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations mpc52xx_ata_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_dev_select = mpc52xx_ata_dev_select,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = mpc52xx_ata_set_piomode,
+ .post_internal_cmd = ATA_OP_NULL,
+};
+
+static int __devinit
+mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
+ unsigned long raw_ata_regs)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ struct ata_ioports *aio;
+
+ host = ata_host_alloc(dev, 1);
+ if (!host)
+ return -ENOMEM;
+
+ ap = host->ports[0];
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ ap->pio_mask = 0x1f; /* Up to PIO4 */
+ ap->mwdma_mask = 0x00; /* No MWDMA */
+ ap->udma_mask = 0x00; /* No UDMA */
+ ap->ops = &mpc52xx_ata_port_ops;
+ host->private_data = priv;
+
+ aio = &ap->ioaddr;
+ aio->cmd_addr = NULL; /* Don't have a classic reg block */
+ aio->altstatus_addr = &priv->ata_regs->tf_control;
+ aio->ctl_addr = &priv->ata_regs->tf_control;
+ aio->data_addr = &priv->ata_regs->tf_data;
+ aio->error_addr = &priv->ata_regs->tf_features;
+ aio->feature_addr = &priv->ata_regs->tf_features;
+ aio->nsect_addr = &priv->ata_regs->tf_sec_count;
+ aio->lbal_addr = &priv->ata_regs->tf_sec_num;
+ aio->lbam_addr = &priv->ata_regs->tf_cyl_low;
+ aio->lbah_addr = &priv->ata_regs->tf_cyl_high;
+ aio->device_addr = &priv->ata_regs->tf_dev_head;
+ aio->status_addr = &priv->ata_regs->tf_command;
+ aio->command_addr = &priv->ata_regs->tf_command;
+
+ ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
+
+ /* activate host */
+ return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0,
+ &mpc52xx_ata_sht);
+}
+
+static struct mpc52xx_ata_priv *
+mpc52xx_ata_remove_one(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct mpc52xx_ata_priv *priv = host->private_data;
+
+ ata_host_detach(host);
+
+ return priv;
+}
+
+
+/* ======================================================================== */
+/* OF Platform driver */
+/* ======================================================================== */
+
+static int __devinit
+mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
+{
+ unsigned int ipb_freq;
+ struct resource res_mem;
+ int ata_irq;
+ struct mpc52xx_ata __iomem *ata_regs;
+ struct mpc52xx_ata_priv *priv;
+ int rv;
+
+ /* Get ipb frequency */
+ ipb_freq = mpc52xx_find_ipb_freq(op->node);
+ if (!ipb_freq) {
+ printk(KERN_ERR DRV_NAME ": "
+ "Unable to find IPB Bus frequency\n" );
+ return -ENODEV;
+ }
+
+ /* Get IRQ and register */
+ rv = of_address_to_resource(op->node, 0, &res_mem);
+ if (rv) {
+ printk(KERN_ERR DRV_NAME ": "
+ "Error while parsing device node resource\n" );
+ return rv;
+ }
+
+ ata_irq = irq_of_parse_and_map(op->node, 0);
+ if (ata_irq == NO_IRQ) {
+ printk(KERN_ERR DRV_NAME ": "
+ "Error while mapping the irq\n");
+ return -EINVAL;
+ }
+
+ /* Request mem region */
+ if (!devm_request_mem_region(&op->dev, res_mem.start,
+ sizeof(struct mpc52xx_ata), DRV_NAME)) {
+ printk(KERN_ERR DRV_NAME ": "
+ "Error while requesting mem region\n");
+ rv = -EBUSY;
+ goto err;
+ }
+
+ /* Remap registers */
+ ata_regs = devm_ioremap(&op->dev, res_mem.start,
+ sizeof(struct mpc52xx_ata));
+ if (!ata_regs) {
+ printk(KERN_ERR DRV_NAME ": "
+ "Error while mapping register set\n");
+ rv = -ENOMEM;
+ goto err;
+ }
+
+ /* Prepare our private structure */
+ priv = devm_kzalloc(&op->dev, sizeof(struct mpc52xx_ata_priv),
+ GFP_ATOMIC);
+ if (!priv) {
+ printk(KERN_ERR DRV_NAME ": "
+ "Error while allocating private structure\n");
+ rv = -ENOMEM;
+ goto err;
+ }
+
+ priv->ipb_period = 1000000000 / (ipb_freq / 1000);
+ priv->ata_regs = ata_regs;
+ priv->ata_irq = ata_irq;
+ priv->csel = -1;
+
+ /* Init the hw */
+ rv = mpc52xx_ata_hw_init(priv);
+ if (rv) {
+ printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ goto err;
+ }
+
+ /* Register ourselves to libata */
+ rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start);
+ if (rv) {
+ printk(KERN_ERR DRV_NAME ": "
+ "Error while registering to ATA layer\n");
+ return rv;
+ }
+
+ /* Done */
+ return 0;
+
+ /* Error path */
+err:
+ irq_dispose_mapping(ata_irq);
+ return rv;
+}
+
+static int
+mpc52xx_ata_remove(struct of_device *op)
+{
+ struct mpc52xx_ata_priv *priv;
+
+ priv = mpc52xx_ata_remove_one(&op->dev);
+ irq_dispose_mapping(priv->ata_irq);
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+
+static int
+mpc52xx_ata_suspend(struct of_device *op, pm_message_t state)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+
+ return ata_host_suspend(host, state);
+}
+
+static int
+mpc52xx_ata_resume(struct of_device *op)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+ struct mpc52xx_ata_priv *priv = host->private_data;
+ int rv;
+
+ rv = mpc52xx_ata_hw_init(priv);
+ if (rv) {
+ printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ return rv;
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+#endif
+
+
+static struct of_device_id mpc52xx_ata_of_match[] = {
+ { .compatible = "fsl,mpc5200-ata", },
+ { .compatible = "mpc5200-ata", },
+ {},
+};
+
+
+static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .match_table = mpc52xx_ata_of_match,
+ .probe = mpc52xx_ata_probe,
+ .remove = mpc52xx_ata_remove,
+#ifdef CONFIG_PM
+ .suspend = mpc52xx_ata_suspend,
+ .resume = mpc52xx_ata_resume,
+#endif
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_ata_init(void)
+{
+ printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n");
+ return of_register_platform_driver(&mpc52xx_ata_of_platform_driver);
+}
+
+static void __exit
+mpc52xx_ata_exit(void)
+{
+ of_unregister_platform_driver(&mpc52xx_ata_of_platform_driver);
+}
+
+module_init(mpc52xx_ata_init);
+module_exit(mpc52xx_ata_exit);
+
+MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
+MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, mpc52xx_ata_of_match);
+MODULE_VERSION(DRV_VERSION);
+
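
For reference on the period arithmetic above: priv->ipb_period ends up in picoseconds, since 1000000000 / (ipb_freq / 1000) equals 10^12 / ipb_freq. A minimal user-space sketch of that math follows; the ns_to_clocks() helper is hypothetical and only shows the usual round-up conversion such timing code performs.

	#include <stdio.h>
	#include <stdint.h>

	/* Period in picoseconds for a clock given in Hz; this is exactly
	 * 1000000000 / (ipb_freq / 1000) from the probe routine. */
	static uint32_t period_ps(uint32_t freq_hz)
	{
		return 1000000000u / (freq_hz / 1000u);
	}

	/* Hypothetical helper: whole clocks needed to cover a timing given
	 * in nanoseconds, rounded up so the constraint is always met. */
	static uint32_t ns_to_clocks(uint32_t ns, uint32_t period)
	{
		return (ns * 1000u + period - 1u) / period;
	}

	int main(void)
	{
		uint32_t ipb = 66000000u;	/* 66 MHz IPB bus, for example */
		uint32_t t = period_ps(ipb);	/* ~15151 ps */

		printf("period %u ps, 120 ns >= %u clocks\n",
		       t, ns_to_clocks(120u, t));
		return 0;
	}
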
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
new file mode 100644
index 0000000..7c8faa4
--- /dev/null
+++ b/drivers/ata/pata_mpiix.c
@@ -0,0 +1,251 @@
+/*
+ * pata_mpiix.c - Intel MPIIX PATA for new ATA layer
+ * (C) 2005-2006 Red Hat Inc
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * The MPIIX is different enough to the PIIX4 and friends that we give it
+ * a separate driver. The old ide/pci code handles this by just not tuning
+ * MPIIX at all.
+ *
+ * The MPIIX also differs in another important way from the majority of PIIX
+ * devices. The chip is a bridge (pardon the pun) between the old world of
+ * ISA IDE and PCI IDE. Although the ATA timings are PCI configured the actual
+ * IDE controller is not decoded in PCI space and the chip does not claim to
+ * be IDE class PCI. This requires slightly non-standard probe logic compared
+ * with PCI IDE and also that we do not disable the device when our driver is
+ * unloaded (as it has many other functions).
+ *
+ * The driver consciously keeps this logic internally to avoid pushing quirky
+ * PATA history into the clean libata layer.
+ *
+ * ThinkPad-specific note: if you boot an MPIIX-based ThinkPad with a PCMCIA
+ * hard disk present, this driver will not detect it. This is not a bug. In
+ * this configuration the secondary port of the MPIIX is disabled, and the
+ * addresses are decoded by the PCMCIA bridge, so they are left for a generic
+ * IDE driver to operate.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_mpiix"
+#define DRV_VERSION "0.7.6"
+
+enum {
+ IDETIM = 0x6C, /* IDE control register */
+ IORDY = (1 << 1),
+ PPE = (1 << 2),
+ FTIM = (1 << 0),
+ ENABLED = (1 << 15),
+ SECONDARY = (1 << 14)
+};
+
+static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 };
+
+ if (!pci_test_config_bits(pdev, &mpiix_enable_bits))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * mpiix_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the PIO mode setup. The MPIIX allows us to program the
+ * IORDY sample point (2-5 clocks), recovery (1-4 clocks) and whether
+ * prefetching or IORDY are used.
+ *
+ * This would get very ugly because we can only program the timing for one
+ * device at a time; the other gets PIO0. Fortunately libata calls our
+ * qc_issue method before each command is issued, so we can flip the
+ * timings back and forth to reduce the pain.
+ */
+
+static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ int control = 0;
+ int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u16 idetim;
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ pci_read_config_word(pdev, IDETIM, &idetim);
+
+ /* Mask the IORDY/TIME/PPE for this device */
+ if (adev->class == ATA_DEV_ATA)
+ control |= PPE; /* Enable prefetch/posting for disk */
+ if (ata_pio_need_iordy(adev))
+ control |= IORDY;
+ if (pio > 1)
+ control |= FTIM; /* This drive is on the fast timing bank */
+
+ /* Mask out timing and clear both TIME bank selects */
+ idetim &= 0xCCEE;
+ idetim &= ~(0x07 << (4 * adev->devno));
+ idetim |= control << (4 * adev->devno);
+
+ idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+ pci_write_config_word(pdev, IDETIM, idetim);
+
+ /* We use ap->private_data as a pointer to the device currently
+ loaded for timing */
+ ap->private_data = adev;
+}
+
+/**
+ * mpiix_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings if
+ * necessary. Our logic also clears TIME0/TIME1 for the other device so
+ * that, even if we get this wrong, cycles to the other device will
+ * be made PIO0.
+ */
+
+static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+
+ /* If modes have been configured and the channel data is not loaded
+ then load it. We have to check if pio_mode is set as the core code
+ does not set adev->pio_mode to XFER_PIO_0 while probing as would be
+ logical */
+
+ if (adev->pio_mode && adev != ap->private_data)
+ mpiix_set_piomode(ap, adev);
+
+ return ata_sff_qc_issue(qc);
+}
+
+static struct scsi_host_template mpiix_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations mpiix_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .qc_issue = mpiix_qc_issue,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = mpiix_set_piomode,
+ .prereset = mpiix_pre_reset,
+};
+
+static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ /* Single threaded by the PCI probe logic */
+ static int printed_version;
+ struct ata_host *host;
+ struct ata_port *ap;
+ void __iomem *cmd_addr, *ctl_addr;
+ u16 idetim;
+ int cmd, ctl, irq;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+
+ host = ata_host_alloc(&dev->dev, 1);
+ if (!host)
+ return -ENOMEM;
+ ap = host->ports[0];
+
+ /* MPIIX has many functions which can be turned on or off according
+ to other devices present. Make sure IDE is enabled before we try
+ and use it */
+
+ pci_read_config_word(dev, IDETIM, &idetim);
+ if (!(idetim & ENABLED))
+ return -ENODEV;
+
+ /* See if it's primary or secondary channel... */
+ if (!(idetim & SECONDARY)) {
+ cmd = 0x1F0;
+ ctl = 0x3F6;
+ irq = 14;
+ } else {
+ cmd = 0x170;
+ ctl = 0x376;
+ irq = 15;
+ }
+
+ cmd_addr = devm_ioport_map(&dev->dev, cmd, 8);
+ ctl_addr = devm_ioport_map(&dev->dev, ctl, 1);
+ if (!cmd_addr || !ctl_addr)
+ return -ENOMEM;
+
+ ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl);
+
+	/* We do our own plumbing to avoid leaking special cases for whacko
+	   ancient hardware into the core code. There are two issues to
+	   worry about. #1 The chip is a bridge, so in legacy mode with no
+	   BARs set it fools the standard setup. #2 If you pci_disable_device()
+	   the MPIIX your box goes castors up */
+
+ ap->ops = &mpiix_port_ops;
+ ap->pio_mask = 0x1F;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+ ap->ioaddr.cmd_addr = cmd_addr;
+ ap->ioaddr.ctl_addr = ctl_addr;
+ ap->ioaddr.altstatus_addr = ctl_addr;
+
+ /* Let libata fill in the port details */
+ ata_sff_std_ports(&ap->ioaddr);
+
+ /* activate host */
+ return ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED,
+ &mpiix_sht);
+}
+
+static const struct pci_device_id mpiix[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
+
+ { },
+};
+
+static struct pci_driver mpiix_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = mpiix,
+ .probe = mpiix_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init mpiix_init(void)
+{
+ return pci_register_driver(&mpiix_pci_driver);
+}
+
+static void __exit mpiix_exit(void)
+{
+ pci_unregister_driver(&mpiix_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, mpiix);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(mpiix_init);
+module_exit(mpiix_exit);
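
The IDETIM packing in mpiix_set_piomode() is dense, so the stand-alone sketch below mirrors the same masks and shifts. The constants are copied from the driver; use_iordy stands in for the ata_pio_need_iordy() answer the real driver gets from libata. It is an illustration, not kernel code.

	#include <stdio.h>
	#include <stdint.h>

	#define FTIM	(1 << 0)	/* fast timing bank */
	#define IORDY	(1 << 1)
	#define PPE	(1 << 2)	/* prefetch/posting enable */

	/* ISP/RTC nybbles per PIO mode, copied from mpiix_set_piomode() */
	static const uint8_t timings[5][2] = {
		{ 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 },
	};

	static uint16_t pack_idetim(uint16_t idetim, int devno, int pio,
				    int is_disk, int use_iordy)
	{
		int control = 0;

		if (is_disk)
			control |= PPE;
		if (use_iordy)
			control |= IORDY;
		if (pio > 1)
			control |= FTIM;

		idetim &= 0xCCEE;			/* keep enables, drop TIME banks */
		idetim &= ~(0x07 << (4 * devno));	/* clear this device's bits */
		idetim |= control << (4 * devno);
		idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
		return idetim;
	}

	int main(void)
	{
		/* enabled primary channel, master disk at PIO4 with IORDY */
		printf("IDETIM = 0x%04x\n", pack_idetim(0x8000, 0, 4, 1, 1));
		return 0;
	}
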
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
new file mode 100644
index 0000000..9dc05e1
--- /dev/null
+++ b/drivers/ata/pata_netcell.c
@@ -0,0 +1,112 @@
+/*
+ * pata_netcell.c - Netcell PATA driver
+ *
+ * (c) 2006 Red Hat
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_netcell"
+#define DRV_VERSION "0.1.7"
+
+/* No PIO or DMA methods needed for this device */
+
+static struct scsi_host_template netcell_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations netcell_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_80wire,
+};
+
+
+/**
+ * netcell_init_one - Register Netcell ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in netcell_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+		/* Actually we don't really care about these as the
+		   firmware deals with them */
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5, /* UDMA 133 */
+ .port_ops = &netcell_ops,
+ };
+ const struct ata_port_info *port_info[] = { &info, NULL };
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* Any chip specific setup/optimisation/messages here */
+ ata_pci_bmdma_clear_simplex(pdev);
+
+ /* And let the library code do the work */
+ return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL);
+}
+
+static const struct pci_device_id netcell_pci_tbl[] = {
+ { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver netcell_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = netcell_pci_tbl,
+ .probe = netcell_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init netcell_init(void)
+{
+ return pci_register_driver(&netcell_pci_driver);
+}
+
+static void __exit netcell_exit(void)
+{
+ pci_unregister_driver(&netcell_pci_driver);
+}
+
+module_init(netcell_init);
+module_exit(netcell_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, netcell_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
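
The pio_mask/mwdma_mask/udma_mask fields above are libata transfer-mode bitmaps: bit n set means mode n is supported, so 0x1f is PIO0-4, 0x07 is MWDMA0-2, and ATA_UDMA5 expands to 0x3f (UDMA0-5) in this kernel. A small sketch that decodes such masks:

	#include <stdio.h>

	/* Decode a libata-style transfer-mode bitmap: bit n means mode n. */
	static void decode(const char *name, unsigned mask)
	{
		printf("%s:", name);
		for (int i = 0; mask; i++, mask >>= 1)
			if (mask & 1)
				printf(" %s%d", name, i);
		printf("\n");
	}

	int main(void)
	{
		decode("pio", 0x1f);	/* pio0-4 */
		decode("mwdma", 0x07);	/* mwdma0-2 */
		decode("udma", 0x3f);	/* ATA_UDMA5, i.e. udma0-5 */
		return 0;
	}
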
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
new file mode 100644
index 0000000..4dd9a3b
--- /dev/null
+++ b/drivers/ata/pata_ninja32.c
@@ -0,0 +1,208 @@
+/*
+ * pata_ninja32.c - Ninja32 PATA for new ATA layer
+ * (C) 2007 Red Hat Inc
+ *
+ * Note: the controller, like many controllers, has shared timings for
+ * PIO and DMA. We thus flip to the DMA timings in dma_start and flip back
+ * in the dma_stop function. Thus we don't actually need a set_dmamode
+ * method, as the PIO method is always called and will set the right PIO
+ * timing parameters.
+ *
+ * The Ninja32 Cardbus is not a generic SFF controller. Instead it is
+ * laid out as follows off BAR 0. This is based upon Mark Lord's delkin
+ * driver and the extensive analysis done by the BSD developers, notably
+ * ITOH Yasufumi.
+ *
+ * Base + 0x00 IRQ Status
+ * Base + 0x01 IRQ control
+ * Base + 0x02 Chipset control
+ * Base + 0x03 Unknown
+ * Base + 0x04 VDMA and reset control + wait bits
+ * Base + 0x08 BMIMBA
+ * Base + 0x0C DMA Length
+ * Base + 0x10 Taskfile
+ * Base + 0x18 BMDMA Status ?
+ * Base + 0x1C
+ * Base + 0x1D Bus master control
+ * bit 0 = enable
+ * bit 1 = 0 write/1 read
+ * bit 2 = 1 sgtable
+ * bit 3 = go
+ * bit 4-6 wait bits
+ * bit 7 = done
+ * Base + 0x1E AltStatus
+ * Base + 0x1F timing register
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_ninja32"
+#define DRV_VERSION "0.1.3"
+
+
+/**
+ * ninja32_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the PIO mode setup. Our timing registers are shared
+ * but we want to set the PIO timing by default.
+ */
+
+static void ninja32_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	static const u8 pio_timing[5] = {
+		0xd6, 0x85, 0x44, 0x33, 0x13
+	};
+ iowrite8(pio_timing[adev->pio_mode - XFER_PIO_0],
+ ap->ioaddr.bmdma_addr + 0x1f);
+ ap->private_data = adev;
+}
+
+
+static void ninja32_dev_select(struct ata_port *ap, unsigned int device)
+{
+ struct ata_device *adev = &ap->link.device[device];
+ if (ap->private_data != adev) {
+ iowrite8(0xd6, ap->ioaddr.bmdma_addr + 0x1f);
+ ata_sff_dev_select(ap, device);
+ ninja32_set_piomode(ap, adev);
+ }
+}
+
+static struct scsi_host_template ninja32_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations ninja32_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_dev_select = ninja32_dev_select,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = ninja32_set_piomode,
+};
+
+static void ninja32_program(void __iomem *base)
+{
+ iowrite8(0x05, base + 0x01); /* Enable interrupt lines */
+ iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */
+ iowrite8(0x01, base + 0x03); /* Unknown */
+ iowrite8(0x20, base + 0x04); /* WAIT0 */
+ iowrite8(0x8f, base + 0x05); /* Unknown */
+ iowrite8(0xa4, base + 0x1c); /* Unknown */
+ iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */
+}
+
+static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ void __iomem *base;
+ int rc;
+
+ host = ata_host_alloc(&dev->dev, 1);
+ if (!host)
+ return -ENOMEM;
+ ap = host->ports[0];
+
+ /* Set up the PCI device */
+ rc = pcim_enable_device(dev);
+ if (rc)
+ return rc;
+ rc = pcim_iomap_regions(dev, 1 << 0, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(dev);
+ if (rc)
+ return rc;
+
+ host->iomap = pcim_iomap_table(dev);
+ rc = pci_set_dma_mask(dev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(dev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ pci_set_master(dev);
+
+	/* Set up the register mappings. We use the I/O mapping on BAR 0,
+	   since only the older chips also expose MMIO on BAR 1 */
+ base = host->iomap[0];
+ if (!base)
+ return -ENOMEM;
+ ap->ops = &ninja32_port_ops;
+ ap->pio_mask = 0x1F;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+ ap->ioaddr.cmd_addr = base + 0x10;
+ ap->ioaddr.ctl_addr = base + 0x1E;
+ ap->ioaddr.altstatus_addr = base + 0x1E;
+ ap->ioaddr.bmdma_addr = base;
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ninja32_program(base);
+ /* FIXME: Should we disable them at remove ? */
+ return ata_host_activate(host, dev->irq, ata_sff_interrupt,
+ IRQF_SHARED, &ninja32_sht);
+}
+
+#ifdef CONFIG_PM
+
+static int ninja32_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+ ninja32_program(host->iomap[0]);
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id ninja32[] = {
+ { 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { },
+};
+
+static struct pci_driver ninja32_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = ninja32,
+ .probe = ninja32_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ninja32_reinit_one,
+#endif
+};
+
+static int __init ninja32_init(void)
+{
+ return pci_register_driver(&ninja32_pci_driver);
+}
+
+static void __exit ninja32_exit(void)
+{
+ pci_unregister_driver(&ninja32_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Ninja32 ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ninja32);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ninja32_init);
+module_exit(ninja32_exit);
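
The lazy timing reload in ninja32_dev_select() can be hard to follow from the hardware accesses alone. Below is a toy user-space simulation of that control flow; the register is a plain variable here and the values are the driver's pio_timing table. It models the logic only, not the hardware.

	#include <stdio.h>
	#include <stdint.h>

	/* Timing bytes for PIO0-4, copied from the driver; all are written
	 * byte-wide to BAR0 + 0x1f. */
	static const uint8_t pio_timing[5] = { 0xd6, 0x85, 0x44, 0x33, 0x13 };

	static uint8_t timing_reg;	/* stand-in for the shared timing register */
	static int loaded = -1;		/* stand-in for ap->private_data */

	/* Models ninja32_dev_select(): on a device switch, drop to the safe
	 * PIO0 value first, then load the new device's PIO timing. */
	static void select_dev(int dev, int pio)
	{
		if (loaded != dev) {
			timing_reg = 0xd6;		/* safe PIO0 while switching */
			timing_reg = pio_timing[pio];	/* then the real timing */
			loaded = dev;
		}
	}

	int main(void)
	{
		select_dev(0, 4);
		printf("dev 0 at PIO4 -> 0x%02x\n", timing_reg);
		select_dev(1, 2);
		printf("dev 1 at PIO2 -> 0x%02x\n", timing_reg);
		select_dev(1, 2);	/* no reload on a repeat selection */
		return 0;
	}
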
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
new file mode 100644
index 0000000..40d411c
--- /dev/null
+++ b/drivers/ata/pata_ns87410.c
@@ -0,0 +1,188 @@
+/*
+ * pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer
+ * (C) 2006 Red Hat Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_ns87410"
+#define DRV_VERSION "0.4.6"
+
+/**
+ * ns87410_pre_reset - probe begin
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Check enabled ports
+ */
+
+static int ns87410_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const struct pci_bits ns87410_enable_bits[] = {
+ { 0x43, 1, 0x08, 0x08 },
+ { 0x47, 1, 0x08, 0x08 }
+ };
+
+ if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * ns87410_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program timing data. This is kept per channel not per device,
+ * and only affects the data port.
+ */
+
+static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = 0x40 + 4 * ap->port_no;
+ u8 idetcr, idefr;
+ struct ata_timing at;
+
+ static const u8 activebits[15] = {
+ 0, 1, 2, 3, 4,
+ 5, 5, 6, 6, 6,
+ 6, 7, 7, 7, 7
+ };
+
+ static const u8 recoverbits[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7
+ };
+
+ pci_read_config_byte(pdev, port + 3, &idefr);
+
+ if (ata_pio_need_iordy(adev))
+ idefr |= 0x04; /* IORDY enable */
+ else
+ idefr &= ~0x04;
+
+ if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) {
+ dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", adev->pio_mode);
+ return;
+ }
+
+ at.active = clamp_val(at.active, 2, 16) - 2;
+ at.setup = clamp_val(at.setup, 1, 4) - 1;
+ at.recover = clamp_val(at.recover, 1, 12) - 1;
+
+ idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active];
+
+ pci_write_config_byte(pdev, port, idetcr);
+ pci_write_config_byte(pdev, port + 3, idefr);
+ /* We use ap->private_data as a pointer to the device currently
+ loaded for timing */
+ ap->private_data = adev;
+}
+
+/**
+ * ns87410_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings if
+ * necessary.
+ */
+
+static unsigned int ns87410_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+
+ /* If modes have been configured and the channel data is not loaded
+ then load it. We have to check if pio_mode is set as the core code
+ does not set adev->pio_mode to XFER_PIO_0 while probing as would be
+ logical */
+
+ if (adev->pio_mode && adev != ap->private_data)
+ ns87410_set_piomode(ap, adev);
+
+ return ata_sff_qc_issue(qc);
+}
+
+static struct scsi_host_template ns87410_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations ns87410_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .qc_issue = ns87410_qc_issue,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = ns87410_set_piomode,
+ .prereset = ns87410_pre_reset,
+};
+
+static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x0F,
+ .port_ops = &ns87410_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ return ata_pci_sff_init_one(dev, ppi, &ns87410_sht, NULL);
+}
+
+static const struct pci_device_id ns87410[] = {
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), },
+
+ { },
+};
+
+static struct pci_driver ns87410_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = ns87410,
+ .probe = ns87410_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init ns87410_init(void)
+{
+ return pci_register_driver(&ns87410_pci_driver);
+}
+
+static void __exit ns87410_exit(void)
+{
+ pci_unregister_driver(&ns87410_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Nat Semi 87410");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ns87410);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ns87410_init);
+module_exit(ns87410_exit);
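
A stand-alone mirror of the IDETCR packing in ns87410_set_piomode(), with the clamp ranges and lookup tables copied from the driver. The field layout, setup in bits 7:6, recovery in bits 5:3, active in bits 2:0, follows from the shifts used above; the example clock counts are arbitrary.

	#include <stdio.h>
	#include <stdint.h>

	static const uint8_t activebits[15] = {
		0, 1, 2, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7
	};
	static const uint8_t recoverbits[12] = {
		0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7
	};

	static unsigned clamp(unsigned v, unsigned lo, unsigned hi)
	{
		return v < lo ? lo : v > hi ? hi : v;
	}

	/* Pack an IDETCR byte from raw clock counts, as the driver does. */
	static uint8_t pack_idetcr(unsigned active, unsigned setup,
				   unsigned recover)
	{
		active = clamp(active, 2, 16) - 2;
		setup = clamp(setup, 1, 4) - 1;
		recover = clamp(recover, 1, 12) - 1;
		return (setup << 6) | (recoverbits[recover] << 3) |
			activebits[active];
	}

	int main(void)
	{
		printf("IDETCR = 0x%02x\n", pack_idetcr(6, 2, 4));
		return 0;
	}
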
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
new file mode 100644
index 0000000..89bf5f8
--- /dev/null
+++ b/drivers/ata/pata_ns87415.c
@@ -0,0 +1,415 @@
+/*
+ * pata_ns87415.c - NS87415 (non PARISC) PATA
+ *
+ * (C) 2005 Red Hat <alan@lxorguk.ukuu.org.uk>
+ *
+ * This is a fairly generic MWDMA controller. It has some limitations
+ * as it requires timing reloads on PIO/DMA transitions but it is otherwise
+ * fairly well designed.
+ *
+ * This driver assumes the firmware has left the chip in a valid ST506
+ * compliant state, either legacy IRQ 14/15 or native INTA shared. You
+ * may need to add platform code if your system fails to do this.
+ *
+ * The same cell appears in the 87560 controller used by some PARISC
+ * systems. This has its own special mountain of errata.
+ *
+ * TODO:
+ * Test PARISC SuperIO
+ * Get someone to test on SPARC
+ * Implement lazy pio/dma switching for better performance
+ * 8bit shared timing.
+ * See if we need to kill the FIFO for ATAPI
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_ns87415"
+#define DRV_VERSION "0.0.1"
+
+/**
+ * ns87415_set_mode - Initialize host controller mode timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ * @mode: Mode to set
+ *
+ * Program the mode registers for this controller, channel and
+ * device. Because the chip is quite an old design we have to do this
+ * for PIO/DMA switches.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ int unit = 2 * ap->port_no + adev->devno;
+ int timing = 0x44 + 2 * unit;
+ unsigned long T = 1000000000 / 33333; /* PCI clocks */
+ struct ata_timing t;
+ u16 clocking;
+ u8 iordy;
+ u8 status;
+
+	/* The timing register holds 17 minus the read (active) time in the
+	   low nybble and 16 minus the recovery time in the high nybble,
+	   both counted in PCI clocks */
+
+ ata_timing_compute(adev, adev->pio_mode, &t, T, 0);
+
+ clocking = 17 - clamp_val(t.active, 2, 17);
+ clocking |= (16 - clamp_val(t.recover, 1, 16)) << 4;
+ /* Use the same timing for read and write bytes */
+ clocking |= (clocking << 8);
+ pci_write_config_word(dev, timing, clocking);
+
+ /* Set the IORDY enable versus DMA enable on or off properly */
+ pci_read_config_byte(dev, 0x42, &iordy);
+ iordy &= ~(1 << (4 + unit));
+ if (mode >= XFER_MW_DMA_0 || !ata_pio_need_iordy(adev))
+ iordy |= (1 << (4 + unit));
+
+ /* Paranoia: We shouldn't ever get here with busy write buffers
+ but if so wait */
+
+ pci_read_config_byte(dev, 0x43, &status);
+ while (status & 0x03) {
+ udelay(1);
+ pci_read_config_byte(dev, 0x43, &status);
+ }
+ /* Flip the IORDY/DMA bits now we are sure the write buffers are
+ clear */
+ pci_write_config_byte(dev, 0x42, iordy);
+
+ /* TODO: Set byte 54 command timing to the best 8bit
+ mode shared by all four devices */
+}
+
+/**
+ * ns87415_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void ns87415_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ ns87415_set_mode(ap, adev, adev->pio_mode);
+}
+
+/**
+ * ns87415_bmdma_setup - Set up DMA
+ * @qc: Command block
+ *
+ * Set up for bus mastering DMA. We have to do this ourselves
+ * rather than use the helper due to a chip erratum.
+ */
+
+static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+ u8 dmactl;
+
+ /* load PRD table addr. */
+ mb(); /* make sure PRD table writes are visible to controller */
+ iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	/* Due to an erratum we need to write these bits to the wrong
+	   place, which, bizarrely, saves us an I/O */
+ dmactl |= ATA_DMA_INTR | ATA_DMA_ERR;
+ if (!rw)
+ dmactl |= ATA_DMA_WR;
+ iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+/**
+ * ns87415_bmdma_start - Begin DMA transfer
+ * @qc: Command block
+ *
+ * Switch the timings for the chip and set up for a DMA transfer
+ * before the DMA burst begins.
+ *
+ * FIXME: We should do lazy switching on bmdma_start versus
+ * ata_pio_data_xfer for better performance.
+ */
+
+static void ns87415_bmdma_start(struct ata_queued_cmd *qc)
+{
+ ns87415_set_mode(qc->ap, qc->dev, qc->dev->dma_mode);
+ ata_bmdma_start(qc);
+}
+
+/**
+ * ns87415_bmdma_stop - End DMA transfer
+ * @qc: Command block
+ *
+ * End DMA mode and switch the controller back into PIO mode
+ */
+
+static void ns87415_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ ata_bmdma_stop(qc);
+ ns87415_set_mode(qc->ap, qc->dev, qc->dev->pio_mode);
+}
+
+/**
+ * ns87415_irq_clear - Clear interrupt
+ * @ap: Channel to clear
+ *
+ * Erratum: due to a chip bug, registers 02 and 0A bits 1 and 2 (the
+ * error bits) are reset by writing to register 00 or 08.
+ */
+
+static void ns87415_irq_clear(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ if (!mmio)
+ return;
+ iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR),
+ mmio + ATA_DMA_CMD);
+}
+
+/**
+ * ns87415_check_atapi_dma - ATAPI DMA filter
+ * @qc: Command block
+ *
+ * Disable ATAPI DMA (for now). We may be able to do DMA if we
+ * kill the prefetching. This isn't clear.
+ */
+
+static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ return -EOPNOTSUPP;
+}
+
+#if defined(CONFIG_SUPERIO)
+
+/* SUPERIO 87560 is a PoS chip that NatSem denies exists.
+ * Unfortunately, it's built in on all Astro-based PA-RISC workstations,
+ * which use the integrated NS87514 cell for CD-ROM support.
+ * i.e. we have to support it for CD-ROM installs.
+ * See drivers/parisc/superio.c for more gory details.
+ *
+ * Workarounds taken from drivers/ide/pci/ns87415.c
+ */
+
+#include <asm/superio.h>
+
+#define SUPERIO_IDE_MAX_RETRIES 25
+
+/**
+ * ns87560_read_buggy - workaround buggy Super I/O chip
+ * @port: Port to read
+ *
+ * Work around chipset problems in the 87560 SuperIO chip
+ */
+
+static u8 ns87560_read_buggy(void __iomem *port)
+{
+ u8 tmp;
+ int retries = SUPERIO_IDE_MAX_RETRIES;
+ do {
+ tmp = ioread8(port);
+ if (tmp != 0)
+ return tmp;
+ udelay(50);
+ } while(retries-- > 0);
+ return tmp;
+}
+
+/**
+ * ns87560_check_status
+ * @ap: channel to check
+ *
+ * Return the status of the channel working around the
+ * 87560 flaws.
+ */
+
+static u8 ns87560_check_status(struct ata_port *ap)
+{
+ return ns87560_read_buggy(ap->ioaddr.status_addr);
+}
+
+/**
+ * ns87560_tf_read - input device's ATA taskfile shadow registers
+ * @ap: Port from which input is read
+ * @tf: ATA taskfile register set for storing input
+ *
+ * Reads ATA taskfile registers for currently-selected device
+ * into @tf. Work around the 87560 bugs.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->command = ns87560_check_status(ap);
+ tf->feature = ioread8(ioaddr->error_addr);
+ tf->nsect = ioread8(ioaddr->nsect_addr);
+ tf->lbal = ioread8(ioaddr->lbal_addr);
+ tf->lbam = ioread8(ioaddr->lbam_addr);
+ tf->lbah = ioread8(ioaddr->lbah_addr);
+ tf->device = ns87560_read_buggy(ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+ tf->hob_feature = ioread8(ioaddr->error_addr);
+ tf->hob_nsect = ioread8(ioaddr->nsect_addr);
+ tf->hob_lbal = ioread8(ioaddr->lbal_addr);
+ tf->hob_lbam = ioread8(ioaddr->lbam_addr);
+ tf->hob_lbah = ioread8(ioaddr->lbah_addr);
+ iowrite8(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ }
+}
+
+/**
+ * ns87560_bmdma_status
+ * @ap: channel to check
+ *
+ * Return the DMA status of the channel working around the
+ * 87560 flaws.
+ */
+
+static u8 ns87560_bmdma_status(struct ata_port *ap)
+{
+ return ns87560_read_buggy(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+}
+#endif /* 87560 SuperIO Support */
+
+static struct ata_port_operations ns87415_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .check_atapi_dma = ns87415_check_atapi_dma,
+ .bmdma_setup = ns87415_bmdma_setup,
+ .bmdma_start = ns87415_bmdma_start,
+ .bmdma_stop = ns87415_bmdma_stop,
+ .sff_irq_clear = ns87415_irq_clear,
+
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = ns87415_set_piomode,
+};
+
+#if defined(CONFIG_SUPERIO)
+static struct ata_port_operations ns87560_pata_ops = {
+ .inherits = &ns87415_pata_ops,
+ .sff_tf_read = ns87560_tf_read,
+ .sff_check_status = ns87560_check_status,
+ .bmdma_status = ns87560_bmdma_status,
+};
+#endif
+
+static struct scsi_host_template ns87415_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+
+/**
+ * ns87415_init_one - Register 87415 ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in ns87415_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer. We probe for combined mode (sigh),
+ * and then hand over control to libata, for it to do the rest.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .port_ops = &ns87415_pata_ops,
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ int rc;
+#if defined(CONFIG_SUPERIO)
+ static const struct ata_port_info info87560 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .port_ops = &ns87560_pata_ops,
+ };
+
+ if (PCI_SLOT(pdev->devfn) == 0x0E)
+ ppi[0] = &info87560;
+#endif
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* Select 512 byte sectors */
+ pci_write_config_byte(pdev, 0x55, 0xEE);
+ /* Select PIO0 8bit clocking */
+ pci_write_config_byte(pdev, 0x54, 0xB7);
+ return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL);
+}
+
+static const struct pci_device_id ns87415_pci_tbl[] = {
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87415), },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver ns87415_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = ns87415_pci_tbl,
+ .probe = ns87415_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init ns87415_init(void)
+{
+ return pci_register_driver(&ns87415_pci_driver);
+}
+
+static void __exit ns87415_exit(void)
+{
+ pci_unregister_driver(&ns87415_pci_driver);
+}
+
+module_init(ns87415_init);
+module_exit(ns87415_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("ATA low-level driver for NS87415 controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, ns87415_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
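
The "17 minus active, 16 minus recovery" encoding described in ns87415_set_mode() is easy to check in isolation. A minimal sketch of the same packing, including the read/write byte duplication; the example clock counts are arbitrary.

	#include <stdio.h>
	#include <stdint.h>

	static unsigned clamp(unsigned v, unsigned lo, unsigned hi)
	{
		return v < lo ? lo : v > hi ? hi : v;
	}

	/* Mirror of the NS87415 timing pack: low nybble is 17 - active,
	 * high nybble is 16 - recovery (both in PCI clocks), and the same
	 * byte is duplicated for the read and write cycles. */
	static uint16_t pack_clocking(unsigned active, unsigned recover)
	{
		uint16_t clocking = 17 - clamp(active, 2, 17);

		clocking |= (16 - clamp(recover, 1, 16)) << 4;
		return clocking | (clocking << 8);
	}

	int main(void)
	{
		/* e.g. roughly PIO4 numbers at 33 MHz: 4 active, 1 recovery */
		printf("timing word = 0x%04x\n", pack_clocking(4, 1));
		return 0;
	}
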
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
new file mode 100644
index 0000000..1f18ad9
--- /dev/null
+++ b/drivers/ata/pata_of_platform.c
@@ -0,0 +1,114 @@
+/*
+ * OF-platform PATA driver
+ *
+ * Copyright (c) 2007 MontaVista Software, Inc.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/ata_platform.h>
+
+static int __devinit pata_of_platform_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ int ret;
+ struct device_node *dn = ofdev->node;
+ struct resource io_res;
+ struct resource ctl_res;
+ struct resource irq_res;
+ unsigned int reg_shift = 0;
+ int pio_mode = 0;
+ int pio_mask;
+ const u32 *prop;
+
+ ret = of_address_to_resource(dn, 0, &io_res);
+ if (ret) {
+ dev_err(&ofdev->dev, "can't get IO address from "
+ "device tree\n");
+ return -EINVAL;
+ }
+
+ if (of_device_is_compatible(dn, "electra-ide")) {
+ /* Altstatus is really at offset 0x3f6 from the primary window
+ * on electra-ide. Adjust ctl_res and io_res accordingly.
+ */
+ ctl_res = io_res;
+		ctl_res.start = ctl_res.start + 0x3f6;
+		io_res.end = ctl_res.start - 1;
+ } else {
+ ret = of_address_to_resource(dn, 1, &ctl_res);
+ if (ret) {
+ dev_err(&ofdev->dev, "can't get CTL address from "
+ "device tree\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = of_irq_to_resource(dn, 0, &irq_res);
+ if (ret == NO_IRQ)
+ irq_res.start = irq_res.end = 0;
+ else
+ irq_res.flags = 0;
+
+ prop = of_get_property(dn, "reg-shift", NULL);
+ if (prop)
+ reg_shift = *prop;
+
+ prop = of_get_property(dn, "pio-mode", NULL);
+ if (prop) {
+ pio_mode = *prop;
+ if (pio_mode > 6) {
+ dev_err(&ofdev->dev, "invalid pio-mode\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_info(&ofdev->dev, "pio-mode unspecified, assuming PIO0\n");
+ }
+
+ pio_mask = 1 << pio_mode;
+ pio_mask |= (1 << pio_mode) - 1;
+
+ return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, &irq_res,
+ reg_shift, pio_mask);
+}
+
+static int __devexit pata_of_platform_remove(struct of_device *ofdev)
+{
+ return __pata_platform_remove(&ofdev->dev);
+}
+
+static struct of_device_id pata_of_platform_match[] = {
+ { .compatible = "ata-generic", },
+ { .compatible = "electra-ide", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pata_of_platform_match);
+
+static struct of_platform_driver pata_of_platform_driver = {
+ .name = "pata_of_platform",
+ .match_table = pata_of_platform_match,
+ .probe = pata_of_platform_probe,
+ .remove = __devexit_p(pata_of_platform_remove),
+};
+
+static int __init pata_of_platform_init(void)
+{
+ return of_register_platform_driver(&pata_of_platform_driver);
+}
+module_init(pata_of_platform_init);
+
+static void __exit pata_of_platform_exit(void)
+{
+ of_unregister_platform_driver(&pata_of_platform_driver);
+}
+module_exit(pata_of_platform_exit);
+
+MODULE_DESCRIPTION("OF-platform PATA driver");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
new file mode 100644
index 0000000..c0dbc46
--- /dev/null
+++ b/drivers/ata/pata_oldpiix.c
@@ -0,0 +1,290 @@
+/*
+ * pata_oldpiix.c - Intel PATA/SATA controllers
+ *
+ * (C) 2005 Red Hat
+ *
+ * Some parts based on ata_piix.c by Jeff Garzik and others.
+ *
+ * Early PIIX differs significantly from the later PIIX as it lacks
+ * SITRE and the slave timing registers. This means that you have to
+ * set timing per channel, or be clever. Libata tells us whenever it
+ * does drive selection and we use this to reload the timings.
+ *
+ * Because of these behaviour differences the early PIIX gets its own
+ * driver module.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_oldpiix"
+#define DRV_VERSION "0.5.5"
+
+/**
+ * oldpiix_pre_reset - probe begin
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Set up cable type and use generic probe init
+ */
+
+static int oldpiix_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const struct pci_bits oldpiix_enable_bits[] = {
+ { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
+ { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
+ };
+
+ if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * oldpiix_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+	unsigned int idetm_port = ap->port_no ? 0x42 : 0x40;
+ u16 idetm_data;
+ int control = 0;
+
+ /*
+	 * See Intel Document 298600-004 for the timing programming rules
+ * for PIIX/ICH. Note that the early PIIX does not have the slave
+ * timing port at 0x44.
+ */
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ if (pio > 1)
+ control |= 1; /* TIME */
+ if (ata_pio_need_iordy(adev))
+ control |= 2; /* IE */
+
+ /* Intel specifies that the prefetch/posting is for disk only */
+ if (adev->class == ATA_DEV_ATA)
+ control |= 4; /* PPE */
+
+ pci_read_config_word(dev, idetm_port, &idetm_data);
+
+ /*
+ * Set PPE, IE and TIME as appropriate.
+ * Clear the other drive's timing bits.
+ */
+ if (adev->devno == 0) {
+ idetm_data &= 0xCCE0;
+ idetm_data |= control;
+ } else {
+ idetm_data &= 0xCC0E;
+ idetm_data |= (control << 4);
+ }
+ idetm_data |= (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ pci_write_config_word(dev, idetm_port, idetm_data);
+
+ /* Track which port is configured */
+ ap->private_data = adev;
+}
+
+/**
+ * oldpiix_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set MWDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ u8 idetm_port = ap->port_no ? 0x42 : 0x40;
+ u16 idetm_data;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ /*
+ * MWDMA is driven by the PIO timings. We must also enable
+ * IORDY unconditionally along with TIME1. PPE has already
+ * been set when the PIO timing was set.
+ */
+
+ unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
+ unsigned int control;
+ const unsigned int needed_pio[3] = {
+ XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+ };
+ int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+ pci_read_config_word(dev, idetm_port, &idetm_data);
+
+ control = 3; /* IORDY|TIME0 */
+ /* Intel specifies that the PPE functionality is for disk only */
+ if (adev->class == ATA_DEV_ATA)
+ control |= 4; /* PPE enable */
+
+ /* If the drive MWDMA is faster than it can do PIO then
+ we must force PIO into PIO0 */
+
+ if (adev->pio_mode < needed_pio[mwdma])
+ /* Enable DMA timing only */
+ control |= 8; /* PIO cycles in PIO0 */
+
+ /* Mask out the relevant control and timing bits we will load. Also
+ clear the other drive TIME register as a precaution */
+ if (adev->devno == 0) {
+ idetm_data &= 0xCCE0;
+ idetm_data |= control;
+ } else {
+ idetm_data &= 0xCC0E;
+ idetm_data |= (control << 4);
+ }
+ idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+ pci_write_config_word(dev, idetm_port, idetm_data);
+
+ /* Track which port is configured */
+ ap->private_data = adev;
+}
+
+/**
+ * oldpiix_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings if
+ * necessary. Our logic also clears TIME0/TIME1 for the other device so
+ * that, even if we get this wrong, cycles to the other device will
+ * be made PIO0.
+ */
+
+static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+
+ if (adev != ap->private_data) {
+ oldpiix_set_piomode(ap, adev);
+ if (ata_dma_enabled(adev))
+ oldpiix_set_dmamode(ap, adev);
+ }
+ return ata_sff_qc_issue(qc);
+}
+
+
+static struct scsi_host_template oldpiix_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations oldpiix_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_issue = oldpiix_qc_issue,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = oldpiix_set_piomode,
+ .set_dmamode = oldpiix_set_dmamode,
+ .prereset = oldpiix_pre_reset,
+};
+
+
+/**
+ * oldpiix_init_one - Register PIIX ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in oldpiix_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer. We probe for combined mode (sigh),
+ * and then hand over control to libata, for it to do the rest.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+		.mwdma_mask = 0x07, /* mwdma0-2 */
+ .port_ops = &oldpiix_pata_ops,
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL);
+}
+
+static const struct pci_device_id oldpiix_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x1230), },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver oldpiix_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = oldpiix_pci_tbl,
+ .probe = oldpiix_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init oldpiix_init(void)
+{
+ return pci_register_driver(&oldpiix_pci_driver);
+}
+
+static void __exit oldpiix_exit(void)
+{
+ pci_unregister_driver(&oldpiix_pci_driver);
+}
+
+module_init(oldpiix_init);
+module_exit(oldpiix_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
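
The master/slave nibble packing shared by oldpiix_set_piomode() and oldpiix_set_dmamode() is the subtle part of this driver. Below is a stand-alone mirror of that packing; masks and the timing layout are copied from the code, while the example values are arbitrary.

	#include <stdio.h>
	#include <stdint.h>

	/* Mirror of the oldpiix IDETM packing: the master's control bits sit
	 * in the low nybble, the slave's are shifted up by 4, and the ISP/RTC
	 * timing nybbles go to bits 12 and 8. */
	static uint16_t pack_idetm(uint16_t idetm, int devno, int control,
				   int isp, int rtc)
	{
		if (devno == 0) {
			idetm &= 0xCCE0;
			idetm |= control;
		} else {
			idetm &= 0xCC0E;
			idetm |= control << 4;
		}
		return idetm | (isp << 12) | (rtc << 8);
	}

	int main(void)
	{
		/* PIO4 disk on the master: TIME|IE|PPE = 7, ISP/RTC = 2/3 */
		printf("IDETM = 0x%04x\n", pack_idetm(0x8000, 0, 7, 2, 3));
		return 0;
	}
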
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
new file mode 100644
index 0000000..e4fa4d5
--- /dev/null
+++ b/drivers/ata/pata_opti.c
@@ -0,0 +1,214 @@
+/*
+ * pata_opti.c - Opti PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ *
+ * Based on
+ * linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002
+ *
+ * Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
+ *
+ * Authors:
+ * Jaromir Koutek <miri@punknet.cz>,
+ * Jan Harkes <jaharkes@cwi.nl>,
+ * Mark Lord <mlord@pobox.com>
+ * Some parts of code are from ali14xx.c and from rz1000.c.
+ *
+ * Also consulted the FreeBSD prototype driver by Kevin Day to try
+ * and resolve some confusions. Further documentation can be found in
+ * Ralf Brown's interrupt list
+ *
+ * If you have other variants of the Opti range (Viper/Vendetta) please
+ * try this driver with those PCI idents and report back. For the later
+ * chips see the pata_optidma driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_opti"
+#define DRV_VERSION "0.2.9"
+
+enum {
+ READ_REG = 0, /* index of Read cycle timing register */
+ WRITE_REG = 1, /* index of Write cycle timing register */
+ CNTRL_REG = 3, /* index of Control register */
+ STRAP_REG = 5, /* index of Strap register */
+ MISC_REG = 6 /* index of Miscellaneous register */
+};
+
+/**
+ * opti_pre_reset - probe begin
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Set up cable type and use generic probe init
+ */
+
+static int opti_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const struct pci_bits opti_enable_bits[] = {
+ { 0x45, 1, 0x80, 0x00 },
+ { 0x40, 1, 0x08, 0x00 }
+ };
+
+ if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * opti_write_reg - control register setup
+ * @ap: ATA port
+ * @value: value
+ * @reg: control register number
+ *
+ * The Opti uses magic 'trapdoor' register accesses to do configuration
+ * rather than using PCI space as other controllers do. The double inw
+ * on the error register activates configuration mode. We can then write
+ * the control register
+ */
+
+static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
+{
+ void __iomem *regio = ap->ioaddr.cmd_addr;
+
+ /* These 3 unlock the control register access */
+ ioread16(regio + 1);
+ ioread16(regio + 1);
+ iowrite8(3, regio + 2);
+
+ /* Do the I/O */
+ iowrite8(val, regio + reg);
+
+ /* Relock */
+ iowrite8(0x83, regio + 2);
+}
+
+/**
+ * opti_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the PIO mode setup. Timing numbers are taken from
+ * the FreeBSD driver and precomputed to keep the code clean. There
+ * are two tables, depending on the hardware clock speed.
+ */
+
+static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_device *pair = ata_dev_pair(adev);
+ int clock;
+ int pio = adev->pio_mode - XFER_PIO_0;
+ void __iomem *regio = ap->ioaddr.cmd_addr;
+ u8 addr;
+
+ /* Address table precomputed with prefetch off and a DCLK of 2 */
+ static const u8 addr_timing[2][5] = {
+ { 0x30, 0x20, 0x20, 0x10, 0x10 },
+ { 0x20, 0x20, 0x10, 0x10, 0x10 }
+ };
+ static const u8 data_rec_timing[2][5] = {
+ { 0x6B, 0x56, 0x42, 0x32, 0x31 },
+ { 0x58, 0x44, 0x32, 0x22, 0x21 }
+ };
+
+ iowrite8(0xff, regio + 5);
+ clock = ioread16(regio + 5) & 1;
+
+ /*
+ * As with many controllers the address setup time is shared
+ * and must suit both devices if present.
+ */
+
+ addr = addr_timing[clock][pio];
+ if (pair) {
+ /* Hardware constraint */
+ u8 pair_addr = addr_timing[clock][pair->pio_mode - XFER_PIO_0];
+ if (pair_addr > addr)
+ addr = pair_addr;
+ }
+
+ /* Commence primary programming sequence */
+ opti_write_reg(ap, adev->devno, MISC_REG);
+ opti_write_reg(ap, data_rec_timing[clock][pio], READ_REG);
+ opti_write_reg(ap, data_rec_timing[clock][pio], WRITE_REG);
+ opti_write_reg(ap, addr, MISC_REG);
+
+ /* Programming sequence complete, override strapping */
+ opti_write_reg(ap, 0x85, CNTRL_REG);
+}
+
+static struct scsi_host_template opti_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations opti_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = opti_set_piomode,
+ .prereset = opti_pre_reset,
+};
+
+static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .port_ops = &opti_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ static int printed_version;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+
+ return ata_pci_sff_init_one(dev, ppi, &opti_sht, NULL);
+}
+
+static const struct pci_device_id opti[] = {
+ { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
+ { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 1 },
+
+ { },
+};
+
+static struct pci_driver opti_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = opti,
+ .probe = opti_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init opti_init(void)
+{
+ return pci_register_driver(&opti_pci_driver);
+}
+
+static void __exit opti_exit(void)
+{
+ pci_unregister_driver(&opti_pci_driver);
+}
+
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Opti 621/621X");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, opti);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(opti_init);
+module_exit(opti_exit);
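
The trapdoor sequence in opti_write_reg() (two reads of base+1, write 3 to base+2, the config write, then 0x83 to relock) can be modelled without hardware. The toy simulation below only demonstrates the sequencing: the "opcodes" are the driver's constants, everything else is made up.

	#include <stdio.h>
	#include <stdint.h>

	static uint8_t regs[8];	/* pretend config register file */
	static int unlocked;
	static int err_reads;	/* reads of the error register (base + 1) */

	static void toy_read16(int off)
	{
		if (off == 1)
			err_reads++;	/* two of these arm the trapdoor */
	}

	static void toy_write8(uint8_t val, int off)
	{
		if (off == 2) {
			if (val == 3 && err_reads >= 2)
				unlocked = 1;			/* unlock done */
			else if (val == 0x83)
				unlocked = err_reads = 0;	/* relocked */
			return;
		}
		if (unlocked)
			regs[off] = val;	/* config write reaches the chip */
	}

	int main(void)
	{
		toy_read16(1);
		toy_read16(1);
		toy_write8(3, 2);	/* same order as opti_write_reg() */
		toy_write8(0x6B, 0);	/* READ_REG data/recovery timing */
		toy_write8(0x83, 2);

		printf("READ_REG = 0x%02x, relocked: %s\n",
		       regs[0], unlocked ? "no" : "yes");
		return 0;
	}
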
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
new file mode 100644
index 0000000..93bb6e9
--- /dev/null
+++ b/drivers/ata/pata_optidma.c
@@ -0,0 +1,469 @@
+/*
+ * pata_optidma.c - Opti DMA PATA for new ATA layer
+ * (C) 2006 Red Hat Inc
+ *
+ * The Opti DMA controllers are related to the older PIO PCI controllers
+ * and indeed the VLB ones. The main differences are that the timing
+ * numbers are now based off PCI clocks not VLB and differ, and that
+ * MWDMA is supported.
+ *
+ * This driver should support Viper-N+, FireStar, FireStar Plus.
+ *
+ * These devices support virtual DMA for reads (as on the CS5520). Later
+ * chips support UDMA33, but only if the rest of the board logic does,
+ * so you have to get this right. We don't support the virtual DMA
+ * but we do handle UDMA.
+ *
+ * Bits that are worth knowing
+ * Most control registers are shadowed into I/O registers
+ *	0x1F5 bit 0 tells you if the PCI/VLB clock is 33 or 25 MHz
+ *	Virtual DMA registers *move* between rev 0x02 and rev 0x10
+ *	UDMA requires a 66 MHz FSB
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_optidma"
+#define DRV_VERSION "0.3.2"
+
+enum {
+ READ_REG = 0, /* index of Read cycle timing register */
+ WRITE_REG = 1, /* index of Write cycle timing register */
+ CNTRL_REG = 3, /* index of Control register */
+ STRAP_REG = 5, /* index of Strap register */
+ MISC_REG = 6 /* index of Miscellaneous register */
+};
+
+static int pci_clock;	/* 0 = 33 MHz, 1 = 25 MHz */
+
+/**
+ * optidma_pre_reset - probe begin
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Set up cable type and use generic probe init
+ */
+
+static int optidma_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const struct pci_bits optidma_enable_bits = {
+ 0x40, 1, 0x08, 0x00
+ };
+
+ if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * optidma_unlock - unlock control registers
+ * @ap: ATA port
+ *
+ * Unlock the control register block for this adapter. Registers must not
+ * be unlocked in a situation where libata might look at them.
+ */
+
+static void optidma_unlock(struct ata_port *ap)
+{
+ void __iomem *regio = ap->ioaddr.cmd_addr;
+
+ /* These 3 unlock the control register access */
+ ioread16(regio + 1);
+ ioread16(regio + 1);
+ iowrite8(3, regio + 2);
+}
+
+/**
+ * optidma_lock - issue temporary relock
+ * @ap: ATA port
+ *
+ * Re-lock the configuration register settings.
+ */
+
+static void optidma_lock(struct ata_port *ap)
+{
+ void __iomem *regio = ap->ioaddr.cmd_addr;
+
+ /* Relock */
+ iowrite8(0x83, regio + 2);
+}
+
+/**
+ * optidma_mode_setup - set mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ * @mode: Mode to set
+ *
+ * Called to do the DMA or PIO mode setup. Timing numbers are all
+ * precomputed to keep the code clean. There are two tables, depending
+ * on the hardware clock speed.
+ *
+ * WARNING: While we do this the IDE registers vanish. If we take an
+ * IRQ here we depend on the host set locking to avoid catastrophe.
+ */
+
+static void optidma_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+ struct ata_device *pair = ata_dev_pair(adev);
+ int pio = adev->pio_mode - XFER_PIO_0;
+ int dma = adev->dma_mode - XFER_MW_DMA_0;
+ void __iomem *regio = ap->ioaddr.cmd_addr;
+ u8 addr;
+
+ /* Address table precomputed with a DCLK of 2 */
+ static const u8 addr_timing[2][5] = {
+ { 0x30, 0x20, 0x20, 0x10, 0x10 },
+ { 0x20, 0x20, 0x10, 0x10, 0x10 }
+ };
+ static const u8 data_rec_timing[2][5] = {
+ { 0x59, 0x46, 0x30, 0x20, 0x20 },
+ { 0x46, 0x32, 0x20, 0x20, 0x10 }
+ };
+ static const u8 dma_data_rec_timing[2][3] = {
+ { 0x76, 0x20, 0x20 },
+ { 0x54, 0x20, 0x10 }
+ };
+
+ /* Switch from IDE to control mode */
+ optidma_unlock(ap);
+
+
+ /*
+ * As with many controllers the address setup time is shared
+ * and must suit both devices if present. FIXME: Check if we
+ * need to look at slowest of PIO/DMA mode of either device
+ */
+
+ if (mode >= XFER_MW_DMA_0)
+ addr = 0;
+ else
+ addr = addr_timing[pci_clock][pio];
+
+ if (pair) {
+ u8 pair_addr;
+ /* Hardware constraint */
+ if (pair->dma_mode)
+ pair_addr = 0;
+ else
+ pair_addr = addr_timing[pci_clock][pair->pio_mode - XFER_PIO_0];
+ if (pair_addr > addr)
+ addr = pair_addr;
+ }
+
+ /* Commence primary programming sequence */
+ /* First we load the device number into the timing select */
+ iowrite8(adev->devno, regio + MISC_REG);
+ /* Now we load the data timings into read data/write data */
+ if (mode < XFER_MW_DMA_0) {
+ iowrite8(data_rec_timing[pci_clock][pio], regio + READ_REG);
+ iowrite8(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
+ } else if (mode < XFER_UDMA_0) {
+ iowrite8(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
+ iowrite8(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
+ }
+ /* Finally we load the address setup into the misc register */
+ iowrite8(addr | adev->devno, regio + MISC_REG);
+
+ /* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */
+ iowrite8(0x85, regio + CNTRL_REG);
+
+ /* Switch back to IDE mode */
+ optidma_lock(ap);
+
+ /* Note: at this point our programming is incomplete. We are
+ not supposed to program PCI 0x43 "things we hacked onto the chip"
+ until we've done both sets of PIO/DMA timings */
+}
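+
+/*
+ * Worked example (illustrative, values read off the tables above): on a
+ * 33MHz PCI bus (pci_clock = 0), programming device 0 for PIO3 writes
+ * data_rec_timing[0][3] = 0x20 to both READ_REG and WRITE_REG, then
+ * addr_timing[0][3] | devno = 0x10 to MISC_REG.
+ */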
+
+/**
+ * optiplus_mode_setup - DMA setup for Firestar Plus
+ * @ap: ATA port
+ * @adev: device
+ * @mode: desired mode
+ *
+ * The Firestar Plus has additional UDMA functionality for UDMA0-2 and
+ * requires that we do some additional work. Because the base work we must
+ * do is mostly shared we wrap the Firestar setup functionality in this
+ * one function.
+ */
+
+static void optiplus_mode_setup(struct ata_port *ap, struct ata_device *adev, u8 mode)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 udcfg;
+ u8 udslave;
+ int dev2 = 2 * adev->devno;
+ int unit = 2 * ap->port_no + adev->devno;
+ int udma = mode - XFER_UDMA_0;
+
+ pci_read_config_byte(pdev, 0x44, &udcfg);
+ if (mode <= XFER_UDMA_0) {
+ udcfg &= ~(1 << unit);
+ optidma_mode_setup(ap, adev, adev->dma_mode);
+ } else {
+ udcfg |= (1 << unit);
+ if (ap->port_no) {
+ pci_read_config_byte(pdev, 0x45, &udslave);
+ udslave &= ~(0x03 << dev2);
+ udslave |= (udma << dev2);
+ pci_write_config_byte(pdev, 0x45, udslave);
+ } else {
+ udcfg &= ~(0x30 << dev2);
+ udcfg |= (udma << (4 + dev2));
+ }
+ }
+ pci_write_config_byte(pdev, 0x44, udcfg);
+}
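+
+/*
+ * Register layout as implied by the code above (no datasheet to hand):
+ * PCI 0x44 carries one UDMA enable bit per unit in bits 0-3, plus the
+ * primary channel's UDMA mode in bits 4-5 (device 0) and 6-7 (device 1);
+ * PCI 0x45 carries the secondary channel's UDMA mode in bits 0-1 and 2-3.
+ */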
+
+/**
+ * optidma_set_pio_mode - PIO setup callback
+ * @ap: ATA port
+ * @adev: Device
+ *
+ * The libata core provides separate functions for handling PIO and
+ * DMA programming. The architecture of the Firestar makes it easier
+ * for us to have a common function so we provide wrappers
+ */
+
+static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
+{
+ optidma_mode_setup(ap, adev, adev->pio_mode);
+}
+
+/**
+ * optidma_set_dma_mode - DMA setup callback
+ * @ap: ATA port
+ * @adev: Device
+ *
+ * The libata core provides separate functions for handling PIO and
+ * DMA programming. The architecture of the Firestar makes it easier
+ * for us to have a common function so we provide wrappers
+ */
+
+static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
+{
+ optidma_mode_setup(ap, adev, adev->dma_mode);
+}
+
+/**
+ * optiplus_set_pio_mode - PIO setup callback
+ * @ap: ATA port
+ * @adev: Device
+ *
+ * The libata core provides separate functions for handling PIO and
+ * DMA programming. The architecture of the Firestar makes it easier
+ * for us to have a common function so we provide wrappers
+ */
+
+static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
+{
+ optiplus_mode_setup(ap, adev, adev->pio_mode);
+}
+
+/**
+ * optiplus_set_dma_mode - DMA setup callback
+ * @ap: ATA port
+ * @adev: Device
+ *
+ * The libata core provides separate functions for handling PIO and
+ * DMA programming. The architecture of the Firestar makes it easier
+ * for us to have a common function so we provide wrappers
+ */
+
+static void optiplus_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
+{
+ optiplus_mode_setup(ap, adev, adev->dma_mode);
+}
+
+/**
+ * optidma_make_bits43 - PCI setup helper
+ * @adev: ATA device
+ *
+ * Turn the ATA device setup into PCI configuration bits
+ * for register 0x43 and return the two bits needed.
+ */
+
+static u8 optidma_make_bits43(struct ata_device *adev)
+{
+ static const u8 bits43[5] = {
+ 0, 0, 0, 1, 2
+ };
+ if (!ata_dev_enabled(adev))
+ return 0;
+ if (adev->dma_mode)
+ return adev->dma_mode - XFER_MW_DMA_0;
+ return bits43[adev->pio_mode - XFER_PIO_0];
+}
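+
+/*
+ * Example: a device at MWDMA2 yields 2, as does a device at PIO4
+ * (bits43[4]), so optidma_set_mode() below packs 2 + (2 << 2) = 0x0A
+ * into this port's nibble of PCI register 0x43.
+ */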
+
+/**
+ * optidma_set_mode - mode setup
+ * @link: link to set up
+ *
+ * Use the standard setup to tune the chipset and then finalise the
+ * configuration by writing the nibble of extra bits of data into
+ * the chip.
+ */
+
+static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
+{
+ struct ata_port *ap = link->ap;
+ u8 r;
+ int nybble = 4 * ap->port_no;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int rc = ata_do_set_mode(link, r_failed);
+ if (rc == 0) {
+ pci_read_config_byte(pdev, 0x43, &r);
+
+ r &= (0x0F << nybble);
+ r |= (optidma_make_bits43(&link->device[0]) +
+ (optidma_make_bits43(&link->device[1]) << 2)) << nybble;
+ pci_write_config_byte(pdev, 0x43, r);
+ }
+ return rc;
+}
+
+static struct scsi_host_template optidma_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations optidma_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = optidma_set_pio_mode,
+ .set_dmamode = optidma_set_dma_mode,
+ .set_mode = optidma_set_mode,
+ .prereset = optidma_pre_reset,
+};
+
+static struct ata_port_operations optiplus_port_ops = {
+ .inherits = &optidma_port_ops,
+ .set_piomode = optiplus_set_pio_mode,
+ .set_dmamode = optiplus_set_dma_mode,
+};
+
+/**
+ * optiplus_with_udma - Look for UDMA capable setup
+ * @pdev: ATA controller
+ */
+
+static int optiplus_with_udma(struct pci_dev *pdev)
+{
+ u8 r;
+ int ret = 0;
+ int ioport = 0x22;
+ struct pci_dev *dev1;
+
+ /* Find function 1 */
+ dev1 = pci_get_device(0x1045, 0xC701, NULL);
+ if (dev1 == NULL)
+ return 0;
+
+ /* Rev must be >= 0x10 */
+ pci_read_config_byte(dev1, 0x08, &r);
+ if (r < 0x10)
+ goto done_nomsg;
+ /* Read the chipset system configuration to check our mode */
+ pci_read_config_byte(dev1, 0x5F, &r);
+ ioport |= (r << 8);
+ outb(0x10, ioport);
+ /* Must be 66MHz sync */
+ if ((inb(ioport + 2) & 1) == 0)
+ goto done;
+
+ /* Check the ATA arbitration/timing is suitable */
+ pci_read_config_byte(pdev, 0x42, &r);
+ if ((r & 0x36) != 0x36)
+ goto done;
+ pci_read_config_byte(dev1, 0x52, &r);
+ if (r & 0x80) /* IDEDIR disabled */
+ ret = 1;
+done:
+ if (ret == 0)
+ printk(KERN_WARNING "UDMA not supported in this configuration.\n");
+done_nomsg: /* Wrong chip revision */
+ pci_dev_put(dev1);
+ return ret;
+}
+
+static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info_82c700 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &optidma_port_ops
+ };
+ static const struct ata_port_info info_82c700_udma = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = 0x07,
+ .port_ops = &optiplus_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info_82c700, NULL };
+ static int printed_version;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(dev);
+ if (rc)
+ return rc;
+
+ /* Fixed location chipset magic */
+ inw(0x1F1);
+ inw(0x1F1);
+ pci_clock = inb(0x1F5) & 1; /* 0 = 33MHz, 1 = 25MHz */
+
+ if (optiplus_with_udma(dev))
+ ppi[0] = &info_82c700_udma;
+
+ return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL);
+}
+
+static const struct pci_device_id optidma[] = {
+ { PCI_VDEVICE(OPTI, 0xD568), }, /* Opti 82C700 */
+
+ { },
+};
+
+static struct pci_driver optidma_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = optidma,
+ .probe = optidma_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init optidma_init(void)
+{
+ return pci_register_driver(&optidma_pci_driver);
+}
+
+static void __exit optidma_exit(void)
+{
+ pci_unregister_driver(&optidma_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, optidma);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(optidma_init);
+module_exit(optidma_exit);
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
new file mode 100644
index 0000000..64b2e22
--- /dev/null
+++ b/drivers/ata/pata_pcmcia.c
@@ -0,0 +1,451 @@
+/*
+ * pata_pcmcia.c - PCMCIA PATA controller driver.
+ * Copyright 2005-2006 Red Hat Inc, all rights reserved.
+ * PCMCIA ident update Copyright 2006 Marcin Juszkiewicz
+ * <openembedded@hrw.one.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Heavily based upon ide-cs.c
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+
+
+#define DRV_NAME "pata_pcmcia"
+#define DRV_VERSION "0.3.3"
+
+/*
+ * Private data structure to glue stuff together
+ */
+
+struct ata_pcmcia_info {
+ struct pcmcia_device *pdev;
+ int ndev;
+ dev_node_t node;
+};
+
+/**
+ * pcmcia_set_mode - PCMCIA specific mode setup
+ * @link: link
+ * @r_failed_dev: Return pointer for failed device
+ *
+ * Perform the tuning and setup of the devices and timings, which
+ * for PCMCIA is the same as any other controller. We wrap it however
+ * as we need to spot hardware with incorrect or missing master/slave
+ * decode, which alas is embarrassingly common in the PC world
+ */
+
+static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+{
+ struct ata_device *master = &link->device[0];
+ struct ata_device *slave = &link->device[1];
+
+ if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
+ return ata_do_set_mode(link, r_failed_dev);
+
+ if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
+ ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
+ /* Suspicious match, but could be two cards from
+ the same vendor - check serial */
+ if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
+ ATA_ID_SERNO_LEN) == 0 && master->id[ATA_ID_SERNO] >> 8) {
+ ata_dev_printk(slave, KERN_WARNING, "is a ghost device, ignoring.\n");
+ ata_dev_disable(slave);
+ }
+ }
+ return ata_do_set_mode(link, r_failed_dev);
+}
+
+/**
+ * pcmcia_set_mode_8bit - PCMCIA specific mode setup
+ * @link: link
+ * @r_failed_dev: Return pointer for failed device
+ *
+ * For the simple emulated 8bit stuff the less we do the better.
+ */
+
+static int pcmcia_set_mode_8bit(struct ata_link *link,
+ struct ata_device **r_failed_dev)
+{
+ return 0;
+}
+
+/**
+ * ata_data_xfer_8bit - Transfer data by 8bit PIO
+ * @dev: device to target
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @rw: read/write
+ *
+ * Transfer data from/to the device data register by 8 bit PIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+
+ if (rw == READ)
+ ioread8_rep(ap->ioaddr.data_addr, buf, buflen);
+ else
+ iowrite8_rep(ap->ioaddr.data_addr, buf, buflen);
+
+ return buflen;
+}
+
+
+static struct scsi_host_template pcmcia_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pcmcia_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_40wire,
+ .set_mode = pcmcia_set_mode,
+};
+
+static struct ata_port_operations pcmcia_8bit_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_data_xfer_8bit,
+ .cable_detect = ata_cable_40wire,
+ .set_mode = pcmcia_set_mode_8bit,
+};
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+
+struct pcmcia_config_check {
+ unsigned long ctl_base;
+ int skip_vcc;
+ int is_kme;
+};
+
+static int pcmcia_check_one_config(struct pcmcia_device *pdev,
+ cistpl_cftable_entry_t *cfg,
+ cistpl_cftable_entry_t *dflt,
+ unsigned int vcc,
+ void *priv_data)
+{
+ struct pcmcia_config_check *stk = priv_data;
+
+ /* Check for matching Vcc, unless we're desperate */
+ if (!stk->skip_vcc) {
+ if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
+ if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000)
+ return -ENODEV;
+ } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) {
+ if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000)
+ return -ENODEV;
+ }
+ }
+
+ if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
+ pdev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+ else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM))
+ pdev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+
+ if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
+ cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
+ pdev->io.BasePort1 = io->win[0].base;
+ pdev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
+ if (!(io->flags & CISTPL_IO_16BIT))
+ pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ if (io->nwin == 2) {
+ pdev->io.NumPorts1 = 8;
+ pdev->io.BasePort2 = io->win[1].base;
+ pdev->io.NumPorts2 = (stk->is_kme) ? 2 : 1;
+ if (pcmcia_request_io(pdev, &pdev->io) != 0)
+ return -ENODEV;
+ stk->ctl_base = pdev->io.BasePort2;
+ } else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
+ pdev->io.NumPorts1 = io->win[0].len;
+ pdev->io.NumPorts2 = 0;
+ if (pcmcia_request_io(pdev, &pdev->io) != 0)
+ return -ENODEV;
+ stk->ctl_base = pdev->io.BasePort1 + 0x0e;
+ } else
+ return -ENODEV;
+ /* If we've got this far, we're done */
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/**
+ * pcmcia_init_one - attach a PCMCIA interface
+ * @pdev: pcmcia device
+ *
+ * Register a PCMCIA IDE interface. Such interfaces run at PIO 0 and
+ * use a shared IRQ.
+ */
+
+static int pcmcia_init_one(struct pcmcia_device *pdev)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ struct ata_pcmcia_info *info;
+ struct pcmcia_config_check *stk = NULL;
+ int last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM, p;
+ unsigned long io_base, ctl_base;
+ void __iomem *io_addr, *ctl_addr;
+ int n_ports = 1;
+ struct ata_port_operations *ops = &pcmcia_port_ops;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ return -ENOMEM;
+
+ /* Glue stuff together. FIXME: We may be able to get rid of info with care */
+ info->pdev = pdev;
+ pdev->priv = info;
+
+ /* Set up attributes in order to probe card and get resources */
+ pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
+ pdev->io.IOAddrLines = 3;
+ pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
+ pdev->irq.IRQInfo1 = IRQ_LEVEL_ID;
+ pdev->conf.Attributes = CONF_ENABLE_IRQ;
+ pdev->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* See if we have a manufacturer identifier. Use it to set is_kme for
+ vendor quirks */
+ is_kme = ((pdev->manf_id == MANFID_KME) &&
+ ((pdev->card_id == PRODID_KME_KXLC005_A) ||
+ (pdev->card_id == PRODID_KME_KXLC005_B)));
+
+ /* Allocate resource probing structures */
+
+ stk = kzalloc(sizeof(*stk), GFP_KERNEL);
+ if (!stk)
+ goto out1;
+ stk->is_kme = is_kme;
+ stk->skip_vcc = io_base = ctl_base = 0;
+
+ if (pcmcia_loop_config(pdev, pcmcia_check_one_config, stk)) {
+ stk->skip_vcc = 1;
+ if (pcmcia_loop_config(pdev, pcmcia_check_one_config, stk))
+ goto failed; /* No suitable config found */
+ }
+ io_base = pdev->io.BasePort1;
+ ctl_base = stk->ctl_base;
+ CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq));
+ CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf));
+
+ /* iomap */
+ ret = -ENOMEM;
+ io_addr = devm_ioport_map(&pdev->dev, io_base, 8);
+ ctl_addr = devm_ioport_map(&pdev->dev, ctl_base, 1);
+ if (!io_addr || !ctl_addr)
+ goto failed;
+
+ /* Success. Disable the IRQ nIEN line, do quirks */
+ iowrite8(0x02, ctl_addr);
+ if (is_kme)
+ iowrite8(0x81, ctl_addr + 0x01);
+
+ /* FIXME: Could be more ports at base + 0x10 but we only deal with
+ one right now */
+ if (pdev->io.NumPorts1 >= 0x20)
+ n_ports = 2;
+
+ if (pdev->manf_id == 0x0097 && pdev->card_id == 0x1620)
+ ops = &pcmcia_8bit_port_ops;
+ /*
+ * Having done the PCMCIA plumbing the ATA side is relatively
+ * sane.
+ */
+ ret = -ENOMEM;
+ host = ata_host_alloc(&pdev->dev, n_ports);
+ if (!host)
+ goto failed;
+
+ for (p = 0; p < n_ports; p++) {
+ ap = host->ports[p];
+
+ ap->ops = ops;
+ ap->pio_mask = 1; /* ISA so PIO 0 cycles */
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ ap->ioaddr.cmd_addr = io_addr + 0x10 * p;
+ ap->ioaddr.altstatus_addr = ctl_addr + 0x10 * p;
+ ap->ioaddr.ctl_addr = ctl_addr + 0x10 * p;
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io_base, ctl_base);
+ }
+
+ /* activate */
+ ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt,
+ IRQF_SHARED, &pcmcia_sht);
+ if (ret)
+ goto failed;
+
+ info->ndev = 1;
+ kfree(stk);
+ return 0;
+
+cs_failed:
+ cs_error(pdev, last_fn, last_ret);
+failed:
+ kfree(stk);
+ info->ndev = 0;
+ pcmcia_disable_device(pdev);
+out1:
+ kfree(info);
+ return ret;
+}
+
+/**
+ * pcmcia_remove_one - unplug a PCMCIA interface
+ * @pdev: pcmcia device
+ *
+ * A PCMCIA ATA device has been unplugged. Perform the needed
+ * cleanup. Also called on module unload for any active devices.
+ */
+
+static void pcmcia_remove_one(struct pcmcia_device *pdev)
+{
+ struct ata_pcmcia_info *info = pdev->priv;
+ struct device *dev = &pdev->dev;
+
+ if (info != NULL) {
+ /* If we have attached the device to the ATA layer, detach it */
+ if (info->ndev) {
+ struct ata_host *host = dev_get_drvdata(dev);
+ ata_host_detach(host);
+ }
+ info->ndev = 0;
+ pdev->priv = NULL;
+ }
+ pcmcia_disable_device(pdev);
+ kfree(info);
+}
+
+static struct pcmcia_device_id pcmcia_devices[] = {
+ PCMCIA_DEVICE_FUNC_ID(4),
+ PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */
+ PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
+ PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
+ PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
+ PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
+ PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
+ PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
+ PCMCIA_DEVICE_MANF_CARD(0x004f, 0x0000), /* Kingston */
+ PCMCIA_DEVICE_MANF_CARD(0x0097, 0x1620), /* TI emulated */
+ PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
+ PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
+ PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
+ PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
+ PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
+ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
+ PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
+ PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
+ PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
+ PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
+ PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
+ PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
+ PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
+ PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
+ PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf),
+ PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591),
+ PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
+ PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
+ PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
+ PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
+ PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
+ PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
+ PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
+ PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
+ PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee),
+ PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
+ PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
+ PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
+ PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
+ PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
+ PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
+ PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
+ PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+ PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
+ PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
+ PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
+ PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
+ PCMCIA_DEVICE_PROD_ID2("Flash Card", 0x5a362506),
+ PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices);
+
+static struct pcmcia_driver pcmcia_driver = {
+ .owner = THIS_MODULE,
+ .drv = {
+ .name = DRV_NAME,
+ },
+ .id_table = pcmcia_devices,
+ .probe = pcmcia_init_one,
+ .remove = pcmcia_remove_one,
+};
+
+static int __init pcmcia_init(void)
+{
+ return pcmcia_register_driver(&pcmcia_driver);
+}
+
+static void __exit pcmcia_exit(void)
+{
+ pcmcia_unregister_driver(&pcmcia_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for PCMCIA ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(pcmcia_init);
+module_exit(pcmcia_exit);
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
new file mode 100644
index 0000000..0e1c2c1
--- /dev/null
+++ b/drivers/ata/pata_pdc2027x.c
@@ -0,0 +1,783 @@
+/*
+ * Promise PATA TX2/TX4/TX2000/133 IDE driver for pdc20268 to pdc20277.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Ported to libata by:
+ * Albert Lee <albertcc@tw.ibm.com> IBM Corporation
+ *
+ * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
+ * Portions Copyright (C) 1999 Promise Technology, Inc.
+ *
+ * Author: Frank Tiernan (frankt@promise.com)
+ * Released under terms of General Public License
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware information only available under NDA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_pdc2027x"
+#define DRV_VERSION "1.0"
+#undef PDC_DEBUG
+
+#ifdef PDC_DEBUG
+#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
+#else
+#define PDPRINTK(fmt, args...)
+#endif
+
+enum {
+ PDC_MMIO_BAR = 5,
+
+ PDC_UDMA_100 = 0,
+ PDC_UDMA_133 = 1,
+
+ PDC_100_MHZ = 100000000,
+ PDC_133_MHZ = 133333333,
+
+ PDC_SYS_CTL = 0x1100,
+ PDC_ATA_CTL = 0x1104,
+ PDC_GLOBAL_CTL = 0x1108,
+ PDC_CTCR0 = 0x110C,
+ PDC_CTCR1 = 0x1110,
+ PDC_BYTE_COUNT = 0x1120,
+ PDC_PLL_CTL = 0x1202,
+};
+
+static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline);
+static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
+static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask);
+static int pdc2027x_cable_detect(struct ata_port *ap);
+static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed);
+
+/*
+ * ATA Timing Tables based on 133MHz controller clock.
+ * These tables are only used when the controller is in 133MHz clock.
+ * If the controller is in 100MHz clock, the ASIC hardware will
+ * set the timing registers automatically when "set feature" command
+ * is issued to the device. However, if the controller clock is 133MHz,
+ * the following tables must be used.
+ */
+static struct pdc2027x_pio_timing {
+ u8 value0, value1, value2;
+} pdc2027x_pio_timing_tbl [] = {
+ { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
+ { 0x46, 0x29, 0xa4 }, /* PIO mode 1 */
+ { 0x23, 0x26, 0x64 }, /* PIO mode 2 */
+ { 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
+ { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
+};
+
+static struct pdc2027x_mdma_timing {
+ u8 value0, value1;
+} pdc2027x_mdma_timing_tbl [] = {
+ { 0xdf, 0x5f }, /* MDMA mode 0 */
+ { 0x6b, 0x27 }, /* MDMA mode 1 */
+ { 0x69, 0x25 }, /* MDMA mode 2 */
+};
+
+static struct pdc2027x_udma_timing {
+ u8 value0, value1, value2;
+} pdc2027x_udma_timing_tbl [] = {
+ { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
+ { 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
+ { 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
+ { 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
+ { 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
+ { 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
+ { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
+};
+
+static const struct pci_device_id pdc2027x_pci_tbl[] = {
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), PDC_UDMA_100 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), PDC_UDMA_133 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), PDC_UDMA_100 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), PDC_UDMA_133 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), PDC_UDMA_133 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), PDC_UDMA_133 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), PDC_UDMA_133 },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver pdc2027x_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = pdc2027x_pci_tbl,
+ .probe = pdc2027x_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static struct scsi_host_template pdc2027x_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pdc2027x_pata100_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .check_atapi_dma = pdc2027x_check_atapi_dma,
+ .cable_detect = pdc2027x_cable_detect,
+ .prereset = pdc2027x_prereset,
+};
+
+static struct ata_port_operations pdc2027x_pata133_ops = {
+ .inherits = &pdc2027x_pata100_ops,
+ .mode_filter = pdc2027x_mode_filter,
+ .set_piomode = pdc2027x_set_piomode,
+ .set_dmamode = pdc2027x_set_dmamode,
+ .set_mode = pdc2027x_set_mode,
+};
+
+static struct ata_port_info pdc2027x_port_info[] = {
+ /* PDC_UDMA_100 */
+ {
+ .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
+ ATA_FLAG_MMIO,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5, /* udma0-5 */
+ .port_ops = &pdc2027x_pata100_ops,
+ },
+ /* PDC_UDMA_133 */
+ {
+ .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
+ ATA_FLAG_MMIO,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6, /* udma0-6 */
+ .port_ops = &pdc2027x_pata133_ops,
+ },
+};
+
+MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Albert Lee");
+MODULE_DESCRIPTION("libata driver module for Promise PDC20268 to PDC20277");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl);
+
+/**
+ * port_mmio - Get the MMIO address of PDC2027x extended registers
+ * @ap: Port
+ * @offset: offset from mmio base
+ */
+static inline void __iomem *port_mmio(struct ata_port *ap, unsigned int offset)
+{
+ return ap->host->iomap[PDC_MMIO_BAR] + ap->port_no * 0x100 + offset;
+}
+
+/**
+ * dev_mmio - Get the MMIO address of PDC2027x extended registers
+ * @ap: Port
+ * @adev: device
+ * @offset: offset from mmio base
+ */
+static inline void __iomem *dev_mmio(struct ata_port *ap, struct ata_device *adev, unsigned int offset)
+{
+ u8 adj = (adev->devno) ? 0x08 : 0x00;
+ return port_mmio(ap, offset) + adj;
+}
+
+/**
+ * pdc2027x_pata_cable_detect - Probe host controller cable detect info
+ * @ap: Port for which cable detect info is desired
+ *
+ * Read 80c cable indicator from Promise extended register.
+ * This register is latched when the system is reset.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+static int pdc2027x_cable_detect(struct ata_port *ap)
+{
+ u32 cgcr;
+
+ /* check cable detect results */
+ cgcr = ioread32(port_mmio(ap, PDC_GLOBAL_CTL));
+ if (cgcr & (1 << 26))
+ goto cbl40;
+
+ PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no);
+
+ return ATA_CBL_PATA80;
+cbl40:
+ printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no);
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * pdc2027x_port_enabled - Check PDC ATA control register to see whether the port is enabled.
+ * @ap: Port to check
+ */
+static inline int pdc2027x_port_enabled(struct ata_port *ap)
+{
+ return ioread8(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
+}
+
+/**
+ * pdc2027x_prereset - prereset for PATA host controller
+ * @link: Target link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Probe init, including cable detection.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline)
+{
+ /* Check whether port enabled */
+ if (!pdc2027x_port_enabled(link->ap))
+ return -ENOENT;
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * pdc2027x_mode_filter - mode selection filter
+ * @adev: ATA device
+ * @mask: list of modes proposed
+ *
+ * Block UDMA on devices that cause trouble with this controller.
+ */
+
+static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+ struct ata_device *pair = ata_dev_pair(adev);
+
+ if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL)
+ return ata_bmdma_mode_filter(adev, mask);
+
+ /* Check for slave of a Maxtor at UDMA6 */
+ ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
+ ATA_ID_PROD_LEN + 1);
+ /* If the master is a Maxtor in UDMA6 then the slave should not use UDMA 6 */
+ if (strstr(model_num, "Maxtor") && pair->dma_mode == XFER_UDMA_6)
+ mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
+
+ return ata_bmdma_mode_filter(adev, mask);
+}
+
+/**
+ * pdc2027x_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port to configure
+ * @adev: device to configure; the PIO mode (0 - 4) is taken from @adev->pio_mode
+ *
+ * Set PIO mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ u32 ctcr0, ctcr1;
+
+ PDPRINTK("adev->pio_mode[%X]\n", adev->pio_mode);
+
+ /* Sanity check */
+ if (pio > 4) {
+ printk(KERN_ERR DRV_NAME ": Unknown pio mode [%d] ignored\n", pio);
+ return;
+ }
+
+ /* Set the PIO timing registers using value table for 133MHz */
+ PDPRINTK("Set pio regs... \n");
+
+ ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
+ ctcr0 &= 0xffff0000;
+ ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
+ (pdc2027x_pio_timing_tbl[pio].value1 << 8);
+ iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
+
+ ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
+ ctcr1 &= 0x00ffffff;
+ ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
+ iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
+
+ PDPRINTK("Set pio regs done\n");
+
+ PDPRINTK("Set to pio mode[%u] \n", pio);
+}
+
+/**
+ * pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings
+ * @ap: Port to configure
+ * @adev: device to configure; the UDMA mode (XFER_UDMA_0 to XFER_UDMA_6)
+ * is taken from @adev->dma_mode
+ *
+ * Set UDMA mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int dma_mode = adev->dma_mode;
+ u32 ctcr0, ctcr1;
+
+ if ((dma_mode >= XFER_UDMA_0) &&
+ (dma_mode <= XFER_UDMA_6)) {
+ /* Set the UDMA timing registers with value table for 133MHz */
+ unsigned int udma_mode = dma_mode & 0x07;
+
+ if (dma_mode == XFER_UDMA_2) {
+ /*
+ * Turn off tHOLD.
+ * If tHOLD is '1', the hardware will add half a clock for data hold time.
+ * This code segment seems to have no effect, as tHOLD is overwritten below.
+ */
+ ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
+ iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
+ }
+
+ PDPRINTK("Set udma regs... \n");
+
+ ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
+ ctcr1 &= 0xff000000;
+ ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 |
+ (pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) |
+ (pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
+ iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
+
+ PDPRINTK("Set udma regs done\n");
+
+ PDPRINTK("Set to udma mode[%u] \n", udma_mode);
+
+ } else if ((dma_mode >= XFER_MW_DMA_0) &&
+ (dma_mode <= XFER_MW_DMA_2)) {
+ /* Set the MDMA timing registers with value table for 133MHz */
+ unsigned int mdma_mode = dma_mode & 0x07;
+
+ PDPRINTK("Set mdma regs... \n");
+ ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
+
+ ctcr0 &= 0x0000ffff;
+ ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) |
+ (pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
+
+ iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
+ PDPRINTK("Set mdma regs done\n");
+
+ PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
+ } else {
+ printk(KERN_ERR DRV_NAME ": Unknown dma mode [%u] ignored\n", dma_mode);
+ }
+}
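+
+/*
+ * Example (values from pdc2027x_udma_timing_tbl above): for UDMA5 the
+ * table entry is { 0x1a, 0x02, 0xcb }, so the low 24 bits of CTCR1
+ * become 0x1a | (0x02 << 8) | (0xcb << 16) = 0xcb021a.
+ */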
+
+/**
+ * pdc2027x_set_mode - Set the timing registers back to correct values.
+ * @link: link to configure
+ * @r_failed: Returned device for failure
+ *
+ * The pdc2027x hardware will look at "SET FEATURES" and change the timing registers
+ * automatically. The values it sets can be incorrect when the PLL runs at 133MHz.
+ * This function overwrites those possibly incorrect values with correct ones.
+ */
+static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_device *dev;
+ int rc;
+
+ rc = ata_do_set_mode(link, r_failed);
+ if (rc < 0)
+ return rc;
+
+ ata_link_for_each_dev(dev, link) {
+ if (ata_dev_enabled(dev)) {
+
+ pdc2027x_set_piomode(ap, dev);
+
+ /*
+ * Enable prefetch if the device support PIO only.
+ */
+ if (dev->xfer_shift == ATA_SHIFT_PIO) {
+ u32 ctcr1 = ioread32(dev_mmio(ap, dev, PDC_CTCR1));
+ ctcr1 |= (1 << 25);
+ iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
+
+ PDPRINTK("Turn on prefetch\n");
+ } else {
+ pdc2027x_set_dmamode(ap, dev);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * pdc2027x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
+ * @qc: Metadata associated with taskfile to check
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ *
+ * RETURNS: 0 when ATAPI DMA can be used
+ * 1 otherwise
+ */
+static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *cmd = qc->scsicmd;
+ u8 *scsicmd = cmd->cmnd;
+ int rc = 1; /* atapi dma off by default */
+
+ /*
+ * This workaround is from Promise's GPL driver.
+ * If ATAPI DMA is used for commands not in the
+ * following whitelist, e.g. MODE_SENSE and REQUEST_SENSE,
+ * pdc2027x might hit the lost-IRQ problem.
+ */
+ switch (scsicmd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_6:
+ case WRITE_6:
+ case 0xad: /* READ_DVD_STRUCTURE */
+ case 0xbe: /* READ_CD */
+ /* ATAPI DMA is ok */
+ rc = 0;
+ break;
+ default:
+ ;
+ }
+
+ return rc;
+}
+
+/**
+ * pdc_read_counter - Read the ctr counter
+ * @host: target ATA host
+ */
+
+static long pdc_read_counter(struct ata_host *host)
+{
+ void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
+ long counter;
+ int retry = 1;
+ u32 bccrl, bccrh, bccrlv, bccrhv;
+
+retry:
+ bccrl = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
+ bccrh = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
+
+ /* Read the counter values again for verification */
+ bccrlv = ioread32(mmio_base + PDC_BYTE_COUNT) & 0x7fff;
+ bccrhv = ioread32(mmio_base + PDC_BYTE_COUNT + 0x100) & 0x7fff;
+
+ counter = (bccrh << 15) | bccrl;
+
+ PDPRINTK("bccrh [%X] bccrl [%X]\n", bccrh, bccrl);
+ PDPRINTK("bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
+
+ /*
+ * The 30-bit decreasing counter is read in two pieces.
+ * An incorrect value may be read when both bccrh and bccrl are changing.
+ * E.g. when the counter decreases from 7900 to 78FF, the wrong value 7800 might be read.
+ */
+ if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) {
+ retry--;
+ PDPRINTK("rereading counter\n");
+ goto retry;
+ }
+
+ return counter;
+}
+
+/**
+ * pdc_adjust_pll - Adjust the PLL output for the detected input clock.
+ *
+ * @host: target ATA host
+ * @pll_clock: the input of the PLL in Hz
+ * @board_idx: board identifier (selects the 100MHz or 133MHz target rate)
+ */
+static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int board_idx)
+{
+ void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
+ u16 pll_ctl;
+ long pll_clock_khz = pll_clock / 1000;
+ long pout_required = board_idx ? PDC_133_MHZ : PDC_100_MHZ;
+ long ratio = pout_required / pll_clock_khz;
+ int F, R;
+
+ /* Sanity check */
+ if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) {
+ printk(KERN_ERR DRV_NAME ": Invalid PLL input clock %ldkHz, give up!\n", pll_clock_khz);
+ return;
+ }
+
+#ifdef PDC_DEBUG
+ PDPRINTK("pout_required is %ld\n", pout_required);
+
+ /* Show the current clock value of PLL control register
+ * (maybe already configured by the firmware)
+ */
+ pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
+
+ PDPRINTK("pll_ctl[%X]\n", pll_ctl);
+#endif
+
+ /*
+ * Calculate F, R and NO (the output divider) from the ratio, using
+ * POUT = PIN * (F + 2) / ((R + 2) * NO)
+ */
+ if (ratio < 8600L) { /* 8.6x */
+ /* Using NO = 0x01, R = 0x0D */
+ R = 0x0d;
+ } else if (ratio < 12900L) { /* 12.9x */
+ /* Using NO = 0x01, R = 0x08 */
+ R = 0x08;
+ } else if (ratio < 16100L) { /* 16.1x */
+ /* Using NO = 0x01, R = 0x06 */
+ R = 0x06;
+ } else if (ratio < 64000L) { /* 64x */
+ R = 0x00;
+ } else {
+ /* Invalid ratio */
+ printk(KERN_ERR DRV_NAME ": Invalid ratio %ld, give up!\n", ratio);
+ return;
+ }
+
+ F = (ratio * (R+2)) / 1000 - 2;
+
+ if (unlikely(F < 0 || F > 127)) {
+ /* Invalid F */
+ printk(KERN_ERR DRV_NAME ": F[%d] invalid!\n", F);
+ return;
+ }
+
+ PDPRINTK("F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
+
+ pll_ctl = (R << 8) | F;
+
+ PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
+
+ iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL);
+ ioread16(mmio_base + PDC_PLL_CTL); /* flush */
+
+ /* Wait the PLL circuit to be stable */
+ mdelay(30);
+
+#ifdef PDC_DEBUG
+ /*
+ * Show the current clock value of PLL control register
+ * (maybe configured by the firmware)
+ */
+ pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
+
+ PDPRINTK("pll_ctl[%X]\n", pll_ctl);
+#endif
+
+ return;
+}
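+
+/*
+ * Worked example (illustrative): with pll_clock = 16949000 Hz on a UDMA133
+ * board, ratio = 133333333 / 16949 = 7867 (7.867x), so R = 0x0d and
+ * F = (7867 * 15) / 1000 - 2 = 116, giving a PLL output of roughly
+ * 16.949MHz * (116 + 2) / (13 + 2) = 133.3MHz.
+ */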
+
+/**
+ * pdc_detect_pll_input_clock - Detect the PLL input clock in Hz.
+ * @host: target ATA host
+ *
+ * Ex. 16949000 for a pdc20275 on a 33MHz PCI bus, i.e. half of the PCI clock.
+ */
+static long pdc_detect_pll_input_clock(struct ata_host *host)
+{
+ void __iomem *mmio_base = host->iomap[PDC_MMIO_BAR];
+ u32 scr;
+ long start_count, end_count;
+ struct timeval start_time, end_time;
+ long pll_clock, usec_elapsed;
+
+ /* Start the test mode */
+ scr = ioread32(mmio_base + PDC_SYS_CTL);
+ PDPRINTK("scr[%X]\n", scr);
+ iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
+ ioread32(mmio_base + PDC_SYS_CTL); /* flush */
+
+ /* Read current counter value */
+ start_count = pdc_read_counter(host);
+ do_gettimeofday(&start_time);
+
+ /* Let the counter run for 100 ms. */
+ mdelay(100);
+
+ /* Read the counter values again */
+ end_count = pdc_read_counter(host);
+ do_gettimeofday(&end_time);
+
+ /* Stop the test mode */
+ scr = ioread32(mmio_base + PDC_SYS_CTL);
+ PDPRINTK("scr[%X]\n", scr);
+ iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
+ ioread32(mmio_base + PDC_SYS_CTL); /* flush */
+
+ /* calculate the input clock in Hz */
+ usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
+ (end_time.tv_usec - start_time.tv_usec);
+
+ pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 *
+ (100000000 / usec_elapsed);
+
+ PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count);
+ PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock);
+
+ return pll_clock;
+}
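+
+/*
+ * Worked example (illustrative): if the counter drops by 1694900 over a
+ * measured 100000 us, pll_clock = 1694900 / 100 * (100000000 / 100000) =
+ * 16949 * 1000 = 16949000 Hz, matching the example in the kernel-doc above.
+ */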
+
+/**
+ * pdc_hardware_init - Initialize the hardware.
+ * @host: target ATA host
+ * @board_idx: board identifier
+ */
+static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
+{
+ long pll_clock;
+
+ /*
+ * Detect the PLL input clock rate.
+ * On some systems the PCI bus runs at a non-standard clock rate,
+ * e.g. 25MHz or 40MHz, so we have to adjust the cycle_time.
+ * The pdc20275 controller employs a PLL circuit to help correct
+ * the timing register settings.
+ */
+ pll_clock = pdc_detect_pll_input_clock(host);
+
+ dev_printk(KERN_INFO, host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
+
+ /* Adjust PLL control register */
+ pdc_adjust_pll(host, pll_clock, board_idx);
+
+ return 0;
+}
+
+/**
+ * pdc_ata_setup_port - setup the mmio address
+ * @port: ata ioports to setup
+ * @base: base address
+ */
+static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+ port->cmd_addr =
+ port->data_addr = base;
+ port->feature_addr =
+ port->error_addr = base + 0x05;
+ port->nsect_addr = base + 0x0a;
+ port->lbal_addr = base + 0x0f;
+ port->lbam_addr = base + 0x10;
+ port->lbah_addr = base + 0x15;
+ port->device_addr = base + 0x1a;
+ port->command_addr =
+ port->status_addr = base + 0x1f;
+ port->altstatus_addr =
+ port->ctl_addr = base + 0x81a;
+}
+
+/**
+ * pdc2027x_init_one - PCI probe function
+ * @pdev: instance of pci_dev found
+ * @ent: matching entry in the id_tbl[]
+ *
+ * Called when an instance of the PCI adapter is inserted.
+ * This function checks whether the hardware is supported,
+ * initializes the hardware and registers an instance of ata_host with
+ * libata. (implements struct pci_driver.probe() )
+ */
+static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 };
+ static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 };
+ unsigned int board_idx = (unsigned int) ent->driver_data;
+ const struct ata_port_info *ppi[] =
+ { &pdc2027x_port_info[board_idx], NULL };
+ struct ata_host *host;
+ void __iomem *mmio_base;
+ int i, rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* alloc host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+ if (!host)
+ return -ENOMEM;
+
+ /* acquire resources and fill host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ mmio_base = host->iomap[PDC_MMIO_BAR];
+
+ for (i = 0; i < 2; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ pdc_ata_setup_port(&ap->ioaddr, mmio_base + cmd_offset[i]);
+ ap->ioaddr.bmdma_addr = mmio_base + bmdma_offset[i];
+
+ ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, PDC_MMIO_BAR, cmd_offset[i], "cmd");
+ }
+
+ //pci_enable_intx(pdev);
+
+ /* initialize adapter */
+ if (pdc_hardware_init(host, board_idx) != 0)
+ return -EIO;
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ IRQF_SHARED, &pdc2027x_sht);
+}
+
+/**
+ * pdc2027x_init - Called after this module is loaded into the kernel.
+ */
+static int __init pdc2027x_init(void)
+{
+ return pci_register_driver(&pdc2027x_pci_driver);
+}
+
+/**
+ * pdc2027x_exit - Called before this module unloaded from the kernel
+ */
+static void __exit pdc2027x_exit(void)
+{
+ pci_unregister_driver(&pdc2027x_pci_driver);
+}
+
+module_init(pdc2027x_init);
+module_exit(pdc2027x_exit);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
new file mode 100644
index 0000000..799a6a0
--- /dev/null
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -0,0 +1,368 @@
+/*
+ * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ * (C) 2007 Bartlomiej Zolnierkiewicz
+ *
+ * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
+ *
+ * First cut with LBA48/ATAPI
+ *
+ * TODO:
+ * Channel interlock/reset on both required ?
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_pdc202xx_old"
+#define DRV_VERSION "0.4.3"
+
+static int pdc2026x_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u16 cis;
+
+ pci_read_config_word(pdev, 0x50, &cis);
+ if (cis & (1 << (10 + ap->port_no)))
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * pdc202xx_configure_piomode - set chip PIO timing
+ * @ap: ATA interface
+ * @adev: ATA device
+ * @pio: PIO mode
+ *
+ * Called to do the PIO mode setup. Our timing registers are shared
+ * so a configure_dmamode call will undo any work we do here and vice
+ * versa
+ */
+
+static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
+ static u16 pio_timing[5] = {
+ 0x0913, 0x050C, 0x0308, 0x0206, 0x0104
+ };
+ u8 r_ap, r_bp;
+
+ pci_read_config_byte(pdev, port, &r_ap);
+ pci_read_config_byte(pdev, port + 1, &r_bp);
+ r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */
+ r_bp &= ~0x1F;
+ r_ap |= (pio_timing[pio] >> 8);
+ r_bp |= (pio_timing[pio] & 0xFF);
+
+ if (ata_pio_need_iordy(adev))
+ r_ap |= 0x20; /* IORDY enable */
+ if (adev->class == ATA_DEV_ATA)
+ r_ap |= 0x10; /* FIFO enable */
+ pci_write_config_byte(pdev, port, r_ap);
+ pci_write_config_byte(pdev, port + 1, r_bp);
+}
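+
+/*
+ * Example (from pio_timing above): for PIO4 the entry is 0x0104, so 0x01
+ * is merged into the preserved bits of r_ap and 0x04 into r_bp; IORDY
+ * (0x20) and, for ATA disks, FIFO (0x10) are then OR'd into r_ap as well.
+ */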
+
+/**
+ * pdc202xx_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the PIO mode setup. Our timing registers are shared
+ * but we want to set the PIO timing by default.
+ */
+
+static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
+ * pdc202xx_set_dmamode - set DMA mode in chip
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Load DMA cycle times into the chip ready for a DMA transfer
+ * to occur.
+ */
+
+static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
+ static u8 udma_timing[6][2] = {
+ { 0x60, 0x03 }, /* 33 MHz clock */
+ { 0x40, 0x02 },
+ { 0x20, 0x01 },
+ { 0x40, 0x02 }, /* 66 MHz clock */
+ { 0x20, 0x01 },
+ { 0x20, 0x01 }
+ };
+ static u8 mdma_timing[3][2] = {
+ { 0xe0, 0x0f },
+ { 0x60, 0x04 },
+ { 0x60, 0x03 },
+ };
+ u8 r_bp, r_cp;
+
+ pci_read_config_byte(pdev, port + 1, &r_bp);
+ pci_read_config_byte(pdev, port + 2, &r_cp);
+
+ r_bp &= ~0xE0;
+ r_cp &= ~0x0F;
+
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ int speed = adev->dma_mode - XFER_UDMA_0;
+ r_bp |= udma_timing[speed][0];
+ r_cp |= udma_timing[speed][1];
+
+ } else {
+ int speed = adev->dma_mode - XFER_MW_DMA_0;
+ r_bp |= mdma_timing[speed][0];
+ r_cp |= mdma_timing[speed][1];
+ }
+ pci_write_config_byte(pdev, port + 1, r_bp);
+ pci_write_config_byte(pdev, port + 2, r_cp);
+}
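+
+/*
+ * Example (from udma_timing above): for UDMA4 the entry is { 0x20, 0x01 },
+ * so 0x20 lands in the top bits of r_bp and 0x01 in the low nibble of r_cp.
+ */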
+
+/**
+ * pdc2026x_bmdma_start - DMA engine begin
+ * @qc: ATA command
+ *
+ * In UDMA3 or higher we have to clock switch for the duration of the
+ * DMA transfer sequence.
+ *
+ * Note: The host lock held by the libata layer protects
+ * us from two channels both trying to set DMA bits at once
+ */
+
+static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct ata_taskfile *tf = &qc->tf;
+ int sel66 = ap->port_no ? 0x08: 0x02;
+
+ void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
+ void __iomem *clock = master + 0x11;
+ void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
+
+ u32 len;
+
+ /* Check we keep host level locking here */
+ if (adev->dma_mode > XFER_UDMA_2)
+ iowrite8(ioread8(clock) | sel66, clock);
+ else
+ iowrite8(ioread8(clock) & ~sel66, clock);
+
+ /* The DMA clocks may have been trashed by a reset. FIXME: make conditional
+ and move to qc_issue ? */
+ pdc202xx_set_dmamode(ap, qc->dev);
+
+ /* Cases the state machine will not complete correctly without help */
+ if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATAPI_PROT_DMA) {
+ len = qc->nbytes / 2;
+
+ if (tf->flags & ATA_TFLAG_WRITE)
+ len |= 0x06000000;
+ else
+ len |= 0x05000000;
+
+ iowrite32(len, atapi_reg);
+ }
+
+ /* Activate DMA */
+ ata_bmdma_start(qc);
+}
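+
+/*
+ * Illustrative numbers for the length fixup above: an LBA48 write of 1024
+ * sectors has qc->nbytes = 524288, so len = 262144 | 0x06000000 =
+ * 0x06040000 is written to the per-channel ATAPI register.
+ */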
+
+/**
+ * pdc2026x_bmdma_stop - DMA engine stop
+ * @qc: ATA command
+ *
+ * After a DMA completes we need to put the clock back to 33MHz for
+ * PIO timings.
+ *
+ * Note: The host lock held by the libata layer protects
+ * us from two channels both trying to set DMA bits at once
+ */
+
+static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct ata_taskfile *tf = &qc->tf;
+
+ int sel66 = ap->port_no ? 0x08: 0x02;
+ /* The clock bits are in the same register for both channels */
+ void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
+ void __iomem *clock = master + 0x11;
+ void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
+
+ /* Cases the state machine will not complete correctly */
+ if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
+ iowrite32(0, atapi_reg);
+ iowrite8(ioread8(clock) & ~sel66, clock);
+ }
+ /* Flip back to 33MHz for PIO */
+ if (adev->dma_mode > XFER_UDMA_2)
+ iowrite8(ioread8(clock) & ~sel66, clock);
+ ata_bmdma_stop(qc);
+ pdc202xx_set_piomode(ap, adev);
+}
+
+/**
+ * pdc2026x_dev_config - device setup hook
+ * @adev: newly found device
+ *
+ * Perform chip specific early setup. We need to lock the transfer
+ * sizes to 8bit to avoid making the state engine on the 2026x cards
+ * barf.
+ */
+
+static void pdc2026x_dev_config(struct ata_device *adev)
+{
+ adev->max_sectors = 256;
+}
+
+static int pdc2026x_port_start(struct ata_port *ap)
+{
+ void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+ if (bmdma) {
+ /* Enable burst mode */
+ u8 burst = ioread8(bmdma + 0x1f);
+ iowrite8(burst | 0x01, bmdma + 0x1f);
+ }
+ return ata_sff_port_start(ap);
+}
+
+/**
+ * pdc2026x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
+ * @qc: Metadata associated with taskfile to check
+ *
+ * Just say no - not supported on older Promise.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ *
+ * RETURNS: 0 when ATAPI DMA can be used
+ * 1 otherwise
+ */
+
+static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ return 1;
+}
+
+static struct scsi_host_template pdc202xx_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pdc2024x_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = pdc202xx_set_piomode,
+ .set_dmamode = pdc202xx_set_dmamode,
+};
+
+static struct ata_port_operations pdc2026x_port_ops = {
+ .inherits = &pdc2024x_port_ops,
+
+ .check_atapi_dma = pdc2026x_check_atapi_dma,
+ .bmdma_start = pdc2026x_bmdma_start,
+ .bmdma_stop = pdc2026x_bmdma_stop,
+
+ .cable_detect = pdc2026x_cable_detect,
+ .dev_config = pdc2026x_dev_config,
+
+ .port_start = pdc2026x_port_start,
+};
+
+static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info[3] = {
+ {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &pdc2024x_port_ops
+ },
+ {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &pdc2026x_port_ops
+ },
+ {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &pdc2026x_port_ops
+ }
+
+ };
+ const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
+
+ if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
+ struct pci_dev *bridge = dev->bus->self;
+ /* Don't grab anything behind a Promise I2O RAID */
+ if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
+ if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
+ return -ENODEV;
+ if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
+ return -ENODEV;
+ }
+ }
+ return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL);
+}
+
+static const struct pci_device_id pdc202xx[] = {
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
+ { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
+
+ { },
+};
+
+static struct pci_driver pdc202xx_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = pdc202xx,
+ .probe = pdc202xx_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init pdc202xx_init(void)
+{
+ return pci_register_driver(&pdc202xx_pci_driver);
+}
+
+static void __exit pdc202xx_exit(void)
+{
+ pci_unregister_driver(&pdc202xx_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pdc202xx);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(pdc202xx_init);
+module_exit(pdc202xx_exit);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
new file mode 100644
index 0000000..77e4e3b
--- /dev/null
+++ b/drivers/ata/pata_platform.c
@@ -0,0 +1,280 @@
+/*
+ * Generic platform device PATA driver
+ *
+ * Copyright (C) 2006 - 2007 Paul Mundt
+ *
+ * Based on pata_pcmcia:
+ *
+ * Copyright 2005-2006 Red Hat Inc, all rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+
+#define DRV_NAME "pata_platform"
+#define DRV_VERSION "1.2"
+
+static int pio_mask = 1;
+
+/*
+ * Provide our own set_mode() as we don't want to change anything that has
+ * already been configured.
+ */
+static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+ struct ata_device *dev;
+
+ ata_link_for_each_dev(dev, link) {
+ if (ata_dev_enabled(dev)) {
+ /* We don't really care */
+ dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ dev->flags |= ATA_DFLAG_PIO;
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ }
+ }
+ return 0;
+}
+
+static struct scsi_host_template pata_platform_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_platform_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_unknown,
+ .set_mode = pata_platform_set_mode,
+ .port_start = ATA_OP_NULL,
+};
+
+static void pata_platform_setup_port(struct ata_ioports *ioaddr,
+ unsigned int shift)
+{
+ /* Fixup the port shift for platforms that need it */
+ ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << shift);
+ ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << shift);
+ ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << shift);
+ ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << shift);
+ ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << shift);
+ ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << shift);
+ ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << shift);
+ ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << shift);
+ ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << shift);
+ ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << shift);
+}
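+/*
+ * Editor's note, a worked example of the shift fixup above: the
+ * ATA_REG_* constants are the standard taskfile offsets (ATA_REG_DATA
+ * is 0, ATA_REG_ERR is 1, ..., ATA_REG_CMD is 7), so with an
+ * ioport_shift of 1 the registers end up spaced two bytes apart:
+ * ATA_REG_ERR lands at cmd_addr + (1 << 1) = cmd_addr + 2, and
+ * ATA_REG_CMD at cmd_addr + 14.
+ */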
+
+/**
+ * __pata_platform_probe - attach a platform interface
+ * @dev: device
+ * @io_res: Resource representing I/O base
+ * @ctl_res: Resource representing CTL base
+ * @irq_res: Resource representing IRQ and its flags
+ * @ioport_shift: I/O port shift
+ * @__pio_mask: PIO mask
+ *
+ * Register a platform bus IDE interface. Such interfaces are PIO-only
+ * and we assume they do not support IRQ sharing.
+ *
+ * Platform devices are expected to contain at least 2 resources per port:
+ *
+ * - I/O Base (IORESOURCE_IO or IORESOURCE_MEM)
+ * - CTL Base (IORESOURCE_IO or IORESOURCE_MEM)
+ *
+ * and optionally:
+ *
+ * - IRQ (IORESOURCE_IRQ)
+ *
+ * If the base resources are both mem types, the ioremap() is handled
+ * here. For IORESOURCE_IO, it's assumed that there's no remapping
+ * necessary.
+ *
+ * If no IRQ resource is present, PIO polling mode is used instead.
+ */
+int __devinit __pata_platform_probe(struct device *dev,
+ struct resource *io_res,
+ struct resource *ctl_res,
+ struct resource *irq_res,
+ unsigned int ioport_shift,
+ int __pio_mask)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ unsigned int mmio;
+ int irq = 0;
+ int irq_flags = 0;
+
+ /*
+ * Check for MMIO
+ */
+	mmio = ((io_res->flags == IORESOURCE_MEM) &&
+		(ctl_res->flags == IORESOURCE_MEM));
+
+ /*
+ * And the IRQ
+ */
+ if (irq_res && irq_res->start > 0) {
+ irq = irq_res->start;
+ irq_flags = irq_res->flags;
+ }
+
+ /*
+ * Now that that's out of the way, wire up the port..
+ */
+ host = ata_host_alloc(dev, 1);
+ if (!host)
+ return -ENOMEM;
+ ap = host->ports[0];
+
+ ap->ops = &pata_platform_port_ops;
+ ap->pio_mask = __pio_mask;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+ /*
+ * Use polling mode if there's no IRQ
+ */
+ if (!irq) {
+ ap->flags |= ATA_FLAG_PIO_POLLING;
+ ata_port_desc(ap, "no IRQ, using PIO polling");
+ }
+
+ /*
+ * Handle the MMIO case
+ */
+ if (mmio) {
+ ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start,
+ io_res->end - io_res->start + 1);
+ ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start,
+ ctl_res->end - ctl_res->start + 1);
+ } else {
+ ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start,
+ io_res->end - io_res->start + 1);
+ ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start,
+ ctl_res->end - ctl_res->start + 1);
+ }
+ if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
+ dev_err(dev, "failed to map IO/CTL base\n");
+ return -ENOMEM;
+ }
+
+ ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+ pata_platform_setup_port(&ap->ioaddr, ioport_shift);
+
+ ata_port_desc(ap, "%s cmd 0x%llx ctl 0x%llx", mmio ? "mmio" : "ioport",
+ (unsigned long long)io_res->start,
+ (unsigned long long)ctl_res->start);
+
+ /* activate */
+ return ata_host_activate(host, irq, irq ? ata_sff_interrupt : NULL,
+ irq_flags, &pata_platform_sht);
+}
+EXPORT_SYMBOL_GPL(__pata_platform_probe);
+
+/**
+ * __pata_platform_remove - unplug a platform interface
+ * @dev: device
+ *
+ * A platform bus ATA device has been unplugged. Perform the needed
+ * cleanup. Also called on module unload for any active devices.
+ */
+int __devexit __pata_platform_remove(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__pata_platform_remove);
+
+static int __devinit pata_platform_probe(struct platform_device *pdev)
+{
+ struct resource *io_res;
+ struct resource *ctl_res;
+ struct resource *irq_res;
+ struct pata_platform_info *pp_info = pdev->dev.platform_data;
+
+ /*
+ * Simple resource validation ..
+ */
+ if ((pdev->num_resources != 3) && (pdev->num_resources != 2)) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the I/O base first
+ */
+ io_res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (io_res == NULL) {
+ io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(io_res == NULL))
+ return -EINVAL;
+ }
+
+ /*
+ * Then the CTL base
+ */
+ ctl_res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+ if (ctl_res == NULL) {
+ ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (unlikely(ctl_res == NULL))
+ return -EINVAL;
+ }
+
+ /*
+ * And the IRQ
+ */
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq_res)
+ irq_res->flags = pp_info ? pp_info->irq_flags : 0;
+
+ return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res,
+ pp_info ? pp_info->ioport_shift : 0,
+ pio_mask);
+}
+
+static int __devexit pata_platform_remove(struct platform_device *pdev)
+{
+ return __pata_platform_remove(&pdev->dev);
+}
+
+static struct platform_driver pata_platform_driver = {
+ .probe = pata_platform_probe,
+ .remove = __devexit_p(pata_platform_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pata_platform_init(void)
+{
+ return platform_driver_register(&pata_platform_driver);
+}
+
+static void __exit pata_platform_exit(void)
+{
+ platform_driver_unregister(&pata_platform_driver);
+}
+module_init(pata_platform_init);
+module_exit(pata_platform_exit);
+
+module_param(pio_mask, int, 0);
+
+MODULE_AUTHOR("Paul Mundt");
+MODULE_DESCRIPTION("low-level driver for platform device ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
new file mode 100644
index 0000000..3080f37
--- /dev/null
+++ b/drivers/ata/pata_qdi.c
@@ -0,0 +1,366 @@
+/*
+ * pata_qdi.c - QDI VLB ATA controllers
+ * (C) 2006 Red Hat
+ *
+ * This driver mostly exists as a proof of concept for non-PCI devices under
+ * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly
+ * useful.
+ *
+ * Tuning code written from the documentation at
+ * http://www.ryston.cz/petr/vlb/qd6500.html
+ * http://www.ryston.cz/petr/vlb/qd6580.html
+ *
+ * Probe code based on drivers/ide/legacy/qd65xx.c
+ * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
+ * Samuel Thibault <samuel.thibault@fnac.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_qdi"
+#define DRV_VERSION "0.3.1"
+
+#define NR_HOST 4 /* Two 6580s */
+
+struct qdi_data {
+ unsigned long timing;
+ u8 clock[2];
+ u8 last;
+ int fast;
+ struct platform_device *platform_dev;
+};
+
+static struct ata_host *qdi_host[NR_HOST];
+static struct qdi_data qdi_data[NR_HOST];
+static int nr_qdi_host;
+
+#ifdef MODULE
+static int probe_qdi = 1;
+#else
+static int probe_qdi;
+#endif
+
+static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_timing t;
+ struct qdi_data *qdi = ap->host->private_data;
+ int active, recovery;
+ u8 timing;
+
+ /* Get the timing data in cycles */
+ ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+ if (qdi->fast) {
+ active = 8 - clamp_val(t.active, 1, 8);
+ recovery = 18 - clamp_val(t.recover, 3, 18);
+ } else {
+ active = 9 - clamp_val(t.active, 2, 9);
+ recovery = 15 - clamp_val(t.recover, 0, 15);
+ }
+ timing = (recovery << 4) | active | 0x08;
+
+ qdi->clock[adev->devno] = timing;
+
+ outb(timing, qdi->timing);
+}
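+/*
+ * Editor's note, reading the code above: the timing byte packs a
+ * recovery count in bits 4-7 and an active count in bits 0-2, both
+ * stored inverted so that a larger field value means a faster cycle,
+ * with 0x08 kept set unconditionally. Worked example for the slow
+ * (non-fast) path: t.active = 4 clocks and t.recover = 8 clocks give
+ * ((15 - 8) << 4) | (9 - 4) | 0x08 = 0x7D.
+ */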
+
+static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_timing t;
+ struct qdi_data *qdi = ap->host->private_data;
+ int active, recovery;
+ u8 timing;
+
+ /* Get the timing data in cycles */
+ ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+ if (qdi->fast) {
+ active = 8 - clamp_val(t.active, 1, 8);
+ recovery = 18 - clamp_val(t.recover, 3, 18);
+ } else {
+ active = 9 - clamp_val(t.active, 2, 9);
+ recovery = 15 - clamp_val(t.recover, 0, 15);
+ }
+ timing = (recovery << 4) | active | 0x08;
+
+ qdi->clock[adev->devno] = timing;
+
+ outb(timing, qdi->timing);
+
+ /* Clear the FIFO */
+ if (adev->class != ATA_DEV_ATA)
+ outb(0x5F, (qdi->timing & 0xFFF0) + 3);
+}
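+/*
+ * Editor's note: the 6580 path differs from the 6500 only in the final
+ * write; (qdi->timing & 0xFFF0) + 3 appears to address the control port
+ * of the register block, and writing 0x5F there clears the FIFO before
+ * talking to non-ATA (ATAPI) devices, as the in-line comment notes.
+ */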
+
+/**
+ * qdi_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings.
+ */
+
+static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct qdi_data *qdi = ap->host->private_data;
+
+ if (qdi->clock[adev->devno] != qdi->last) {
+ if (adev->pio_mode) {
+ qdi->last = qdi->clock[adev->devno];
+ outb(qdi->clock[adev->devno], qdi->timing);
+ }
+ }
+ return ata_sff_qc_issue(qc);
+}
+
+static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ if (ata_id_has_dword_io(dev->id)) {
+ struct ata_port *ap = dev->link->ap;
+ int slop = buflen & 3;
+
+ if (rw == READ)
+ ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+ else
+ iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+ if (unlikely(slop)) {
+ __le32 pad;
+ if (rw == READ) {
+ pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+ memcpy(buf + buflen - slop, &pad, slop);
+ } else {
+ memcpy(&pad, buf + buflen - slop, slop);
+ iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+ }
+ buflen += 4 - slop;
+ }
+ } else
+ buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
+
+ return buflen;
+}
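+/*
+ * Editor's note, a worked example of the slop handling above: a
+ * 510-byte transfer to a drive advertising DWORD I/O is done as 127
+ * 32-bit transfers (510 >> 2) plus one final padded 32-bit transfer
+ * carrying the trailing two bytes, and the function reports 512 bytes
+ * moved.
+ */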
+
+static struct scsi_host_template qdi_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations qdi6500_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .qc_issue = qdi_qc_issue,
+ .sff_data_xfer = qdi_data_xfer,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = qdi6500_set_piomode,
+};
+
+static struct ata_port_operations qdi6580_port_ops = {
+ .inherits = &qdi6500_port_ops,
+ .set_piomode = qdi6580_set_piomode,
+};
+
+/**
+ * qdi_init_one - attach a qdi interface
+ * @port: timing register port
+ * @type: Type to display
+ * @io: I/O port start
+ * @irq: interrupt line
+ * @fast: True if on a > 33MHz VLB
+ *
+ * Register an ISA bus IDE interface. Such interfaces are PIO-only and we
+ * assume they do not support IRQ sharing.
+ */
+
+static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast)
+{
+ unsigned long ctl = io + 0x206;
+ struct platform_device *pdev;
+ struct ata_host *host;
+ struct ata_port *ap;
+ void __iomem *io_addr, *ctl_addr;
+ int ret;
+
+ /*
+ * Fill in a probe structure first of all
+ */
+
+ pdev = platform_device_register_simple(DRV_NAME, nr_qdi_host, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ ret = -ENOMEM;
+ io_addr = devm_ioport_map(&pdev->dev, io, 8);
+ ctl_addr = devm_ioport_map(&pdev->dev, ctl, 1);
+ if (!io_addr || !ctl_addr)
+ goto fail;
+
+ ret = -ENOMEM;
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ goto fail;
+ ap = host->ports[0];
+
+ if (type == 6580) {
+ ap->ops = &qdi6580_port_ops;
+ ap->pio_mask = 0x1F;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ } else {
+ ap->ops = &qdi6500_port_ops;
+ ap->pio_mask = 0x07; /* Actually PIO3 without IORDY is possible */
+ ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
+ }
+
+ ap->ioaddr.cmd_addr = io_addr;
+ ap->ioaddr.altstatus_addr = ctl_addr;
+ ap->ioaddr.ctl_addr = ctl_addr;
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl);
+
+ /*
+ * Hook in a private data structure per channel
+ */
+ ap->private_data = &qdi_data[nr_qdi_host];
+
+ qdi_data[nr_qdi_host].timing = port;
+ qdi_data[nr_qdi_host].fast = fast;
+ qdi_data[nr_qdi_host].platform_dev = pdev;
+
+ printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
+
+ /* activate */
+ ret = ata_host_activate(host, irq, ata_sff_interrupt, 0, &qdi_sht);
+ if (ret)
+ goto fail;
+
+ qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev);
+ return 0;
+
+ fail:
+ platform_device_unregister(pdev);
+ return ret;
+}
+
+/**
+ * qdi_init - attach qdi interfaces
+ *
+ * Attach qdi IDE interfaces by scanning the ports it may occupy.
+ */
+
+static __init int qdi_init(void)
+{
+ unsigned long flags;
+ static const unsigned long qd_port[2] = { 0x30, 0xB0 };
+ static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
+ static const int ide_irq[2] = { 14, 15 };
+
+ int ct = 0;
+ int i;
+
+ if (probe_qdi == 0)
+ return -ENODEV;
+
+ /*
+ * Check each possible QD65xx base address
+ */
+
+ for (i = 0; i < 2; i++) {
+ unsigned long port = qd_port[i];
+ u8 r, res;
+
+ if (request_region(port, 2, "pata_qdi")) {
+ /* Check for a card */
+ local_irq_save(flags);
+ r = inb_p(port);
+ outb_p(0x19, port);
+ res = inb_p(port);
+ outb_p(r, port);
+ local_irq_restore(flags);
+
+ /* Fail */
+ if (res == 0x19) {
+ release_region(port, 2);
+ continue;
+ }
+
+ /* Passes the presence test */
+ r = inb_p(port + 1); /* Check port agrees with port set */
+ if ((r & 2) >> 1 != i) {
+ release_region(port, 2);
+ continue;
+ }
+
+ /* Check card type */
+ if ((r & 0xF0) == 0xC0) {
+ /* QD6500: single channel */
+ if (r & 8) {
+ /* Disabled ? */
+ release_region(port, 2);
+ continue;
+ }
+ if (qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
+ ct++;
+ }
+ if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
+ /* QD6580: dual channel */
+ if (!request_region(port + 2, 2, "pata_qdi")) {
+ release_region(port, 2);
+ continue;
+ }
+ res = inb(port + 3);
+ if (res & 1) {
+ /* Single channel mode */
+ if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
+ ct++;
+ } else {
+ /* Dual channel mode */
+ if (qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04) == 0)
+ ct++;
+ if (qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04) == 0)
+ ct++;
+ }
+ }
+ }
+ }
+ if (ct != 0)
+ return 0;
+ return -ENODEV;
+}
+
+static __exit void qdi_exit(void)
+{
+ int i;
+
+ for (i = 0; i < nr_qdi_host; i++) {
+ ata_host_detach(qdi_host[i]);
+ /* Free the control resource. The 6580 dual channel has the resources
+ * claimed as a pair of 2 byte resources so we need no special cases...
+ */
+ release_region(qdi_data[i].timing, 2);
+ platform_device_unregister(qdi_data[i].platform_dev);
+ }
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for qdi ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(qdi_init);
+module_exit(qdi_exit);
+
+module_param(probe_qdi, int, 0);
+
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
new file mode 100644
index 0000000..0b0aa45
--- /dev/null
+++ b/drivers/ata/pata_radisys.c
@@ -0,0 +1,269 @@
+/*
+ * pata_radisys.c - Intel PATA/SATA controllers
+ *
+ * (C) 2006 Red Hat <alan@lxorguk.ukuu.org.uk>
+ *
+ * Some parts based on ata_piix.c by Jeff Garzik and others.
+ *
+ * A PIIX relative, this device has a single ATA channel and no
+ * slave timings, SITRE or PPE. In that sense it is a close relative
+ * of the original PIIX. It does however support UDMA 33/66 per channel
+ * although no other modes/timings. Also lacking is 32bit I/O on the ATA
+ * port.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+
+#define DRV_NAME "pata_radisys"
+#define DRV_VERSION "0.4.4"
+
+/**
+ * radisys_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: ATA port
+ * @adev: Device whose timings we are configuring
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ u16 idetm_data;
+ int control = 0;
+
+ /*
+ * See Intel Document 298600-004 for the timing programming rules
+ * for PIIX/ICH. Note that the early PIIX does not have the slave
+ * timing port at 0x44. The Radisys is a relative of the PIIX
+ * but not the same so be careful.
+ */
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 }, /* Check me */
+ { 0, 0 },
+ { 1, 1 },
+ { 2, 2 },
+ { 3, 3 }, };
+
+ if (pio > 0)
+ control |= 1; /* TIME1 enable */
+ if (ata_pio_need_iordy(adev))
+ control |= 2; /* IE IORDY */
+
+ pci_read_config_word(dev, 0x40, &idetm_data);
+
+ /* Enable IE and TIME as appropriate. Clear the other
+ drive timing bits */
+ idetm_data &= 0xCCCC;
+ idetm_data |= (control << (4 * adev->devno));
+ idetm_data |= (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ pci_write_config_word(dev, 0x40, idetm_data);
+
+ /* Track which port is configured */
+ ap->private_data = adev;
+}
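+/*
+ * Editor's note, a worked example of the IDETIM packing above: for PIO4
+ * with IORDY on drive 0, control = TIME1|IE = 3 and ISP = RTC = 3, so
+ * after the 0xCCCC mask the register gains 0x3300 in the shared timing
+ * field plus 0x0003 in drive 0's control nibble.
+ */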
+
+/**
+ * radisys_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set MWDMA or UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ u16 idetm_data;
+ u8 udma_enable;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 1 },
+ { 2, 2 },
+ { 3, 3 }, };
+
+ /*
+ * MWDMA is driven by the PIO timings. We must also enable
+ * IORDY unconditionally.
+ */
+
+ pci_read_config_word(dev, 0x40, &idetm_data);
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+
+ if (adev->dma_mode < XFER_UDMA_0) {
+ unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
+ const unsigned int needed_pio[3] = {
+ XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+ };
+ int pio = needed_pio[mwdma] - XFER_PIO_0;
+ int control = 3; /* IORDY|TIME0 */
+
+ /* If the drive MWDMA is faster than it can do PIO then
+ we must force PIO0 for PIO cycles. */
+
+ if (adev->pio_mode < needed_pio[mwdma])
+ control = 1;
+
+ /* Mask out the relevant control and timing bits we will load. Also
+ clear the other drive TIME register as a precaution */
+
+ idetm_data &= 0xCCCC;
+ idetm_data |= control << (4 * adev->devno);
+ idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+
+ udma_enable &= ~(1 << adev->devno);
+ } else {
+ u8 udma_mode;
+
+ /* UDMA66 on: UDMA 33 and 66 are switchable via register 0x4A */
+
+ pci_read_config_byte(dev, 0x4A, &udma_mode);
+
+ if (adev->xfer_mode == XFER_UDMA_2)
+ udma_mode &= ~ (1 << adev->devno);
+ else /* UDMA 4 */
+ udma_mode |= (1 << adev->devno);
+
+ pci_write_config_byte(dev, 0x4A, udma_mode);
+
+ udma_enable |= (1 << adev->devno);
+ }
+ pci_write_config_word(dev, 0x40, idetm_data);
+ pci_write_config_byte(dev, 0x48, udma_enable);
+
+ /* Track which port is configured */
+ ap->private_data = adev;
+}
+
+/**
+ * radisys_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings if
+ * necessary. Our logic also clears TIME0/TIME1 for the other device so
+ * that, even if we get this wrong, cycles to the other device will
+ * be made PIO0.
+ */
+
+static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+
+ if (adev != ap->private_data) {
+ /* UDMA timing is not shared */
+ if (adev->dma_mode < XFER_UDMA_0) {
+ if (adev->dma_mode)
+ radisys_set_dmamode(ap, adev);
+ else if (adev->pio_mode)
+ radisys_set_piomode(ap, adev);
+ }
+ }
+ return ata_sff_qc_issue(qc);
+}
+
+static struct scsi_host_template radisys_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations radisys_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_issue = radisys_qc_issue,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = radisys_set_piomode,
+ .set_dmamode = radisys_set_dmamode,
+};
+
+/**
+ * radisys_init_one - Register Radisys ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in radisys_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer. We hand over control to libata, for
+ * it to do the rest.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = 0x14, /* UDMA33/66 only */
+ .port_ops = &radisys_pata_ops,
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL);
+}
+
+static const struct pci_device_id radisys_pci_tbl[] = {
+ { PCI_VDEVICE(RADISYS, 0x8201), },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver radisys_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = radisys_pci_tbl,
+ .probe = radisys_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init radisys_init(void)
+{
+ return pci_register_driver(&radisys_pci_driver);
+}
+
+static void __exit radisys_exit(void)
+{
+ pci_unregister_driver(&radisys_pci_driver);
+}
+
+module_init(radisys_init);
+module_exit(radisys_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, radisys_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
new file mode 100644
index 0000000..c2e6fb9
--- /dev/null
+++ b/drivers/ata/pata_rb532_cf.c
@@ -0,0 +1,284 @@
+/*
+ * A low-level PATA driver to handle a Compact Flash connected on the
+ * Mikrotik's RouterBoard 532 board.
+ *
+ * Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org>
+ * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ *
+ * This file was based on: drivers/ata/pata_ixp4xx_cf.c
+ * Copyright (C) 2006-07 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * It was also based on the driver for Linux 2.4.xx published by Mikrotik
+ * for their RouterBoard 1xx and 5xx series devices. The original Mikrotik
+ * code appears not to carry a license.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <linux/libata.h>
+#include <scsi/scsi_host.h>
+
+#include <asm/gpio.h>
+
+#define DRV_NAME "pata-rb532-cf"
+#define DRV_VERSION "0.1.0"
+#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
+
+#define RB500_CF_MAXPORTS 1
+#define RB500_CF_IO_DELAY 400
+
+#define RB500_CF_REG_BASE 0x0800
+#define RB500_CF_REG_ERR 0x080D
+#define RB500_CF_REG_CTRL 0x080E
+/* 32bit buffered data register offset */
+#define RB500_CF_REG_DBUF32 0x0C00
+
+struct rb532_cf_info {
+ void __iomem *iobase;
+ unsigned int gpio_line;
+ int frozen;
+ unsigned int irq;
+};
+
+/* ------------------------------------------------------------------------ */
+
+static inline void rb532_pata_finish_io(struct ata_port *ap)
+{
+ struct ata_host *ah = ap->host;
+ struct rb532_cf_info *info = ah->private_data;
+
+ /* FIXME: Keep previous delay. If this is merely a fence then
+ ata_sff_sync might be sufficient. */
+ ata_sff_dma_pause(ap);
+ ndelay(RB500_CF_IO_DELAY);
+
+ set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static void rb532_pata_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ writeb(tf->command, ap->ioaddr.command_addr);
+ rb532_pata_finish_io(ap);
+}
+
+static unsigned int rb532_pata_data_xfer(struct ata_device *adev, unsigned char *buf,
+ unsigned int buflen, int write_data)
+{
+ struct ata_port *ap = adev->link->ap;
+ void __iomem *ioaddr = ap->ioaddr.data_addr;
+ int retlen = buflen;
+
+ if (write_data) {
+ for (; buflen > 0; buflen--, buf++)
+ writeb(*buf, ioaddr);
+ } else {
+ for (; buflen > 0; buflen--, buf++)
+ *buf = readb(ioaddr);
+ }
+
+ rb532_pata_finish_io(ap);
+ return retlen;
+}
+
+static void rb532_pata_freeze(struct ata_port *ap)
+{
+ struct rb532_cf_info *info = ap->host->private_data;
+
+ info->frozen = 1;
+}
+
+static void rb532_pata_thaw(struct ata_port *ap)
+{
+ struct rb532_cf_info *info = ap->host->private_data;
+
+ info->frozen = 0;
+}
+
+static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
+{
+ struct ata_host *ah = dev_instance;
+ struct rb532_cf_info *info = ah->private_data;
+
+ if (gpio_get_value(info->gpio_line)) {
+ set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
+ if (!info->frozen)
+ ata_sff_interrupt(info->irq, dev_instance);
+ } else {
+ set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
+ }
+
+ return IRQ_HANDLED;
+}
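+/*
+ * Editor's note: the handler above emulates edge triggering on the
+ * level-sensed CF ready line: while the GPIO reads high the trigger is
+ * flipped to level-low (so the line cannot storm) and the libata SFF
+ * handler is invoked; once it reads low the trigger is flipped back to
+ * level-high to wait for the next assertion.
+ */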
+
+static struct ata_port_operations rb532_pata_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_exec_command = rb532_pata_exec_command,
+ .sff_data_xfer = rb532_pata_data_xfer,
+ .freeze = rb532_pata_freeze,
+ .thaw = rb532_pata_thaw,
+};
+
+/* ------------------------------------------------------------------------ */
+
+static struct scsi_host_template rb532_pata_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+/* ------------------------------------------------------------------------ */
+
+static void rb532_pata_setup_ports(struct ata_host *ah)
+{
+ struct rb532_cf_info *info = ah->private_data;
+ struct ata_port *ap;
+
+ ap = ah->ports[0];
+
+ ap->ops = &rb532_pata_port_ops;
+ ap->pio_mask = 0x1f; /* PIO4 */
+ ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
+
+ ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE;
+ ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL;
+ ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL;
+
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DBUF32;
+ ap->ioaddr.error_addr = info->iobase + RB500_CF_REG_ERR;
+}
+
+static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
+{
+ int irq;
+ int gpio;
+ struct resource *res;
+ struct ata_host *ah;
+ struct rb532_cf_info *info;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no IOMEM resource found\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "no IRQ resource found\n");
+ return -ENOENT;
+ }
+
+ gpio = irq_to_gpio(irq);
+ if (gpio < 0) {
+ dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
+ return -ENOENT;
+ }
+
+ ret = gpio_request(gpio, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "GPIO request failed\n");
+ return ret;
+ }
+
+ /* allocate host */
+ ah = ata_host_alloc(&pdev->dev, RB500_CF_MAXPORTS);
+ if (!ah) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+
+ platform_set_drvdata(pdev, ah);
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+
+ ah->private_data = info;
+ info->gpio_line = gpio;
+ info->irq = irq;
+
+ info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
+ res->end - res->start + 1);
+ if (!info->iobase) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+
+ ret = gpio_direction_input(gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to set GPIO direction, err=%d\n",
+ ret);
+ goto err_free_gpio;
+ }
+
+ rb532_pata_setup_ports(ah);
+
+ ret = ata_host_activate(ah, irq, rb532_pata_irq_handler,
+ IRQF_TRIGGER_LOW, &rb532_pata_sht);
+ if (ret)
+ goto err_free_gpio;
+
+ return 0;
+
+err_free_gpio:
+ gpio_free(gpio);
+
+ return ret;
+}
+
+static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
+{
+ struct ata_host *ah = platform_get_drvdata(pdev);
+ struct rb532_cf_info *info = ah->private_data;
+
+ ata_host_detach(ah);
+ gpio_free(info->gpio_line);
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:" DRV_NAME);
+
+static struct platform_driver rb532_pata_platform_driver = {
+ .probe = rb532_pata_driver_probe,
+ .remove = __devexit_p(rb532_pata_driver_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* ------------------------------------------------------------------------ */
+
+#define DRV_INFO DRV_DESC " version " DRV_VERSION
+
+static int __init rb532_pata_module_init(void)
+{
+ printk(KERN_INFO DRV_INFO "\n");
+
+ return platform_driver_register(&rb532_pata_platform_driver);
+}
+
+static void __exit rb532_pata_module_exit(void)
+{
+ platform_driver_unregister(&rb532_pata_platform_driver);
+}
+
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+module_init(rb532_pata_module_init);
+module_exit(rb532_pata_module_exit);
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
new file mode 100644
index 0000000..7dfd1f3
--- /dev/null
+++ b/drivers/ata/pata_rz1000.c
@@ -0,0 +1,156 @@
+/*
+ * RZ1000/1001 driver based upon
+ *
+ * linux/drivers/ide/pci/rz1000.c Version 0.06 January 12, 2003
+ * Copyright (C) 1995-1998 Linus Torvalds & author (see below)
+ * Principal Author: mlord@pobox.com (Mark Lord)
+ *
+ * See linux/MAINTAINERS for address of current maintainer.
+ *
+ * This file provides support for disabling the buggy read-ahead
+ * mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_rz1000"
+#define DRV_VERSION "0.2.4"
+
+
+/**
+ * rz1000_set_mode - mode setting function
+ * @link: ATA link
+ * @unused: returned device on set_mode failure
+ *
+ * Use a non standard set_mode function. We don't want to be tuned. We
+ * would prefer to be BIOS generic but for the fact our hardware is
+ * whacked out.
+ */
+
+static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+ struct ata_device *dev;
+
+ ata_link_for_each_dev(dev, link) {
+ if (ata_dev_enabled(dev)) {
+ /* We don't really care */
+ dev->pio_mode = XFER_PIO_0;
+ dev->xfer_mode = XFER_PIO_0;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ dev->flags |= ATA_DFLAG_PIO;
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ }
+ }
+ return 0;
+}
+
+
+static struct scsi_host_template rz1000_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations rz1000_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .cable_detect = ata_cable_40wire,
+ .set_mode = rz1000_set_mode,
+};
+
+static int rz1000_fifo_disable(struct pci_dev *pdev)
+{
+ u16 reg;
+ /* Be exceptionally paranoid as we must be sure to apply the fix */
+ if (pci_read_config_word(pdev, 0x40, &reg) != 0)
+ return -1;
+ reg &= 0xDFFF;
+ if (pci_write_config_word(pdev, 0x40, reg) != 0)
+ return -1;
+ printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
+ return 0;
+}
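+/*
+ * Editor's note: the 0xDFFF mask above clears bit 13 of the config word
+ * at 0x40, which per the old drivers/ide rz1000 driver is the read-ahead
+ * enable this driver exists to switch off; every other bit is preserved.
+ */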
+
+/**
+ * rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in rz1000_pci_tbl matching with @pdev
+ *
+ * Configure an RZ1000 interface. This doesn't require much special
+ * handling except that we *MUST* kill the chipset readahead or the
+ * user may experience data corruption.
+ */
+
+static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .port_ops = &rz1000_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+
+ if (!printed_version++)
+ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+ if (rz1000_fifo_disable(pdev) == 0)
+ return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL);
+
+ printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset.\n");
+ /* Not safe to use so skip */
+ return -ENODEV;
+}
+
+#ifdef CONFIG_PM
+static int rz1000_reinit_one(struct pci_dev *pdev)
+{
+ /* If this fails on resume (which is a "can't happen" case), we
+ must stop as any progress risks data loss */
+ if (rz1000_fifo_disable(pdev))
+ panic("rz1000 fifo");
+ return ata_pci_device_resume(pdev);
+}
+#endif
+
+static const struct pci_device_id pata_rz1000[] = {
+ { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
+ { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
+
+ { },
+};
+
+static struct pci_driver rz1000_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = pata_rz1000,
+ .probe = rz1000_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = rz1000_reinit_one,
+#endif
+};
+
+static int __init rz1000_init(void)
+{
+ return pci_register_driver(&rz1000_pci_driver);
+}
+
+static void __exit rz1000_exit(void)
+{
+ pci_unregister_driver(&rz1000_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pata_rz1000);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(rz1000_init);
+module_exit(rz1000_exit);
+
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
new file mode 100644
index 0000000..9a4bdca
--- /dev/null
+++ b/drivers/ata/pata_sc1200.c
@@ -0,0 +1,253 @@
+/*
+ * New ATA layer SC1200 driver Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * TODO: Mode selection filtering
+ * TODO: Can't enable second channel until ATA core has serialize
+ * TODO: Needs custom DMA cleanup code
+ *
+ * Based very heavily on
+ *
+ * linux/drivers/ide/pci/sc1200.c Version 0.91 28-Jan-2003
+ *
+ * Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * Development of this chipset driver was funded
+ * by the nice folks at National Semiconductor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sc1200"
+#define DRV_VERSION "0.2.6"
+
+#define SC1200_REV_A 0x00
+#define SC1200_REV_B1 0x01
+#define SC1200_REV_B3 0x02
+#define SC1200_REV_C1 0x03
+#define SC1200_REV_D1 0x04
+
+/**
+ * sc1200_clock - PCI clock
+ *
+ * Return the PCI bus clocking for the SC1200 chipset configuration
+ * in use. We return 0 for 33MHz, 1 for 48MHz and 2 for 66MHz.
+ */
+
+static int sc1200_clock(void)
+{
+ /* Magic registers that give us the chipset data */
+ u8 chip_id = inb(0x903C);
+ u8 silicon_rev = inb(0x903D);
+ u16 pci_clock;
+
+ if (chip_id == 0x04 && silicon_rev < SC1200_REV_B1)
+ return 0; /* 33 MHz mode */
+
+ /* Clock generator configuration 0x901E: bits 8/9 give the PCI clocking.
+ 0/3 is 33MHz, 1 is 48MHz, 2 is 66MHz */
+
+ pci_clock = inw(0x901E);
+ pci_clock >>= 8;
+ pci_clock &= 0x03;
+ if (pci_clock == 3)
+ pci_clock = 0;
+ return pci_clock;
+}
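+/*
+ * Editor's note, a worked example: if inw(0x901E) returns 0x0200, bits
+ * 8/9 hold 2 and the function reports 66MHz (2); a raw value of 3 in
+ * those bits is folded back to 0 (33MHz), matching the comment above.
+ */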
+
+/**
+ * sc1200_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Set our PIO requirements. This is fairly simple on the SC1200
+ */
+
+static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u32 pio_timings[4][5] = {
+ {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, /* format0, 33MHz */
+ {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}, /* format1, 33MHz */
+ {0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021}, /* format1, 48MHz */
+ {0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131} /* format1, 66MHz */
+ };
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 format;
+ unsigned int reg = 0x40 + 0x10 * ap->port_no;
+ int mode = adev->pio_mode - XFER_PIO_0;
+
+ pci_read_config_dword(pdev, reg + 4, &format);
+ format >>= 31;
+ format += sc1200_clock();
+ pci_write_config_dword(pdev, reg + 8 * adev->devno,
+ pio_timings[format][mode]);
+}
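+/*
+ * Editor's note on the table indexing above: bit 31 of the existing
+ * timing register selects format0 vs format1, and sc1200_clock() adds
+ * 0-2 for the bus speed, so e.g. a format1 part on a 48MHz bus indexes
+ * row 2 of pio_timings[] (1 + 1).
+ */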
+
+/**
+ * sc1200_set_dmamode - DMA timing setup
+ * @ap: ATA interface
+ * @adev: Device being configured
+ *
+ * We cannot mix MWDMA and UDMA without reloading the timings on each
+ * switch between master and slave.
+ */
+
+static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u32 udma_timing[3][3] = {
+ { 0x00921250, 0x00911140, 0x00911030 },
+ { 0x00932470, 0x00922260, 0x00922140 },
+ { 0x009436A1, 0x00933481, 0x00923261 }
+ };
+
+ static const u32 mwdma_timing[3][3] = {
+ { 0x00077771, 0x00012121, 0x00002020 },
+ { 0x000BBBB2, 0x00024241, 0x00013131 },
+ { 0x000FFFF3, 0x00035352, 0x00015151 }
+ };
+
+ int clock = sc1200_clock();
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ unsigned int reg = 0x40 + 0x10 * ap->port_no;
+ int mode = adev->dma_mode;
+ u32 format;
+
+ if (mode >= XFER_UDMA_0)
+ format = udma_timing[clock][mode - XFER_UDMA_0];
+ else
+ format = mwdma_timing[clock][mode - XFER_MW_DMA_0];
+
+ if (adev->devno == 0) {
+ u32 timings;
+
+ pci_read_config_dword(pdev, reg + 4, &timings);
+ timings &= 0x80000000UL;
+ timings |= format;
+ pci_write_config_dword(pdev, reg + 4, timings);
+ } else
+ pci_write_config_dword(pdev, reg + 12, format);
+}
+
+/**
+ * sc1200_qc_issue - command issue
+ * @qc: command pending
+ *
+ * Called when the libata layer is about to issue a command. We wrap
+ * this interface so that we can load the correct ATA timings if
+ * necessary. Specifically we have a problem that there is only
+ * one MWDMA/UDMA bit.
+ */
+
+static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+ struct ata_device *prev = ap->private_data;
+
+ /* See if the DMA settings could be wrong */
+ if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
+ /* Maybe, but do the channels match MWDMA/UDMA ? */
+ if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
+ (ata_using_udma(prev) && !ata_using_udma(adev)))
+ /* Switch the mode bits */
+ sc1200_set_dmamode(ap, adev);
+ }
+
+ return ata_sff_qc_issue(qc);
+}
+
+static struct scsi_host_template sc1200_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+};
+
+static struct ata_port_operations sc1200_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_prep = ata_sff_dumb_qc_prep,
+ .qc_issue = sc1200_qc_issue,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = sc1200_set_piomode,
+ .set_dmamode = sc1200_set_dmamode,
+};
+
+/**
+ * sc1200_init_one - Initialise an SC1200
+ * @dev: PCI device
+ * @id: Entry in match table
+ *
+ * Just throw the needed data at the libata helper and it does all
+ * our work.
+ */
+
+static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = 0x07,
+ .port_ops = &sc1200_port_ops
+ };
+ /* Can't enable port 2 yet, see top comments */
+ const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
+
+ return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL);
+}
+
+static const struct pci_device_id sc1200[] = {
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), },
+
+ { },
+};
+
+static struct pci_driver sc1200_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sc1200,
+ .probe = sc1200_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init sc1200_init(void)
+{
+ return pci_register_driver(&sc1200_pci_driver);
+}
+
+static void __exit sc1200_exit(void)
+{
+ pci_unregister_driver(&sc1200_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox, Mark Lord");
+MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sc1200);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sc1200_init);
+module_exit(sc1200_exit);
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
new file mode 100644
index 0000000..cf3707e
--- /dev/null
+++ b/drivers/ata/pata_scc.c
@@ -0,0 +1,1189 @@
+/*
+ * Support for IDE interfaces on Celleb platform
+ *
+ * (C) Copyright 2006 TOSHIBA CORPORATION
+ *
+ * This code is based on drivers/ata/ata_piix.c:
+ * Copyright 2003-2005 Red Hat Inc
+ * Copyright 2003-2005 Jeff Garzik
+ * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat Inc
+ *
+ * and drivers/ata/ahci.c:
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ * and drivers/ata/libata-core.c:
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_scc"
+#define DRV_VERSION "0.3"
+
+#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
+
+/* PCI BARs */
+#define SCC_CTRL_BAR 0
+#define SCC_BMID_BAR 1
+
+/* offset of CTRL registers */
+#define SCC_CTL_PIOSHT 0x000
+#define SCC_CTL_PIOCT 0x004
+#define SCC_CTL_MDMACT 0x008
+#define SCC_CTL_MCRCST 0x00C
+#define SCC_CTL_SDMACT 0x010
+#define SCC_CTL_SCRCST 0x014
+#define SCC_CTL_UDENVT 0x018
+#define SCC_CTL_TDVHSEL 0x020
+#define SCC_CTL_MODEREG 0x024
+#define SCC_CTL_ECMODE 0xF00
+#define SCC_CTL_MAEA0 0xF50
+#define SCC_CTL_MAEC0 0xF54
+#define SCC_CTL_CCKCTRL 0xFF0
+
+/* offset of BMID registers */
+#define SCC_DMA_CMD 0x000
+#define SCC_DMA_STATUS 0x004
+#define SCC_DMA_TABLE_OFS 0x008
+#define SCC_DMA_INTMASK 0x010
+#define SCC_DMA_INTST 0x014
+#define SCC_DMA_PTERADD 0x018
+#define SCC_REG_CMD_ADDR 0x020
+#define SCC_REG_DATA 0x000
+#define SCC_REG_ERR 0x004
+#define SCC_REG_FEATURE 0x004
+#define SCC_REG_NSECT 0x008
+#define SCC_REG_LBAL 0x00C
+#define SCC_REG_LBAM 0x010
+#define SCC_REG_LBAH 0x014
+#define SCC_REG_DEVICE 0x018
+#define SCC_REG_STATUS 0x01C
+#define SCC_REG_CMD 0x01C
+#define SCC_REG_ALTSTATUS 0x020
+
+/* register value */
+#define TDVHSEL_MASTER 0x00000001
+#define TDVHSEL_SLAVE 0x00000004
+
+#define MODE_JCUSFEN 0x00000080
+
+#define ECMODE_VALUE 0x01
+
+#define CCKCTRL_ATARESET 0x00040000
+#define CCKCTRL_BUFCNT 0x00020000
+#define CCKCTRL_CRST 0x00010000
+#define CCKCTRL_OCLKEN 0x00000100
+#define CCKCTRL_ATACLKOEN 0x00000002
+#define CCKCTRL_LCLKEN 0x00000001
+
+#define QCHCD_IOS_SS 0x00000001
+
+#define QCHSD_STPDIAG 0x00020000
+
+#define INTMASK_MSK 0xD1000012
+#define INTSTS_SERROR 0x80000000
+#define INTSTS_PRERR 0x40000000
+#define INTSTS_RERR 0x10000000
+#define INTSTS_ICERR 0x01000000
+#define INTSTS_BMSINT 0x00000010
+#define INTSTS_BMHE 0x00000008
+#define INTSTS_IOIRQS 0x00000004
+#define INTSTS_INTRQ 0x00000002
+#define INTSTS_ACTEINT 0x00000001
+
+
+/* PIO transfer mode table */
+/* JCHST */
+static const unsigned long JCHSTtbl[2][7] = {
+ {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
+ {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
+};
+
+/* JCHHT */
+static const unsigned long JCHHTtbl[2][7] = {
+ {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
+ {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
+};
+
+/* JCHCT */
+static const unsigned long JCHCTtbl[2][7] = {
+ {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
+ {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
+};
+
+/* DMA transfer mode table */
+/* JCHDCTM/JCHDCTS */
+static const unsigned long JCHDCTxtbl[2][7] = {
+ {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
+ {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
+};
+
+/* JCSTWTM/JCSTWTS */
+static const unsigned long JCSTWTxtbl[2][7] = {
+ {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
+ {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
+};
+
+/* JCTSS */
+static const unsigned long JCTSStbl[2][7] = {
+ {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
+ {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
+};
+
+/* JCENVT */
+static const unsigned long JCENVTtbl[2][7] = {
+ {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
+ {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
+};
+
+/* JCACTSELS/JCACTSELM */
+static const unsigned long JCACTSELtbl[2][7] = {
+ {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
+};
+
+static const struct pci_device_id scc_pci_tbl[] = {
+ {PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { } /* terminate list */
+};
+
+/**
+ * scc_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ *
+ * Set PIO mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+ void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
+ void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
+ unsigned long reg;
+ int offset;
+
+ reg = in_be32(cckctrl_port);
+ if (reg & CCKCTRL_ATACLKOEN)
+ offset = 1; /* 133MHz */
+ else
+ offset = 0; /* 100MHz */
+
+ reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
+ out_be32(piosht_port, reg);
+ reg = JCHCTtbl[offset][pio];
+ out_be32(pioct_port, reg);
+}
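+/*
+ * Editor's note, a worked example from the tables above: with the
+ * 133MHz clock (offset 1) and PIO4, JCHSTtbl gives 0x03 and JCHHTtbl
+ * 0x03, so PIOSHT is written with 0x00030003, while PIOCT receives
+ * JCHCTtbl's 0x09.
+ */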
+
+/**
+ * scc_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device whose timings we are configuring
+ *
+ * Set UDMA mode for device.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int udma = adev->dma_mode;
+ unsigned int is_slave = (adev->devno != 0);
+ u8 speed = udma;
+ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+ void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT;
+ void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST;
+ void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT;
+ void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST;
+ void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT;
+ void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL;
+ int offset, idx;
+
+ if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN)
+ offset = 1; /* 133MHz */
+ else
+ offset = 0; /* 100MHz */
+
+ if (speed >= XFER_UDMA_0)
+ idx = speed - XFER_UDMA_0;
+ else
+ return;
+
+ if (is_slave) {
+ out_be32(sdmact_port, JCHDCTxtbl[offset][idx]);
+ out_be32(scrcst_port, JCSTWTxtbl[offset][idx]);
+ out_be32(tdvhsel_port,
+ (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2));
+ } else {
+ out_be32(mdmact_port, JCHDCTxtbl[offset][idx]);
+ out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]);
+ out_be32(tdvhsel_port,
+ (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]);
+ }
+ out_be32(udenvt_port,
+ JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
+}
+
+static unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+ /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
+ if (adev->class == ATA_DEV_ATAPI &&
+ (mask & (0xE0 << ATA_SHIFT_UDMA))) {
+ printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
+ mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+ }
+ return ata_bmdma_mode_filter(adev, mask);
+}
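+/*
+ * Editor's note: 0xE0 << ATA_SHIFT_UDMA covers the UDMA5-UDMA7 bits of
+ * the transfer-mode mask, so clearing it in the ATAPI case above caps
+ * such devices at UDMA4, exactly as the errata comment says.
+ */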
+
+/**
+ * scc_tf_load - send taskfile registers to host controller
+ * @ap: Port to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Note: Original code is ata_sff_tf_load().
+ */
+
+static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ out_be32(ioaddr->ctl_addr, tf->ctl);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ out_be32(ioaddr->feature_addr, tf->hob_feature);
+ out_be32(ioaddr->nsect_addr, tf->hob_nsect);
+ out_be32(ioaddr->lbal_addr, tf->hob_lbal);
+ out_be32(ioaddr->lbam_addr, tf->hob_lbam);
+ out_be32(ioaddr->lbah_addr, tf->hob_lbah);
+ VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+ tf->hob_feature,
+ tf->hob_nsect,
+ tf->hob_lbal,
+ tf->hob_lbam,
+ tf->hob_lbah);
+ }
+
+ if (is_addr) {
+ out_be32(ioaddr->feature_addr, tf->feature);
+ out_be32(ioaddr->nsect_addr, tf->nsect);
+ out_be32(ioaddr->lbal_addr, tf->lbal);
+ out_be32(ioaddr->lbam_addr, tf->lbam);
+ out_be32(ioaddr->lbah_addr, tf->lbah);
+ VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+ tf->feature,
+ tf->nsect,
+ tf->lbal,
+ tf->lbam,
+ tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ out_be32(ioaddr->device_addr, tf->device);
+ VPRINTK("device 0x%X\n", tf->device);
+ }
+
+ ata_wait_idle(ap);
+}
+
+/**
+ * scc_check_status - Read device status reg & clear interrupt
+ * @ap: port where the device is
+ *
+ * Note: Original code is ata_check_status().
+ */
+
+static u8 scc_check_status (struct ata_port *ap)
+{
+ return in_be32(ap->ioaddr.status_addr);
+}
+
+/**
+ * scc_tf_read - input device's ATA taskfile shadow registers
+ * @ap: Port from which input is read
+ * @tf: ATA taskfile register set for storing input
+ *
+ * Note: Original code is ata_sff_tf_read().
+ */
+
+static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ tf->command = scc_check_status(ap);
+ tf->feature = in_be32(ioaddr->error_addr);
+ tf->nsect = in_be32(ioaddr->nsect_addr);
+ tf->lbal = in_be32(ioaddr->lbal_addr);
+ tf->lbam = in_be32(ioaddr->lbam_addr);
+ tf->lbah = in_be32(ioaddr->lbah_addr);
+ tf->device = in_be32(ioaddr->device_addr);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB);
+ tf->hob_feature = in_be32(ioaddr->error_addr);
+ tf->hob_nsect = in_be32(ioaddr->nsect_addr);
+ tf->hob_lbal = in_be32(ioaddr->lbal_addr);
+ tf->hob_lbam = in_be32(ioaddr->lbam_addr);
+ tf->hob_lbah = in_be32(ioaddr->lbah_addr);
+ out_be32(ioaddr->ctl_addr, tf->ctl);
+ ap->last_ctl = tf->ctl;
+ }
+}
+
+/**
+ * scc_exec_command - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Note: Original code is ata_sff_exec_command().
+ */
+
+static void scc_exec_command (struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
+
+ out_be32(ap->ioaddr.command_addr, tf->command);
+ ata_sff_pause(ap);
+}
+
+/**
+ * scc_check_altstatus - Read device alternate status reg
+ * @ap: port where the device is
+ */
+
+static u8 scc_check_altstatus (struct ata_port *ap)
+{
+ return in_be32(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ * scc_dev_select - Select device 0/1 on ATA bus
+ * @ap: ATA channel to manipulate
+ * @device: ATA device (numbered from zero) to select
+ *
+ * Note: Original code is ata_sff_dev_select().
+ */
+
+static void scc_dev_select (struct ata_port *ap, unsigned int device)
+{
+ u8 tmp;
+
+ if (device == 0)
+ tmp = ATA_DEVICE_OBS;
+ else
+ tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+ out_be32(ap->ioaddr.device_addr, tmp);
+ ata_sff_pause(ap);
+}
+
+/**
+ * scc_bmdma_setup - Set up PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_setup().
+ */
+
+static void scc_bmdma_setup (struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+ u8 dmactl;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ /* load PRD table addr */
+ out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = in_be32(mmio + SCC_DMA_CMD);
+ dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+ if (!rw)
+ dmactl |= ATA_DMA_WR;
+ out_be32(mmio + SCC_DMA_CMD, dmactl);
+
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
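+/*
+ * Editor's note: as in the generic BMDMA code this was modelled on,
+ * ATA_DMA_WR describes the DMA engine's view, i.e. a write to memory;
+ * it is therefore set for device reads (!rw), which is why the test
+ * above looks inverted at first glance.
+ */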
+
+/**
+ * scc_bmdma_start - Start a PCI IDE BMDMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_start().
+ */
+
+static void scc_bmdma_start (struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ u8 dmactl;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ /* start host DMA transaction */
+ dmactl = in_be32(mmio + SCC_DMA_CMD);
+ out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START);
+}
+
+/**
+ * scc_devchk - PATA device presence detection
+ * @ap: ATA channel to examine
+ * @device: Device to examine (starting at zero)
+ *
+ * Note: Original code is ata_devchk().
+ */
+
+static unsigned int scc_devchk (struct ata_port *ap,
+ unsigned int device)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 nsect, lbal;
+
+ ap->ops->sff_dev_select(ap, device);
+
+ out_be32(ioaddr->nsect_addr, 0x55);
+ out_be32(ioaddr->lbal_addr, 0xaa);
+
+ out_be32(ioaddr->nsect_addr, 0xaa);
+ out_be32(ioaddr->lbal_addr, 0x55);
+
+ out_be32(ioaddr->nsect_addr, 0x55);
+ out_be32(ioaddr->lbal_addr, 0xaa);
+
+ nsect = in_be32(ioaddr->nsect_addr);
+ lbal = in_be32(ioaddr->lbal_addr);
+
+ if ((nsect == 0x55) && (lbal == 0xaa))
+ return 1; /* we found a device */
+
+ return 0; /* nothing found */
+}
+
+/**
+ * scc_wait_after_reset - wait for devices to become ready after reset
+ * @link: link on which the devices reside
+ * @devmask: mask of present devices
+ * @deadline: deadline jiffies for the operation
+ *
+ * Note: Original code is ata_sff_wait_after_reset().
+ */
+
+static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int dev0 = devmask & (1 << 0);
+ unsigned int dev1 = devmask & (1 << 1);
+ int rc, ret = 0;
+
+ /* Spec mandates ">= 2ms" before checking status. We wait
+ * 150ms, because that was the magic delay used for ATAPI
+ * devices in Hale Landis's ATADRVR, for the period of time
+ * between when the ATA command register is written, and then
+ * status is checked. Because waiting for "a while" before
+ * checking status is fine, post SRST, we perform this magic
+ * delay here as well.
+ *
+	 * Old drivers/ide uses the 2ms rule and then waits for ready.
+ */
+ msleep(150);
+
+ /* always check readiness of the master device */
+ rc = ata_sff_wait_ready(link, deadline);
+ /* -ENODEV means the odd clown forgot the D7 pulldown resistor
+ * and TF status is 0xff, bail out on it too.
+ */
+ if (rc)
+ return rc;
+
+ /* if device 1 was found in ata_devchk, wait for register
+ * access briefly, then wait for BSY to clear.
+ */
+ if (dev1) {
+ int i;
+
+ ap->ops->sff_dev_select(ap, 1);
+
+ /* Wait for register access. Some ATAPI devices fail
+ * to set nsect/lbal after reset, so don't waste too
+ * much time on it. We're gonna wait for !BSY anyway.
+ */
+ for (i = 0; i < 2; i++) {
+ u8 nsect, lbal;
+
+ nsect = in_be32(ioaddr->nsect_addr);
+ lbal = in_be32(ioaddr->lbal_addr);
+ if ((nsect == 1) && (lbal == 1))
+ break;
+ msleep(50); /* give drive a breather */
+ }
+
+ rc = ata_sff_wait_ready(link, deadline);
+ if (rc) {
+ if (rc != -ENODEV)
+ return rc;
+ ret = rc;
+ }
+ }
+
+ /* is all this really necessary? */
+ ap->ops->sff_dev_select(ap, 0);
+ if (dev1)
+ ap->ops->sff_dev_select(ap, 1);
+ if (dev0)
+ ap->ops->sff_dev_select(ap, 0);
+
+ return ret;
+}
+
+/**
+ * scc_bus_softreset - PATA device software reset
+ * @ap: port to reset
+ * @devmask: mask of devices found present
+ * @deadline: deadline jiffies for the operation
+ *
+ * Note: Original code is ata_bus_softreset().
+ */
+
+static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ unsigned long deadline)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
+
+ /* software reset. causes dev0 to be selected */
+ out_be32(ioaddr->ctl_addr, ap->ctl);
+ udelay(20);
+ out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST);
+ udelay(20);
+ out_be32(ioaddr->ctl_addr, ap->ctl);
+
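+	/* As in the original ata_bus_softreset(), any error from the
+	 * post-reset wait is not propagated to the caller. */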
+ scc_wait_after_reset(&ap->link, devmask, deadline);
+
+ return 0;
+}
+
+/**
+ * scc_softreset - reset host port via ATA SRST
+ * @link: ATA link to reset
+ * @classes: resulting classes of attached devices
+ * @deadline: deadline jiffies for the operation
+ *
+ * Note: Original code is ata_sff_softreset().
+ */
+
+static int scc_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+ unsigned int devmask = 0, err_mask;
+ u8 err;
+
+ DPRINTK("ENTER\n");
+
+ /* determine if device 0/1 are present */
+ if (scc_devchk(ap, 0))
+ devmask |= (1 << 0);
+ if (slave_possible && scc_devchk(ap, 1))
+ devmask |= (1 << 1);
+
+ /* select device 0 again */
+ ap->ops->sff_dev_select(ap, 0);
+
+ /* issue bus reset */
+ DPRINTK("about to softreset, devmask=%x\n", devmask);
+ err_mask = scc_bus_softreset(ap, devmask, deadline);
+ if (err_mask) {
+ ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
+ err_mask);
+ return -EIO;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&ap->link.device[0],
+ devmask & (1 << 0), &err);
+ if (slave_possible && err != 0x81)
+ classes[1] = ata_sff_dev_classify(&ap->link.device[1],
+ devmask & (1 << 1), &err);
+
+ DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
+ return 0;
+}
+
+/**
+ * scc_bmdma_stop - Stop PCI IDE BMDMA transfer
+ * @qc: Command we are ending DMA for
+ */
+
+static void scc_bmdma_stop (struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
+ void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR];
+ u32 reg;
+
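+	/* Poll the DMA interrupt status and acknowledge each flagged
+	 * error condition in turn, stopping the DMA engine where needed,
+	 * until no status bits remain set. */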
+ while (1) {
+ reg = in_be32(bmid_base + SCC_DMA_INTST);
+
+ if (reg & INTSTS_SERROR) {
+ printk(KERN_WARNING "%s: SERROR\n", DRV_NAME);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT);
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+ continue;
+ }
+
+ if (reg & INTSTS_PRERR) {
+ u32 maea0, maec0;
+ maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0);
+ maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0);
+ printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT);
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+ continue;
+ }
+
+ if (reg & INTSTS_RERR) {
+ printk(KERN_WARNING "%s: Response Error\n", DRV_NAME);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT);
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+ continue;
+ }
+
+ if (reg & INTSTS_ICERR) {
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+ printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT);
+ continue;
+ }
+
+ if (reg & INTSTS_BMSINT) {
+ unsigned int classes;
+ unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
+ printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
+ /* TBD: SW reset */
+ scc_softreset(&ap->link, &classes, deadline);
+ continue;
+ }
+
+ if (reg & INTSTS_BMHE) {
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE);
+ continue;
+ }
+
+ if (reg & INTSTS_ACTEINT) {
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT);
+ continue;
+ }
+
+ if (reg & INTSTS_IOIRQS) {
+ out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS);
+ continue;
+ }
+ break;
+ }
+
+ /* clear start/stop bit */
+ out_be32(bmid_base + SCC_DMA_CMD,
+ in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap); /* dummy read */
+}
+
+/**
+ * scc_bmdma_status - Read PCI IDE BMDMA status
+ * @ap: Port associated with this ATA transaction.
+ */
+
+static u8 scc_bmdma_status (struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+ u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
+ u32 int_status = in_be32(mmio + SCC_DMA_INTST);
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ static int retry = 0;
+
+ /* return if IOS_SS is cleared */
+ if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START))
+ return host_stat;
+
+ /* errata A252,A308 workaround: Step4 */
+ if ((scc_check_altstatus(ap) & ATA_ERR)
+ && (int_status & INTSTS_INTRQ))
+ return (host_stat | ATA_DMA_INTR);
+
+ /* errata A308 workaround Step5 */
+ if (int_status & INTSTS_IOIRQS) {
+ host_stat |= ATA_DMA_INTR;
+
+ /* We don't check ATAPI DMA because it is limited to UDMA4 */
+ if ((qc->tf.protocol == ATA_PROT_DMA &&
+ qc->dev->xfer_mode > XFER_UDMA_4)) {
+ if (!(int_status & INTSTS_ACTEINT)) {
+ printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n",
+ ap->print_id);
+ host_stat |= ATA_DMA_ERR;
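+				/* second consecutive failure: drop this
+				 * UDMA mode so EH retrains a slower one */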
+ if (retry++)
+ ap->udma_mask &= ~(1 << qc->dev->xfer_mode);
+ } else
+ retry = 0;
+ }
+ }
+
+ return host_stat;
+}
+
+/**
+ * scc_data_xfer - Transfer data by PIO
+ * @dev: device for this I/O
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @rw: read/write
+ *
+ * Note: Original code is ata_sff_data_xfer().
+ */
+
+static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int words = buflen >> 1;
+ unsigned int i;
+ __le16 *buf16 = (__le16 *) buf;
+ void __iomem *mmio = ap->ioaddr.data_addr;
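+	/* The data register is accessed through 32-bit big-endian MMIO,
+	 * but only the low 16 bits carry ATA data, so each access moves
+	 * exactly one 16-bit word. */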
+
+ /* Transfer multiple of 2 bytes */
+ if (rw == READ)
+ for (i = 0; i < words; i++)
+ buf16[i] = cpu_to_le16(in_be32(mmio));
+ else
+ for (i = 0; i < words; i++)
+ out_be32(mmio, le16_to_cpu(buf16[i]));
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ __le16 align_buf[1] = { 0 };
+ unsigned char *trailing_buf = buf + buflen - 1;
+
+ if (rw == READ) {
+ align_buf[0] = cpu_to_le16(in_be32(mmio));
+ memcpy(trailing_buf, align_buf, 1);
+ } else {
+ memcpy(align_buf, trailing_buf, 1);
+ out_be32(mmio, le16_to_cpu(align_buf[0]));
+ }
+ words++;
+ }
+
+ return words << 1;
+}
+
+/**
+ * scc_irq_on - Enable interrupts on a port.
+ * @ap: Port on which interrupts are enabled.
+ *
+ * Note: Original code is ata_sff_irq_on().
+ */
+
+static u8 scc_irq_on (struct ata_port *ap)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u8 tmp;
+
+ ap->ctl &= ~ATA_NIEN;
+ ap->last_ctl = ap->ctl;
+
+ out_be32(ioaddr->ctl_addr, ap->ctl);
+ tmp = ata_wait_idle(ap);
+
+ ap->ops->sff_irq_clear(ap);
+
+ return tmp;
+}
+
+/**
+ * scc_freeze - Freeze BMDMA controller port
+ * @ap: port to freeze
+ *
+ * Note: Original code is ata_sff_freeze().
+ */
+
+static void scc_freeze (struct ata_port *ap)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ ap->ctl |= ATA_NIEN;
+ ap->last_ctl = ap->ctl;
+
+ out_be32(ioaddr->ctl_addr, ap->ctl);
+
+ /* Under certain circumstances, some controllers raise IRQ on
+ * ATA_NIEN manipulation. Also, many controllers fail to mask
+ * previously pending IRQ on ATA_NIEN assertion. Clear it.
+ */
+ ap->ops->sff_check_status(ap);
+
+ ap->ops->sff_irq_clear(ap);
+}
+
+/**
+ * scc_pata_prereset - prepare for reset
+ * @link: ATA link to be reset
+ * @deadline: deadline jiffies for the operation
+ */
+
+static int scc_pata_prereset(struct ata_link *link, unsigned long deadline)
+{
+ link->ap->cbl = ATA_CBL_PATA80;
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * scc_postreset - standard postreset callback
+ * @link: the target ata_link
+ * @classes: classes of attached devices
+ *
+ * Note: Original code is ata_sff_postreset().
+ */
+
+static void scc_postreset(struct ata_link *link, unsigned int *classes)
+{
+ struct ata_port *ap = link->ap;
+
+ DPRINTK("ENTER\n");
+
+ /* is double-select really necessary? */
+ if (classes[0] != ATA_DEV_NONE)
+ ap->ops->sff_dev_select(ap, 1);
+ if (classes[1] != ATA_DEV_NONE)
+ ap->ops->sff_dev_select(ap, 0);
+
+ /* bail out if no device is present */
+ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+ DPRINTK("EXIT, no device\n");
+ return;
+ }
+
+ /* set up device control */
+ if (ap->ioaddr.ctl_addr)
+ out_be32(ap->ioaddr.ctl_addr, ap->ctl);
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * scc_irq_clear - Clear PCI IDE BMDMA interrupt.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * Note: Original code is ata_sff_irq_clear().
+ */
+
+static void scc_irq_clear (struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ if (!mmio)
+ return;
+
+ out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS));
+}
+
+/**
+ * scc_port_start - Set port up for dma.
+ * @ap: Port to initialize
+ *
+ * Allocate space for PRD table using ata_port_start().
+ * Set PRD table address for PTERADD. (PRD Transfer End Read)
+ */
+
+static int scc_port_start (struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+ int rc;
+
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+
+ out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma);
+ return 0;
+}
+
+/**
+ * scc_port_stop - Undo scc_port_start()
+ * @ap: Port to shut down
+ *
+ * Reset PTERADD.
+ */
+
+static void scc_port_stop (struct ata_port *ap)
+{
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ out_be32(mmio + SCC_DMA_PTERADD, 0);
+}
+
+static struct scsi_host_template scc_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations scc_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .set_piomode = scc_set_piomode,
+ .set_dmamode = scc_set_dmamode,
+ .mode_filter = scc_mode_filter,
+
+ .sff_tf_load = scc_tf_load,
+ .sff_tf_read = scc_tf_read,
+ .sff_exec_command = scc_exec_command,
+ .sff_check_status = scc_check_status,
+ .sff_check_altstatus = scc_check_altstatus,
+ .sff_dev_select = scc_dev_select,
+
+ .bmdma_setup = scc_bmdma_setup,
+ .bmdma_start = scc_bmdma_start,
+ .bmdma_stop = scc_bmdma_stop,
+ .bmdma_status = scc_bmdma_status,
+ .sff_data_xfer = scc_data_xfer,
+
+ .freeze = scc_freeze,
+ .prereset = scc_pata_prereset,
+ .softreset = scc_softreset,
+ .postreset = scc_postreset,
+ .post_internal_cmd = scc_bmdma_stop,
+
+ .sff_irq_clear = scc_irq_clear,
+ .sff_irq_on = scc_irq_on,
+
+ .port_start = scc_port_start,
+ .port_stop = scc_port_stop,
+};
+
+static struct ata_port_info scc_port_info[] = {
+ {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x00,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &scc_pata_ops,
+ },
+};
+
+/**
+ * scc_reset_controller - initialize SCC PATA controller.
+ * @host: ATA host associated with the controller
+ */
+
+static int scc_reset_controller(struct ata_host *host)
+{
+ void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR];
+ void __iomem *bmid_base = host->iomap[SCC_BMID_BAR];
+ void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
+ void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG;
+ void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE;
+ void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK;
+ void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS;
+ u32 reg = 0;
+
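+	/* Enable the ATA clock outputs, then assert the controller
+	 * reset (CRST) and poll until the controller reflects it back. */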
+ out_be32(cckctrl_port, reg);
+ reg |= CCKCTRL_ATACLKOEN;
+ out_be32(cckctrl_port, reg);
+ reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
+ out_be32(cckctrl_port, reg);
+ reg |= CCKCTRL_CRST;
+ out_be32(cckctrl_port, reg);
+
+ for (;;) {
+ reg = in_be32(cckctrl_port);
+ if (reg & CCKCTRL_CRST)
+ break;
+ udelay(5000);
+ }
+
+ reg |= CCKCTRL_ATARESET;
+ out_be32(cckctrl_port, reg);
+ out_be32(ecmode_port, ECMODE_VALUE);
+ out_be32(mode_port, MODE_JCUSFEN);
+ out_be32(intmask_port, INTMASK_MSK);
+
+ if (in_be32(dmastatus_port) & QCHSD_STPDIAG) {
+ printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * scc_setup_ports - initialize ioaddr with SCC PATA port offsets.
+ * @ioaddr: IO address structure to be initialized
+ * @base: base address of BMID region
+ */
+
+static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base)
+{
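+	/* On the SCC the alternate-status and device-control registers
+	 * share one offset, so both fields get the same MMIO address. */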
+ ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR;
+ ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
+ ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
+ ioaddr->bmdma_addr = base;
+ ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA;
+ ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR;
+ ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE;
+ ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT;
+ ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL;
+ ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM;
+ ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH;
+ ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE;
+ ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS;
+ ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD;
+}
+
+static int scc_host_init(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ int rc;
+
+ rc = scc_reset_controller(host);
+ if (rc)
+ return rc;
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]);
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+/**
+ * scc_init_one - Register SCC PATA device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in scc_pci_tbl matching with @pdev
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ unsigned int board_idx = (unsigned int) ent->driver_data;
+ const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
+ struct ata_host *host;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
+ if (!host)
+ return -ENOMEM;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+
+ ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl");
+ ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid");
+
+ rc = scc_host_init(host);
+ if (rc)
+ return rc;
+
+ return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ IRQF_SHARED, &scc_sht);
+}
+
+static struct pci_driver scc_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = scc_pci_tbl,
+ .probe = scc_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init scc_init (void)
+{
+ int rc;
+
+ DPRINTK("pci_register_driver\n");
+ rc = pci_register_driver(&scc_pci_driver);
+ if (rc)
+ return rc;
+
+ DPRINTK("done\n");
+ return 0;
+}
+
+static void __exit scc_exit (void)
+{
+ pci_unregister_driver(&scc_pci_driver);
+}
+
+module_init(scc_init);
+module_exit(scc_exit);
+
+MODULE_AUTHOR("Toshiba corp");
+MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
new file mode 100644
index 0000000..6aeeeeb
--- /dev/null
+++ b/drivers/ata/pata_sch.c
@@ -0,0 +1,206 @@
+/*
+ * pata_sch.c - Intel SCH PATA controllers
+ *
+ * Copyright (c) 2008 Alek Du <alek.du@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*
+ * Supports:
+ * Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at:
+ * http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_sch"
+#define DRV_VERSION "0.2"
+
+/* see SCH datasheet page 351 */
+enum {
+ D0TIM = 0x80, /* Device 0 Timing Register */
+ D1TIM = 0x84, /* Device 1 Timing Register */
+ PM = 0x07, /* PIO Mode Bit Mask */
+ MDM = (0x03 << 8), /* Multi-word DMA Mode Bit Mask */
+ UDM = (0x07 << 16), /* Ultra DMA Mode Bit Mask */
+ PPE = (1 << 30), /* Prefetch/Post Enable */
+ USD = (1 << 31), /* Use Synchronous DMA */
+};
+
+static int sch_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+
+static const struct pci_device_id sch_pci_tbl[] = {
+ /* Intel SCH PATA Controller */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 },
+ { } /* terminate list */
+};
+
+static struct pci_driver sch_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sch_pci_tbl,
+ .probe = sch_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sch_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sch_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = sch_set_piomode,
+ .set_dmamode = sch_set_dmamode,
+};
+
+static struct ata_port_info sch_port_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = ATA_PIO4, /* pio0-4 */
+ .mwdma_mask = ATA_MWDMA2, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5, /* udma0-5 */
+ .port_ops = &sch_pata_ops,
+};
+
+MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
+MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sch_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * sch_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: ATA device
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned int port = adev->devno ? D1TIM : D0TIM;
+ unsigned int data;
+
+ pci_read_config_dword(dev, port, &data);
+ /* see SCH datasheet page 351 */
+ /* set PIO mode */
+ data &= ~(PM | PPE);
+ data |= pio;
+ /* enable PPE for block device */
+ if (adev->class == ATA_DEV_ATA)
+ data |= PPE;
+ pci_write_config_dword(dev, port, data);
+}
+
+/**
+ * sch_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: ATA device
+ *
+ * Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int dma_mode = adev->dma_mode;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned int port = adev->devno ? D1TIM : D0TIM;
+ unsigned int data;
+
+ pci_read_config_dword(dev, port, &data);
+ /* see SCH datasheet page 351 */
+ if (dma_mode >= XFER_UDMA_0) {
+ /* enable Synchronous DMA mode */
+ data |= USD;
+ data &= ~UDM;
+ data |= (dma_mode - XFER_UDMA_0) << 16;
+ } else { /* must be MWDMA mode, since we masked SWDMA already */
+ data &= ~(USD | MDM);
+ data |= (dma_mode - XFER_MW_DMA_0) << 8;
+ }
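+	/* e.g. UDMA5 on this device: USD set, UDM field (bits 18:16) = 5 */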
+ pci_write_config_dword(dev, port, data);
+}
+
+/**
+ * sch_init_one - Register SCH ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in sch_pci_tbl matching with @pdev
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int __devinit sch_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
+ struct ata_host *host;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ /* enable device and prepare host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ return rc;
+ pci_set_master(pdev);
+ return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
+}
+
+static int __init sch_init(void)
+{
+ return pci_register_driver(&sch_pci_driver);
+}
+
+static void __exit sch_exit(void)
+{
+ pci_unregister_driver(&sch_pci_driver);
+}
+
+module_init(sch_init);
+module_exit(sch_exit);
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
new file mode 100644
index 0000000..72e41c9
--- /dev/null
+++ b/drivers/ata/pata_serverworks.c
@@ -0,0 +1,538 @@
+/*
+ * pata_serverworks.c - Serverworks PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ *
+ * based upon
+ *
+ * serverworks.c
+ *
+ * Copyright (C) 1998-2000 Michel Aubry
+ * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Portions copyright (c) 2001 Sun Microsystems
+ *
+ *
+ * RCC/ServerWorks IDE driver for Linux
+ *
+ * OSB4: `Open South Bridge' IDE Interface (fn 1)
+ * supports UDMA mode 2 (33 MB/s)
+ *
+ * CSB5: `Champion South Bridge' IDE Interface (fn 1)
+ * all revisions support UDMA mode 4 (66 MB/s)
+ * revision A2.0 and up support UDMA mode 5 (100 MB/s)
+ *
+ * *** The CSB5 does not provide ANY register ***
+ * *** to detect 80-conductor cable presence. ***
+ *
+ * CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
+ *
+ * Documentation:
+ * Available under NDA only. Errata info very hard to get.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_serverworks"
+#define DRV_VERSION "0.4.3"
+
+#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
+#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
+
+/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
+ * can overrun their FIFOs when used with the CSB5 */
+
+static const char *csb_bad_ata100[] = {
+ "ST320011A",
+ "ST340016A",
+ "ST360021A",
+ "ST380021A",
+ NULL
+};
+
+/**
+ * dell_cable - Dell serverworks cable detection
+ * @ap: ATA port to do cable detect
+ *
+ * Dell hides the 40/80-pin select for its interfaces in the top two
+ * bits of the subsystem ID.
+ */
+
+static int dell_cable(struct ata_port *ap) {
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
+ return ATA_CBL_PATA80;
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * sun_cable - Sun Cobalt 'Alpine' cable detection
+ * @ap: ATA port to do cable select
+ *
+ * Cobalt CSB5 IDE hides the 40/80-pin select in the top two bits of the
+ * subsystem ID, the same as Dell. We could share one function, but we may
+ * need to extend the Dell one in the future.
+ */
+
+static int sun_cable(struct ata_port *ap) {
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
+ return ATA_CBL_PATA80;
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * osb4_cable - OSB4 cable detect
+ * @ap: ATA port to check
+ *
+ * The OSB4 isn't UDMA66 capable so this is easy
+ */
+
+static int osb4_cable(struct ata_port *ap) {
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * csb_cable - CSB5/6 cable detect
+ * @ap: ATA port to check
+ *
+ * Serverworks default arrangement is to use the drive side detection
+ * only.
+ */
+
+static int csb_cable(struct ata_port *ap) {
+ return ATA_CBL_PATA_UNK;
+}
+
+struct sv_cable_table {
+ int device;
+ int subvendor;
+ int (*cable_detect)(struct ata_port *ap);
+};
+
+/*
+ * Note that we don't copy the old serverworks code because the old
+ * code contains obvious mistakes
+ */
+
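+/* Ordering matters: the subvendor-specific Dell and Sun entries must
+ * precede the PCI_ANY_ID catch-all entries for the same device IDs. */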
+static struct sv_cable_table cable_detect[] = {
+ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, dell_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, dell_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, sun_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, osb4_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, csb_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, csb_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable },
+ { }
+};
+
+/**
+ * serverworks_cable_detect - cable detection
+ * @ap: ATA port
+ *
+ * Perform cable detection according to the device and subvendor
+ * identifications
+ */
+
+static int serverworks_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct sv_cable_table *cb = cable_detect;
+
+ while(cb->device) {
+ if (cb->device == pdev->device &&
+ (cb->subvendor == pdev->subsystem_vendor ||
+ cb->subvendor == PCI_ANY_ID)) {
+ return cb->cable_detect(ap);
+ }
+ cb++;
+ }
+
+ BUG();
+ return -1; /* kill compiler warning */
+}
+
+/**
+ * serverworks_is_csb - Check for CSB or OSB
+ * @pdev: PCI device to check
+ *
+ * Returns true if the device being checked is known to be a CSB
+ * series device.
+ */
+
+static u8 serverworks_is_csb(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
+ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+ case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * serverworks_osb4_filter - mode selection filter
+ * @adev: ATA device
+ * @mask: Mask of proposed modes
+ *
+ * Filter the offered modes for the device to apply controller
+ * specific rules. OSB4 requires no UDMA for disks due to a FIFO
+ * bug we hit.
+ */
+
+static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned long mask)
+{
+ if (adev->class == ATA_DEV_ATA)
+ mask &= ~ATA_MASK_UDMA;
+ return ata_bmdma_mode_filter(adev, mask);
+}
+
+
+/**
+ * serverworks_csb_filter - mode selection filter
+ * @adev: ATA device
+ * @mask: Mask of proposed modes
+ *
+ * Check the blacklist and disable UDMA5 if matched
+ */
+
+static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned long mask)
+{
+ const char *p;
+ char model_num[ATA_ID_PROD_LEN + 1];
+ int i;
+
+ /* Disk, UDMA */
+ if (adev->class != ATA_DEV_ATA)
+ return ata_bmdma_mode_filter(adev, mask);
+
+ /* Actually do need to check */
+ ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+ for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
+ if (!strcmp(p, model_num))
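+			/* strip UDMA5-7, leaving UDMA4 as the ceiling */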
+ mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+ }
+ return ata_bmdma_mode_filter(adev, mask);
+}
+
+/**
+ * serverworks_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program the OSB4/CSB5 timing registers for PIO. The PIO register
+ * load is done as a simple lookup.
+ */
+static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
+ int offset = 1 + 2 * ap->port_no - adev->devno;
+ int devbits = (2 * ap->port_no + adev->devno) * 4;
+ u16 csb5_pio;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int pio = adev->pio_mode - XFER_PIO_0;
+
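+	/* Drive timing registers run 0x40-0x43, with device 1 ahead of
+	 * device 0 on each channel, hence the inverted devno term above. */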
+ pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);
+
+	/* The OSB4 just requires the timing, but the CSB series wants the
+	   mode number as well */
+ if (serverworks_is_csb(pdev)) {
+ pci_read_config_word(pdev, 0x4A, &csb5_pio);
+ csb5_pio &= ~(0x0F << devbits);
+		/* csb5_pio is a 16-bit field; write the full word back */
+		pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
+ }
+}
+
+/**
+ * serverworks_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
+ * chipset. The MWDMA mode values are pulled from a lookup table
+ * while the chipset uses mode number for UDMA.
+ */
+
+static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
+ int offset = 1 + 2 * ap->port_no - adev->devno;
+ int devbits = 2 * ap->port_no + adev->devno;
+ u8 ultra;
+ u8 ultra_cfg;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ pci_read_config_byte(pdev, 0x54, &ultra_cfg);
+ pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
+ ultra &= ~(0x0F << (adev->devno * 4));
+
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ pci_write_config_byte(pdev, 0x44 + offset, 0x20);
+
+ ultra |= (adev->dma_mode - XFER_UDMA_0)
+ << (adev->devno * 4);
+ ultra_cfg |= (1 << devbits);
+ } else {
+ pci_write_config_byte(pdev, 0x44 + offset,
+ dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
+ ultra_cfg &= ~(1 << devbits);
+ }
+ pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
+ pci_write_config_byte(pdev, 0x54, ultra_cfg);
+}
+
+static struct scsi_host_template serverworks_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations serverworks_osb4_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = serverworks_cable_detect,
+ .mode_filter = serverworks_osb4_filter,
+ .set_piomode = serverworks_set_piomode,
+ .set_dmamode = serverworks_set_dmamode,
+};
+
+static struct ata_port_operations serverworks_csb_port_ops = {
+ .inherits = &serverworks_osb4_port_ops,
+ .mode_filter = serverworks_csb_filter,
+};
+
+static int serverworks_fixup_osb4(struct pci_dev *pdev)
+{
+ u32 reg;
+ struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
+ if (isa_dev) {
+ pci_read_config_dword(isa_dev, 0x64, &reg);
+ reg &= ~0x00002000; /* disable 600ns interrupt mask */
+ if (!(reg & 0x00004000))
+ printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
+ reg |= 0x00004000; /* enable UDMA/33 support */
+ pci_write_config_dword(isa_dev, 0x64, reg);
+ pci_dev_put(isa_dev);
+ return 0;
+ }
+ printk(KERN_WARNING "ata_serverworks: Unable to find bridge.\n");
+ return -ENODEV;
+}
+
+static int serverworks_fixup_csb(struct pci_dev *pdev)
+{
+ u8 btr;
+
+ /* Third Channel Test */
+ if (!(PCI_FUNC(pdev->devfn) & 1)) {
+ struct pci_dev * findev = NULL;
+ u32 reg4c = 0;
+ findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
+ if (findev) {
+ pci_read_config_dword(findev, 0x4C, &reg4c);
+ reg4c &= ~0x000007FF;
+ reg4c |= 0x00000040;
+ reg4c |= 0x00000020;
+ pci_write_config_dword(findev, 0x4C, reg4c);
+ pci_dev_put(findev);
+ }
+ } else {
+ struct pci_dev * findev = NULL;
+ u8 reg41 = 0;
+
+ findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
+ if (findev) {
+ pci_read_config_byte(findev, 0x41, &reg41);
+ reg41 &= ~0x40;
+ pci_write_config_byte(findev, 0x41, reg41);
+ pci_dev_put(findev);
+ }
+ }
+ /* setup the UDMA Control register
+ *
+ * 1. clear bit 6 to enable DMA
+ * 2. enable DMA modes with bits 0-1
+ * 00 : legacy
+ * 01 : udma2
+ * 10 : udma2/udma4
+ * 11 : udma2/udma4/udma5
+ */
+ pci_read_config_byte(pdev, 0x5A, &btr);
+ btr &= ~0x40;
+ if (!(PCI_FUNC(pdev->devfn) & 1))
+ btr |= 0x2;
+ else
+ btr |= (pdev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
+ pci_write_config_byte(pdev, 0x5A, btr);
+
+ return btr;
+}
+
+static void serverworks_fixup_ht1000(struct pci_dev *pdev)
+{
+ u8 btr;
+ /* Setup HT1000 SouthBridge Controller - Single Channel Only */
+ pci_read_config_byte(pdev, 0x5A, &btr);
+ btr &= ~0x40;
+ btr |= 0x3;
+ pci_write_config_byte(pdev, 0x5A, btr);
+}
+
+
+static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info[4] = {
+ { /* OSB4 */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = 0x07,
+ .port_ops = &serverworks_osb4_port_ops
+ }, { /* OSB4 no UDMA */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = 0x00,
+ .port_ops = &serverworks_osb4_port_ops
+ }, { /* CSB5 */
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &serverworks_csb_port_ops
+ }, { /* CSB5 - later revisions*/
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &serverworks_csb_port_ops
+ }
+ };
+ const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* Force master latency timer to 64 PCI clocks */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+
+ /* OSB4 : South Bridge and IDE */
+ if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
+ /* Select non UDMA capable OSB4 if we can't do fixups */
+ if ( serverworks_fixup_osb4(pdev) < 0)
+ ppi[0] = &info[1];
+ }
+ /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
+ else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
+ (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
+ (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
+
+		/* serverworks_fixup_csb() returns the UDMA control value it
+		   programmed; 3 means UDMA5 was enabled, so select the
+		   faster info block */
+ if (serverworks_fixup_csb(pdev) == 3)
+ ppi[0] = &info[3];
+
+ /* Is this the 3rd channel CSB6 IDE ? */
+ if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
+ ppi[1] = &ata_dummy_port_info;
+ }
+ /* setup HT1000E */
+ else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
+ serverworks_fixup_ht1000(pdev);
+
+ if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
+ ata_pci_bmdma_clear_simplex(pdev);
+
+ return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL);
+}
+
+#ifdef CONFIG_PM
+static int serverworks_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ /* Force master latency timer to 64 PCI clocks */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
+ serverworks_fixup_osb4(pdev);
+ break;
+ case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+ ata_pci_bmdma_clear_simplex(pdev);
+ /* fall through */
+ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
+ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+ serverworks_fixup_csb(pdev);
+ break;
+ case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
+ serverworks_fixup_ht1000(pdev);
+ break;
+ }
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id serverworks[] = {
+ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
+ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
+ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
+ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
+ { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},
+
+ { },
+};
+
+static struct pci_driver serverworks_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = serverworks,
+ .probe = serverworks_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = serverworks_reinit_one,
+#endif
+};
+
+static int __init serverworks_init(void)
+{
+ return pci_register_driver(&serverworks_pci_driver);
+}
+
+static void __exit serverworks_exit(void)
+{
+ pci_unregister_driver(&serverworks_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, serverworks);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(serverworks_init);
+module_exit(serverworks_exit);
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
new file mode 100644
index 0000000..83580a5
--- /dev/null
+++ b/drivers/ata/pata_sil680.c
@@ -0,0 +1,413 @@
+/*
+ * pata_sil680.c - SIL680 PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ *
+ * based upon
+ *
+ * linux/drivers/ide/pci/siimage.c Version 1.07 Nov 30, 2003
+ *
+ * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat <alan@redhat.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * Documentation publicly available.
+ *
+ * If you have strange problems with nVidia chipset systems please
+ * see the SI support documentation and update your system BIOS
+ * if necessary
+ *
+ * TODO
+ * If we know all our devices are LBA28 (or LBA28 sized) we could use
+ * the command fifo mode.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_sil680"
+#define DRV_VERSION "0.4.8"
+
+#define SIL680_MMIO_BAR 5
+
+/**
+ * sil680_selreg - return register base
+ * @ap: ATA port
+ * @r: config offset
+ *
+ * Turn a config register offset into the right address in either
+ * PCI space or MMIO space to access the control register in question.
+ * Thankfully this is a configuration operation, so it isn't performance
+ * critical.
+ */
+
+static unsigned long sil680_selreg(struct ata_port *ap, int r)
+{
+ unsigned long base = 0xA0 + r;
+ base += (ap->port_no << 4);
+ return base;
+}
+
+/**
+ * sil680_seldev - return register base
+ * @ap: ATA port
+ * @adev: ATA device
+ * @r: config offset
+ *
+ * Turn a config register offset into the right address in either
+ * PCI space or MMIO space to access the control register in question,
+ * including accounting for the unit shift.
+ */
+
+static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
+{
+ unsigned long base = 0xA0 + r;
+ base += (ap->port_no << 4);
+ base |= adev->devno ? 2 : 0;
+ return base;
+}
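+/* e.g. port 1, device 1, r = 0x0C: 0xA0 + 0x0C + 0x10, unit bit set = 0xBE */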
+
+
+/**
+ * sil680_cable_detect - cable detection
+ * @ap: ATA port
+ *
+ * Perform cable detection. The SIL680 stores this in PCI config
+ * space for us.
+ */
+
+static int sil680_cable_detect(struct ata_port *ap) {
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ unsigned long addr = sil680_selreg(ap, 0);
+ u8 ata66;
+ pci_read_config_byte(pdev, addr, &ata66);
+ if (ata66 & 1)
+ return ATA_CBL_PATA80;
+ else
+ return ATA_CBL_PATA40;
+}
+
+/**
+ * sil680_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program the SIL680 registers for PIO mode. Note that the task speed
+ * registers are shared between the devices so we must pick the lowest
+ * mode for command work.
+ */
+
+static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ static u16 speed_p[5] = { 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1 };
+ static u16 speed_t[5] = { 0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1 };
+
+ unsigned long tfaddr = sil680_selreg(ap, 0x02);
+ unsigned long addr = sil680_seldev(ap, adev, 0x04);
+ unsigned long addr_mask = 0x80 + 4 * ap->port_no;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int pio = adev->pio_mode - XFER_PIO_0;
+ int lowest_pio = pio;
+ int port_shift = 4 * adev->devno;
+ u16 reg;
+ u8 mode;
+
+ struct ata_device *pair = ata_dev_pair(adev);
+
+ if (pair != NULL && adev->pio_mode > pair->pio_mode)
+ lowest_pio = pair->pio_mode - XFER_PIO_0;
+
+ pci_write_config_word(pdev, addr, speed_p[pio]);
+ pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
+
+ pci_read_config_word(pdev, tfaddr-2, &reg);
+ pci_read_config_byte(pdev, addr_mask, &mode);
+
+ reg &= ~0x0200; /* Clear IORDY */
+ mode &= ~(3 << port_shift); /* Clear IORDY and DMA bits */
+
+ if (ata_pio_need_iordy(adev)) {
+ reg |= 0x0200; /* Enable IORDY */
+ mode |= 1 << port_shift;
+ }
+ pci_write_config_word(pdev, tfaddr-2, reg);
+ pci_write_config_byte(pdev, addr_mask, mode);
+}
+
+/**
+ * sil680_set_dmamode - set initial DMA mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Program the MWDMA/UDMA modes for the SIL680 chipset. The MWDMA
+ * mode values are pulled from a lookup table while the chipset uses
+ * the mode number directly for UDMA.
+ */
+
+static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ static u8 ultra_table[2][7] = {
+		{ 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF }, /* 100 MHz */
+		{ 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }, /* 133 MHz */
+ };
+ static u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
+
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ unsigned long ma = sil680_seldev(ap, adev, 0x08);
+ unsigned long ua = sil680_seldev(ap, adev, 0x0C);
+ unsigned long addr_mask = 0x80 + 4 * ap->port_no;
+ int port_shift = adev->devno * 4;
+ u8 scsc, mode;
+ u16 multi, ultra;
+
+ pci_read_config_byte(pdev, 0x8A, &scsc);
+ pci_read_config_byte(pdev, addr_mask, &mode);
+ pci_read_config_word(pdev, ma, &multi);
+ pci_read_config_word(pdev, ua, &ultra);
+
+ /* Mask timing bits */
+ ultra &= ~0x3F;
+ mode &= ~(0x03 << port_shift);
+
+	/* Select the UDMA timing table: clock bits set => 133 MHz table */
+	scsc = (scsc & 0x30) ? 1 : 0;
+
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ multi = 0x10C1;
+ ultra |= ultra_table[scsc][adev->dma_mode - XFER_UDMA_0];
+ mode |= (0x03 << port_shift);
+ } else {
+ multi = dma_table[adev->dma_mode - XFER_MW_DMA_0];
+ mode |= (0x02 << port_shift);
+ }
+ pci_write_config_byte(pdev, addr_mask, mode);
+ pci_write_config_word(pdev, ma, multi);
+ pci_write_config_word(pdev, ua, ultra);
+}
+
+static struct scsi_host_template sil680_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sil680_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = sil680_cable_detect,
+ .set_piomode = sil680_set_piomode,
+ .set_dmamode = sil680_set_dmamode,
+};
+
+/**
+ * sil680_init_chip - chip setup
+ * @pdev: PCI device
+ *
+ * Perform all the chip setup which must be done both when the device
+ * is powered up on boot and when we resume in case we resumed from RAM.
+ * Returns the final clock settings.
+ */
+
+static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
+{
+ u32 class_rev = 0;
+ u8 tmpbyte = 0;
+
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
+ class_rev &= 0xff;
+ /* FIXME: double check */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, (class_rev) ? 1 : 255);
+
+ pci_write_config_byte(pdev, 0x80, 0x00);
+ pci_write_config_byte(pdev, 0x84, 0x00);
+
+ pci_read_config_byte(pdev, 0x8A, &tmpbyte);
+
+ dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
+ tmpbyte & 1, tmpbyte & 0x30);
+
+ *try_mmio = 0;
+#ifdef CONFIG_PPC
+ if (machine_is(cell))
+ *try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
+#endif
+
+ switch(tmpbyte & 0x30) {
+ case 0x00:
+ /* 133 clock attempt to force it on */
+ pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
+ break;
+ case 0x30:
+ /* if clocking is disabled */
+ /* 133 clock attempt to force it on */
+ pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
+ break;
+ case 0x10:
+ /* 133 already */
+ break;
+ case 0x20:
+ /* BIOS set PCI x2 clocking */
+ break;
+ }
+
+ pci_read_config_byte(pdev, 0x8A, &tmpbyte);
+ dev_dbg(&pdev->dev, "sil680: BA5_EN = %d clock = %02X\n",
+ tmpbyte & 1, tmpbyte & 0x30);
+
+ pci_write_config_byte(pdev, 0xA1, 0x72);
+ pci_write_config_word(pdev, 0xA2, 0x328A);
+ pci_write_config_dword(pdev, 0xA4, 0x62DD62DD);
+ pci_write_config_dword(pdev, 0xA8, 0x43924392);
+ pci_write_config_dword(pdev, 0xAC, 0x40094009);
+ pci_write_config_byte(pdev, 0xB1, 0x72);
+ pci_write_config_word(pdev, 0xB2, 0x328A);
+ pci_write_config_dword(pdev, 0xB4, 0x62DD62DD);
+ pci_write_config_dword(pdev, 0xB8, 0x43924392);
+ pci_write_config_dword(pdev, 0xBC, 0x40094009);
+
+ switch(tmpbyte & 0x30) {
+ case 0x00: printk(KERN_INFO "sil680: 100MHz clock.\n");break;
+ case 0x10: printk(KERN_INFO "sil680: 133MHz clock.\n");break;
+ case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break;
+ /* This last case is _NOT_ ok */
+	case 0x30: printk(KERN_ERR "sil680: Clock disabled?\n");
+ }
+ return tmpbyte & 0x30;
+}
+
+static int __devinit sil680_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sil680_port_ops
+ };
+ static const struct ata_port_info info_slow = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &sil680_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ static int printed_version;
+ struct ata_host *host;
+ void __iomem *mmio_base;
+ int rc, try_mmio;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ switch (sil680_init_chip(pdev, &try_mmio)) {
+ case 0:
+ ppi[0] = &info_slow;
+ break;
+ case 0x30:
+ return -ENODEV;
+ }
+
+ if (!try_mmio)
+ goto use_ioports;
+
+ /* Try to acquire MMIO resources and fallback to PIO if
+ * that fails
+ */
+ rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
+ if (rc)
+ goto use_ioports;
+
+ /* Allocate host and set it up */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+ if (!host)
+ return -ENOMEM;
+ host->iomap = pcim_iomap_table(pdev);
+
+ /* Setup DMA masks */
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ pci_set_master(pdev);
+
+ /* Get MMIO base and initialize port addresses */
+ mmio_base = host->iomap[SIL680_MMIO_BAR];
+ host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00;
+ host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80;
+ host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a;
+ host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a;
+ ata_sff_std_ports(&host->ports[0]->ioaddr);
+ host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08;
+ host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0;
+ host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca;
+ host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca;
+ ata_sff_std_ports(&host->ports[1]->ioaddr);
+
+ /* Register & activate */
+ return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ IRQF_SHARED, &sil680_sht);
+
+use_ioports:
+ return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL);
+}
+
+#ifdef CONFIG_PM
+static int sil680_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int try_mmio, rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+ sil680_init_chip(pdev, &try_mmio);
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id sil680[] = {
+ { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
+
+ { },
+};
+
+static struct pci_driver sil680_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sil680,
+ .probe = sil680_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = sil680_reinit_one,
+#endif
+};
+
+static int __init sil680_init(void)
+{
+ return pci_register_driver(&sil680_pci_driver);
+}
+
+static void __exit sil680_exit(void)
+{
+ pci_unregister_driver(&sil680_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for SI680 PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil680);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sil680_init);
+module_exit(sil680_exit);
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
new file mode 100644
index 0000000..e4be55e
--- /dev/null
+++ b/drivers/ata/pata_sis.c
@@ -0,0 +1,864 @@
+/*
+ * pata_sis.c - SiS ATA driver
+ *
+ * (C) 2005 Red Hat
+ * (C) 2007 Bartlomiej Zolnierkiewicz
+ *
+ * Based upon linux/drivers/ide/pci/sis5513.c
+ * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
+ * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz>
+ * SiS Taiwan : for direct support and hardware.
+ * Daniela Engert : for initial ATA100 advices and numerous others.
+ * John Fremlin, Manfred Spraul, Dave Morgan, Peter Kjellerstedt :
+ * for checking code correctness, providing patches.
+ * Original tests and design on the SiS620 chipset.
+ * ATA100 tests and design on the SiS735 chipset.
+ * ATA16/33 support from specs
+ * ATA133 support for SiS961/962 by L.C. Chang <lcchang@sis.com.tw>
+ *
+ *
+ * TODO
+ *	Check MWDMA on drives that don't support MWDMA-speed PIO cycles?
+ * More Testing
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/ata.h>
+#include "sis.h"
+
+#define DRV_NAME "pata_sis"
+#define DRV_VERSION "0.5.2"
+
+struct sis_chipset {
+ u16 device; /* PCI host ID */
+ const struct ata_port_info *info; /* Info block */
+ /* Probably add family, cable detect type etc here to clean
+ up code later */
+};
+
+struct sis_laptop {
+ u16 device;
+ u16 subvendor;
+ u16 subdevice;
+};
+
+static const struct sis_laptop sis_laptop[] = {
+ /* devid, subvendor, subdev */
+ { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
+ { 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
+ { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
+ /* end marker */
+ { 0, }
+};
+
+static int sis_short_ata40(struct pci_dev *dev)
+{
+ const struct sis_laptop *lap = &sis_laptop[0];
+
+ while (lap->device) {
+ if (lap->device == dev->device &&
+ lap->subvendor == dev->subsystem_vendor &&
+ lap->subdevice == dev->subsystem_device)
+ return 1;
+ lap++;
+ }
+
+ return 0;
+}
+
+/**
+ * sis_old_port_base - return PCI configuration base for dev
+ * @adev: device
+ *
+ * Returns the base of the PCI configuration registers for this port
+ * number.
+ */
+
+static int sis_old_port_base(struct ata_device *adev)
+{
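+	/* e.g. secondary channel, slave device: 0x40 + 4 + 2 = 0x46 */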
+ return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno);
+}
+
+/**
+ * sis_133_cable_detect - check for 40/80 pin
+ * @ap: Port
+ *
+ * Perform cable detection for the later UDMA133 capable
+ * SiS chipset.
+ */
+
+static int sis_133_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u16 tmp;
+
+ /* The top bit of this register is the cable detect bit */
+ pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp);
+ if ((tmp & 0x8000) && !sis_short_ata40(pdev))
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+/**
+ * sis_66_cable_detect - check for 40/80 pin
+ * @ap: Port
+ *
+ * Perform cable detection on the UDMA66, UDMA100 and early UDMA133
+ * SiS IDE controllers.
+ */
+
+static int sis_66_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 tmp;
+
+ /* Older chips keep cable detect in bits 4/5 of reg 0x48 */
+ pci_read_config_byte(pdev, 0x48, &tmp);
+ tmp >>= ap->port_no;
+ if ((tmp & 0x10) && !sis_short_ata40(pdev))
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+
+/**
+ * sis_pre_reset - probe begin
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Set up cable type and use generic probe init
+ */
+
+static int sis_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits sis_enable_bits[] = {
+ { 0x4aU, 1U, 0x02UL, 0x02UL }, /* port 0 */
+ { 0x4aU, 1U, 0x04UL, 0x04UL }, /* port 1 */
+ };
+
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ /* Clear the FIFO settings. We can't enable the FIFO until
+ we know we are poking at a disk */
+ pci_write_config_byte(pdev, 0x4B, 0);
+ return ata_sff_prereset(link, deadline);
+}
+
+
+/**
+ * sis_set_fifo - Set RWP fifo bits for this device
+ * @ap: Port
+ * @adev: Device
+ *
+ * SIS chipsets implement prefetch/postwrite bits for each device
+ * on both channels. This functionality is not ATAPI compatible and
+ * must be configured according to the class of device present
+ */
+
+static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 fifoctrl;
+ u8 mask = 0x11;
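+	/* 0x11 pairs one prefetch bit with one postwrite bit for a single
+	 * device; shift it into place for this channel and device */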
+
+ mask <<= (2 * ap->port_no);
+ mask <<= adev->devno;
+
+ /* This holds various bits including the FIFO control */
+ pci_read_config_byte(pdev, 0x4B, &fifoctrl);
+ fifoctrl &= ~mask;
+
+ /* Enable for ATA (disk) only */
+ if (adev->class == ATA_DEV_ATA)
+ fifoctrl |= mask;
+ pci_write_config_byte(pdev, 0x4B, fifoctrl);
+}
+
+/**
+ * sis_old_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device we are configuring for.
+ *
+ * Set PIO mode for device, in host controller PCI config space. This
+ * function handles PIO set up for all chips that are pre ATA100 and
+ * also early ATA100 devices.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = sis_old_port_base(adev);
+ u8 t1, t2;
+ int speed = adev->pio_mode - XFER_PIO_0;
+
+ const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 };
+ const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
+
+ sis_set_fifo(ap, adev);
+
+ pci_read_config_byte(pdev, port, &t1);
+ pci_read_config_byte(pdev, port + 1, &t2);
+
+ t1 &= ~0x0F; /* Clear active/recovery timings */
+ t2 &= ~0x07;
+
+ t1 |= active[speed];
+ t2 |= recovery[speed];
+
+ pci_write_config_byte(pdev, port, t1);
+ pci_write_config_byte(pdev, port + 1, t2);
+}
+
+/**
+ * sis_100_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device we are configuring for.
+ *
+ * Set PIO mode for device, in host controller PCI config space. This
+ * function handles PIO set up for ATA100 devices and early ATA133.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = sis_old_port_base(adev);
+ int speed = adev->pio_mode - XFER_PIO_0;
+
+ const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
+
+ sis_set_fifo(ap, adev);
+
+ pci_write_config_byte(pdev, port, actrec[speed]);
+}
+
+/**
+ * sis_133_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device we are configuring for.
+ *
+ * Set PIO mode for device, in host controller PCI config space. This
+ * function handles PIO set up for the later ATA133 devices.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = 0x40;
+ u32 t1;
+ u32 reg54;
+ int speed = adev->pio_mode - XFER_PIO_0;
+
+ const u32 timing133[] = {
+ 0x28269000, /* Recovery << 24 | Act << 16 | Ini << 12 */
+ 0x0C266000,
+ 0x04263000,
+ 0x0C0A3000,
+ 0x05093000
+ };
+ const u32 timing100[] = {
+ 0x1E1C6000, /* Recovery << 24 | Act << 16 | Ini << 12 */
+ 0x091C4000,
+ 0x031C2000,
+ 0x09072000,
+ 0x04062000
+ };
+
+ sis_set_fifo(ap, adev);
+
+ /* If bit 30 of the dword at 0x54 (bit 14 of 0x56) is set then the registers are mapped at 0x70 not 0x40 */
+ pci_read_config_dword(pdev, 0x54, &reg54);
+ if (reg54 & 0x40000000)
+ port = 0x70;
+ port += 8 * ap->port_no + 4 * adev->devno;
+
+ pci_read_config_dword(pdev, port, &t1);
+ t1 &= 0xC0C00FFF; /* Mask out timing */
+
+ if (t1 & 0x08) /* 100 or 133 ? */
+ t1 |= timing133[speed];
+ else
+ t1 |= timing100[speed];
+ pci_write_config_dword(pdev, port, t1); /* t1 is a full timing dword */
+}
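+
+/*
+ * The timing dwords above therefore sit at base + 0x0/0x4 for the
+ * primary master/slave and base + 0x8/0xC for the secondary pair,
+ * where base is 0x40, or 0x70 when bit 30 of the dword at 0x54 is set.
+ */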
+
+/**
+ * sis_old_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ * Handles pre-UDMA and UDMA33 devices. Unlike the old ide/pci driver
+ * it supports MWDMA as well.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int speed = adev->dma_mode - XFER_MW_DMA_0;
+ int drive_pci = sis_old_port_base(adev);
+ u16 timing;
+
+ const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
+ const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
+
+ pci_read_config_word(pdev, drive_pci, &timing);
+
+ if (adev->dma_mode < XFER_UDMA_0) {
+ /* Bits 3-0 hold the recovery timing, bits 8-10 the active timing;
+ the higher bits are dependent on the device */
+ timing &= ~0x870F;
+ timing |= mwdma_bits[speed];
+ } else {
+ /* Bit 15 is UDMA on/off, bit 13-14 are cycle time */
+ speed = adev->dma_mode - XFER_UDMA_0;
+ timing &= ~0x6000;
+ timing |= udma_bits[speed];
+ }
+ pci_write_config_word(pdev, drive_pci, timing);
+}
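+
+/*
+ * For example, programming UDMA2 clears bits 13-14 and ORs in
+ * udma_bits[2] = 0xA000: bit 15 turns UDMA on and bits 13-14 carry
+ * the cycle time code.
+ */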
+
+/**
+ * sis_66_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ * Handles UDMA66 and early UDMA100 devices. Unlike the old ide/pci
+ * driver it supports MWDMA as well.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int speed = adev->dma_mode - XFER_MW_DMA_0;
+ int drive_pci = sis_old_port_base(adev);
+ u16 timing;
+
+ /* MWDMA 0-2 and UDMA 0-5 */
+ const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
+ const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
+
+ pci_read_config_word(pdev, drive_pci, &timing);
+
+ if (adev->dma_mode < XFER_UDMA_0) {
+ /* Bits 3-0 hold the recovery timing, bits 8-10 the active timing;
+ the higher bits are dependent on the device, bit 15 is UDMA */
+ timing &= ~0x870F;
+ timing |= mwdma_bits[speed];
+ } else {
+ /* Bit 15 is UDMA on/off, bit 12-14 are cycle time */
+ speed = adev->dma_mode - XFER_UDMA_0;
+ timing &= ~0xF000;
+ timing |= udma_bits[speed];
+ }
+ pci_write_config_word(pdev, drive_pci, timing);
+}
+
+/**
+ * sis_100_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ * Handles UDMA66 and early UDMA100 devices.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int speed = adev->dma_mode - XFER_MW_DMA_0;
+ int drive_pci = sis_old_port_base(adev);
+ u8 timing;
+
+ const u8 udma_bits[] = { 0x8B, 0x87, 0x85, 0x83, 0x82, 0x81};
+
+ pci_read_config_byte(pdev, drive_pci + 1, &timing);
+
+ if (adev->dma_mode < XFER_UDMA_0) {
+ /* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
+ } else {
+ /* Bit 7 is UDMA on/off, bit 0-3 are cycle time */
+ speed = adev->dma_mode - XFER_UDMA_0;
+ timing &= ~0x8F;
+ timing |= udma_bits[speed];
+ }
+ pci_write_config_byte(pdev, drive_pci + 1, timing);
+}
+
+/**
+ * sis_133_early_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ * Handles early SiS 961 bridges.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int speed = adev->dma_mode - XFER_MW_DMA_0;
+ int drive_pci = sis_old_port_base(adev);
+ u8 timing;
+ /* Low 4 bits are timing */
+ static const u8 udma_bits[] = { 0x8F, 0x8A, 0x87, 0x85, 0x83, 0x82, 0x81};
+
+ pci_read_config_byte(pdev, drive_pci + 1, &timing);
+
+ if (adev->dma_mode < XFER_UDMA_0) {
+ /* NOT SUPPORTED YET: NEED DATA SHEET. DITTO IN OLD DRIVER */
+ } else {
+ /* Bit 7 is UDMA on/off, bit 0-3 are cycle time */
+ speed = adev->dma_mode - XFER_UDMA_0;
+ timing &= ~0x8F;
+ timing |= udma_bits[speed];
+ }
+ pci_write_config_byte(pdev, drive_pci + 1, timing);
+}
+
+/**
+ * sis_133_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Device to program
+ *
+ * Set UDMA/MWDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int speed = adev->dma_mode - XFER_MW_DMA_0;
+ int port = 0x40;
+ u32 t1;
+ u32 reg54;
+
+ /* Bits 4-7 hold the cycle time, bits 8-11 the CVS time */
+ static const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
+ static const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
+
+ /* If bit 30 of the dword at 0x54 (bit 14 of 0x56) is set then the registers are mapped at 0x70 not 0x40 */
+ pci_read_config_dword(pdev, 0x54, &reg54);
+ if (reg54 & 0x40000000)
+ port = 0x70;
+ port += (8 * ap->port_no) + (4 * adev->devno);
+
+ pci_read_config_dword(pdev, port, &t1);
+
+ if (adev->dma_mode < XFER_UDMA_0) {
+ t1 &= ~0x00000004;
+ /* FIXME: need data sheet to add MWDMA here. Also lacking on
+ ide/pci driver */
+ } else {
+ speed = adev->dma_mode - XFER_UDMA_0;
+ /* If bit 3 is clear there is no UDMA133 - need info for ... */
+ t1 &= ~0x00000FF0;
+ t1 |= 0x00000004;
+ if (t1 & 0x08)
+ t1 |= timing_u133[speed];
+ else
+ t1 |= timing_u100[speed];
+ }
+ pci_write_config_dword(pdev, port, t1);
+}
+
+static struct scsi_host_template sis_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sis_133_for_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .set_piomode = sis_133_set_piomode,
+ .set_dmamode = sis_133_set_dmamode,
+ .cable_detect = sis_133_cable_detect,
+};
+
+static struct ata_port_operations sis_base_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .prereset = sis_pre_reset,
+};
+
+static struct ata_port_operations sis_133_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_133_set_piomode,
+ .set_dmamode = sis_133_set_dmamode,
+ .cable_detect = sis_133_cable_detect,
+};
+
+static struct ata_port_operations sis_133_early_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_100_set_piomode,
+ .set_dmamode = sis_133_early_set_dmamode,
+ .cable_detect = sis_66_cable_detect,
+};
+
+static struct ata_port_operations sis_100_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_100_set_piomode,
+ .set_dmamode = sis_100_set_dmamode,
+ .cable_detect = sis_66_cable_detect,
+};
+
+static struct ata_port_operations sis_66_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_old_set_piomode,
+ .set_dmamode = sis_66_set_dmamode,
+ .cable_detect = sis_66_cable_detect,
+};
+
+static struct ata_port_operations sis_old_ops = {
+ .inherits = &sis_base_ops,
+ .set_piomode = sis_old_set_piomode,
+ .set_dmamode = sis_old_set_dmamode,
+ .cable_detect = ata_cable_40wire,
+};
+
+static const struct ata_port_info sis_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07,
+ .udma_mask = 0,
+ .port_ops = &sis_old_ops,
+};
+static const struct ata_port_info sis_info33 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA2, /* UDMA 33 */
+ .port_ops = &sis_old_ops,
+};
+static const struct ata_port_info sis_info66 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA4, /* UDMA 66 */
+ .port_ops = &sis_66_ops,
+};
+static const struct ata_port_info sis_info100 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &sis_100_ops,
+};
+static const struct ata_port_info sis_info100_early = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .udma_mask = ATA_UDMA5,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .port_ops = &sis_66_ops,
+};
+static const struct ata_port_info sis_info133 = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sis_133_ops,
+};
+const struct ata_port_info sis_info133_for_sata = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sis_133_for_sata_ops,
+};
+static const struct ata_port_info sis_info133_early = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sis_133_early_ops,
+};
+
+/* Privately shared with the SiS180 SATA driver, not for use elsewhere */
+EXPORT_SYMBOL_GPL(sis_info133_for_sata);
+
+static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
+{
+ u16 regw;
+ u8 reg;
+
+ if (sis->info == &sis_info133) {
+ pci_read_config_word(pdev, 0x50, &regw);
+ if (regw & 0x08)
+ pci_write_config_word(pdev, 0x50, regw & ~0x08);
+ pci_read_config_word(pdev, 0x52, &regw);
+ if (regw & 0x08)
+ pci_write_config_word(pdev, 0x52, regw & ~0x08);
+ return;
+ }
+
+ if (sis->info == &sis_info133_early || sis->info == &sis_info100) {
+ /* Fix up latency */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
+ /* Set compatibility bit */
+ pci_read_config_byte(pdev, 0x49, &reg);
+ if (!(reg & 0x01))
+ pci_write_config_byte(pdev, 0x49, reg | 0x01);
+ return;
+ }
+
+ if (sis->info == &sis_info66 || sis->info == &sis_info100_early) {
+ /* Fix up latency */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
+ /* Set compatibility bit */
+ pci_read_config_byte(pdev, 0x52, &reg);
+ if (!(reg & 0x04))
+ pci_write_config_byte(pdev, 0x52, reg | 0x04);
+ return;
+ }
+
+ if (sis->info == &sis_info33) {
+ pci_read_config_byte(pdev, PCI_CLASS_PROG, &reg);
+ if (( reg & 0x0F ) != 0x00)
+ pci_write_config_byte(pdev, PCI_CLASS_PROG, reg & 0xF0);
+ /* Fall through to ATA16 fixup below */
+ }
+
+ if (sis->info == &sis_info || sis->info == &sis_info33) {
+ /* force per drive recovery and active timings
+ needed on ATA_33 and below chips */
+ pci_read_config_byte(pdev, 0x52, &reg);
+ if (!(reg & 0x08))
+ pci_write_config_byte(pdev, 0x52, reg|0x08);
+ return;
+ }
+
+ BUG();
+}
+
+/**
+ * sis_init_one - Register SiS ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in sis_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer. We probe for combined mode (sigh),
+ * and then hand over control to libata, for it to do the rest.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ const struct ata_port_info *ppi[] = { NULL, NULL };
+ struct pci_dev *host = NULL;
+ struct sis_chipset *chipset = NULL;
+ struct sis_chipset *sets;
+ int rc;
+
+ static struct sis_chipset sis_chipsets[] = {
+
+ { 0x0968, &sis_info133 },
+ { 0x0966, &sis_info133 },
+ { 0x0965, &sis_info133 },
+ { 0x0745, &sis_info100 },
+ { 0x0735, &sis_info100 },
+ { 0x0733, &sis_info100 },
+ { 0x0635, &sis_info100 },
+ { 0x0633, &sis_info100 },
+
+ { 0x0730, &sis_info100_early }, /* 100 with ATA 66 layout */
+ { 0x0550, &sis_info100_early }, /* 100 with ATA 66 layout */
+
+ { 0x0640, &sis_info66 },
+ { 0x0630, &sis_info66 },
+ { 0x0620, &sis_info66 },
+ { 0x0540, &sis_info66 },
+ { 0x0530, &sis_info66 },
+
+ { 0x5600, &sis_info33 },
+ { 0x5598, &sis_info33 },
+ { 0x5597, &sis_info33 },
+ { 0x5591, &sis_info33 },
+ { 0x5582, &sis_info33 },
+ { 0x5581, &sis_info33 },
+
+ { 0x5596, &sis_info },
+ { 0x5571, &sis_info },
+ { 0x5517, &sis_info },
+ { 0x5511, &sis_info },
+
+ {0}
+ };
+ static struct sis_chipset sis133_early = {
+ 0x0, &sis_info133_early
+ };
+ static struct sis_chipset sis133 = {
+ 0x0, &sis_info133
+ };
+ static struct sis_chipset sis100_early = {
+ 0x0, &sis_info100_early
+ };
+ static struct sis_chipset sis100 = {
+ 0x0, &sis_info100
+ };
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* We have to find the bridge first */
+ for (sets = &sis_chipsets[0]; sets->device; sets++) {
+ host = pci_get_device(PCI_VENDOR_ID_SI, sets->device, NULL);
+ if (host != NULL) {
+ chipset = sets; /* Match found */
+ if (sets->device == 0x630) { /* SIS630 */
+ if (host->revision >= 0x30) /* 630 ET */
+ chipset = &sis100_early;
+ }
+ break;
+ }
+ }
+
+ /* Look for concealed bridges */
+ if (chipset == NULL) {
+ /* Second check */
+ u32 idemisc;
+ u16 trueid;
+
+ /* Disable ID masking and register remapping then
+ see what the real ID is */
+
+ pci_read_config_dword(pdev, 0x54, &idemisc);
+ pci_write_config_dword(pdev, 0x54, idemisc & 0x7fffffff);
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
+ pci_write_config_dword(pdev, 0x54, idemisc);
+
+ switch(trueid) {
+ case 0x5518: /* SIS 962/963 */
+ chipset = &sis133;
+ if ((idemisc & 0x40000000) == 0) {
+ pci_write_config_dword(pdev, 0x54, idemisc | 0x40000000);
+ printk(KERN_INFO "SIS5513: Switching to 5513 register mapping\n");
+ }
+ break;
+ case 0x0180: /* SIS 965/965L */
+ chipset = &sis133;
+ break;
+ case 0x1180: /* SIS 966/966L */
+ chipset = &sis133;
+ break;
+ }
+ }
+
+ /* Further check */
+ if (chipset == NULL) {
+ struct pci_dev *lpc_bridge;
+ u16 trueid;
+ u8 prefctl;
+ u8 idecfg;
+
+ /* Try the second unmasking technique */
+ pci_read_config_byte(pdev, 0x4a, &idecfg);
+ pci_write_config_byte(pdev, 0x4a, idecfg | 0x10);
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &trueid);
+ pci_write_config_byte(pdev, 0x4a, idecfg);
+
+ switch(trueid) {
+ case 0x5517:
+ lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */
+ if (lpc_bridge == NULL)
+ break;
+ pci_read_config_byte(pdev, 0x49, &prefctl);
+
+ /* Use the bridge revision before dropping our reference to it */
+ if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) {
+ pci_dev_put(lpc_bridge);
+ chipset = &sis133_early;
+ break;
+ }
+ pci_dev_put(lpc_bridge);
+ chipset = &sis100;
+ break;
+ }
+ }
+ pci_dev_put(host);
+
+ /* No chipset info, no support */
+ if (chipset == NULL)
+ return -ENODEV;
+
+ ppi[0] = chipset->info;
+
+ sis_fixup(pdev, chipset);
+
+ return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset);
+}
+
+static const struct pci_device_id sis_pci_tbl[] = {
+ { PCI_VDEVICE(SI, 0x5513), }, /* SiS 5513 */
+ { PCI_VDEVICE(SI, 0x5518), }, /* SiS 5518 */
+ { PCI_VDEVICE(SI, 0x1180), }, /* SiS 1180 */
+
+ { }
+};
+
+static struct pci_driver sis_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sis_pci_tbl,
+ .probe = sis_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init sis_init(void)
+{
+ return pci_register_driver(&sis_pci_driver);
+}
+
+static void __exit sis_exit(void)
+{
+ pci_unregister_driver(&sis_pci_driver);
+}
+
+module_init(sis_init);
+module_exit(sis_exit);
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("SCSI low-level driver for SiS ATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
new file mode 100644
index 0000000..1b0e7b6
--- /dev/null
+++ b/drivers/ata/pata_sl82c105.c
@@ -0,0 +1,352 @@
+/*
+ * pata_sl82c105.c - SL82C105 PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ *
+ * Based in part on linux/drivers/ide/pci/sl82c105.c
+ * SL82C105/Winbond 553 IDE driver
+ *
+ * and in part on the documentation and errata sheet
+ *
+ *
+ * Note: Like many controllers, this one has shared timings for
+ * PIO and DMA. We thus flip to the DMA timings in dma_start and flip
+ * back in the dma_stop function. We therefore don't need a set_dmamode
+ * method as the PIO method is always called and will set the right PIO
+ * timing parameters.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_sl82c105"
+#define DRV_VERSION "0.3.3"
+
+enum {
+ /*
+ * SL82C105 PCI config register 0x40 bits.
+ */
+ CTRL_IDE_IRQB = (1 << 30),
+ CTRL_IDE_IRQA = (1 << 28),
+ CTRL_LEGIRQ = (1 << 11),
+ CTRL_P1F16 = (1 << 5),
+ CTRL_P1EN = (1 << 4),
+ CTRL_P0F16 = (1 << 1),
+ CTRL_P0EN = (1 << 0)
+};
+
+/**
+ * sl82c105_pre_reset - probe begin
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Set up cable type and use generic probe init
+ */
+
+static int sl82c105_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits sl82c105_enable_bits[] = {
+ { 0x40, 1, 0x01, 0x01 },
+ { 0x40, 1, 0x10, 0x10 }
+ };
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no]))
+ return -ENOENT;
+ return ata_sff_prereset(link, deadline);
+}
+
+
+/**
+ * sl82c105_configure_piomode - set chip PIO timing
+ * @ap: ATA interface
+ * @adev: ATA device
+ * @pio: PIO mode
+ *
+ * Called to do the PIO mode setup. Our timing registers are shared
+ * so a configure_dmamode call will undo any work we do here and vice
+ * versa
+ */
+
+static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static u16 pio_timing[5] = {
+ 0x50D, 0x407, 0x304, 0x242, 0x240
+ };
+ u16 dummy;
+ int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
+
+ pci_write_config_word(pdev, timing, pio_timing[pio]);
+ /* Can we lose this oddity of the old driver? */
+ pci_read_config_word(pdev, timing, &dummy);
+}
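+
+/*
+ * The timing words are thus at 0x44 (primary master), 0x48 (primary
+ * slave), 0x4C (secondary master) and 0x50 (secondary slave); PIO4,
+ * for example, loads 0x240 into the selected word.
+ */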
+
+/**
+ * sl82c105_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Called to do the PIO mode setup. Our timing registers are shared
+ * but we want to set the PIO timing by default.
+ */
+
+static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
+ * sl82c105_configure_dmamode - set DMA mode in chip
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Load DMA cycle times into the chip ready for a DMA transfer
+ * to occur.
+ */
+
+static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static u16 dma_timing[3] = {
+ 0x707, 0x201, 0x200
+ };
+ u16 dummy;
+ int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
+ int dma = adev->dma_mode - XFER_MW_DMA_0;
+
+ pci_write_config_word(pdev, timing, dma_timing[dma]);
+ /* Can we lose this oddity of the old driver? */
+ pci_read_config_word(pdev, timing, &dummy);
+}
+
+/**
+ * sl82c105_reset_engine - Reset the DMA engine
+ * @ap: ATA interface
+ *
+ * The sl82c105 has some serious problems with the DMA engine
+ * when transfers don't run as expected or ATAPI is used. The
+ * recommended fix is to reset the engine each use using a chip
+ * test register.
+ */
+
+static void sl82c105_reset_engine(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u16 val;
+
+ pci_read_config_word(pdev, 0x7E, &val);
+ pci_write_config_word(pdev, 0x7E, val | 4);
+ pci_write_config_word(pdev, 0x7E, val & ~4);
+}
+
+/**
+ * sl82c105_bmdma_start - DMA engine begin
+ * @qc: ATA command
+ *
+ * Reset the DMA engine each use as recommended by the errata
+ * document.
+ *
+ * FIXME: if we switch clock at BMDMA start/end we might get better
+ * PIO performance on DMA capable devices.
+ */
+
+static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ udelay(100);
+ sl82c105_reset_engine(ap);
+ udelay(100);
+
+ /* Set the clocks for DMA */
+ sl82c105_configure_dmamode(ap, qc->dev);
+ /* Activate DMA */
+ ata_bmdma_start(qc);
+}
+
+/**
+ * sl82c105_bmdma_end - DMA engine stop
+ * @qc: ATA command
+ *
+ * Reset the DMA engine each use as recommended by the errata
+ * document.
+ *
+ * This function is also called to turn off DMA when a timeout occurs
+ * during DMA operation. In both cases we need to reset the engine,
+ * so no actual eng_timeout handler is required.
+ *
+ * We assume bmdma_stop is always called if bmdma_start as called. If
+ * not then we may need to wrap qc_issue.
+ */
+
+static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ ata_bmdma_stop(qc);
+ sl82c105_reset_engine(ap);
+ udelay(100);
+
+ /* This will redo the initial setup of the DMA device to matching
+ PIO timings */
+ sl82c105_set_piomode(ap, qc->dev);
+}
+
+/**
+ * sl82c105_qc_defer - implement serialization
+ * @qc: command
+ *
+ * We must issue one command per host not per channel because
+ * of the reset bug.
+ *
+ * Q: is the SCSI host lock sufficient?
+ */
+
+static int sl82c105_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_host *host = qc->ap->host;
+ struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
+ int rc;
+
+ /* First apply the usual rules */
+ rc = ata_std_qc_defer(qc);
+ if (rc != 0)
+ return rc;
+
+ /* Now apply serialization rules. Only allow a command if the
+ other channel state machine is idle */
+ if (alt && alt->qc_active)
+ return ATA_DEFER_PORT;
+ return 0;
+}
+
+static struct scsi_host_template sl82c105_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sl82c105_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .qc_defer = sl82c105_qc_defer,
+ .bmdma_start = sl82c105_bmdma_start,
+ .bmdma_stop = sl82c105_bmdma_stop,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = sl82c105_set_piomode,
+ .prereset = sl82c105_pre_reset,
+};
+
+/**
+ * sl82c105_bridge_revision - find bridge version
+ * @pdev: PCI device for the ATA function
+ *
+ * Locates the PCI bridge associated with the ATA function and,
+ * provided it is a Winbond 553, reports the revision. If it cannot
+ * find the right device or a revision it returns -1.
+ */
+
+static int sl82c105_bridge_revision(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge;
+ int rev;
+
+ /*
+ * The bridge should be part of the same device, but function 0.
+ */
+ bridge = pci_get_slot(pdev->bus,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+ if (!bridge)
+ return -1;
+
+ /*
+ * Make sure it is a Winbond 553 and is an ISA bridge.
+ */
+ if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
+ bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
+ bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
+ pci_dev_put(bridge);
+ return -1;
+ }
+ /*
+ * We need function 0's revision, not function 1's; save it
+ * before dropping our reference to the bridge.
+ */
+ rev = bridge->revision;
+ pci_dev_put(bridge);
+ return rev;
+}
+
+
+static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info_dma = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &sl82c105_port_ops
+ };
+ static const struct ata_port_info info_early = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .port_ops = &sl82c105_port_ops
+ };
+ /* for now use only the first port */
+ const struct ata_port_info *ppi[] = { &info_early,
+ NULL };
+ u32 val;
+ int rev;
+ int rc;
+
+ rc = pcim_enable_device(dev);
+ if (rc)
+ return rc;
+
+ rev = sl82c105_bridge_revision(dev);
+
+ if (rev == -1)
+ dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA.\n");
+ else if (rev <= 5)
+ dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Early bridge revision, no DMA available.\n");
+ else
+ ppi[0] = &info_dma;
+
+ pci_read_config_dword(dev, 0x40, &val);
+ val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
+ pci_write_config_dword(dev, 0x40, val);
+
+ return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL);
+}
+
+static const struct pci_device_id sl82c105[] = {
+ { PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
+
+ { },
+};
+
+static struct pci_driver sl82c105_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sl82c105,
+ .probe = sl82c105_init_one,
+ .remove = ata_pci_remove_one
+};
+
+static int __init sl82c105_init(void)
+{
+ return pci_register_driver(&sl82c105_pci_driver);
+}
+
+static void __exit sl82c105_exit(void)
+{
+ pci_unregister_driver(&sl82c105_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Sl82c105");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sl82c105);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sl82c105_init);
+module_exit(sl82c105_exit);
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
new file mode 100644
index 0000000..ef95975
--- /dev/null
+++ b/drivers/ata/pata_triflex.c
@@ -0,0 +1,241 @@
+/*
+ * pata_triflex.c - Compaq PATA for new ATA layer
+ * (C) 2005 Red Hat Inc
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * based upon
+ *
+ * triflex.c
+ *
+ * IDE Chipset driver for the Compaq TriFlex IDE controller.
+ *
+ * Known to work with the Compaq Workstation 5x00 series.
+ *
+ * Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
+ * Author: Torben Mathiasen <torben.mathiasen@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Loosely based on the piix & svwks drivers.
+ *
+ * Documentation:
+ * Not publicly available.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pata_triflex"
+#define DRV_VERSION "0.2.8"
+
+/**
+ * triflex_prereset - probe begin
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Set up cable type and use generic probe init
+ */
+
+static int triflex_prereset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits triflex_enable_bits[] = {
+ { 0x80, 1, 0x01, 0x01 },
+ { 0x80, 1, 0x02, 0x02 }
+ };
+
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+
+
+/**
+ * triflex_load_timing - timing configuration
+ * @ap: ATA interface
+ * @adev: Device on the bus
+ * @speed: speed to configure
+ *
+ * The Triflex has one set of timings per device per channel. This
+ * means we must do some switching. As the PIO and DMA timings don't
+ * match we have to do some reloading unlike PIIX devices where tuning
+ * tricks can avoid it.
+ */
+
+static void triflex_load_timing(struct ata_port *ap, struct ata_device *adev, int speed)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 timing = 0;
+ u32 triflex_timing, old_triflex_timing;
+ int channel_offset = ap->port_no ? 0x74: 0x70;
+ unsigned int is_slave = (adev->devno != 0);
+
+
+ pci_read_config_dword(pdev, channel_offset, &old_triflex_timing);
+ triflex_timing = old_triflex_timing;
+
+ switch (speed) {
+ case XFER_MW_DMA_2:
+ timing = 0x0103;
+ break;
+ case XFER_MW_DMA_1:
+ timing = 0x0203;
+ break;
+ case XFER_MW_DMA_0:
+ timing = 0x0808;
+ break;
+ case XFER_SW_DMA_2:
+ case XFER_SW_DMA_1:
+ case XFER_SW_DMA_0:
+ timing = 0x0F0F;
+ break;
+ case XFER_PIO_4:
+ timing = 0x0202;
+ break;
+ case XFER_PIO_3:
+ timing = 0x0204;
+ break;
+ case XFER_PIO_2:
+ timing = 0x0404;
+ break;
+ case XFER_PIO_1:
+ timing = 0x0508;
+ break;
+ case XFER_PIO_0:
+ timing = 0x0808;
+ break;
+ default:
+ BUG();
+ }
+ triflex_timing &= ~(0xFFFF << (16 * is_slave));
+ triflex_timing |= (timing << (16 * is_slave));
+
+ if (triflex_timing != old_triflex_timing)
+ pci_write_config_dword(pdev, channel_offset, triflex_timing);
+}
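+
+/*
+ * Example: tuning the secondary slave to PIO4 rewrites bits 16-31 of
+ * the dword at offset 0x74 with 0x0202 while the master's timing in
+ * bits 0-15 is left alone.
+ */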
+
+/**
+ * triflex_set_piomode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ *
+ * Use the timing loader to set up the PIO mode. We have to do this
+ * because DMA start/stop will only be called once DMA occurs. If there
+ * has been no DMA then the PIO timings are still needed.
+ */
+static void triflex_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ triflex_load_timing(ap, adev, adev->pio_mode);
+}
+
+/**
+ * triflex_bmdma_start - DMA start callback
+ * @qc: Command in progress
+ *
+ * Usually drivers set the DMA timing at the point the set_dmamode call
+ * is made. Triflex however requires we load new timings on the
+ * transition or keep matching PIO/DMA pairs (i.e. MWDMA2/PIO4 etc).
+ * We load the DMA timings just before starting DMA and then restore
+ * the PIO timing when the DMA is finished.
+ */
+
+static void triflex_bmdma_start(struct ata_queued_cmd *qc)
+{
+ triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode);
+ ata_bmdma_start(qc);
+}
+
+/**
+ * triflex_bmdma_stop - DMA stop callback
+ * @qc: Command in progress
+ *
+ * We loaded new timings in dma_start, as a result we need to restore
+ * the PIO timings in dma_stop so that the next command issue gets the
+ * right clock values.
+ */
+
+static void triflex_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ ata_bmdma_stop(qc);
+ triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode);
+}
+
+static struct scsi_host_template triflex_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations triflex_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .bmdma_start = triflex_bmdma_start,
+ .bmdma_stop = triflex_bmdma_stop,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = triflex_set_piomode,
+ .prereset = triflex_prereset,
+};
+
+static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ static const struct ata_port_info info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &triflex_port_ops
+ };
+ const struct ata_port_info *ppi[] = { &info, NULL };
+ static int printed_version;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+
+ return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL);
+}
+
+static const struct pci_device_id triflex[] = {
+ { PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), },
+
+ { },
+};
+
+static struct pci_driver triflex_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = triflex,
+ .probe = triflex_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static int __init triflex_init(void)
+{
+ return pci_register_driver(&triflex_pci_driver);
+}
+
+static void __exit triflex_exit(void)
+{
+ pci_unregister_driver(&triflex_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Compaq Triflex");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, triflex);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(triflex_init);
+module_exit(triflex_exit);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
new file mode 100644
index 0000000..ba556d3
--- /dev/null
+++ b/drivers/ata/pata_via.c
@@ -0,0 +1,636 @@
+/*
+ * pata_via.c - VIA PATA for new ATA layer
+ * (C) 2005-2006 Red Hat Inc
+ *
+ * Documentation
+ * Most chipset documentation available under NDA only
+ *
+ * VIA version guide
+ * VIA VT82C561 - early design, uses ata_generic currently
+ * VIA VT82C576 - MWDMA, 33MHz
+ * VIA VT82C586 - MWDMA, 33MHz
+ * VIA VT82C586a - Added UDMA to 33MHz
+ * VIA VT82C586b - UDMA33
+ * VIA VT82C596a - Nonfunctional UDMA66
+ * VIA VT82C596b - Working UDMA66
+ * VIA VT82C686 - Nonfunctional UDMA66
+ * VIA VT82C686a - Working UDMA66
+ * VIA VT82C686b - Updated to UDMA100
+ * VIA VT8231 - UDMA100
+ * VIA VT8233 - UDMA100
+ * VIA VT8233a - UDMA133
+ * VIA VT8233c - UDMA100
+ * VIA VT8235 - UDMA133
+ * VIA VT8237 - UDMA133
+ * VIA VT8237S - UDMA133
+ * VIA VT8251 - UDMA133
+ *
+ * Most registers remain compatible across chips. Others start reserved
+ * and acquire sensible semantics if set to 1 (eg cable detect). A few
+ * exceptions exist, notably around the FIFO settings.
+ *
+ * One additional quirk of the VIA design is that like ALi they use few
+ * PCI IDs for a lot of chips.
+ *
+ * Based heavily on:
+ *
+ * Version 3.38
+ *
+ * VIA IDE driver for Linux. Supported southbridges:
+ *
+ * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
+ * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
+ * vt8235, vt8237
+ *
+ * Copyright (c) 2000-2002 Vojtech Pavlik
+ *
+ * Based on the work of:
+ * Michel Aubry
+ * Jeff Garzik
+ * Andre Hedrick
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_via"
+#define DRV_VERSION "0.3.3"
+
+/*
+ * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
+ * driver.
+ */
+
+enum {
+ VIA_UDMA = 0x007,
+ VIA_UDMA_NONE = 0x000,
+ VIA_UDMA_33 = 0x001,
+ VIA_UDMA_66 = 0x002,
+ VIA_UDMA_100 = 0x003,
+ VIA_UDMA_133 = 0x004,
+ VIA_BAD_PREQ = 0x010, /* Crashes if PREQ# till DDACK# set */
+ VIA_BAD_CLK66 = 0x020, /* 66 MHz clock doesn't work correctly */
+ VIA_SET_FIFO = 0x040, /* Needs to have FIFO split set */
+ VIA_NO_UNMASK = 0x080, /* Doesn't work with IRQ unmasking on */
+ VIA_BAD_ID = 0x100, /* Has wrong vendor ID (0x1107) */
+ VIA_BAD_AST = 0x200, /* Don't touch Address Setup Timing */
+ VIA_NO_ENABLES = 0x400, /* Has no enablebits */
+ VIA_SATA_PATA = 0x800, /* SATA/PATA combined configuration */
+};
+
+enum {
+ VIA_IDFLAG_SINGLE = (1 << 0), /* single channel controller */
+};
+
+/*
+ * VIA SouthBridge chips.
+ */
+
+static const struct via_isa_bridge {
+ const char *name;
+ u16 id;
+ u8 rev_min;
+ u8 rev_max;
+ u16 flags;
+} via_isa_bridges[] = {
+ { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f,
+ VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
+ { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, VIA_UDMA_133 |
+ VIA_BAD_AST | VIA_SATA_PATA },
+ { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f,
+ VIA_UDMA_133 | VIA_BAD_AST },
+ { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+ { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+ { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
+ { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
+ { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
+ { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+ { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+ { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+ { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+ { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 },
+ { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, VIA_UDMA_100 },
+ { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, VIA_UDMA_100 },
+ { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, VIA_UDMA_100 },
+ { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, VIA_UDMA_66 },
+ { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
+ { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, VIA_UDMA_66 },
+ { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
+ { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, VIA_UDMA_33 | VIA_SET_FIFO },
+ { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, VIA_UDMA_33 | VIA_SET_FIFO | VIA_BAD_PREQ },
+ { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, VIA_UDMA_33 | VIA_SET_FIFO },
+ { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, VIA_UDMA_33 | VIA_SET_FIFO },
+ { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO },
+ { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK },
+ { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
+ { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f,
+ VIA_UDMA_133 | VIA_BAD_AST },
+ { NULL }
+};
+
+
+/*
+ * Cable special cases
+ */
+
+static const struct dmi_system_id cable_dmi_table[] = {
+ {
+ .ident = "Acer Ferrari 3400",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"),
+ },
+ },
+ { }
+};
+
+static int via_cable_override(struct pci_dev *pdev)
+{
+ /* Systems by DMI */
+ if (dmi_check_system(cable_dmi_table))
+ return 1;
+ /* Arima W730-K8/Targa Visionary 811/... */
+ if (pdev->subsystem_vendor == 0x161F && pdev->subsystem_device == 0x2032)
+ return 1;
+ return 0;
+}
+
+
+/**
+ * via_cable_detect - cable detection
+ * @ap: ATA port
+ *
+ * Perform cable detection. Actually for the VIA case the BIOS
+ * already did this for us. We read the values provided by the
+ * BIOS. If you are using an 8235 in a non-PC configuration you
+ * may need to update this code.
+ *
+ * Hotplug also impacts this.
+ */
+
+static int via_cable_detect(struct ata_port *ap)
+{
+ const struct via_isa_bridge *config = ap->host->private_data;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 ata66;
+
+ if (via_cable_override(pdev))
+ return ATA_CBL_PATA40_SHORT;
+
+ if ((config->flags & VIA_SATA_PATA) && ap->port_no == 0)
+ return ATA_CBL_SATA;
+
+ /* Early chips are 40 wire */
+ if ((config->flags & VIA_UDMA) < VIA_UDMA_66)
+ return ATA_CBL_PATA40;
+ /* UDMA 66 chips have only drive side logic */
+ else if ((config->flags & VIA_UDMA) < VIA_UDMA_100)
+ return ATA_CBL_PATA_UNK;
+ /* UDMA 100 or later */
+ pci_read_config_dword(pdev, 0x50, &ata66);
+ /* Check both the drive cable reporting bits, we might not have
+ two drives */
+ if (ata66 & (0x10100000 >> (16 * ap->port_no)))
+ return ATA_CBL_PATA80;
+ /* Check with ACPI so we can spot BIOS reported SATA bridges */
+ if (ata_acpi_init_gtm(ap) &&
+ ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
+ return ATA_CBL_PATA80;
+ return ATA_CBL_PATA40;
+}
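+
+/*
+ * The mask 0x10100000 >> (16 * port_no) tests bits 28 and 20 of
+ * register 0x50 for the primary channel and bits 12 and 4 for the
+ * secondary: one 80-wire detect bit for each possible drive.
+ */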
+
+static int via_pre_reset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ const struct via_isa_bridge *config = ap->host->private_data;
+
+ if (!(config->flags & VIA_NO_ENABLES)) {
+ static const struct pci_bits via_enable_bits[] = {
+ { 0x40, 1, 0x02, 0x02 },
+ { 0x40, 1, 0x01, 0x01 }
+ };
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no]))
+ return -ENOENT;
+ }
+
+ return ata_sff_prereset(link, deadline);
+}
+
+
+/**
+ * via_do_set_mode - set initial PIO mode data
+ * @ap: ATA interface
+ * @adev: ATA device
+ * @mode: ATA mode being programmed
+ * @tdiv: Clocks per PCI clock
+ * @set_ast: Set to program address setup
+ * @udma_type: UDMA mode/format of registers
+ *
+ * Program the VIA registers for DMA and PIO modes. Uses the ata timing
+ * support in order to compute modes.
+ *
+ * FIXME: Hotplug will require we serialize multiple mode changes
+ * on the two channels.
+ */
+
+static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mode, int tdiv, int set_ast, int udma_type)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct ata_device *peer = ata_dev_pair(adev);
+ struct ata_timing t, p;
+ static int via_clock = 33333; /* Bus clock in kHz - ought to be tunable one day */
+ unsigned long T = 1000000000 / via_clock;
+ unsigned long UT = T/tdiv;
+ int ut;
+ int offset = 3 - (2*ap->port_no) - adev->devno;
+
+ /* Calculate the timing values we require */
+ ata_timing_compute(adev, mode, &t, T, UT);
+
+ /* We share 8bit timing so we must merge the constraints */
+ if (peer) {
+ if (peer->pio_mode) {
+ ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
+ ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
+ }
+ }
+
+ /* Address setup is programmable but breaks on UDMA133 setups */
+ if (set_ast) {
+ u8 setup; /* 2 bits per drive */
+ int shift = 2 * offset;
+
+ pci_read_config_byte(pdev, 0x4C, &setup);
+ setup &= ~(3 << shift);
+ setup |= clamp_val(t.setup, 1, 4) << shift; /* 1,4 or 1,4 - 1 FIXME */
+ pci_write_config_byte(pdev, 0x4C, setup);
+ }
+
+ /* Load the PIO mode bits */
+ pci_write_config_byte(pdev, 0x4F - ap->port_no,
+ ((clamp_val(t.act8b, 1, 16) - 1) << 4) | (clamp_val(t.rec8b, 1, 16) - 1));
+ pci_write_config_byte(pdev, 0x48 + offset,
+ ((clamp_val(t.active, 1, 16) - 1) << 4) | (clamp_val(t.recover, 1, 16) - 1));
+
+ /* Load the UDMA bits according to type */
+ switch(udma_type) {
+ default:
+ /* BUG() ? */
+ /* fall through */
+ case 33:
+ ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 5) - 2)) : 0x03;
+ break;
+ case 66:
+ ut = t.udma ? (0xe8 | (clamp_val(t.udma, 2, 9) - 2)) : 0x0f;
+ break;
+ case 100:
+ ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
+ break;
+ case 133:
+ ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
+ break;
+ }
+
+ /* Set UDMA unless device is not UDMA capable */
+ if (udma_type && t.udma) {
+ u8 cable80_status;
+
+ /* Get 80-wire cable detection bit */
+ pci_read_config_byte(pdev, 0x50 + offset, &cable80_status);
+ cable80_status &= 0x10;
+
+ pci_write_config_byte(pdev, 0x50 + offset, ut | cable80_status);
+ }
+}
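+
+/*
+ * With offset = 3 - 2 * port_no - devno the per drive timing bytes
+ * land at 0x4B/0x4A (primary master/slave) and 0x49/0x48 (secondary
+ * master/slave), with the matching UDMA bytes at 0x53 down to 0x50.
+ */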
+
+static void via_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ const struct via_isa_bridge *config = ap->host->private_data;
+ int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
+ int mode = config->flags & VIA_UDMA;
+ static u8 tclock[5] = { 1, 1, 2, 3, 4 };
+ static u8 udma[5] = { 0, 33, 66, 100, 133 };
+
+ via_do_set_mode(ap, adev, adev->pio_mode, tclock[mode], set_ast, udma[mode]);
+}
+
+static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ const struct via_isa_bridge *config = ap->host->private_data;
+ int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
+ int mode = config->flags & VIA_UDMA;
+ static u8 tclock[5] = { 1, 1, 2, 3, 4 };
+ static u8 udma[5] = { 0, 33, 66, 100, 133 };
+
+ via_do_set_mode(ap, adev, adev->dma_mode, tclock[mode], set_ast, udma[mode]);
+}
+
+/**
+ * via_tf_load - send taskfile registers to host controller
+ * @ap: Port to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Outputs ATA taskfile to standard ATA host controller.
+ *
+ * Note: This is to fix the internal bug of via chipsets, which
+ * will reset the device register after changing the IEN bit on
+ * ctl register
+ */
+static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ struct ata_taskfile tmp_tf;
+
+ if (ap->ctl != ap->last_ctl && !(tf->flags & ATA_TFLAG_DEVICE)) {
+ tmp_tf = *tf;
+ tmp_tf.flags |= ATA_TFLAG_DEVICE;
+ tf = &tmp_tf;
+ }
+ ata_sff_tf_load(ap, tf);
+}
+
+static struct scsi_host_template via_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations via_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = via_cable_detect,
+ .set_piomode = via_set_piomode,
+ .set_dmamode = via_set_dmamode,
+ .prereset = via_pre_reset,
+ .sff_tf_load = via_tf_load,
+};
+
+static struct ata_port_operations via_port_ops_noirq = {
+ .inherits = &via_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+};
+
+/**
+ * via_config_fifo - set up the FIFO
+ * @pdev: PCI device
+ * @flags: configuration flags
+ *
+ * Set the FIFO properties for this device if necessary. Used both at
+ * set up time and on the resume path.
+ */
+
+static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
+{
+ u8 enable;
+
+ /* 0x40 low bits indicate enabled channels */
+ pci_read_config_byte(pdev, 0x40 , &enable);
+ enable &= 3;
+
+ if (flags & VIA_SET_FIFO) {
+ static const u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
+ u8 fifo;
+
+ pci_read_config_byte(pdev, 0x43, &fifo);
+
+ /* Clear PREQ# until DDACK# for errata */
+ if (flags & VIA_BAD_PREQ)
+ fifo &= 0x7F;
+ else
+ fifo &= 0x9f;
+ /* Turn on FIFO for enabled channels */
+ fifo |= fifo_setting[enable];
+ pci_write_config_byte(pdev, 0x43, fifo);
+ }
+}
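+
+/*
+ * On chips without the PREQ errata fifo &= 0x9F clears bits 5-6, the
+ * FIFO split field, before the entry for the enabled channel set is
+ * ORed back in: fifo_setting[1] = 0x60, fifo_setting[3] = 0x20.
+ */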
+
+/**
+ * via_init_one - discovery callback
+ * @pdev: PCI device
+ * @id: PCI table info
+ *
+ * A VIA IDE interface has been discovered. Figure out what revision
+ * and perform configuration work before handing it to the ATA layer
+ */
+
+static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ /* Early VIA without UDMA support */
+ static const struct ata_port_info via_mwdma_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &via_port_ops
+ };
+ /* Ditto with IRQ masking required */
+ static const struct ata_port_info via_mwdma_info_borked = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .port_ops = &via_port_ops_noirq,
+ };
+ /* VIA UDMA 33 devices (and borked 66) */
+ static const struct ata_port_info via_udma33_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA2,
+ .port_ops = &via_port_ops
+ };
+ /* VIA UDMA 66 devices */
+ static const struct ata_port_info via_udma66_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &via_port_ops
+ };
+ /* VIA UDMA 100 devices */
+ static const struct ata_port_info via_udma100_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &via_port_ops
+ };
+ /* UDMA133 with bad AST (All current 133) */
+ static const struct ata_port_info via_udma133_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6, /* FIXME: should check north bridge */
+ .port_ops = &via_port_ops
+ };
+ const struct ata_port_info *ppi[] = { NULL, NULL };
+ struct pci_dev *isa = NULL;
+ const struct via_isa_bridge *config;
+ static int printed_version;
+ u8 enable;
+ u32 timing;
+ unsigned long flags = id->driver_data;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ if (flags & VIA_IDFLAG_SINGLE)
+ ppi[1] = &ata_dummy_port_info;
+
+ /* To find out how the IDE will behave and what features we
+ actually have to look at the bridge not the IDE controller */
+ for (config = via_isa_bridges; config->id != PCI_DEVICE_ID_VIA_ANON;
+ config++)
+ if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
+ !!(config->flags & VIA_BAD_ID),
+ config->id, NULL))) {
+
+ if (isa->revision >= config->rev_min &&
+ isa->revision <= config->rev_max)
+ break;
+ pci_dev_put(isa);
+ }
+
+ pci_dev_put(isa);
+
+ if (!(config->flags & VIA_NO_ENABLES)) {
+ /* 0x40 low bits indicate enabled channels */
+ pci_read_config_byte(pdev, 0x40 , &enable);
+ enable &= 3;
+ if (enable == 0)
+ return -ENODEV;
+ }
+
+ /* Initialise the FIFO for the enabled channels. */
+ via_config_fifo(pdev, config->flags);
+
+ /* Clock set up */
+ switch(config->flags & VIA_UDMA) {
+ case VIA_UDMA_NONE:
+ if (config->flags & VIA_NO_UNMASK)
+ ppi[0] = &via_mwdma_info_borked;
+ else
+ ppi[0] = &via_mwdma_info;
+ break;
+ case VIA_UDMA_33:
+ ppi[0] = &via_udma33_info;
+ break;
+ case VIA_UDMA_66:
+ ppi[0] = &via_udma66_info;
+ /* The 66 MHz devices require we enable the clock */
+ pci_read_config_dword(pdev, 0x50, &timing);
+ timing |= 0x80008;
+ pci_write_config_dword(pdev, 0x50, timing);
+ break;
+ case VIA_UDMA_100:
+ ppi[0] = &via_udma100_info;
+ break;
+ case VIA_UDMA_133:
+ ppi[0] = &via_udma133_info;
+ break;
+ default:
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
+ if (config->flags & VIA_BAD_CLK66) {
+ /* Disable the 66MHz clock on problem devices */
+ pci_read_config_dword(pdev, 0x50, &timing);
+ timing &= ~0x80008;
+ pci_write_config_dword(pdev, 0x50, timing);
+ }
+
+ /* We have established the device type, now fire it up */
+ return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config);
+}
+
+#ifdef CONFIG_PM
+/**
+ * via_reinit_one - reinit after resume
+ * @pdev: PCI device
+ *
+ * Called when the VIA PATA device is resumed. We must then
+ * reconfigure the FIFO and the other setup we may have altered. In
+ * addition the kernel needs to have resume support for the PCI
+ * quirks.
+ */
+
+static int via_reinit_one(struct pci_dev *pdev)
+{
+ u32 timing;
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ const struct via_isa_bridge *config = host->private_data;
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ via_config_fifo(pdev, config->flags);
+
+ if ((config->flags & VIA_UDMA) == VIA_UDMA_66) {
+ /* The 66 MHz devices require we enable the clock */
+ pci_read_config_dword(pdev, 0x50, &timing);
+ timing |= 0x80008;
+ pci_write_config_dword(pdev, 0x50, timing);
+ }
+ if (config->flags & VIA_BAD_CLK66) {
+ /* Disable the 66MHz clock on problem devices */
+ pci_read_config_dword(pdev, 0x50, &timing);
+ timing &= ~0x80008;
+ pci_write_config_dword(pdev, 0x50, timing);
+ }
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+static const struct pci_device_id via[] = {
+ { PCI_VDEVICE(VIA, 0x0415), },
+ { PCI_VDEVICE(VIA, 0x0571), },
+ { PCI_VDEVICE(VIA, 0x0581), },
+ { PCI_VDEVICE(VIA, 0x1571), },
+ { PCI_VDEVICE(VIA, 0x3164), },
+ { PCI_VDEVICE(VIA, 0x5324), },
+ { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
+
+ { },
+};
+
+static struct pci_driver via_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = via,
+ .probe = via_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = via_reinit_one,
+#endif
+};
+
+static int __init via_init(void)
+{
+ return pci_register_driver(&via_pci_driver);
+}
+
+static void __exit via_exit(void)
+{
+ pci_unregister_driver(&via_pci_driver);
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for VIA PATA");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, via);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(via_init);
+module_exit(via_exit);
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
new file mode 100644
index 0000000..319e164
--- /dev/null
+++ b/drivers/ata/pata_winbond.c
@@ -0,0 +1,282 @@
+/*
+ * pata_winbond.c - Winbond VLB ATA controllers
+ * (C) 2006 Red Hat
+ *
+ * Support for the Winbond 83759A when operating in advanced mode.
+ * Multichip mode is not currently supported.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "pata_winbond"
+#define DRV_VERSION "0.0.3"
+
+#define NR_HOST 4 /* Two winbond controllers, two channels each */
+
+struct winbond_data {
+ unsigned long config;
+ struct platform_device *platform_dev;
+};
+
+static struct ata_host *winbond_host[NR_HOST];
+static struct winbond_data winbond_data[NR_HOST];
+static int nr_winbond_host;
+
+#ifdef MODULE
+static int probe_winbond = 1;
+#else
+static int probe_winbond;
+#endif
+
+static DEFINE_SPINLOCK(winbond_lock);
+
+static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&winbond_lock, flags);
+ outb(reg, port + 0x01);
+ outb(val, port + 0x02);
+ spin_unlock_irqrestore(&winbond_lock, flags);
+}
+
+static u8 winbond_readcfg(unsigned long port, u8 reg)
+{
+ u8 val;
+
+ unsigned long flags;
+ spin_lock_irqsave(&winbond_lock, flags);
+ outb(reg, port + 0x01);
+ val = inb(port + 0x02);
+ spin_unlock_irqrestore(&winbond_lock, flags);
+
+ return val;
+}
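+
+/*
+ * The chip is programmed through an index/data pair: the register
+ * number goes to port + 1 and the value moves through port + 2.
+ * winbond_readcfg(0x130, 0x81), for example, selects register 0x81
+ * via 0x131 and reads it back from 0x132.
+ */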
+
+static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ata_timing t;
+ struct winbond_data *winbond = ap->host->private_data;
+ int active, recovery;
+ u8 reg;
+ int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
+
+ reg = winbond_readcfg(winbond->config, 0x81);
+
+ /* Get the timing data in cycles */
+ if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
+ ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+ else
+ ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+
+ active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
+ recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
+ /* Write the composed value into the timing register, keeping
+ "timing" as the register index for the setup write below */
+ reg = (active << 4) | recovery;
+ winbond_writecfg(winbond->config, timing, reg);
+
+ /* Load the setup timing */
+
+ reg = 0x35;
+ if (adev->class != ATA_DEV_ATA)
+ reg |= 0x08; /* FIFO off */
+ if (!ata_pio_need_iordy(adev))
+ reg |= 0x02; /* IORDY off */
+ reg |= (clamp_val(t.setup, 0, 3) << 6);
+ winbond_writecfg(winbond->config, timing + 1, reg);
+}
+
+
+static unsigned int winbond_data_xfer(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ struct ata_port *ap = dev->link->ap;
+ int slop = buflen & 3;
+
+ if (ata_id_has_dword_io(dev->id)) {
+ if (rw == READ)
+ ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+ else
+ iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
+
+ if (unlikely(slop)) {
+ __le32 pad;
+ if (rw == READ) {
+ pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
+ memcpy(buf + buflen - slop, &pad, slop);
+ } else {
+ memcpy(&pad, buf + buflen - slop, slop);
+ iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
+ }
+ buflen += 4 - slop;
+ }
+ } else
+ buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
+
+ return buflen;
+}
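+
+/*
+ * Example: a 7 byte request moves one dword with the rep transfer,
+ * the trailing 3 bytes (the slop) go through one extra dword access,
+ * and the function reports 8 bytes transferred.
+ */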
+
+static struct scsi_host_template winbond_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations winbond_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = winbond_data_xfer,
+ .cable_detect = ata_cable_40wire,
+ .set_piomode = winbond_set_piomode,
+};
+
+/**
+ * winbond_init_one - attach a winbond interface
+ * @port: config register I/O base of the chip
+ *
+ * Register a VLB bus IDE interface. Such interfaces are PIO only and
+ * we assume they do not support IRQ sharing.
+ */
+
+static __init int winbond_init_one(unsigned long port)
+{
+ struct platform_device *pdev;
+ u8 reg;
+ int i, rc;
+
+ reg = winbond_readcfg(port, 0x81);
+ reg |= 0x80; /* jumpered mode off */
+ winbond_writecfg(port, 0x81, reg);
+ reg = winbond_readcfg(port, 0x83);
+ reg |= 0xF0; /* local control */
+ winbond_writecfg(port, 0x83, reg);
+ reg = winbond_readcfg(port, 0x85);
+ reg |= 0xF0; /* programmable timing */
+ winbond_writecfg(port, 0x85, reg);
+
+ reg = winbond_readcfg(port, 0x81);
+
+ if (!(reg & 0x03)) /* Disabled */
+ return -ENODEV;
+
+ for (i = 0; i < 2 ; i ++) {
+ unsigned long cmd_port = 0x1F0 - (0x80 * i);
+ unsigned long ctl_port = cmd_port + 0x206;
+ struct ata_host *host;
+ struct ata_port *ap;
+ void __iomem *cmd_addr, *ctl_addr;
+
+ if (!(reg & (1 << i)))
+ continue;
+
+ pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ rc = -ENOMEM;
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ goto err_unregister;
+ ap = host->ports[0];
+
+ rc = -ENOMEM;
+ cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
+ ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
+ if (!cmd_addr || !ctl_addr)
+ goto err_unregister;
+
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
+
+ ap->ops = &winbond_port_ops;
+ ap->pio_mask = 0x1F;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ ap->ioaddr.cmd_addr = cmd_addr;
+ ap->ioaddr.altstatus_addr = ctl_addr;
+ ap->ioaddr.ctl_addr = ctl_addr;
+ ata_sff_std_ports(&ap->ioaddr);
+
+ /* hook in a private data structure per channel */
+ host->private_data = &winbond_data[nr_winbond_host];
+ winbond_data[nr_winbond_host].config = port;
+ winbond_data[nr_winbond_host].platform_dev = pdev;
+
+ /* activate; the channels use the legacy ISA IRQs 14 and 15 */
+ rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
+ &winbond_sht);
+ if (rc)
+ goto err_unregister;
+
+ winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
+ }
+
+ return 0;
+
+ err_unregister:
+ platform_device_unregister(pdev);
+ return rc;
+}
+
+/**
+ * winbond_init - attach winbond interfaces
+ *
+ * Attach winbond IDE interfaces by scanning the config ports they may occupy.
+ */
+
+static __init int winbond_init(void)
+{
+ static const unsigned long config[2] = { 0x130, 0x1B0 };
+
+ int ct = 0;
+ int i;
+
+ if (probe_winbond == 0)
+ return -ENODEV;
+
+ /*
+ * Check both base addresses
+ */
+
+ for (i = 0; i < 2; i++) {
+ if (probe_winbond & (1<<i)) {
+ int ret = 0;
+ unsigned long port = config[i];
+
+ if (request_region(port, 2, "pata_winbond")) {
+ ret = winbond_init_one(port);
+ if (ret < 0)
+ release_region(port, 2);
+ else
+ ct++;
+ }
+ }
+ }
+ if (ct != 0)
+ return 0;
+ return -ENODEV;
+}
+
+static __exit void winbond_exit(void)
+{
+ int i;
+
+ for (i = 0; i < nr_winbond_host; i++) {
+ ata_host_detach(winbond_host[i]);
+ release_region(winbond_data[i].config, 2);
+ platform_device_unregister(winbond_data[i].platform_dev);
+ }
+}
+
+MODULE_AUTHOR("Alan Cox");
+MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(winbond_init);
+module_exit(winbond_exit);
+
+module_param(probe_winbond, int, 0);
+
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
new file mode 100644
index 0000000..be53545
--- /dev/null
+++ b/drivers/ata/pdc_adma.c
@@ -0,0 +1,695 @@
+/*
+ * pdc_adma.c - Pacific Digital Corporation ADMA
+ *
+ * Maintained by: Mark Lord <mlord@pobox.com>
+ *
+ * Copyright 2005 Mark Lord
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ *
+ * Supports ATA disks in single-packet ADMA mode.
+ * Uses PIO for everything else.
+ *
+ * TODO: Use ADMA transfers for ATAPI devices, when possible.
+ * This requires careful attention to a number of quirks of the chip.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "pdc_adma"
+#define DRV_VERSION "1.0"
+
+/* macro to calculate base address for ATA regs */
+#define ADMA_ATA_REGS(base, port_no) ((base) + ((port_no) * 0x40))
+
+/* macro to calculate base address for ADMA regs */
+#define ADMA_REGS(base, port_no) ((base) + 0x80 + ((port_no) * 0x20))
+
+/* macro to obtain addresses from ata_port */
+#define ADMA_PORT_REGS(ap) \
+ ADMA_REGS((ap)->host->iomap[ADMA_MMIO_BAR], ap->port_no)
+
+enum {
+ ADMA_MMIO_BAR = 4,
+
+ ADMA_PORTS = 2,
+ ADMA_CPB_BYTES = 40,
+ ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
+ ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES,
+
+ ADMA_DMA_BOUNDARY = 0xffffffff,
+
+ /* global register offsets */
+ ADMA_MODE_LOCK = 0x00c7,
+
+ /* per-channel register offsets */
+ ADMA_CONTROL = 0x0000, /* ADMA control */
+ ADMA_STATUS = 0x0002, /* ADMA status */
+ ADMA_CPB_COUNT = 0x0004, /* CPB count */
+ ADMA_CPB_CURRENT = 0x000c, /* current CPB address */
+ ADMA_CPB_NEXT = 0x000c, /* next CPB address */
+ ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */
+ ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */
+ ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */
+
+ /* ADMA_CONTROL register bits */
+ aNIEN = (1 << 8), /* irq mask: 1==masked */
+ aGO = (1 << 7), /* packet trigger ("Go!") */
+ aRSTADM = (1 << 5), /* ADMA logic reset */
+ aPIOMD4 = 0x0003, /* PIO mode 4 */
+
+ /* ADMA_STATUS register bits */
+ aPSD = (1 << 6),
+ aUIRQ = (1 << 4),
+ aPERR = (1 << 0),
+
+ /* CPB bits */
+ cDONE = (1 << 0),
+ cATERR = (1 << 3),
+
+ cVLD = (1 << 0),
+ cDAT = (1 << 2),
+ cIEN = (1 << 3),
+
+ /* PRD bits */
+ pORD = (1 << 4),
+ pDIRO = (1 << 5),
+ pEND = (1 << 7),
+
+ /* ATA register flags */
+ rIGN = (1 << 5),
+ rEND = (1 << 7),
+
+ /* ATA register addresses */
+ ADMA_REGS_CONTROL = 0x0e,
+ ADMA_REGS_SECTOR_COUNT = 0x12,
+ ADMA_REGS_LBA_LOW = 0x13,
+ ADMA_REGS_LBA_MID = 0x14,
+ ADMA_REGS_LBA_HIGH = 0x15,
+ ADMA_REGS_DEVICE = 0x16,
+ ADMA_REGS_COMMAND = 0x17,
+
+ /* PCI device IDs */
+ board_1841_idx = 0, /* ADMA 2-port controller */
+};
+
+typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t;
+
+struct adma_port_priv {
+ u8 *pkt;
+ dma_addr_t pkt_dma;
+ adma_state_t state;
+};
+
+static int adma_ata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static int adma_port_start(struct ata_port *ap);
+static void adma_port_stop(struct ata_port *ap);
+static void adma_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
+static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
+static void adma_freeze(struct ata_port *ap);
+static void adma_thaw(struct ata_port *ap);
+static int adma_prereset(struct ata_link *link, unsigned long deadline);
+
+static struct scsi_host_template adma_ata_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = LIBATA_MAX_PRD,
+ .dma_boundary = ADMA_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations adma_ata_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .check_atapi_dma = adma_check_atapi_dma,
+ .qc_prep = adma_qc_prep,
+ .qc_issue = adma_qc_issue,
+
+ .freeze = adma_freeze,
+ .thaw = adma_thaw,
+ .prereset = adma_prereset,
+
+ .port_start = adma_port_start,
+ .port_stop = adma_port_stop,
+};
+
+static struct ata_port_info adma_port_info[] = {
+ /* board_1841_idx */
+ {
+ .flags = ATA_FLAG_SLAVE_POSS |
+ ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
+ ATA_FLAG_PIO_POLLING,
+ .pio_mask = 0x10, /* pio4 */
+ .udma_mask = ATA_UDMA4,
+ .port_ops = &adma_ata_ops,
+ },
+};
+
+static const struct pci_device_id adma_ata_pci_tbl[] = {
+ { PCI_VDEVICE(PDC, 0x1841), board_1841_idx },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver adma_ata_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = adma_ata_pci_tbl,
+ .probe = adma_ata_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static int adma_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ return 1; /* ATAPI DMA not yet supported */
+}
+
+static void adma_reset_engine(struct ata_port *ap)
+{
+ void __iomem *chan = ADMA_PORT_REGS(ap);
+
+ /* reset ADMA to idle state */
+ writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
+ udelay(2);
+ writew(aPIOMD4, chan + ADMA_CONTROL);
+ udelay(2);
+}
+
+static void adma_reinit_engine(struct ata_port *ap)
+{
+ struct adma_port_priv *pp = ap->private_data;
+ void __iomem *chan = ADMA_PORT_REGS(ap);
+
+ /* mask/clear ATA interrupts */
+ writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
+ ata_sff_check_status(ap);
+
+ /* reset the ADMA engine */
+ adma_reset_engine(ap);
+
+ /* set in-FIFO threshold to 0x100 */
+ writew(0x100, chan + ADMA_FIFO_IN);
+
+ /* set CPB pointer */
+ writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
+
+ /* set out-FIFO threshold to 0x100 */
+ writew(0x100, chan + ADMA_FIFO_OUT);
+
+ /* set CPB count */
+ writew(1, chan + ADMA_CPB_COUNT);
+
+ /* read/discard ADMA status */
+ readb(chan + ADMA_STATUS);
+}
+
+static inline void adma_enter_reg_mode(struct ata_port *ap)
+{
+ void __iomem *chan = ADMA_PORT_REGS(ap);
+
+ writew(aPIOMD4, chan + ADMA_CONTROL);
+ readb(chan + ADMA_STATUS); /* flush */
+}
+
+static void adma_freeze(struct ata_port *ap)
+{
+ void __iomem *chan = ADMA_PORT_REGS(ap);
+
+ /* mask/clear ATA interrupts */
+ writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
+ ata_sff_check_status(ap);
+
+ /* reset ADMA to idle state */
+ writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL);
+ udelay(2);
+ writew(aPIOMD4 | aNIEN, chan + ADMA_CONTROL);
+ udelay(2);
+}
+
+static void adma_thaw(struct ata_port *ap)
+{
+ adma_reinit_engine(ap);
+}
+
+static int adma_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct adma_port_priv *pp = ap->private_data;
+
+ if (pp->state != adma_state_idle) /* healthy paranoia */
+ pp->state = adma_state_mmio;
+ adma_reinit_engine(ap);
+
+ return ata_sff_prereset(link, deadline);
+}
+
+static int adma_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct scatterlist *sg;
+ struct ata_port *ap = qc->ap;
+ struct adma_port_priv *pp = ap->private_data;
+ u8 *buf = pp->pkt, *last_buf = NULL;
+ int i = (2 + buf[3]) * 8;
+ u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? pDIRO : 0);
+ unsigned int si;
+
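+ /*
+ * Each 16-byte PRD entry holds a 32-bit buffer address, a 32-bit
+ * length in 8-byte units, a flags byte, the DMA mode, two unused
+ * bytes and a 32-bit link to the next entry; the final entry is
+ * flagged with pEND after the loop.
+ */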
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr;
+ u32 len;
+
+ addr = (u32)sg_dma_address(sg);
+ *(__le32 *)(buf + i) = cpu_to_le32(addr);
+ i += 4;
+
+ len = sg_dma_len(sg) >> 3;
+ *(__le32 *)(buf + i) = cpu_to_le32(len);
+ i += 4;
+
+ last_buf = &buf[i];
+ buf[i++] = pFLAGS;
+ buf[i++] = qc->dev->dma_mode & 0xf;
+ buf[i++] = 0; /* pPKLW */
+ buf[i++] = 0; /* reserved */
+
+ *(__le32 *)(buf + i) =
+ (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
+ i += 4;
+
+ VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
+ (unsigned long)addr, len);
+ }
+
+ if (likely(last_buf))
+ *last_buf |= pEND;
+
+ return i;
+}
+
+static void adma_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct adma_port_priv *pp = qc->ap->private_data;
+ u8 *buf = pp->pkt;
+ u32 pkt_dma = (u32)pp->pkt_dma;
+ int i = 0;
+
+ VPRINTK("ENTER\n");
+
+ adma_enter_reg_mode(qc->ap);
+ if (qc->tf.protocol != ATA_PROT_DMA) {
+ ata_sff_qc_prep(qc);
+ return;
+ }
+
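+ /*
+ * Build the CPB header: byte 0 is written back by the engine with
+ * response flags, byte 2 carries the control flags, byte 3 the CPB
+ * length in 8-byte units, followed by 32-bit next-CPB and PRD-table
+ * pointers and then the ATA register value/address pairs.
+ */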
+ buf[i++] = 0; /* Response flags */
+ buf[i++] = 0; /* reserved */
+ buf[i++] = cVLD | cDAT | cIEN;
+ i++; /* cLEN, gets filled in below */
+
+ *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */
+ i += 4; /* cNCPB */
+ i += 4; /* cPRD, gets filled in below */
+
+ buf[i++] = 0; /* reserved */
+ buf[i++] = 0; /* reserved */
+ buf[i++] = 0; /* reserved */
+ buf[i++] = 0; /* reserved */
+
+ /* ATA registers; must be a multiple of 4 */
+ buf[i++] = qc->tf.device;
+ buf[i++] = ADMA_REGS_DEVICE;
+ if ((qc->tf.flags & ATA_TFLAG_LBA48)) {
+ buf[i++] = qc->tf.hob_nsect;
+ buf[i++] = ADMA_REGS_SECTOR_COUNT;
+ buf[i++] = qc->tf.hob_lbal;
+ buf[i++] = ADMA_REGS_LBA_LOW;
+ buf[i++] = qc->tf.hob_lbam;
+ buf[i++] = ADMA_REGS_LBA_MID;
+ buf[i++] = qc->tf.hob_lbah;
+ buf[i++] = ADMA_REGS_LBA_HIGH;
+ }
+ buf[i++] = qc->tf.nsect;
+ buf[i++] = ADMA_REGS_SECTOR_COUNT;
+ buf[i++] = qc->tf.lbal;
+ buf[i++] = ADMA_REGS_LBA_LOW;
+ buf[i++] = qc->tf.lbam;
+ buf[i++] = ADMA_REGS_LBA_MID;
+ buf[i++] = qc->tf.lbah;
+ buf[i++] = ADMA_REGS_LBA_HIGH;
+ buf[i++] = 0;
+ buf[i++] = ADMA_REGS_CONTROL;
+ buf[i++] = rIGN;
+ buf[i++] = 0;
+ buf[i++] = qc->tf.command;
+ buf[i++] = ADMA_REGS_COMMAND | rEND;
+
+ buf[3] = (i >> 3) - 2; /* cLEN */
+ *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */
+
+ i = adma_fill_sg(qc);
+ wmb(); /* flush PRDs and pkt to memory */
+#if 0
+ /* dump out CPB + PRDs for debug */
+ {
+ int j, len = 0;
+ static char obuf[2048];
+ for (j = 0; j < i; ++j) {
+ len += sprintf(obuf+len, "%02x ", buf[j]);
+ if ((j & 7) == 7) {
+ printk("%s\n", obuf);
+ len = 0;
+ }
+ }
+ if (len)
+ printk("%s\n", obuf);
+ }
+#endif
+}
+
+static inline void adma_packet_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *chan = ADMA_PORT_REGS(ap);
+
+ VPRINTK("ENTER, ap %p\n", ap);
+
+ /* fire up the ADMA engine */
+ writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
+}
+
+static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct adma_port_priv *pp = qc->ap->private_data;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ pp->state = adma_state_pkt;
+ adma_packet_start(qc);
+ return 0;
+
+ case ATAPI_PROT_DMA:
+ BUG();
+ break;
+
+ default:
+ break;
+ }
+
+ pp->state = adma_state_mmio;
+ return ata_sff_qc_issue(qc);
+}
+
+static inline unsigned int adma_intr_pkt(struct ata_host *host)
+{
+ unsigned int handled = 0, port_no;
+
+ for (port_no = 0; port_no < host->n_ports; ++port_no) {
+ struct ata_port *ap = host->ports[port_no];
+ struct adma_port_priv *pp;
+ struct ata_queued_cmd *qc;
+ void __iomem *chan = ADMA_PORT_REGS(ap);
+ u8 status = readb(chan + ADMA_STATUS);
+
+ if (status == 0)
+ continue;
+ handled = 1;
+ adma_enter_reg_mode(ap);
+ if (ap->flags & ATA_FLAG_DISABLED)
+ continue;
+ pp = ap->private_data;
+ if (!pp || pp->state != adma_state_pkt)
+ continue;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+ if (status & aPERR)
+ qc->err_mask |= AC_ERR_HOST_BUS;
+ else if ((status & (aPSD | aUIRQ)))
+ qc->err_mask |= AC_ERR_OTHER;
+
+ if (pp->pkt[0] & cATERR)
+ qc->err_mask |= AC_ERR_DEV;
+ else if (pp->pkt[0] != cDONE)
+ qc->err_mask |= AC_ERR_OTHER;
+
+ if (!qc->err_mask)
+ ata_qc_complete(qc);
+ else {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi,
+ "ADMA-status 0x%02X", status);
+ ata_ehi_push_desc(ehi,
+ "pkt[0] 0x%02X", pp->pkt[0]);
+
+ if (qc->err_mask == AC_ERR_DEV)
+ ata_port_abort(ap);
+ else
+ ata_port_freeze(ap);
+ }
+ }
+ }
+ return handled;
+}
+
+static inline unsigned int adma_intr_mmio(struct ata_host *host)
+{
+ unsigned int handled = 0, port_no;
+
+ for (port_no = 0; port_no < host->n_ports; ++port_no) {
+ struct ata_port *ap;
+ ap = host->ports[port_no];
+ if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
+ struct ata_queued_cmd *qc;
+ struct adma_port_priv *pp = ap->private_data;
+ if (!pp || pp->state != adma_state_mmio)
+ continue;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+
+ /* check main status, clearing INTRQ */
+ u8 status = ata_sff_check_status(ap);
+ if ((status & ATA_BUSY))
+ continue;
+ DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
+ ap->print_id, qc->tf.protocol, status);
+
+ /* complete taskfile transaction */
+ pp->state = adma_state_idle;
+ qc->err_mask |= ac_err_mask(status);
+ if (!qc->err_mask)
+ ata_qc_complete(qc);
+ else {
+ struct ata_eh_info *ehi =
+ &ap->link.eh_info;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi,
+ "status 0x%02X", status);
+
+ if (qc->err_mask == AC_ERR_DEV)
+ ata_port_abort(ap);
+ else
+ ata_port_freeze(ap);
+ }
+ handled = 1;
+ }
+ }
+ }
+ return handled;
+}
+
+static irqreturn_t adma_intr(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ unsigned int handled = 0;
+
+ VPRINTK("ENTER\n");
+
+ spin_lock(&host->lock);
+ handled = adma_intr_pkt(host) | adma_intr_mmio(host);
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+
+static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+ port->cmd_addr =
+ port->data_addr = base + 0x000;
+ port->error_addr =
+ port->feature_addr = base + 0x004;
+ port->nsect_addr = base + 0x008;
+ port->lbal_addr = base + 0x00c;
+ port->lbam_addr = base + 0x010;
+ port->lbah_addr = base + 0x014;
+ port->device_addr = base + 0x018;
+ port->status_addr =
+ port->command_addr = base + 0x01c;
+ port->altstatus_addr =
+ port->ctl_addr = base + 0x038;
+}
+
+static int adma_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct adma_port_priv *pp;
+ int rc;
+
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+ adma_enter_reg_mode(ap);
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+ pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
+ GFP_KERNEL);
+ if (!pp->pkt)
+ return -ENOMEM;
+ /* paranoia? */
+ if ((pp->pkt_dma & 7) != 0) {
+ printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
+ (u32)pp->pkt_dma);
+ return -ENOMEM;
+ }
+ memset(pp->pkt, 0, ADMA_PKT_BYTES);
+ ap->private_data = pp;
+ adma_reinit_engine(ap);
+ return 0;
+}
+
+static void adma_port_stop(struct ata_port *ap)
+{
+ adma_reset_engine(ap);
+}
+
+static void adma_host_init(struct ata_host *host, unsigned int chip_id)
+{
+ unsigned int port_no;
+
+ /* enable/lock aGO operation */
+ writeb(7, host->iomap[ADMA_MMIO_BAR] + ADMA_MODE_LOCK);
+
+ /* reset the ADMA logic */
+ for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
+ adma_reset_engine(host->ports[port_no]);
+}
+
+static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
+{
+ int rc;
+
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ return 0;
+}
+
+static int adma_ata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ unsigned int board_idx = (unsigned int) ent->driver_data;
+ const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL };
+ struct ata_host *host;
+ void __iomem *mmio_base;
+ int rc, port_no;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* alloc host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS);
+ if (!host)
+ return -ENOMEM;
+
+ /* acquire resources and fill host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
+ return -ENODEV;
+
+ rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+ mmio_base = host->iomap[ADMA_MMIO_BAR];
+
+ rc = adma_set_dma_masks(pdev, mmio_base);
+ if (rc)
+ return rc;
+
+ for (port_no = 0; port_no < ADMA_PORTS; ++port_no) {
+ struct ata_port *ap = host->ports[port_no];
+ void __iomem *port_base = ADMA_ATA_REGS(mmio_base, port_no);
+ unsigned int offset = port_base - mmio_base;
+
+ adma_ata_setup_port(&ap->ioaddr, port_base);
+
+ ata_port_pbar_desc(ap, ADMA_MMIO_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, ADMA_MMIO_BAR, offset, "port");
+ }
+
+ /* initialize adapter */
+ adma_host_init(host, board_idx);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, adma_intr, IRQF_SHARED,
+ &adma_ata_sht);
+}
+
+static int __init adma_ata_init(void)
+{
+ return pci_register_driver(&adma_ata_pci_driver);
+}
+
+static void __exit adma_ata_exit(void)
+{
+ pci_unregister_driver(&adma_ata_pci_driver);
+}
+
+MODULE_AUTHOR("Mark Lord");
+MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(adma_ata_init);
+module_exit(adma_ata_exit);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
new file mode 100644
index 0000000..1a56db9
--- /dev/null
+++ b/drivers/ata/sata_fsl.c
@@ -0,0 +1,1411 @@
+/*
+ * drivers/ata/sata_fsl.c
+ *
+ * Freescale 3.0Gbps SATA device driver
+ *
+ * Author: Ashish Kalra <ashish.kalra@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Copyright (c) 2006-2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include <asm/io.h>
+#include <linux/of_platform.h>
+
+/* Controller information */
+enum {
+ SATA_FSL_QUEUE_DEPTH = 16,
+ SATA_FSL_MAX_PRD = 63,
+ SATA_FSL_MAX_PRD_USABLE = SATA_FSL_MAX_PRD - 1,
+ SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
+
+ SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_PMP | ATA_FLAG_NCQ),
+
+ SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
+ SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */
+ SATA_FSL_CMD_SLOT_SIZE = (SATA_FSL_MAX_CMDS * SATA_FSL_CMD_HDR_SIZE),
+
+ /*
+ * The SATA-FSL host controller supports a max. of (15+1) direct PRDEs,
+ * and chained indirect PRDEs up to a max count of 63.
+ * We allocate an array of 63 PRDEs contiguously, but PRDE#15 will
+ * be set up as an indirect descriptor, pointing to its next
+ * (contiguous) PRDE. Though chained indirect PRDE arrays are
+ * supported, it is more efficient to use a direct PRDT and
+ * a single chain/link to an indirect PRDE array/PRDT.
+ */
+
+ SATA_FSL_CMD_DESC_CFIS_SZ = 32,
+ SATA_FSL_CMD_DESC_SFIS_SZ = 32,
+ SATA_FSL_CMD_DESC_ACMD_SZ = 16,
+ SATA_FSL_CMD_DESC_RSRVD = 16,
+
+ SATA_FSL_CMD_DESC_SIZE = (SATA_FSL_CMD_DESC_CFIS_SZ +
+ SATA_FSL_CMD_DESC_SFIS_SZ +
+ SATA_FSL_CMD_DESC_ACMD_SZ +
+ SATA_FSL_CMD_DESC_RSRVD +
+ SATA_FSL_MAX_PRD * 16),
+
+ SATA_FSL_CMD_DESC_OFFSET_TO_PRDT =
+ (SATA_FSL_CMD_DESC_CFIS_SZ +
+ SATA_FSL_CMD_DESC_SFIS_SZ +
+ SATA_FSL_CMD_DESC_ACMD_SZ +
+ SATA_FSL_CMD_DESC_RSRVD),
+
+ SATA_FSL_CMD_DESC_AR_SZ = (SATA_FSL_CMD_DESC_SIZE * SATA_FSL_MAX_CMDS),
+ SATA_FSL_PORT_PRIV_DMA_SZ = (SATA_FSL_CMD_SLOT_SIZE +
+ SATA_FSL_CMD_DESC_AR_SZ),
+
+ /*
+ * MPC8315 has two SATA controllers, SATA1 & SATA2
+ * (one port per controller)
+ * MPC837x has 2/4 controllers, one port per controller
+ */
+
+ SATA_FSL_MAX_PORTS = 1,
+
+ SATA_FSL_IRQ_FLAG = IRQF_SHARED,
+};
+
+/*
+ * Host Controller command register set - per port
+ */
+enum {
+ CQ = 0,
+ CA = 8,
+ CC = 0x10,
+ CE = 0x18,
+ DE = 0x20,
+ CHBA = 0x24,
+ HSTATUS = 0x28,
+ HCONTROL = 0x2C,
+ CQPMP = 0x30,
+ SIGNATURE = 0x34,
+ ICC = 0x38,
+
+ /*
+ * Host Status Register (HStatus) bitdefs
+ */
+ ONLINE = (1 << 31),
+ GOING_OFFLINE = (1 << 30),
+ BIST_ERR = (1 << 29),
+
+ FATAL_ERR_HC_MASTER_ERR = (1 << 18),
+ FATAL_ERR_PARITY_ERR_TX = (1 << 17),
+ FATAL_ERR_PARITY_ERR_RX = (1 << 16),
+ FATAL_ERR_DATA_UNDERRUN = (1 << 13),
+ FATAL_ERR_DATA_OVERRUN = (1 << 12),
+ FATAL_ERR_CRC_ERR_TX = (1 << 11),
+ FATAL_ERR_CRC_ERR_RX = (1 << 10),
+ FATAL_ERR_FIFO_OVRFL_TX = (1 << 9),
+ FATAL_ERR_FIFO_OVRFL_RX = (1 << 8),
+
+ FATAL_ERROR_DECODE = FATAL_ERR_HC_MASTER_ERR |
+ FATAL_ERR_PARITY_ERR_TX |
+ FATAL_ERR_PARITY_ERR_RX |
+ FATAL_ERR_DATA_UNDERRUN |
+ FATAL_ERR_DATA_OVERRUN |
+ FATAL_ERR_CRC_ERR_TX |
+ FATAL_ERR_CRC_ERR_RX |
+ FATAL_ERR_FIFO_OVRFL_TX | FATAL_ERR_FIFO_OVRFL_RX,
+
+ INT_ON_FATAL_ERR = (1 << 5),
+ INT_ON_PHYRDY_CHG = (1 << 4),
+
+ INT_ON_SIGNATURE_UPDATE = (1 << 3),
+ INT_ON_SNOTIFY_UPDATE = (1 << 2),
+ INT_ON_SINGL_DEVICE_ERR = (1 << 1),
+ INT_ON_CMD_COMPLETE = 1,
+
+ INT_ON_ERROR = INT_ON_FATAL_ERR |
+ INT_ON_PHYRDY_CHG | INT_ON_SINGL_DEVICE_ERR,
+
+ /*
+ * Host Control Register (HControl) bitdefs
+ */
+ HCONTROL_ONLINE_PHY_RST = (1 << 31),
+ HCONTROL_FORCE_OFFLINE = (1 << 30),
+ HCONTROL_PARITY_PROT_MOD = (1 << 14),
+ HCONTROL_DPATH_PARITY = (1 << 12),
+ HCONTROL_SNOOP_ENABLE = (1 << 10),
+ HCONTROL_PMP_ATTACHED = (1 << 9),
+ HCONTROL_COPYOUT_STATFIS = (1 << 8),
+ IE_ON_FATAL_ERR = (1 << 5),
+ IE_ON_PHYRDY_CHG = (1 << 4),
+ IE_ON_SIGNATURE_UPDATE = (1 << 3),
+ IE_ON_SNOTIFY_UPDATE = (1 << 2),
+ IE_ON_SINGL_DEVICE_ERR = (1 << 1),
+ IE_ON_CMD_COMPLETE = 1,
+
+ DEFAULT_PORT_IRQ_ENABLE_MASK = IE_ON_FATAL_ERR | IE_ON_PHYRDY_CHG |
+ IE_ON_SIGNATURE_UPDATE |
+ IE_ON_SINGL_DEVICE_ERR | IE_ON_CMD_COMPLETE,
+
+ EXT_INDIRECT_SEG_PRD_FLAG = (1 << 31),
+ DATA_SNOOP_ENABLE = (1 << 22),
+};
+
+/*
+ * SATA Superset Registers
+ */
+enum {
+ SSTATUS = 0,
+ SERROR = 4,
+ SCONTROL = 8,
+ SNOTIFY = 0xC,
+};
+
+/*
+ * Control Status Register Set
+ */
+enum {
+ TRANSCFG = 0,
+ TRANSSTATUS = 4,
+ LINKCFG = 8,
+ LINKCFG1 = 0xC,
+ LINKCFG2 = 0x10,
+ LINKSTATUS = 0x14,
+ LINKSTATUS1 = 0x18,
+ PHYCTRLCFG = 0x1C,
+ COMMANDSTAT = 0x20,
+};
+
+/* PHY (link-layer) configuration control */
+enum {
+ PHY_BIST_ENABLE = 0x01,
+};
+
+/*
+ * Command Header Table entry, i.e., command slot
+ * 4 Dwords per command slot, command header size == 64 Dwords.
+ */
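+/*
+ * cda : bus address of the command descriptor
+ * prde_fis_len : PRDE count (bits 31:16) and command FIS length (bits 5:2)
+ * ttl : total data transfer length
+ * desc_info : descriptor flags plus the command tag (bits 4:0)
+ */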
+struct cmdhdr_tbl_entry {
+ u32 cda;
+ u32 prde_fis_len;
+ u32 ttl;
+ u32 desc_info;
+};
+
+/*
+ * Description information bitdefs
+ */
+enum {
+ VENDOR_SPECIFIC_BIST = (1 << 10),
+ CMD_DESC_SNOOP_ENABLE = (1 << 9),
+ FPDMA_QUEUED_CMD = (1 << 8),
+ SRST_CMD = (1 << 7),
+ BIST = (1 << 6),
+ ATAPI_CMD = (1 << 5),
+};
+
+/*
+ * Command Descriptor
+ */
+struct command_desc {
+ u8 cfis[8 * 4];
+ u8 sfis[8 * 4];
+ u8 acmd[4 * 4];
+ u8 fill[4 * 4];
+ u32 prdt[SATA_FSL_MAX_PRD_DIRECT * 4];
+ u32 prdt_indirect[(SATA_FSL_MAX_PRD - SATA_FSL_MAX_PRD_DIRECT) * 4];
+};
+
+/*
+ * Physical region table descriptor (PRD)
+ */
+
+struct prde {
+ u32 dba;
+ u8 fill[2 * 4];
+ u32 ddc_and_ext;
+};
+
+/*
+ * ata_port private data
+ * This is our per-port instance data.
+ */
+struct sata_fsl_port_priv {
+ struct cmdhdr_tbl_entry *cmdslot;
+ dma_addr_t cmdslot_paddr;
+ struct command_desc *cmdentry;
+ dma_addr_t cmdentry_paddr;
+};
+
+/*
+ * ata_port->host_set private data
+ */
+struct sata_fsl_host_priv {
+ void __iomem *hcr_base;
+ void __iomem *ssr_base;
+ void __iomem *csr_base;
+ int irq;
+};
+
+static inline unsigned int sata_fsl_tag(unsigned int tag,
+ void __iomem *hcr_base)
+{
+ /* We let libATA core do actual (queue) tag allocation */
+
+ /* all non-NCQ/queued commands should have tag #0 */
+ if (ata_tag_internal(tag)) {
+ DPRINTK("mapping internal cmds to tag#0\n");
+ return 0;
+ }
+
+ if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
+ DPRINTK("tag %d invalid : out of range\n", tag);
+ return 0;
+ }
+
+ if (unlikely((ioread32(hcr_base + CQ)) & (1 << tag))) {
+ DPRINTK("tag %d invalid : in use!!\n", tag);
+ return 0;
+ }
+
+ return tag;
+}
+
+static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
+ unsigned int tag, u32 desc_info,
+ u32 data_xfer_len, u8 num_prde,
+ u8 fis_len)
+{
+ dma_addr_t cmd_descriptor_address;
+
+ cmd_descriptor_address = pp->cmdentry_paddr +
+ tag * SATA_FSL_CMD_DESC_SIZE;
+
+ /* NOTE: both data_xfer_len & fis_len are Dword counts */
+
+ pp->cmdslot[tag].cda = cpu_to_le32(cmd_descriptor_address);
+ pp->cmdslot[tag].prde_fis_len =
+ cpu_to_le32((num_prde << 16) | (fis_len << 2));
+ pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03);
+ pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F));
+
+ VPRINTK("cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n",
+ pp->cmdslot[tag].cda,
+ pp->cmdslot[tag].prde_fis_len,
+ pp->cmdslot[tag].ttl, pp->cmdslot[tag].desc_info);
+}
+
+static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
+ u32 *ttl, dma_addr_t cmd_desc_paddr)
+{
+ struct scatterlist *sg;
+ unsigned int num_prde = 0;
+ u32 ttl_dwords = 0;
+
+ /*
+ * NOTE : direct & indirect PRDTs are contiguously allocated
+ */
+ struct prde *prd = (struct prde *)&((struct command_desc *)
+ cmd_desc)->prdt;
+
+ struct prde *prd_ptr_to_indirect_ext = NULL;
+ unsigned indirect_ext_segment_sz = 0;
+ dma_addr_t indirect_ext_segment_paddr;
+ unsigned int si;
+
+ VPRINTK("SATA FSL : cd = 0x%p, prd = 0x%p\n", cmd_desc, prd);
+
+ indirect_ext_segment_paddr = cmd_desc_paddr +
+ SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
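+ /*
+ * The indirect PRDE array lives immediately after the 16 direct
+ * PRDEs within the same command descriptor, hence the fixed offset
+ * from the descriptor's bus address.
+ */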
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_addr_t sg_addr = sg_dma_address(sg);
+ u32 sg_len = sg_dma_len(sg);
+
+ VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%x, sg_len = %d\n",
+ sg_addr, sg_len);
+
+ /* warn if each s/g element is not dword aligned */
+ if (sg_addr & 0x03)
+ ata_port_printk(qc->ap, KERN_ERR,
+ "s/g addr unaligned : 0x%x\n", sg_addr);
+ if (sg_len & 0x03)
+ ata_port_printk(qc->ap, KERN_ERR,
+ "s/g len unaligned : 0x%x\n", sg_len);
+
+ if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
+ sg_next(sg) != NULL) {
+ VPRINTK("setting indirect prde\n");
+ prd_ptr_to_indirect_ext = prd;
+ prd->dba = cpu_to_le32(indirect_ext_segment_paddr);
+ indirect_ext_segment_sz = 0;
+ ++prd;
+ ++num_prde;
+ }
+
+ ttl_dwords += sg_len;
+ prd->dba = cpu_to_le32(sg_addr);
+ prd->ddc_and_ext =
+ cpu_to_le32(DATA_SNOOP_ENABLE | (sg_len & ~0x03));
+
+ VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n",
+ ttl_dwords, prd->dba, prd->ddc_and_ext);
+
+ ++num_prde;
+ ++prd;
+ if (prd_ptr_to_indirect_ext)
+ indirect_ext_segment_sz += sg_len;
+ }
+
+ if (prd_ptr_to_indirect_ext) {
+ /* set indirect extension flag along with indirect ext. size */
+ prd_ptr_to_indirect_ext->ddc_and_ext =
+ cpu_to_le32((EXT_INDIRECT_SEG_PRD_FLAG |
+ DATA_SNOOP_ENABLE |
+ (indirect_ext_segment_sz & ~0x03)));
+ }
+
+ *ttl = ttl_dwords;
+ return num_prde;
+}
+
+static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct sata_fsl_port_priv *pp = ap->private_data;
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+ struct command_desc *cd;
+ u32 desc_info = CMD_DESC_SNOOP_ENABLE;
+ u32 num_prde = 0;
+ u32 ttl_dwords = 0;
+ dma_addr_t cd_paddr;
+
+ cd = (struct command_desc *)pp->cmdentry + tag;
+ cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE;
+
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis);
+
+ VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n",
+ cd->cfis[0], cd->cfis[1], cd->cfis[2]);
+
+ if (qc->tf.protocol == ATA_PROT_NCQ) {
+ VPRINTK("FPDMA xfer, sector cnt[0:7],[8:15] = %d,%d\n",
+ cd->cfis[3], cd->cfis[11]);
+ }
+
+ /* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */
+ if (ata_is_atapi(qc->tf.protocol)) {
+ desc_info |= ATAPI_CMD;
+ memset((void *)&cd->acmd, 0, 32);
+ memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len);
+ }
+
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ num_prde = sata_fsl_fill_sg(qc, (void *)cd,
+ &ttl_dwords, cd_paddr);
+
+ if (qc->tf.protocol == ATA_PROT_NCQ)
+ desc_info |= FPDMA_QUEUED_CMD;
+
+ sata_fsl_setup_cmd_hdr_entry(pp, tag, desc_info, ttl_dwords,
+ num_prde, 5);
+
+ VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
+ desc_info, ttl_dwords, num_prde);
+}
+
+static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+
+ VPRINTK("xx_qc_issue called,CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n",
+ ioread32(CQ + hcr_base),
+ ioread32(CA + hcr_base),
+ ioread32(CE + hcr_base), ioread32(CC + hcr_base));
+
+ iowrite32(qc->dev->link->pmp, CQPMP + hcr_base);
+
+ /* Simply queue command to the controller/device */
+ iowrite32(1 << tag, CQ + hcr_base);
+
+ VPRINTK("xx_qc_issue called, tag=%d, CQ=0x%x, CA=0x%x\n",
+ tag, ioread32(CQ + hcr_base), ioread32(CA + hcr_base));
+
+ VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
+ ioread32(CE + hcr_base),
+ ioread32(DE + hcr_base),
+ ioread32(CC + hcr_base),
+ ioread32(COMMANDSTAT + host_priv->csr_base));
+
+ return 0;
+}
+
+static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct sata_fsl_port_priv *pp = qc->ap->private_data;
+ struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+ struct command_desc *cd;
+
+ cd = pp->cmdentry + tag;
+
+ ata_tf_from_fis(cd->sfis, &qc->result_tf);
+ return true;
+}
+
+static int sata_fsl_scr_write(struct ata_link *link,
+ unsigned int sc_reg_in, u32 val)
+{
+ struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
+ void __iomem *ssr_base = host_priv->ssr_base;
+ unsigned int sc_reg;
+
+ switch (sc_reg_in) {
+ case SCR_STATUS:
+ case SCR_ERROR:
+ case SCR_CONTROL:
+ case SCR_ACTIVE:
+ sc_reg = sc_reg_in;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ VPRINTK("xx_scr_write, reg_in = %d\n", sc_reg);
+
+ iowrite32(val, ssr_base + (sc_reg * 4));
+ return 0;
+}
+
+static int sata_fsl_scr_read(struct ata_link *link,
+ unsigned int sc_reg_in, u32 *val)
+{
+ struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
+ void __iomem *ssr_base = host_priv->ssr_base;
+ unsigned int sc_reg;
+
+ switch (sc_reg_in) {
+ case SCR_STATUS:
+ case SCR_ERROR:
+ case SCR_CONTROL:
+ case SCR_ACTIVE:
+ sc_reg = sc_reg_in;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ VPRINTK("xx_scr_read, reg_in = %d\n", sc_reg);
+
+ *val = ioread32(ssr_base + (sc_reg * 4));
+ return 0;
+}
+
+static void sata_fsl_freeze(struct ata_port *ap)
+{
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 temp;
+
+ VPRINTK("xx_freeze, CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n",
+ ioread32(CQ + hcr_base),
+ ioread32(CA + hcr_base),
+ ioread32(CE + hcr_base), ioread32(DE + hcr_base));
+ VPRINTK("CmdStat = 0x%x\n",
+ ioread32(host_priv->csr_base + COMMANDSTAT));
+
+ /* disable interrupts on the controller/port */
+ temp = ioread32(hcr_base + HCONTROL);
+ iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
+
+ VPRINTK("in xx_freeze : HControl = 0x%x, HStatus = 0x%x\n",
+ ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
+}
+
+static void sata_fsl_thaw(struct ata_port *ap)
+{
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 temp;
+
+ /* ack. any pending IRQs for this controller/port */
+ temp = ioread32(hcr_base + HSTATUS);
+
+ VPRINTK("xx_thaw, pending IRQs = 0x%x\n", (temp & 0x3F));
+
+ if (temp & 0x3F)
+ iowrite32((temp & 0x3F), hcr_base + HSTATUS);
+
+ /* enable interrupts on the controller/port */
+ temp = ioread32(hcr_base + HCONTROL);
+ iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
+
+ VPRINTK("xx_thaw : HControl = 0x%x, HStatus = 0x%x\n",
+ ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
+}
+
+static void sata_fsl_pmp_attach(struct ata_port *ap)
+{
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 temp;
+
+ temp = ioread32(hcr_base + HCONTROL);
+ iowrite32((temp | HCONTROL_PMP_ATTACHED), hcr_base + HCONTROL);
+}
+
+static void sata_fsl_pmp_detach(struct ata_port *ap)
+{
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 temp;
+
+ temp = ioread32(hcr_base + HCONTROL);
+ temp &= ~HCONTROL_PMP_ATTACHED;
+ iowrite32(temp, hcr_base + HCONTROL);
+
+ /* enable interrupts on the controller/port */
+ temp = ioread32(hcr_base + HCONTROL);
+ iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
+}
+
+static int sata_fsl_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct sata_fsl_port_priv *pp;
+ void *mem;
+ dma_addr_t mem_dma;
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 temp;
+
+ pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+ GFP_KERNEL);
+ if (!mem) {
+ kfree(pp);
+ return -ENOMEM;
+ }
+ memset(mem, 0, SATA_FSL_PORT_PRIV_DMA_SZ);
+
+ pp->cmdslot = mem;
+ pp->cmdslot_paddr = mem_dma;
+
+ mem += SATA_FSL_CMD_SLOT_SIZE;
+ mem_dma += SATA_FSL_CMD_SLOT_SIZE;
+
+ pp->cmdentry = mem;
+ pp->cmdentry_paddr = mem_dma;
+
+ ap->private_data = pp;
+
+ VPRINTK("CHBA = 0x%x, cmdentry_phys = 0x%x\n",
+ pp->cmdslot_paddr, pp->cmdentry_paddr);
+
+ /* Now, update the CHBA register in host controller cmd register set */
+ iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
+
+ /*
+ * Now we can bring the controller on-line and also initiate
+ * the COMINIT sequence; we simply return here, and the boot-probing
+ * & device discovery process is re-initiated by libATA using a
+ * (dummy) softreset EH session. Hence, boot probing and device
+ * discovery become part of the sata_fsl_softreset() callback.
+ */
+
+ temp = ioread32(hcr_base + HCONTROL);
+ iowrite32((temp | HCONTROL_ONLINE_PHY_RST), hcr_base + HCONTROL);
+
+ VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ VPRINTK("CHBA = 0x%x\n", ioread32(hcr_base + CHBA));
+
+#ifdef CONFIG_MPC8315_DS
+ /*
+ * Workaround for 8315DS board 3gbps link-up issue,
+ * currently limit SATA port to GEN1 speed
+ */
+ sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
+ temp &= ~(0xF << 4);
+ temp |= (0x1 << 4);
+ sata_fsl_scr_write(&ap->link, SCR_CONTROL, temp);
+
+ sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
+ dev_printk(KERN_WARNING, dev, "scr_control, speed limited to %x\n",
+ temp);
+#endif
+
+ return 0;
+}
+
+static void sata_fsl_port_stop(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct sata_fsl_port_priv *pp = ap->private_data;
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 temp;
+
+ /*
+ * Force host controller to go off-line, aborting current operations
+ */
+ temp = ioread32(hcr_base + HCONTROL);
+ temp &= ~HCONTROL_ONLINE_PHY_RST;
+ temp |= HCONTROL_FORCE_OFFLINE;
+ iowrite32(temp, hcr_base + HCONTROL);
+
+ /* Poll for controller to go offline - should happen immediately */
+ ata_wait_register(hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1);
+
+ ap->private_data = NULL;
+ dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
+ pp->cmdslot, pp->cmdslot_paddr);
+
+ kfree(pp);
+}
+
+static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
+{
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ struct ata_taskfile tf;
+ u32 temp;
+
+ temp = ioread32(hcr_base + SIGNATURE);
+
+ VPRINTK("raw sig = 0x%x\n", temp);
+ VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+ tf.lbah = (temp >> 24) & 0xff;
+ tf.lbam = (temp >> 16) & 0xff;
+ tf.lbal = (temp >> 8) & 0xff;
+ tf.nsect = temp & 0xff;
+
+ return ata_dev_classify(&tf);
+}
+
+static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline)
+{
+ /* FIXME: Never skip softreset; sata_fsl_softreset() is a
+ * combination of soft and hard resets. sata_fsl_softreset()
+ * needs to be split into separate soft and hard resets.
+ */
+ return 0;
+}
+
+static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct sata_fsl_port_priv *pp = ap->private_data;
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ int pmp = sata_srst_pmp(link);
+ u32 temp;
+ struct ata_taskfile tf;
+ u8 *cfis;
+ u32 Serror;
+ int i = 0;
+ unsigned long start_jiffies;
+
+ DPRINTK("in xx_softreset\n");
+
+ if (pmp != SATA_PMP_CTRL_PORT)
+ goto issue_srst;
+
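+ /*
+ * For the controller port itself, force an offline/online cycle of
+ * the host controller before issuing SRST; links behind a port
+ * multiplier skip straight to issue_srst below.
+ */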
+try_offline_again:
+ /*
+ * Force host controller to go off-line, aborting current operations
+ */
+ temp = ioread32(hcr_base + HCONTROL);
+ temp &= ~HCONTROL_ONLINE_PHY_RST;
+ iowrite32(temp, hcr_base + HCONTROL);
+
+ /* Poll for controller to go offline */
+ temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, ONLINE, 1, 500);
+
+ if (temp & ONLINE) {
+ ata_port_printk(ap, KERN_ERR,
+ "Softreset failed, not off-lined %d\n", i);
+
+ /*
+ * Try to offline the controller at least twice
+ */
+ i++;
+ if (i == 2)
+ goto err;
+ else
+ goto try_offline_again;
+ }
+
+ DPRINTK("softreset, controller off-lined\n");
+ VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+ /*
+ * PHY reset should remain asserted for at least 1 ms
+ */
+ msleep(1);
+
+ /*
+ * Now bring the host controller online again; this can take time
+ * as PHY reset, communication establishment, the 1st D2H FIS and
+ * the device signature update are done. To be safe, assume 500ms.
+ * NOTE : Host online status may be indicated immediately!!
+ */
+
+ temp = ioread32(hcr_base + HCONTROL);
+ temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE);
+ temp |= HCONTROL_PMP_ATTACHED;
+ iowrite32(temp, hcr_base + HCONTROL);
+
+ temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, 0, 1, 500);
+
+ if (!(temp & ONLINE)) {
+ ata_port_printk(ap, KERN_ERR,
+ "Softreset failed, not on-lined\n");
+ goto err;
+ }
+
+ DPRINTK("softreset, controller off-lined & on-lined\n");
+ VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+ /*
+ * First, wait for the PHYRDY change to occur before waiting for
+ * the signature, and also verify if SStatus indicates device
+ * presence
+ */
+
+ temp = ata_wait_register(hcr_base + HSTATUS, 0xFF, 0, 1, 500);
+ if ((!(temp & 0x10)) || ata_link_offline(link)) {
+ ata_port_printk(ap, KERN_WARNING,
+ "No Device OR PHYRDY change, Hstatus = 0x%x\n",
+ ioread32(hcr_base + HSTATUS));
+ *class = ATA_DEV_NONE;
+ goto out;
+ }
+
+ /*
+ * Wait for the first D2H FIS from the device, i.e., the signature
+ * update notification
+ */
+ start_jiffies = jiffies;
+ temp = ata_wait_register(hcr_base + HSTATUS, 0xFF, 0x10,
+ 500, jiffies_to_msecs(deadline - start_jiffies));
+
+ if ((temp & 0xFF) != 0x18) {
+ ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
+ *class = ATA_DEV_NONE;
+ goto out;
+ } else {
+ ata_port_printk(ap, KERN_INFO,
+ "Signature Update detected @ %d msecs\n",
+ jiffies_to_msecs(jiffies - start_jiffies));
+ }
+
+ /*
+ * Send a device reset (SRST) explicitly on command slot #0.
+ * Check : will the command queue (reg) be cleared during offlining ??
+ * Also, we will be online only if PHY communication has been
+ * established and device presence has been detected; therefore, if
+ * we have reached here, we can send a command to the target device.
+ */
+
+issue_srst:
+ DPRINTK("Sending SRST/device reset\n");
+
+ ata_tf_init(link->device, &tf);
+ cfis = (u8 *) &pp->cmdentry->cfis;
+
+ /* device reset/SRST is a control register update FIS, uses tag0 */
+ sata_fsl_setup_cmd_hdr_entry(pp, 0,
+ SRST_CMD | CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
+
+ tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */
+ ata_tf_to_fis(&tf, pmp, 0, cfis);
+
+ DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n",
+ cfis[0], cfis[1], cfis[2], cfis[3]);
+
+ /*
+ * Queue SRST command to the controller/device, ensure that no
+ * other commands are active on the controller/device
+ */
+
+ DPRINTK("@Softreset, CQ = 0x%x, CA = 0x%x, CC = 0x%x\n",
+ ioread32(CQ + hcr_base),
+ ioread32(CA + hcr_base), ioread32(CC + hcr_base));
+
+ iowrite32(0xFFFF, CC + hcr_base);
+ iowrite32(1, CQ + hcr_base);
+
+ temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000);
+ if (temp & 0x1) {
+ ata_port_printk(ap, KERN_WARNING, "ATA_SRST issue failed\n");
+
+ DPRINTK("Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
+ ioread32(CQ + hcr_base),
+ ioread32(CA + hcr_base), ioread32(CC + hcr_base));
+
+ sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror);
+
+ DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ DPRINTK("Serror = 0x%x\n", Serror);
+ goto err;
+ }
+
+ msleep(1);
+
+ /*
+ * A SATA device enters reset state after receiving a Control register
+ * FIS with the SRST bit asserted, and awaits another H2D Control
+ * register FIS with the SRST bit cleared. The device then runs its
+ * internal diags & initialization, and reports its initialization
+ * status to the host controller via an ATA signature D2H register FIS.
+ */
+
+ sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
+
+ tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */
+ ata_tf_to_fis(&tf, pmp, 0, cfis);
+
+ if (pmp != SATA_PMP_CTRL_PORT)
+ iowrite32(pmp, CQPMP + hcr_base);
+ iowrite32(1, CQ + hcr_base);
+ msleep(150); /* ?? */
+
+ /*
+ * The above command would have signalled an interrupt on command
+ * complete, which needs special handling, by clearing the Nth
+ * command bit of the CCreg
+ */
+ iowrite32(0x01, CC + hcr_base); /* We know it will be cmd#0 always */
+
+ DPRINTK("SATA FSL : Now checking device signature\n");
+
+ *class = ATA_DEV_NONE;
+
+ /* Verify if SStatus indicates device presence */
+ if (ata_link_online(link)) {
+ /*
+ * if we are here, device presence has been detected and the
+ * 1st D2H FIS has been received; the sfis in the command
+ * descriptor is not updated, but the signature register
+ * will have been
+ */
+
+ *class = sata_fsl_dev_classify(ap);
+
+ DPRINTK("class = %d\n", *class);
+ VPRINTK("ccreg = 0x%x\n", ioread32(hcr_base + CC));
+ VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
+ }
+
+out:
+ return 0;
+
+err:
+ return -EIO;
+}
+
+static void sata_fsl_error_handler(struct ata_port *ap)
+{
+ DPRINTK("in xx_error_handler\n");
+ sata_pmp_error_handler(ap);
+}
+
+static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ qc->err_mask |= AC_ERR_OTHER;
+
+ if (qc->err_mask) {
+ /* TODO: make the DMA engine forget about the failed command */
+ }
+}
+
+static void sata_fsl_error_intr(struct ata_port *ap)
+{
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 hstatus, dereg = 0, cereg = 0, SError = 0;
+ unsigned int err_mask = 0, action = 0;
+ int freeze = 0, abort = 0;
+ struct ata_link *link = NULL;
+ struct ata_queued_cmd *qc = NULL;
+ struct ata_eh_info *ehi;
+
+ hstatus = ioread32(hcr_base + HSTATUS);
+ cereg = ioread32(hcr_base + CE);
+
+ /* first, analyze and record host port events */
+ link = &ap->link;
+ ehi = &link->eh_info;
+ ata_ehi_clear_desc(ehi);
+
+ /*
+ * Handle & Clear SError
+ */
+
+ sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
+ if (unlikely(SError & 0xFFFF0000)) {
+ sata_fsl_scr_write(&ap->link, SCR_ERROR, SError);
+ }
+
+ DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
+ hstatus, cereg, ioread32(hcr_base + DE), SError);
+
+ /* handle fatal errors */
+ if (hstatus & FATAL_ERROR_DECODE) {
+ ehi->err_mask |= AC_ERR_ATA_BUS;
+ ehi->action |= ATA_EH_SOFTRESET;
+
+ /*
+ * Ignore serror in case of fatal errors as we always want
+ * to do a soft-reset of the FSL SATA controller. Analyzing
+ * serror may cause libata to schedule a hard-reset action,
+ * and hard-reset currently does not do controller
+ * offline/online, causing command timeouts and leads to an
+ * un-recoverable state, hence make libATA ignore
+ * autopsy in case of fatal errors.
+ */
+
+ ehi->flags |= ATA_EHI_NO_AUTOPSY;
+
+ freeze = 1;
+ }
+
+ /* Handle PHYRDY change notification */
+ if (hstatus & INT_ON_PHYRDY_CHG) {
+ DPRINTK("SATA FSL: PHYRDY change indication\n");
+
+ /* Setup a soft-reset EH action */
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "%s", "PHY RDY changed");
+ freeze = 1;
+ }
+
+ /* handle single device errors */
+ if (cereg) {
+ /*
+ * clear the command error, also clears queue to the device
+ * in error, and we can (re)issue commands to this device.
+ * When a device is in error all commands queued into the
+ * host controller and at the device are considered aborted
+ * and the queue for that device is stopped. Now, after
+ * clearing the device error, we can issue commands to the
+ * device to interrogate it to find the source of the error.
+ */
+ abort = 1;
+
+ DPRINTK("single device error, CE=0x%x, DE=0x%x\n",
+ ioread32(hcr_base + CE), ioread32(hcr_base + DE));
+
+ /* find out the offending link and qc */
+ if (ap->nr_pmp_links) {
+ dereg = ioread32(hcr_base + DE);
+ iowrite32(dereg, hcr_base + DE);
+ iowrite32(cereg, hcr_base + CE);
+
+ if (dereg < ap->nr_pmp_links) {
+ link = &ap->pmp_link[dereg];
+ ehi = &link->eh_info;
+ qc = ata_qc_from_tag(ap, link->active_tag);
+ /*
+ * We should consider this a non-fatal error,
+ * and TF must be updated as done below.
+ */
+
+ err_mask |= AC_ERR_DEV;
+
+ } else {
+ err_mask |= AC_ERR_HSM;
+ action |= ATA_EH_HARDRESET;
+ freeze = 1;
+ }
+ } else {
+ dereg = ioread32(hcr_base + DE);
+ iowrite32(dereg, hcr_base + DE);
+ iowrite32(cereg, hcr_base + CE);
+
+ qc = ata_qc_from_tag(ap, link->active_tag);
+ /*
+ * We should consider this a non-fatal error,
+ * and TF must be updated as done below.
+ */
+ err_mask |= AC_ERR_DEV;
+ }
+ }
+
+ /* record error info */
+ if (qc)
+ qc->err_mask |= err_mask;
+ else
+ ehi->err_mask |= err_mask;
+
+ ehi->action |= action;
+
+ /* freeze or abort */
+ if (freeze)
+ ata_port_freeze(ap);
+ else if (abort) {
+ if (qc)
+ ata_link_abort(qc->dev->link);
+ else
+ ata_port_abort(ap);
+ }
+}
+
+static void sata_fsl_host_intr(struct ata_port *ap)
+{
+ struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 hstatus, qc_active = 0;
+ struct ata_queued_cmd *qc;
+ u32 SError;
+
+ hstatus = ioread32(hcr_base + HSTATUS);
+
+ sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
+
+ if (unlikely(SError & 0xFFFF0000)) {
+ DPRINTK("serror @host_intr : 0x%x\n", SError);
+ sata_fsl_error_intr(ap);
+ }
+
+ if (unlikely(hstatus & INT_ON_ERROR)) {
+ DPRINTK("error interrupt!!\n");
+ sata_fsl_error_intr(ap);
+ return;
+ }
+
+ /* Read command completed register */
+ qc_active = ioread32(hcr_base + CC);
+
+ VPRINTK("Status of all queues :\n");
+ VPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n",
+ qc_active,
+ ioread32(hcr_base + CA),
+ ioread32(hcr_base + CE),
+ ioread32(hcr_base + CQ),
+ ap->qc_active);
+
+ if (qc_active & ap->qc_active) {
+ int i;
+ /* clear CC bit, this will also complete the interrupt */
+ iowrite32(qc_active, hcr_base + CC);
+
+ DPRINTK("Status of all queues :\n");
+ DPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
+ qc_active, ioread32(hcr_base + CA),
+ ioread32(hcr_base + CE));
+
+ for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
+ if (qc_active & (1 << i)) {
+ qc = ata_qc_from_tag(ap, i);
+ if (qc)
+ ata_qc_complete(qc);
+ DPRINTK("completing ncq cmd, tag=%d, CC=0x%x, CA=0x%x\n",
+ i, ioread32(hcr_base + CC),
+ ioread32(hcr_base + CA));
+ }
+ }
+ return;
+
+ } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) {
+ iowrite32(1, hcr_base + CC);
+ qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
+
+ DPRINTK("completing non-ncq cmd, CC=0x%x\n",
+ ioread32(hcr_base + CC));
+
+ if (qc)
+ ata_qc_complete(qc);
+ } else {
+ /* Spurious Interrupt!! */
+ DPRINTK("spurious interrupt!!, CC = 0x%x\n",
+ ioread32(hcr_base + CC));
+ iowrite32(qc_active, hcr_base + CC);
+ return;
+ }
+}
+
+static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct sata_fsl_host_priv *host_priv = host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 interrupt_enables;
+ unsigned handled = 0;
+ struct ata_port *ap;
+
+ /* ack. any pending IRQs for this controller/port */
+ interrupt_enables = ioread32(hcr_base + HSTATUS);
+ interrupt_enables &= 0x3F;
+
+ DPRINTK("interrupt status 0x%x\n", interrupt_enables);
+
+ if (!interrupt_enables)
+ return IRQ_NONE;
+
+ spin_lock(&host->lock);
+
+ /* Assuming one port per host controller */
+
+ ap = host->ports[0];
+ if (ap) {
+ sata_fsl_host_intr(ap);
+ } else {
+ dev_printk(KERN_WARNING, host->dev,
+ "interrupt on disabled port 0\n");
+ }
+
+ iowrite32(interrupt_enables, hcr_base + HSTATUS);
+ handled = 1;
+
+ spin_unlock(&host->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Multiple ports are represented by multiple SATA controllers with
+ * one port per controller
+ */
+static int sata_fsl_init_controller(struct ata_host *host)
+{
+ struct sata_fsl_host_priv *host_priv = host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ u32 temp;
+
+ /*
+ * NOTE : We cannot bring the controller online before setting
+ * the CHBA, hence main controller initialization is done as
+ * part of the port_start() callback
+ */
+
+ /* ack. any pending IRQs for this controller/port */
+ temp = ioread32(hcr_base + HSTATUS);
+ if (temp & 0x3F)
+ iowrite32((temp & 0x3F), hcr_base + HSTATUS);
+
+ /* Keep interrupts disabled on the controller */
+ temp = ioread32(hcr_base + HCONTROL);
+ iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
+
+ /* Disable interrupt coalescing control(icc), for the moment */
+ DPRINTK("icc = 0x%x\n", ioread32(hcr_base + ICC));
+ iowrite32(0x01000000, hcr_base + ICC);
+
+ /* clear error registers, SError is cleared by libATA */
+ iowrite32(0x0000FFFF, hcr_base + CE);
+ iowrite32(0x0000FFFF, hcr_base + DE);
+
+ /*
+ * host controller will be brought on-line, during xx_port_start()
+ * callback, that should also initiate the OOB, COMINIT sequence
+ */
+
+ DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+
+ return 0;
+}
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_fsl_sht = {
+ ATA_NCQ_SHT("sata_fsl"),
+ .can_queue = SATA_FSL_QUEUE_DEPTH,
+ .sg_tablesize = SATA_FSL_MAX_PRD_USABLE,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations sata_fsl_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_prep = sata_fsl_qc_prep,
+ .qc_issue = sata_fsl_qc_issue,
+ .qc_fill_rtf = sata_fsl_qc_fill_rtf,
+
+ .scr_read = sata_fsl_scr_read,
+ .scr_write = sata_fsl_scr_write,
+
+ .freeze = sata_fsl_freeze,
+ .thaw = sata_fsl_thaw,
+ .prereset = sata_fsl_prereset,
+ .softreset = sata_fsl_softreset,
+ .pmp_softreset = sata_fsl_softreset,
+ .error_handler = sata_fsl_error_handler,
+ .post_internal_cmd = sata_fsl_post_internal_cmd,
+
+ .port_start = sata_fsl_port_start,
+ .port_stop = sata_fsl_port_stop,
+
+ .pmp_attach = sata_fsl_pmp_attach,
+ .pmp_detach = sata_fsl_pmp_detach,
+};
+
+static const struct ata_port_info sata_fsl_port_info[] = {
+ {
+ .flags = SATA_FSL_HOST_FLAGS,
+ .pio_mask = 0x1f, /* pio 0-4 */
+ .udma_mask = 0x7f, /* udma 0-6 */
+ .port_ops = &sata_fsl_ops,
+ },
+};
+
+static int sata_fsl_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ int retval = 0;
+ void __iomem *hcr_base = NULL;
+ void __iomem *ssr_base = NULL;
+ void __iomem *csr_base = NULL;
+ struct sata_fsl_host_priv *host_priv = NULL;
+ int irq;
+ struct ata_host *host;
+
+ struct ata_port_info pi = sata_fsl_port_info[0];
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+
+ dev_printk(KERN_INFO, &ofdev->dev,
+ "Sata FSL Platform/CSB Driver init\n");
+
+ hcr_base = of_iomap(ofdev->node, 0);
+ if (!hcr_base) {
+ retval = -ENOMEM;
+ goto error_exit_with_cleanup;
+ }
+
+ ssr_base = hcr_base + 0x100;
+ csr_base = hcr_base + 0x140;
+
+ DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG));
+ DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc));
+ DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE);
+
+ host_priv = kzalloc(sizeof(struct sata_fsl_host_priv), GFP_KERNEL);
+ if (!host_priv) {
+ retval = -ENOMEM;
+ goto error_exit_with_cleanup;
+ }
+
+ host_priv->hcr_base = hcr_base;
+ host_priv->ssr_base = ssr_base;
+ host_priv->csr_base = csr_base;
+
+ irq = irq_of_parse_and_map(ofdev->node, 0);
+ if (!irq) {
+ /* irq_of_parse_and_map() returns 0, never a negative
+ value, when no mapping can be made */
+ dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n");
+ retval = -ENODEV;
+ goto error_exit_with_cleanup;
+ }
+ host_priv->irq = irq;
+
+ /* allocate host structure */
+ host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_FSL_MAX_PORTS);
+ if (!host) {
+ retval = -ENOMEM;
+ goto error_exit_with_cleanup;
+ }
+
+ /* host->iomap is not used currently */
+ host->private_data = host_priv;
+
+ /* initialize host controller */
+ sata_fsl_init_controller(host);
+
+ /*
+ * Now, register with libATA core, this will also initiate the
+ * device discovery process, invoking our port_start() handler &
+ * error_handler() to execute a dummy Softreset EH session
+ */
+ ata_host_activate(host, irq, sata_fsl_interrupt, SATA_FSL_IRQ_FLAG,
+ &sata_fsl_sht);
+
+ dev_set_drvdata(&ofdev->dev, host);
+
+ return 0;
+
+error_exit_with_cleanup:
+
+ if (hcr_base)
+ iounmap(hcr_base);
+ kfree(host_priv); /* kfree(NULL) is a no-op */
+
+ return retval;
+}
+
+static int sata_fsl_remove(struct of_device *ofdev)
+{
+ struct ata_host *host = dev_get_drvdata(&ofdev->dev);
+ struct sata_fsl_host_priv *host_priv = host->private_data;
+
+ ata_host_detach(host);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+
+ irq_dispose_mapping(host_priv->irq);
+ iounmap(host_priv->hcr_base);
+ kfree(host_priv);
+
+ return 0;
+}
+
+static struct of_device_id fsl_sata_match[] = {
+ {
+ .compatible = "fsl,pq-sata",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_sata_match);
+
+static struct of_platform_driver fsl_sata_driver = {
+ .name = "fsl-sata",
+ .match_table = fsl_sata_match,
+ .probe = sata_fsl_probe,
+ .remove = sata_fsl_remove,
+};
+
+static int __init sata_fsl_init(void)
+{
+ of_register_platform_driver(&fsl_sata_driver);
+ return 0;
+}
+
+static void __exit sata_fsl_exit(void)
+{
+ of_unregister_platform_driver(&fsl_sata_driver);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ashish Kalra, Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale 3.0Gbps SATA controller low level driver");
+MODULE_VERSION("1.10");
+
+module_init(sata_fsl_init);
+module_exit(sata_fsl_exit);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
new file mode 100644
index 0000000..fbbd87c
--- /dev/null
+++ b/drivers/ata/sata_inic162x.c
@@ -0,0 +1,935 @@
+/*
+ * sata_inic162x.c - Driver for Initio 162x SATA controllers
+ *
+ * Copyright 2006 SUSE Linux Products GmbH
+ * Copyright 2006 Tejun Heo <teheo@novell.com>
+ *
+ * This file is released under GPL v2.
+ *
+ * This controller is eccentric and easily locks up if something isn't
+ * right. Documentation is available at initio's website but it only
+ * documents registers (not programming model).
+ *
+ * This driver has an interesting history. The first version was
+ * written from the documentation and a 2.4 IDE driver posted by a
+ * Taiwanese company; that driver didn't use any IDMA features and
+ * couldn't handle LBA48. The resulting driver couldn't handle LBA48
+ * devices either, making it pretty useless.
+ *
+ * After a while, initio picked the driver up, renamed it to
+ * sata_initio162x, updated it to use IDMA for ATA DMA commands and
+ * posted it on their website. It used IDMA only for ATA_PROT_DMA,
+ * and attaching both devices while issuing IDMA and non-IDMA
+ * commands simultaneously broke it due to a PIRQ masking
+ * interaction; still, it did show how to use the IDMA (ADMA + some
+ * initio-specific twists) engine.
+ *
+ * Then, I picked up their changes again and here's the usable driver
+ * which uses IDMA for everything. Everything works now, including
+ * LBA48, CD/DVD burning, suspend/resume and hotplug. There are some
+ * issues though: result TF is not reported properly, NCQ isn't
+ * supported yet, and CD/DVD writing works with a DMA-assisted PIO
+ * protocol (which, for native SATA devices, shouldn't cause any
+ * noticeable difference).
+ *
+ * Anyway, here's finally a working driver for inic162x. Enjoy!
+ *
+ * initio: If you guys wanna improve the driver regarding result TF
+ * access and other stuff, please feel free to contact me. I'll be
+ * happy to assist.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_device.h>
+
+#define DRV_NAME "sata_inic162x"
+#define DRV_VERSION "0.4"
+
+enum {
+ MMIO_BAR_PCI = 5,
+ MMIO_BAR_CARDBUS = 1,
+
+ NR_PORTS = 2,
+
+ IDMA_CPB_TBL_SIZE = 4 * 32,
+
+ INIC_DMA_BOUNDARY = 0xffffff,
+
+ HOST_ACTRL = 0x08,
+ HOST_CTL = 0x7c,
+ HOST_STAT = 0x7e,
+ HOST_IRQ_STAT = 0xbc,
+ HOST_IRQ_MASK = 0xbe,
+
+ PORT_SIZE = 0x40,
+
+ /* registers for ATA TF operation */
+ PORT_TF_DATA = 0x00,
+ PORT_TF_FEATURE = 0x01,
+ PORT_TF_NSECT = 0x02,
+ PORT_TF_LBAL = 0x03,
+ PORT_TF_LBAM = 0x04,
+ PORT_TF_LBAH = 0x05,
+ PORT_TF_DEVICE = 0x06,
+ PORT_TF_COMMAND = 0x07,
+ PORT_TF_ALT_STAT = 0x08,
+ PORT_IRQ_STAT = 0x09,
+ PORT_IRQ_MASK = 0x0a,
+ PORT_PRD_CTL = 0x0b,
+ PORT_PRD_ADDR = 0x0c,
+ PORT_PRD_XFERLEN = 0x10,
+ PORT_CPB_CPBLAR = 0x18,
+ PORT_CPB_PTQFIFO = 0x1c,
+
+ /* IDMA register */
+ PORT_IDMA_CTL = 0x14,
+ PORT_IDMA_STAT = 0x16,
+
+ PORT_RPQ_FIFO = 0x1e,
+ PORT_RPQ_CNT = 0x1f,
+
+ PORT_SCR = 0x20,
+
+ /* HOST_CTL bits */
+ HCTL_LEDEN = (1 << 3), /* enable LED operation */
+ HCTL_IRQOFF = (1 << 8), /* global IRQ off */
+ HCTL_FTHD0 = (1 << 10), /* fifo threshold 0 */
+	HCTL_FTHD1		= (1 << 11), /* fifo threshold 1 */
+ HCTL_PWRDWN = (1 << 12), /* power down PHYs */
+ HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */
+ HCTL_RPGSEL = (1 << 15), /* register page select */
+
+ HCTL_KNOWN_BITS = HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
+ HCTL_RPGSEL,
+
+ /* HOST_IRQ_(STAT|MASK) bits */
+ HIRQ_PORT0 = (1 << 0),
+ HIRQ_PORT1 = (1 << 1),
+ HIRQ_SOFT = (1 << 14),
+ HIRQ_GLOBAL = (1 << 15), /* STAT only */
+
+ /* PORT_IRQ_(STAT|MASK) bits */
+ PIRQ_OFFLINE = (1 << 0), /* device unplugged */
+ PIRQ_ONLINE = (1 << 1), /* device plugged */
+ PIRQ_COMPLETE = (1 << 2), /* completion interrupt */
+ PIRQ_FATAL = (1 << 3), /* fatal error */
+ PIRQ_ATA = (1 << 4), /* ATA interrupt */
+ PIRQ_REPLY = (1 << 5), /* reply FIFO not empty */
+ PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */
+
+ PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
+ PIRQ_MASK_DEFAULT = PIRQ_REPLY | PIRQ_ATA,
+ PIRQ_MASK_FREEZE = 0xff,
+
+ /* PORT_PRD_CTL bits */
+ PRD_CTL_START = (1 << 0),
+ PRD_CTL_WR = (1 << 3),
+ PRD_CTL_DMAEN = (1 << 7), /* DMA enable */
+
+ /* PORT_IDMA_CTL bits */
+ IDMA_CTL_RST_ATA = (1 << 2), /* hardreset ATA bus */
+	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
+ IDMA_CTL_GO = (1 << 7), /* IDMA mode go */
+ IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */
+
+ /* PORT_IDMA_STAT bits */
+ IDMA_STAT_PERR = (1 << 0), /* PCI ERROR MODE */
+ IDMA_STAT_CPBERR = (1 << 1), /* ADMA CPB error */
+ IDMA_STAT_LGCY = (1 << 3), /* ADMA legacy */
+ IDMA_STAT_UIRQ = (1 << 4), /* ADMA unsolicited irq */
+ IDMA_STAT_STPD = (1 << 5), /* ADMA stopped */
+ IDMA_STAT_PSD = (1 << 6), /* ADMA pause */
+ IDMA_STAT_DONE = (1 << 7), /* ADMA done */
+
+ IDMA_STAT_ERR = IDMA_STAT_PERR | IDMA_STAT_CPBERR,
+
+	/* CPB Control Flags */
+ CPB_CTL_VALID = (1 << 0), /* CPB valid */
+ CPB_CTL_QUEUED = (1 << 1), /* queued command */
+ CPB_CTL_DATA = (1 << 2), /* data, rsvd in datasheet */
+ CPB_CTL_IEN = (1 << 3), /* PCI interrupt enable */
+ CPB_CTL_DEVDIR = (1 << 4), /* device direction control */
+
+ /* CPB Response Flags */
+ CPB_RESP_DONE = (1 << 0), /* ATA command complete */
+ CPB_RESP_REL = (1 << 1), /* ATA release */
+ CPB_RESP_IGNORED = (1 << 2), /* CPB ignored */
+ CPB_RESP_ATA_ERR = (1 << 3), /* ATA command error */
+ CPB_RESP_SPURIOUS = (1 << 4), /* ATA spurious interrupt error */
+ CPB_RESP_UNDERFLOW = (1 << 5), /* APRD deficiency length error */
+	CPB_RESP_OVERFLOW	= (1 << 6),  /* APRD excess length error */
+ CPB_RESP_CPB_ERR = (1 << 7), /* CPB error flag */
+
+ /* PRD Control Flags */
+ PRD_DRAIN = (1 << 1), /* ignore data excess */
+ PRD_CDB = (1 << 2), /* atapi packet command pointer */
+ PRD_DIRECT_INTR = (1 << 3), /* direct interrupt */
+ PRD_DMA = (1 << 4), /* data transfer method */
+ PRD_WRITE = (1 << 5), /* data dir, rsvd in datasheet */
+ PRD_IOM = (1 << 6), /* io/memory transfer */
+ PRD_END = (1 << 7), /* APRD chain end */
+};
+
+/* Command Parameter Block */
+struct inic_cpb {
+ u8 resp_flags; /* Response Flags */
+ u8 error; /* ATA Error */
+ u8 status; /* ATA Status */
+ u8 ctl_flags; /* Control Flags */
+ __le32 len; /* Total Transfer Length */
+ __le32 prd; /* First PRD pointer */
+ u8 rsvd[4];
+ /* 16 bytes */
+ u8 feature; /* ATA Feature */
+ u8 hob_feature; /* ATA Ex. Feature */
+ u8 device; /* ATA Device/Head */
+ u8 mirctl; /* Mirror Control */
+ u8 nsect; /* ATA Sector Count */
+ u8 hob_nsect; /* ATA Ex. Sector Count */
+ u8 lbal; /* ATA Sector Number */
+ u8 hob_lbal; /* ATA Ex. Sector Number */
+ u8 lbam; /* ATA Cylinder Low */
+ u8 hob_lbam; /* ATA Ex. Cylinder Low */
+ u8 lbah; /* ATA Cylinder High */
+ u8 hob_lbah; /* ATA Ex. Cylinder High */
+ u8 command; /* ATA Command */
+ u8 ctl; /* ATA Control */
+ u8 slave_error; /* Slave ATA Error */
+ u8 slave_status; /* Slave ATA Status */
+ /* 32 bytes */
+} __packed;
+
+/* Physical Region Descriptor */
+struct inic_prd {
+ __le32 mad; /* Physical Memory Address */
+ __le16 len; /* Transfer Length */
+ u8 rsvd;
+ u8 flags; /* Control Flags */
+} __packed;
+
+struct inic_pkt {
+ struct inic_cpb cpb;
+ struct inic_prd prd[LIBATA_MAX_PRD + 1]; /* + 1 for cdb */
+ u8 cdb[ATAPI_CDB_LEN];
+} __packed;
+
+struct inic_host_priv {
+ void __iomem *mmio_base;
+ u16 cached_hctl;
+};
+
+struct inic_port_priv {
+ struct inic_pkt *pkt;
+ dma_addr_t pkt_dma;
+ u32 *cpb_tbl;
+ dma_addr_t cpb_tbl_dma;
+};
+
+static struct scsi_host_template inic_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */
+ .dma_boundary = INIC_DMA_BOUNDARY,
+};
+
+static const int scr_map[] = {
+ [SCR_STATUS] = 0,
+ [SCR_ERROR] = 1,
+ [SCR_CONTROL] = 2,
+};
+
+static void __iomem *inic_port_base(struct ata_port *ap)
+{
+ struct inic_host_priv *hpriv = ap->host->private_data;
+
+ return hpriv->mmio_base + ap->port_no * PORT_SIZE;
+}
+
+static void inic_reset_port(void __iomem *port_base)
+{
+ void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
+
+ /* stop IDMA engine */
+ readw(idma_ctl); /* flush */
+ msleep(1);
+
+ /* mask IRQ and assert reset */
+ writew(IDMA_CTL_RST_IDMA, idma_ctl);
+ readw(idma_ctl); /* flush */
+ msleep(1);
+
+ /* release reset */
+ writew(0, idma_ctl);
+
+ /* clear irq */
+ writeb(0xff, port_base + PORT_IRQ_STAT);
+}
+
+static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
+{
+	void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
+
+	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
+		return -EINVAL;
+
+	*val = readl(scr_addr + scr_map[sc_reg] * 4);
+
+ /* this controller has stuck DIAG.N, ignore it */
+ if (sc_reg == SCR_ERROR)
+ *val &= ~SERR_PHYRDY_CHG;
+ return 0;
+}
+
+static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
+{
+ void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
+
+ if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
+ return -EINVAL;
+
+ writel(val, scr_addr + scr_map[sc_reg] * 4);
+ return 0;
+}
+
+static void inic_stop_idma(struct ata_port *ap)
+{
+ void __iomem *port_base = inic_port_base(ap);
+
+ readb(port_base + PORT_RPQ_FIFO);
+ readb(port_base + PORT_RPQ_CNT);
+ writew(0, port_base + PORT_IDMA_CTL);
+}
+
+static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct inic_port_priv *pp = ap->private_data;
+ struct inic_cpb *cpb = &pp->pkt->cpb;
+ bool freeze = false;
+
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
+ irq_stat, idma_stat);
+
+ inic_stop_idma(ap);
+
+ if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
+ ata_ehi_push_desc(ehi, "hotplug");
+ ata_ehi_hotplugged(ehi);
+ freeze = true;
+ }
+
+ if (idma_stat & IDMA_STAT_PERR) {
+ ata_ehi_push_desc(ehi, "PCI error");
+ freeze = true;
+ }
+
+ if (idma_stat & IDMA_STAT_CPBERR) {
+ ata_ehi_push_desc(ehi, "CPB error");
+
+ if (cpb->resp_flags & CPB_RESP_IGNORED) {
+ __ata_ehi_push_desc(ehi, " ignored");
+ ehi->err_mask |= AC_ERR_INVALID;
+ freeze = true;
+ }
+
+ if (cpb->resp_flags & CPB_RESP_ATA_ERR)
+ ehi->err_mask |= AC_ERR_DEV;
+
+ if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
+ __ata_ehi_push_desc(ehi, " spurious-intr");
+ ehi->err_mask |= AC_ERR_HSM;
+ freeze = true;
+ }
+
+ if (cpb->resp_flags &
+ (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
+ __ata_ehi_push_desc(ehi, " data-over/underflow");
+ ehi->err_mask |= AC_ERR_HSM;
+ freeze = true;
+ }
+ }
+
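+	/* Freezing makes EH reset the port before retrying; the lesser
+	 * errors just abort the failed commands.  (Note: this follows the
+	 * usual libata freeze-vs-abort convention.)
+	 */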
+ if (freeze)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+}
+
+static void inic_host_intr(struct ata_port *ap)
+{
+ void __iomem *port_base = inic_port_base(ap);
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ u8 irq_stat;
+ u16 idma_stat;
+
+ /* read and clear IRQ status */
+ irq_stat = readb(port_base + PORT_IRQ_STAT);
+ writeb(irq_stat, port_base + PORT_IRQ_STAT);
+ idma_stat = readw(port_base + PORT_IDMA_STAT);
+
+ if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
+ inic_host_err_intr(ap, irq_stat, idma_stat);
+
+ if (unlikely(!qc))
+ goto spurious;
+
+ if (likely(idma_stat & IDMA_STAT_DONE)) {
+ inic_stop_idma(ap);
+
+		/* Depending on circumstances, a device error may not
+		 * be reported by IDMA; check the TF status explicitly.
+		 */
+ if (unlikely(readb(port_base + PORT_TF_COMMAND) &
+ (ATA_DF | ATA_ERR)))
+ qc->err_mask |= AC_ERR_DEV;
+
+ ata_qc_complete(qc);
+ return;
+ }
+
+ spurious:
+ ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
+ "cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+ qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
+}
+
+static irqreturn_t inic_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct inic_host_priv *hpriv = host->private_data;
+ u16 host_irq_stat;
+	int i, handled = 0;
+
+ host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
+
+ if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
+ goto out;
+
+ spin_lock(&host->lock);
+
+ for (i = 0; i < NR_PORTS; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (!(host_irq_stat & (HIRQ_PORT0 << i)))
+ continue;
+
+ if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
+ inic_host_intr(ap);
+ handled++;
+ } else {
+ if (ata_ratelimit())
+ dev_printk(KERN_ERR, host->dev, "interrupt "
+ "from disabled port %d (0x%x)\n",
+ i, host_irq_stat);
+ }
+ }
+
+ spin_unlock(&host->lock);
+
+ out:
+ return IRQ_RETVAL(handled);
+}
+
+static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ /* For some reason ATAPI_PROT_DMA doesn't work for some
+ * commands including writes and other misc ops. Use PIO
+ * protocol instead, which BTW is driven by the DMA engine
+ * anyway, so it shouldn't make much difference for native
+ * SATA devices.
+ */
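+	/* (Returning non-zero from ->check_atapi_dma tells libata not
+	 *  to use DMA for this command.)
+	 */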
+ if (atapi_cmd_type(qc->cdb[0]) == READ)
+ return 0;
+ return 1;
+}
+
+static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+{
+ struct scatterlist *sg;
+ unsigned int si;
+ u8 flags = 0;
+
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ flags |= PRD_WRITE;
+
+ if (ata_is_dma(qc->tf.protocol))
+ flags |= PRD_DMA;
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ prd->mad = cpu_to_le32(sg_dma_address(sg));
+ prd->len = cpu_to_le16(sg_dma_len(sg));
+ prd->flags = flags;
+ prd++;
+ }
+
+ WARN_ON(!si);
+ prd[-1].flags |= PRD_END;
+}
+
+static void inic_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct inic_port_priv *pp = qc->ap->private_data;
+ struct inic_pkt *pkt = pp->pkt;
+ struct inic_cpb *cpb = &pkt->cpb;
+ struct inic_prd *prd = pkt->prd;
+ bool is_atapi = ata_is_atapi(qc->tf.protocol);
+ bool is_data = ata_is_data(qc->tf.protocol);
+ unsigned int cdb_len = 0;
+
+ VPRINTK("ENTER\n");
+
+ if (is_atapi)
+ cdb_len = qc->dev->cdb_len;
+
+ /* prepare packet, based on initio driver */
+ memset(pkt, 0, sizeof(struct inic_pkt));
+
+ cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
+ if (is_atapi || is_data)
+ cpb->ctl_flags |= CPB_CTL_DATA;
+
+ cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
+ cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
+
+ cpb->device = qc->tf.device;
+ cpb->feature = qc->tf.feature;
+ cpb->nsect = qc->tf.nsect;
+ cpb->lbal = qc->tf.lbal;
+ cpb->lbam = qc->tf.lbam;
+ cpb->lbah = qc->tf.lbah;
+
+ if (qc->tf.flags & ATA_TFLAG_LBA48) {
+ cpb->hob_feature = qc->tf.hob_feature;
+ cpb->hob_nsect = qc->tf.hob_nsect;
+ cpb->hob_lbal = qc->tf.hob_lbal;
+ cpb->hob_lbam = qc->tf.hob_lbam;
+ cpb->hob_lbah = qc->tf.hob_lbah;
+ }
+
+ cpb->command = qc->tf.command;
+ /* don't load ctl - dunno why. it's like that in the initio driver */
+
+ /* setup PRD for CDB */
+ if (is_atapi) {
+ memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
+ prd->mad = cpu_to_le32(pp->pkt_dma +
+ offsetof(struct inic_pkt, cdb));
+ prd->len = cpu_to_le16(cdb_len);
+ prd->flags = PRD_CDB | PRD_WRITE;
+ if (!is_data)
+ prd->flags |= PRD_END;
+ prd++;
+ }
+
+ /* setup sg table */
+ if (is_data)
+ inic_fill_sg(prd, qc);
+
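+	/* Only CPB slot 0 is ever used: with NCQ unsupported the driver
+	 * issues one command at a time, so the lookup table simply points
+	 * slot 0 at this packet.
+	 */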
+ pp->cpb_tbl[0] = pp->pkt_dma;
+}
+
+static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *port_base = inic_port_base(ap);
+
+ /* fire up the ADMA engine */
+ writew(HCTL_FTHD0 | HCTL_LEDEN, port_base + HOST_CTL);
+ writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
+ writeb(0, port_base + PORT_CPB_PTQFIFO);
+
+ return 0;
+}
+
+static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ void __iomem *port_base = inic_port_base(ap);
+
+ tf->feature = readb(port_base + PORT_TF_FEATURE);
+ tf->nsect = readb(port_base + PORT_TF_NSECT);
+ tf->lbal = readb(port_base + PORT_TF_LBAL);
+ tf->lbam = readb(port_base + PORT_TF_LBAM);
+ tf->lbah = readb(port_base + PORT_TF_LBAH);
+ tf->device = readb(port_base + PORT_TF_DEVICE);
+ tf->command = readb(port_base + PORT_TF_COMMAND);
+}
+
+static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct ata_taskfile *rtf = &qc->result_tf;
+ struct ata_taskfile tf;
+
+	/* FIXME: Except for status and error, result TF access
+	 * doesn't work. I tried reading from BAR0/2, CPB and BAR5.
+	 * None works regardless of which command interface is used.
+	 * For now, return true iff status indicates device error.
+	 * This means that we're reporting a bogus sector for R/W
+	 * failures. Eeekk....
+	 */
+ inic_tf_read(qc->ap, &tf);
+
+ if (!(tf.command & ATA_ERR))
+ return false;
+
+ rtf->command = tf.command;
+ rtf->feature = tf.feature;
+ return true;
+}
+
+static void inic_freeze(struct ata_port *ap)
+{
+ void __iomem *port_base = inic_port_base(ap);
+
+ writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
+ writeb(0xff, port_base + PORT_IRQ_STAT);
+}
+
+static void inic_thaw(struct ata_port *ap)
+{
+ void __iomem *port_base = inic_port_base(ap);
+
+ writeb(0xff, port_base + PORT_IRQ_STAT);
+ writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
+}
+
+static int inic_check_ready(struct ata_link *link)
+{
+ void __iomem *port_base = inic_port_base(link->ap);
+
+ return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
+}
+
+/*
+ * SRST and SControl hardreset don't give valid signature on this
+ * controller. Only controller specific hardreset mechanism works.
+ */
+static int inic_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port_base = inic_port_base(ap);
+ void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ int rc;
+
+ /* hammer it into sane state */
+ inic_reset_port(port_base);
+
+ writew(IDMA_CTL_RST_ATA, idma_ctl);
+ readw(idma_ctl); /* flush */
+ msleep(1);
+ writew(0, idma_ctl);
+
+ rc = sata_link_resume(link, timing, deadline);
+ if (rc) {
+ ata_link_printk(link, KERN_WARNING, "failed to resume "
+ "link after reset (errno=%d)\n", rc);
+ return rc;
+ }
+
+ *class = ATA_DEV_NONE;
+ if (ata_link_online(link)) {
+ struct ata_taskfile tf;
+
+ /* wait for link to become ready */
+ rc = ata_wait_after_reset(link, deadline, inic_check_ready);
+ /* link occupied, -ENODEV too is an error */
+ if (rc) {
+ ata_link_printk(link, KERN_WARNING, "device not ready "
+ "after hardreset (errno=%d)\n", rc);
+ return rc;
+ }
+
+ inic_tf_read(ap, &tf);
+ *class = ata_dev_classify(&tf);
+ }
+
+ return 0;
+}
+
+static void inic_error_handler(struct ata_port *ap)
+{
+ void __iomem *port_base = inic_port_base(ap);
+
+ inic_reset_port(port_base);
+ ata_std_error_handler(ap);
+}
+
+static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ /* make DMA engine forget about the failed command */
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ inic_reset_port(inic_port_base(qc->ap));
+}
+
+static void init_port(struct ata_port *ap)
+{
+ void __iomem *port_base = inic_port_base(ap);
+ struct inic_port_priv *pp = ap->private_data;
+
+ /* clear packet and CPB table */
+ memset(pp->pkt, 0, sizeof(struct inic_pkt));
+ memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
+
+ /* setup PRD and CPB lookup table addresses */
+ writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
+ writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
+}
+
+static int inic_port_resume(struct ata_port *ap)
+{
+ init_port(ap);
+ return 0;
+}
+
+static int inic_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct inic_port_priv *pp;
+ int rc;
+
+ /* alloc and initialize private data */
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+ ap->private_data = pp;
+
+ /* Alloc resources */
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+
+ pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
+ &pp->pkt_dma, GFP_KERNEL);
+ if (!pp->pkt)
+ return -ENOMEM;
+
+ pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
+ &pp->cpb_tbl_dma, GFP_KERNEL);
+ if (!pp->cpb_tbl)
+ return -ENOMEM;
+
+ init_port(ap);
+
+ return 0;
+}
+
+static struct ata_port_operations inic_port_ops = {
+ .inherits = &sata_port_ops,
+
+ .check_atapi_dma = inic_check_atapi_dma,
+ .qc_prep = inic_qc_prep,
+ .qc_issue = inic_qc_issue,
+ .qc_fill_rtf = inic_qc_fill_rtf,
+
+ .freeze = inic_freeze,
+ .thaw = inic_thaw,
+ .hardreset = inic_hardreset,
+ .error_handler = inic_error_handler,
+ .post_internal_cmd = inic_post_internal_cmd,
+
+ .scr_read = inic_scr_read,
+ .scr_write = inic_scr_write,
+
+ .port_resume = inic_port_resume,
+ .port_start = inic_port_start,
+};
+
+static struct ata_port_info inic_port_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &inic_port_ops
+};
+
+static int init_controller(void __iomem *mmio_base, u16 hctl)
+{
+ int i;
+ u16 val;
+
+ hctl &= ~HCTL_KNOWN_BITS;
+
+ /* Soft reset whole controller. Spec says reset duration is 3
+ * PCI clocks, be generous and give it 10ms.
+ */
+ writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
+ readw(mmio_base + HOST_CTL); /* flush */
+
+ for (i = 0; i < 10; i++) {
+ msleep(1);
+ val = readw(mmio_base + HOST_CTL);
+ if (!(val & HCTL_SOFTRST))
+ break;
+ }
+
+ if (val & HCTL_SOFTRST)
+ return -EIO;
+
+ /* mask all interrupts and reset ports */
+ for (i = 0; i < NR_PORTS; i++) {
+ void __iomem *port_base = mmio_base + i * PORT_SIZE;
+
+ writeb(0xff, port_base + PORT_IRQ_MASK);
+ inic_reset_port(port_base);
+ }
+
+ /* port IRQ is masked now, unmask global IRQ */
+ writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
+ val = readw(mmio_base + HOST_IRQ_MASK);
+ val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
+ writew(val, mmio_base + HOST_IRQ_MASK);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int inic_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct inic_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+ rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
+ if (rc)
+ return rc;
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+#endif
+
+static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
+ struct ata_host *host;
+ struct inic_host_priv *hpriv;
+ void __iomem * const *iomap;
+ int mmio_bar;
+ int i, rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* alloc host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!host || !hpriv)
+ return -ENOMEM;
+
+ host->private_data = hpriv;
+
+ /* Acquire resources and fill host. Note that PCI and cardbus
+ * use different BARs.
+ */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
+ mmio_bar = MMIO_BAR_PCI;
+ else
+ mmio_bar = MMIO_BAR_CARDBUS;
+
+ rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
+ if (rc)
+ return rc;
+ host->iomap = iomap = pcim_iomap_table(pdev);
+ hpriv->mmio_base = iomap[mmio_bar];
+ hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);
+
+ for (i = 0; i < NR_PORTS; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
+ ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
+ }
+
+	/* Set dma_mask. This device doesn't support 64-bit addressing. */
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+
+	/*
+	 * This controller is braindamaged. dma_boundary is 0xffff like
+	 * the others, but it will lock up the whole machine HARD if a
+	 * 65536-byte PRD entry is fed. Reduce the maximum segment size.
+	 */
+ rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to set the maximum segment size.\n");
+ return rc;
+ }
+
+ rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to initialize controller\n");
+ return rc;
+ }
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
+ &inic_sht);
+}
+
+static const struct pci_device_id inic_pci_tbl[] = {
+ { PCI_VDEVICE(INIT, 0x1622), },
+ { },
+};
+
+static struct pci_driver inic_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = inic_pci_tbl,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = inic_pci_device_resume,
+#endif
+ .probe = inic_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static int __init inic_init(void)
+{
+ return pci_register_driver(&inic_pci_driver);
+}
+
+static void __exit inic_exit(void)
+{
+ pci_unregister_driver(&inic_pci_driver);
+}
+
+MODULE_AUTHOR("Tejun Heo");
+MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(inic_init);
+module_exit(inic_exit);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
new file mode 100644
index 0000000..8092e2b
--- /dev/null
+++ b/drivers/ata/sata_mv.c
@@ -0,0 +1,3489 @@
+/*
+ * sata_mv.c - Marvell SATA support
+ *
+ * Copyright 2008: Marvell Corporation, all rights reserved.
+ * Copyright 2005: EMC Corporation, all rights reserved.
+ * Copyright 2005 Red Hat, Inc. All rights reserved.
+ *
+ * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+ * sata_mv TODO list:
+ *
+ * --> Errata workaround for NCQ device errors.
+ *
+ * --> More errata workarounds for PCI-X.
+ *
+ * --> Complete a full errata audit for all chipsets to identify others.
+ *
+ * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
+ *
+ * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
+ *
+ * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
+ *
+ * --> Develop a low-power-consumption strategy, and implement it.
+ *
+ * --> [Experiment, low priority] Investigate interrupt coalescing.
+ *       Quite often, especially with PCI Message Signalled Interrupts (MSI),
+ *       the overhead reduced by interrupt mitigation is not worth the
+ *       latency cost.
+ *
+ * --> [Experiment, Marvell value added] Is it possible to use target
+ * mode to cross-connect two Linux boxes with Marvell cards? If so,
+ * creating LibATA target mode support would be very interesting.
+ *
+ * Target mode, for those without docs, is the ability to directly
+ * connect two SATA ports.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+#include <linux/mbus.h>
+#include <linux/bitops.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_mv"
+#define DRV_VERSION "1.24"
+
+enum {
+ /* BAR's are enumerated in terms of pci_resource_start() terms */
+ MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
+ MV_IO_BAR = 2, /* offset 0x18: IO space */
+ MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
+
+ MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
+ MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
+
+ MV_PCI_REG_BASE = 0,
+ MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
+ MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
+ MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
+ MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
+ MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
+ MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
+
+ MV_SATAHC0_REG_BASE = 0x20000,
+ MV_FLASH_CTL_OFS = 0x1046c,
+ MV_GPIO_PORT_CTL_OFS = 0x104f0,
+ MV_RESET_CFG_OFS = 0x180d8,
+
+ MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
+ MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
+ MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
+ MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
+
+ MV_MAX_Q_DEPTH = 32,
+ MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
+
+ /* CRQB needs alignment on a 1KB boundary. Size == 1KB
+ * CRPB needs alignment on a 256B boundary. Size == 256B
+ * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
+ */
+ MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
+ MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
+ MV_MAX_SG_CT = 256,
+ MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
+
+ /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
+ MV_PORT_HC_SHIFT = 2,
+ MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
+ /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
+ MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
+
+ /* Host Flags */
+ MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
+ MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
+
+ MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
+ ATA_FLAG_PIO_POLLING,
+
+ MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
+
+ MV_GENIIE_FLAGS = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_NCQ | ATA_FLAG_AN,
+
+ CRQB_FLAG_READ = (1 << 0),
+ CRQB_TAG_SHIFT = 1,
+ CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
+ CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
+ CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
+ CRQB_CMD_ADDR_SHIFT = 8,
+ CRQB_CMD_CS = (0x2 << 11),
+ CRQB_CMD_LAST = (1 << 15),
+
+ CRPB_FLAG_STATUS_SHIFT = 8,
+ CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
+ CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
+
+ EPRD_FLAG_END_OF_TBL = (1 << 31),
+
+ /* PCI interface registers */
+
+ PCI_COMMAND_OFS = 0xc00,
+ PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
+
+ PCI_MAIN_CMD_STS_OFS = 0xd30,
+ STOP_PCI_MASTER = (1 << 2),
+ PCI_MASTER_EMPTY = (1 << 3),
+ GLOB_SFT_RST = (1 << 4),
+
+ MV_PCI_MODE_OFS = 0xd00,
+ MV_PCI_MODE_MASK = 0x30,
+
+ MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
+ MV_PCI_DISC_TIMER = 0xd04,
+ MV_PCI_MSI_TRIGGER = 0xc38,
+ MV_PCI_SERR_MASK = 0xc28,
+ MV_PCI_XBAR_TMOUT_OFS = 0x1d04,
+ MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
+ MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
+ MV_PCI_ERR_ATTRIBUTE = 0x1d48,
+ MV_PCI_ERR_COMMAND = 0x1d50,
+
+ PCI_IRQ_CAUSE_OFS = 0x1d58,
+ PCI_IRQ_MASK_OFS = 0x1d5c,
+ PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
+
+ PCIE_IRQ_CAUSE_OFS = 0x1900,
+ PCIE_IRQ_MASK_OFS = 0x1910,
+ PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
+
+ /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
+ PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
+ PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64,
+ SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
+ SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024,
+ ERR_IRQ = (1 << 0), /* shift by port # */
+ DONE_IRQ = (1 << 1), /* shift by port # */
+ HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
+ HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
+ PCI_ERR = (1 << 18),
+ TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
+ TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
+ PORTS_0_3_COAL_DONE = (1 << 8),
+ PORTS_4_7_COAL_DONE = (1 << 17),
+ PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
+ GPIO_INT = (1 << 22),
+ SELF_INT = (1 << 23),
+ TWSI_INT = (1 << 24),
+ HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
+ HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
+ HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
+
+ /* SATAHC registers */
+ HC_CFG_OFS = 0,
+
+ HC_IRQ_CAUSE_OFS = 0x14,
+ DMA_IRQ = (1 << 0), /* shift by port # */
+ HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
+ DEV_IRQ = (1 << 8), /* shift by port # */
+
+ /* Shadow block registers */
+ SHD_BLK_OFS = 0x100,
+ SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
+
+ /* SATA registers */
+ SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
+ SATA_ACTIVE_OFS = 0x350,
+ SATA_FIS_IRQ_CAUSE_OFS = 0x364,
+ SATA_FIS_IRQ_AN = (1 << 9), /* async notification */
+
+ LTMODE_OFS = 0x30c,
+ LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
+
+ PHY_MODE3 = 0x310,
+ PHY_MODE4 = 0x314,
+ PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
+ PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
+ PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
+ PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
+
+ PHY_MODE2 = 0x330,
+ SATA_IFCTL_OFS = 0x344,
+ SATA_TESTCTL_OFS = 0x348,
+ SATA_IFSTAT_OFS = 0x34c,
+ VENDOR_UNIQUE_FIS_OFS = 0x35c,
+
+ FISCFG_OFS = 0x360,
+ FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
+ FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
+
+ MV5_PHY_MODE = 0x74,
+ MV5_LTMODE_OFS = 0x30,
+ MV5_PHY_CTL_OFS = 0x0C,
+ SATA_INTERFACE_CFG_OFS = 0x050,
+
+ MV_M2_PREAMP_MASK = 0x7e0,
+
+ /* Port registers */
+ EDMA_CFG_OFS = 0,
+ EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
+ EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
+ EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
+ EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
+ EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
+ EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
+ EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
+
+ EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
+ EDMA_ERR_IRQ_MASK_OFS = 0xc,
+ EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
+ EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
+ EDMA_ERR_DEV = (1 << 2), /* device error */
+ EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
+ EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
+ EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
+ EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
+ EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
+ EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
+ EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
+ EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
+ EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
+ EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
+ EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
+
+ EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
+ EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
+ EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
+ EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
+ EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
+
+ EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
+
+ EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
+ EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
+ EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
+ EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
+ EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
+ EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
+
+ EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
+
+ EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
+ EDMA_ERR_OVERRUN_5 = (1 << 5),
+ EDMA_ERR_UNDERRUN_5 = (1 << 6),
+
+ EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
+ EDMA_ERR_LNK_CTRL_RX_1 |
+ EDMA_ERR_LNK_CTRL_RX_3 |
+ EDMA_ERR_LNK_CTRL_TX,
+
+ EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
+ EDMA_ERR_PRD_PAR |
+ EDMA_ERR_DEV_DCON |
+ EDMA_ERR_DEV_CON |
+ EDMA_ERR_SERR |
+ EDMA_ERR_SELF_DIS |
+ EDMA_ERR_CRQB_PAR |
+ EDMA_ERR_CRPB_PAR |
+ EDMA_ERR_INTRL_PAR |
+ EDMA_ERR_IORDY |
+ EDMA_ERR_LNK_CTRL_RX_2 |
+ EDMA_ERR_LNK_DATA_RX |
+ EDMA_ERR_LNK_DATA_TX |
+ EDMA_ERR_TRANS_PROTO,
+
+ EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
+ EDMA_ERR_PRD_PAR |
+ EDMA_ERR_DEV_DCON |
+ EDMA_ERR_DEV_CON |
+ EDMA_ERR_OVERRUN_5 |
+ EDMA_ERR_UNDERRUN_5 |
+ EDMA_ERR_SELF_DIS_5 |
+ EDMA_ERR_CRQB_PAR |
+ EDMA_ERR_CRPB_PAR |
+ EDMA_ERR_INTRL_PAR |
+ EDMA_ERR_IORDY,
+
+ EDMA_REQ_Q_BASE_HI_OFS = 0x10,
+ EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
+
+ EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
+ EDMA_REQ_Q_PTR_SHIFT = 5,
+
+ EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
+ EDMA_RSP_Q_IN_PTR_OFS = 0x20,
+ EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
+ EDMA_RSP_Q_PTR_SHIFT = 3,
+
+ EDMA_CMD_OFS = 0x28, /* EDMA command register */
+ EDMA_EN = (1 << 0), /* enable EDMA */
+ EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
+ EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
+
+ EDMA_STATUS_OFS = 0x30, /* EDMA engine status */
+ EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
+ EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
+
+ EDMA_IORDY_TMOUT_OFS = 0x34,
+ EDMA_ARB_CFG_OFS = 0x38,
+
+ EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */
+
+ GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */
+
+ /* Host private flags (hp_flags) */
+ MV_HP_FLAG_MSI = (1 << 0),
+ MV_HP_ERRATA_50XXB0 = (1 << 1),
+ MV_HP_ERRATA_50XXB2 = (1 << 2),
+ MV_HP_ERRATA_60X1B2 = (1 << 3),
+ MV_HP_ERRATA_60X1C0 = (1 << 4),
+ MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
+ MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
+ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
+ MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
+ MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
+ MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
+
+ /* Port private flags (pp_flags) */
+ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
+ MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
+ MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
+ MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
+};
+
+#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
+#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
+#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
+#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
+#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
+
+#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
+#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
+
+enum {
+ /* DMA boundary 0xffff is required by the s/g splitting
+	 * we need on /length/ in mv_fill_sg().
+ */
+ MV_DMA_BOUNDARY = 0xffffU,
+
+ /* mask of register bits containing lower 32 bits
+ * of EDMA request queue DMA address
+ */
+ EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
+
+ /* ditto, for response queue */
+ EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
+};
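+/*
+ * Illustration (assumption from the alignment rules above): the CRQB
+ * queue base is 1KB aligned, so its low 10 bits are always zero.  The
+ * IN/OUT pointer registers reuse those low bits for the queue index
+ * (depth 32, shifted by EDMA_REQ_Q_PTR_SHIFT = 5, i.e. bits 5..9),
+ * which is why mv_set_edma_ptrs() ORs the index into the masked base.
+ */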
+
+enum chip_type {
+ chip_504x,
+ chip_508x,
+ chip_5080,
+ chip_604x,
+ chip_608x,
+ chip_6042,
+ chip_7042,
+ chip_soc,
+};
+
+/* Command ReQuest Block: 32B */
+struct mv_crqb {
+ __le32 sg_addr;
+ __le32 sg_addr_hi;
+ __le16 ctrl_flags;
+ __le16 ata_cmd[11];
+};
+
+struct mv_crqb_iie {
+ __le32 addr;
+ __le32 addr_hi;
+ __le32 flags;
+ __le32 len;
+ __le32 ata_cmd[4];
+};
+
+/* Command ResPonse Block: 8B */
+struct mv_crpb {
+ __le16 id;
+ __le16 flags;
+ __le32 tmstmp;
+};
+
+/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
+struct mv_sg {
+ __le32 addr;
+ __le32 flags_size;
+ __le32 addr_hi;
+ __le32 reserved;
+};
+
+struct mv_port_priv {
+ struct mv_crqb *crqb;
+ dma_addr_t crqb_dma;
+ struct mv_crpb *crpb;
+ dma_addr_t crpb_dma;
+ struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
+ dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
+
+ unsigned int req_idx;
+ unsigned int resp_idx;
+
+ u32 pp_flags;
+ unsigned int delayed_eh_pmp_map;
+};
+
+struct mv_port_signal {
+ u32 amps;
+ u32 pre;
+};
+
+struct mv_host_priv {
+ u32 hp_flags;
+ u32 main_irq_mask;
+ struct mv_port_signal signal[8];
+ const struct mv_hw_ops *ops;
+ int n_ports;
+ void __iomem *base;
+ void __iomem *main_irq_cause_addr;
+ void __iomem *main_irq_mask_addr;
+ u32 irq_cause_ofs;
+ u32 irq_mask_ofs;
+ u32 unmask_all_irqs;
+ /*
+ * These consistent DMA memory pools give us guaranteed
+ * alignment for hardware-accessed data structures,
+ * and less memory waste in accomplishing the alignment.
+ */
+ struct dma_pool *crqb_pool;
+ struct dma_pool *crpb_pool;
+ struct dma_pool *sg_tbl_pool;
+};
+
+struct mv_hw_ops {
+ void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int port);
+ void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
+ void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio);
+ int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int n_hc);
+ void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
+ void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
+};
+
+static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
+static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
+static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
+static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
+static int mv_port_start(struct ata_port *ap);
+static void mv_port_stop(struct ata_port *ap);
+static int mv_qc_defer(struct ata_queued_cmd *qc);
+static void mv_qc_prep(struct ata_queued_cmd *qc);
+static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void mv_eh_freeze(struct ata_port *ap);
+static void mv_eh_thaw(struct ata_port *ap);
+static void mv6_dev_config(struct ata_device *dev);
+
+static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int port);
+static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio);
+static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int n_hc);
+static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
+
+static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int port);
+static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio);
+static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int n_hc);
+static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+ void __iomem *mmio);
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio);
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int n_hc);
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+ void __iomem *mmio);
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int port_no);
+static int mv_stop_edma(struct ata_port *ap);
+static int mv_stop_edma_engine(void __iomem *port_mmio);
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
+
+static void mv_pmp_select(struct ata_port *ap, int pmp);
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void mv_pmp_error_handler(struct ata_port *ap);
+static void mv_process_crpb_entries(struct ata_port *ap,
+ struct mv_port_priv *pp);
+
+/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
+ * because we have to allow room for worst case splitting of
+ * PRDs for 64K boundaries in mv_fill_sg().
+ */
+static struct scsi_host_template mv5_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = MV_MAX_SG_CT / 2,
+ .dma_boundary = MV_DMA_BOUNDARY,
+};
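+/*
+ * Worked example (illustrative): in the worst case every scatterlist
+ * entry straddles a 64K boundary and mv_fill_sg() must emit it as two
+ * ePRDs, so only MV_MAX_SG_CT / 2 = 128 of the 256 hardware slots can
+ * be promised to the SCSI layer.
+ */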
+
+static struct scsi_host_template mv6_sht = {
+ ATA_NCQ_SHT(DRV_NAME),
+ .can_queue = MV_MAX_Q_DEPTH - 1,
+ .sg_tablesize = MV_MAX_SG_CT / 2,
+ .dma_boundary = MV_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations mv5_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .qc_defer = mv_qc_defer,
+ .qc_prep = mv_qc_prep,
+ .qc_issue = mv_qc_issue,
+
+ .freeze = mv_eh_freeze,
+ .thaw = mv_eh_thaw,
+ .hardreset = mv_hardreset,
+ .error_handler = ata_std_error_handler, /* avoid SFF EH */
+ .post_internal_cmd = ATA_OP_NULL,
+
+ .scr_read = mv5_scr_read,
+ .scr_write = mv5_scr_write,
+
+ .port_start = mv_port_start,
+ .port_stop = mv_port_stop,
+};
+
+static struct ata_port_operations mv6_ops = {
+ .inherits = &mv5_ops,
+ .dev_config = mv6_dev_config,
+ .scr_read = mv_scr_read,
+ .scr_write = mv_scr_write,
+
+ .pmp_hardreset = mv_pmp_hardreset,
+ .pmp_softreset = mv_softreset,
+ .softreset = mv_softreset,
+ .error_handler = mv_pmp_error_handler,
+};
+
+static struct ata_port_operations mv_iie_ops = {
+ .inherits = &mv6_ops,
+ .dev_config = ATA_OP_NULL,
+ .qc_prep = mv_qc_prep_iie,
+};
+
+static const struct ata_port_info mv_port_info[] = {
+ { /* chip_504x */
+ .flags = MV_COMMON_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv5_ops,
+ },
+ { /* chip_508x */
+ .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv5_ops,
+ },
+ { /* chip_5080 */
+ .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv5_ops,
+ },
+ { /* chip_604x */
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_NCQ,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv6_ops,
+ },
+ { /* chip_608x */
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv6_ops,
+ },
+ { /* chip_6042 */
+ .flags = MV_GENIIE_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv_iie_ops,
+ },
+ { /* chip_7042 */
+ .flags = MV_GENIIE_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv_iie_ops,
+ },
+ { /* chip_soc */
+ .flags = MV_GENIIE_FLAGS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv_iie_ops,
+ },
+};
+
+static const struct pci_device_id mv_pci_tbl[] = {
+ { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+ { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+ { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+ /* RocketRAID 1720/174x have different identifiers */
+ { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+ { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+
+ { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+ { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+ { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+ { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+
+ { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+
+ /* Adaptec 1430SA */
+ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+
+ /* Marvell 7042 support */
+ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
+ /* Highpoint RocketRAID PCIe series */
+ { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+ { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
+ { } /* terminate list */
+};
+
+static const struct mv_hw_ops mv5xxx_ops = {
+ .phy_errata = mv5_phy_errata,
+ .enable_leds = mv5_enable_leds,
+ .read_preamp = mv5_read_preamp,
+ .reset_hc = mv5_reset_hc,
+ .reset_flash = mv5_reset_flash,
+ .reset_bus = mv5_reset_bus,
+};
+
+static const struct mv_hw_ops mv6xxx_ops = {
+ .phy_errata = mv6_phy_errata,
+ .enable_leds = mv6_enable_leds,
+ .read_preamp = mv6_read_preamp,
+ .reset_hc = mv6_reset_hc,
+ .reset_flash = mv6_reset_flash,
+ .reset_bus = mv_reset_pci_bus,
+};
+
+static const struct mv_hw_ops mv_soc_ops = {
+ .phy_errata = mv6_phy_errata,
+ .enable_leds = mv_soc_enable_leds,
+ .read_preamp = mv_soc_read_preamp,
+ .reset_hc = mv_soc_reset_hc,
+ .reset_flash = mv_soc_reset_flash,
+ .reset_bus = mv_soc_reset_bus,
+};
+
+/*
+ * Functions
+ */
+
+static inline void writelfl(unsigned long data, void __iomem *addr)
+{
+ writel(data, addr);
+ (void) readl(addr); /* flush to avoid PCI posted write */
+}
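+/*
+ * Example: writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS) in
+ * mv_start_dma() guarantees the enable bit has reached the chip
+ * before the caller proceeds; a plain writel() could still be
+ * sitting in a PCI posting buffer.
+ */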
+
+static inline unsigned int mv_hc_from_port(unsigned int port)
+{
+ return port >> MV_PORT_HC_SHIFT;
+}
+
+static inline unsigned int mv_hardport_from_port(unsigned int port)
+{
+ return port & MV_PORT_MASK;
+}
+
+/*
+ * Consolidate some rather tricky bit shift calculations.
+ * This is hot-path stuff, so not a function.
+ * Simple code, with two return values, so macro rather than inline.
+ *
+ * port is the sole input, in range 0..7.
+ * shift is one output, for use with main_irq_cause / main_irq_mask registers.
+ * hardport is the other output, in range 0..3.
+ *
+ * Note that port and hardport may be the same variable in some cases.
+ */
+#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
+{ \
+ shift = mv_hc_from_port(port) * HC_SHIFT; \
+ hardport = mv_hardport_from_port(port); \
+ shift += hardport * 2; \
+}
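+/*
+ * Worked example (illustrative): for port 5, mv_hc_from_port(5) = 1,
+ * so shift starts at 1 * HC_SHIFT = 9; hardport = 5 & 3 = 1 adds
+ * 1 * 2, giving shift = 11.  ERR_IRQ/DONE_IRQ for that port are then
+ * bits 11/12 of the main_irq_cause/main_irq_mask registers.
+ */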
+
+static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
+{
+ return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
+}
+
+static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
+ unsigned int port)
+{
+ return mv_hc_base(base, mv_hc_from_port(port));
+}
+
+static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
+{
+ return mv_hc_base_from_port(base, port) +
+ MV_SATAHC_ARBTR_REG_SZ +
+ (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
+}
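+/*
+ * Worked example (illustrative): for port 6, hc = 1 and hardport = 2,
+ * so the port registers live at base + 0x20000 (SATAHC0) + 0x10000
+ * (HC1) + 0x2000 (arbiter) + 2 * 0x2000 = base + 0x36000.
+ */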
+
+static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
+{
+ void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
+ unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
+
+ return hc_mmio + ofs;
+}
+
+static inline void __iomem *mv_host_base(struct ata_host *host)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ return hpriv->base;
+}
+
+static inline void __iomem *mv_ap_base(struct ata_port *ap)
+{
+ return mv_port_base(mv_host_base(ap->host), ap->port_no);
+}
+
+static inline int mv_get_hc_count(unsigned long port_flags)
+{
+ return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
+}
+
+static void mv_set_edma_ptrs(void __iomem *port_mmio,
+ struct mv_host_priv *hpriv,
+ struct mv_port_priv *pp)
+{
+ u32 index;
+
+ /*
+ * initialize request queue
+ */
+ pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
+ index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
+
+ WARN_ON(pp->crqb_dma & 0x3ff);
+ writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
+ writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
+ port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+ writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+
+ /*
+ * initialize response queue
+ */
+ pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
+ index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
+
+ WARN_ON(pp->crpb_dma & 0xff);
+ writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+ writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
+ port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+}
+
+static void mv_set_main_irq_mask(struct ata_host *host,
+ u32 disable_bits, u32 enable_bits)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ u32 old_mask, new_mask;
+
+ old_mask = hpriv->main_irq_mask;
+ new_mask = (old_mask & ~disable_bits) | enable_bits;
+ if (new_mask != old_mask) {
+ hpriv->main_irq_mask = new_mask;
+ writelfl(new_mask, hpriv->main_irq_mask_addr);
+ }
+}
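+/*
+ * Design note: the cached copy in hpriv->main_irq_mask lets this
+ * helper skip both an MMIO read and the flushed write whenever the
+ * requested bits are already in the desired state; mask updates
+ * happen on hot paths.
+ */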
+
+static void mv_enable_port_irqs(struct ata_port *ap,
+ unsigned int port_bits)
+{
+ unsigned int shift, hardport, port = ap->port_no;
+ u32 disable_bits, enable_bits;
+
+ MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+
+ disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
+ enable_bits = port_bits << shift;
+ mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
+}
+
+/**
+ * mv_start_dma - Enable eDMA engine
+ * @ap: ATA port
+ * @port_mmio: port base address
+ * @pp: port private data
+ * @protocol: taskfile protocol of the command being started
+ *
+ * (Re)configure and start the eDMA engine if it is not already
+ * running, stopping it first when switching between NCQ and
+ * non-NCQ modes.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
+ struct mv_port_priv *pp, u8 protocol)
+{
+ int want_ncq = (protocol == ATA_PROT_NCQ);
+
+ if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+ int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
+ if (want_ncq != using_ncq)
+ mv_stop_edma(ap);
+ }
+ if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ int hardport = mv_hardport_from_port(ap->port_no);
+ void __iomem *hc_mmio = mv_hc_base_from_port(
+ mv_host_base(ap->host), ap->port_no);
+ u32 hc_irq_cause, ipending;
+
+ /* clear EDMA event indicators, if any */
+ writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+ /* clear EDMA interrupt indicator, if any */
+ hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
+ ipending = (DEV_IRQ | DMA_IRQ) << hardport;
+ if (hc_irq_cause & ipending) {
+ writelfl(hc_irq_cause & ~ipending,
+ hc_mmio + HC_IRQ_CAUSE_OFS);
+ }
+
+ mv_edma_cfg(ap, want_ncq);
+
+ /* clear FIS IRQ Cause */
+ if (IS_GEN_IIE(hpriv))
+ writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+
+ mv_set_edma_ptrs(port_mmio, hpriv, pp);
+ mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);
+
+ writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
+ pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
+ }
+}
+
+static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+ const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
+ const int per_loop = 5, timeout = (15 * 1000 / per_loop);
+ int i;
+
+ /*
+ * Wait for the EDMA engine to finish transactions in progress.
+ * No idea what a good "timeout" value might be, but measurements
+ * indicate that it often requires hundreds of microseconds
+ * with two drives in-use. So we use the 15msec value above
+ * as a rough guess at what even more drives might require.
+ */
+ for (i = 0; i < timeout; ++i) {
+ u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
+ if ((edma_stat & empty_idle) == empty_idle)
+ break;
+ udelay(per_loop);
+ }
+ /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
+}
+
+/**
+ * mv_stop_edma_engine - Disable eDMA engine
+ * @port_mmio: io base address
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_stop_edma_engine(void __iomem *port_mmio)
+{
+ int i;
+
+ /* Disable eDMA. The disable bit auto clears. */
+ writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
+
+ /* Wait for the chip to confirm eDMA is off. */
+ for (i = 10000; i > 0; i--) {
+ u32 reg = readl(port_mmio + EDMA_CMD_OFS);
+ if (!(reg & EDMA_EN))
+ return 0;
+ udelay(10);
+ }
+ return -EIO;
+}
+
+static int mv_stop_edma(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+ struct mv_port_priv *pp = ap->private_data;
+
+ if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+ return 0;
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+ mv_wait_for_edma_empty_idle(ap);
+ if (mv_stop_edma_engine(port_mmio)) {
+ ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+#ifdef ATA_DEBUG
+static void mv_dump_mem(void __iomem *start, unsigned bytes)
+{
+ int b, w;
+ for (b = 0; b < bytes; ) {
+ DPRINTK("%p: ", start + b);
+ for (w = 0; b < bytes && w < 4; w++) {
+ printk("%08x ", readl(start + b));
+ b += sizeof(u32);
+ }
+ printk("\n");
+ }
+}
+#endif
+
+static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
+{
+#ifdef ATA_DEBUG
+ int b, w;
+ u32 dw;
+ for (b = 0; b < bytes; ) {
+ DPRINTK("%02x: ", b);
+ for (w = 0; b < bytes && w < 4; w++) {
+ (void) pci_read_config_dword(pdev, b, &dw);
+ printk("%08x ", dw);
+ b += sizeof(u32);
+ }
+ printk("\n");
+ }
+#endif
+}
+
+static void mv_dump_all_regs(void __iomem *mmio_base, int port,
+ struct pci_dev *pdev)
+{
+#ifdef ATA_DEBUG
+ void __iomem *hc_base = mv_hc_base(mmio_base,
+ port >> MV_PORT_HC_SHIFT);
+ void __iomem *port_base;
+ int start_port, num_ports, p, start_hc, num_hcs, hc;
+
+ if (0 > port) {
+ start_hc = start_port = 0;
+ num_ports = 8; /* should be benign for 4-port devs */
+ num_hcs = 2;
+ } else {
+ start_hc = port >> MV_PORT_HC_SHIFT;
+ start_port = port;
+ num_ports = num_hcs = 1;
+ }
+ DPRINTK("All registers for port(s) %u-%u:\n", start_port,
+ num_ports > 1 ? num_ports - 1 : start_port);
+
+ if (NULL != pdev) {
+ DPRINTK("PCI config space regs:\n");
+ mv_dump_pci_cfg(pdev, 0x68);
+ }
+ DPRINTK("PCI regs:\n");
+ mv_dump_mem(mmio_base+0xc00, 0x3c);
+ mv_dump_mem(mmio_base+0xd00, 0x34);
+ mv_dump_mem(mmio_base+0xf00, 0x4);
+ mv_dump_mem(mmio_base+0x1d00, 0x6c);
+ for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
+ hc_base = mv_hc_base(mmio_base, hc);
+ DPRINTK("HC regs (HC %i):\n", hc);
+ mv_dump_mem(hc_base, 0x1c);
+ }
+ for (p = start_port; p < start_port + num_ports; p++) {
+ port_base = mv_port_base(mmio_base, p);
+ DPRINTK("EDMA regs (port %i):\n", p);
+ mv_dump_mem(port_base, 0x54);
+ DPRINTK("SATA regs (port %i):\n", p);
+ mv_dump_mem(port_base+0x300, 0x60);
+ }
+#endif
+}
+
+static unsigned int mv_scr_offset(unsigned int sc_reg_in)
+{
+ unsigned int ofs;
+
+ switch (sc_reg_in) {
+ case SCR_STATUS:
+ case SCR_CONTROL:
+ case SCR_ERROR:
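+ /* status/error/control occupy consecutive u32s, in enum order */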
+ ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
+ break;
+ case SCR_ACTIVE:
+ ofs = SATA_ACTIVE_OFS; /* active is not with the others */
+ break;
+ default:
+ ofs = 0xffffffffU;
+ break;
+ }
+ return ofs;
+}
+
+static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
+{
+ unsigned int ofs = mv_scr_offset(sc_reg_in);
+
+ if (ofs != 0xffffffffU) {
+ *val = readl(mv_ap_base(link->ap) + ofs);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
+static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
+{
+ unsigned int ofs = mv_scr_offset(sc_reg_in);
+
+ if (ofs != 0xffffffffU) {
+ writelfl(val, mv_ap_base(link->ap) + ofs);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
+static void mv6_dev_config(struct ata_device *adev)
+{
+ /*
+ * Deal with Gen-II ("mv6") hardware quirks/restrictions:
+ *
+ * Gen-II does not support NCQ over a port multiplier
+ * (no FIS-based switching).
+ *
+ * We don't have hob_nsect when doing NCQ commands on Gen-II.
+ * See mv_qc_prep() for more info.
+ */
+ if (adev->flags & ATA_DFLAG_NCQ) {
+ if (sata_pmp_attached(adev->link->ap)) {
+ adev->flags &= ~ATA_DFLAG_NCQ;
+ ata_dev_printk(adev, KERN_INFO,
+ "NCQ disabled for command-based switching\n");
+ } else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
+ adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
+ ata_dev_printk(adev, KERN_INFO,
+ "max_sectors limited to %u for NCQ\n",
+ adev->max_sectors);
+ }
+ }
+}
+
+static int mv_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_link *link = qc->dev->link;
+ struct ata_port *ap = link->ap;
+ struct mv_port_priv *pp = ap->private_data;
+
+ /*
+ * Don't allow new commands if we're in a delayed EH state
+ * for NCQ and/or FIS-based switching.
+ */
+ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
+ return ATA_DEFER_PORT;
+ /*
+ * If the port is completely idle, then allow the new qc.
+ */
+ if (ap->nr_active_links == 0)
+ return 0;
+
+ /*
+ * If the port is operating in host queuing mode (EDMA) with NCQ
+ * enabled, allow multiple NCQ commands. EDMA also allows
+ * queueing multiple DMA commands but libata core currently
+ * doesn't allow it.
+ */
+ if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
+ (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
+ return 0;
+
+ return ATA_DEFER_PORT;
+}
+
+static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
+{
+ u32 new_fiscfg, old_fiscfg;
+ u32 new_ltmode, old_ltmode;
+ u32 new_haltcond, old_haltcond;
+
+ old_fiscfg = readl(port_mmio + FISCFG_OFS);
+ old_ltmode = readl(port_mmio + LTMODE_OFS);
+ old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
+
+ new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
+ new_ltmode = old_ltmode & ~LTMODE_BIT8;
+ new_haltcond = old_haltcond | EDMA_ERR_DEV;
+
+ if (want_fbs) {
+ new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
+ new_ltmode = old_ltmode | LTMODE_BIT8;
+ if (want_ncq)
+ new_haltcond &= ~EDMA_ERR_DEV;
+ else
+ new_fiscfg |= FISCFG_WAIT_DEV_ERR;
+ }
+
+ if (new_fiscfg != old_fiscfg)
+ writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
+ if (new_ltmode != old_ltmode)
+ writelfl(new_ltmode, port_mmio + LTMODE_OFS);
+ if (new_haltcond != old_haltcond)
+ writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
+}
+
+static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
+{
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ u32 old, new;
+
+ /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
+ old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
+ if (want_ncq)
+ new = old | (1 << 22);
+ else
+ new = old & ~(1 << 22);
+ if (new != old)
+ writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
+}
+
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
+{
+ u32 cfg;
+ struct mv_port_priv *pp = ap->private_data;
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = mv_ap_base(ap);
+
+ /* set up non-NCQ EDMA configuration */
+ cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
+ pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;
+
+ if (IS_GEN_I(hpriv))
+ cfg |= (1 << 8); /* enab config burst size mask */
+
+ else if (IS_GEN_II(hpriv)) {
+ cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
+ mv_60x1_errata_sata25(ap, want_ncq);
+
+ } else if (IS_GEN_IIE(hpriv)) {
+ int want_fbs = sata_pmp_attached(ap);
+ /*
+ * Possible future enhancement:
+ *
+ * The chip can use FBS with non-NCQ, if we allow it,
+ * But first we need to have the error handling in place
+ * for this mode (datasheet section 7.3.15.4.2.3).
+ * So disallow non-NCQ FBS for now.
+ */
+ want_fbs &= want_ncq;
+
+ mv_config_fbs(port_mmio, want_ncq, want_fbs);
+
+ if (want_fbs) {
+ pp->pp_flags |= MV_PP_FLAG_FBS_EN;
+ cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
+ }
+
+ cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
+ cfg |= (1 << 22); /* enab 4-entry host queue cache */
+ if (!IS_SOC(hpriv))
+ cfg |= (1 << 18); /* enab early completion */
+ if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
+ cfg |= (1 << 17); /* enab cut-through (disable store & forward) */
+ }
+
+ if (want_ncq) {
+ cfg |= EDMA_CFG_NCQ;
+ pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
+ } else
+ pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
+
+ writelfl(cfg, port_mmio + EDMA_CFG_OFS);
+}
+
+static void mv_port_free_dma_mem(struct ata_port *ap)
+{
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ struct mv_port_priv *pp = ap->private_data;
+ int tag;
+
+ if (pp->crqb) {
+ dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
+ pp->crqb = NULL;
+ }
+ if (pp->crpb) {
+ dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
+ pp->crpb = NULL;
+ }
+ /*
+ * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
+ * For later hardware, we have one unique sg_tbl per NCQ tag.
+ */
+ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+ if (pp->sg_tbl[tag]) {
+ if (tag == 0 || !IS_GEN_I(hpriv))
+ dma_pool_free(hpriv->sg_tbl_pool,
+ pp->sg_tbl[tag],
+ pp->sg_tbl_dma[tag]);
+ pp->sg_tbl[tag] = NULL;
+ }
+ }
+}
+
+/**
+ * mv_port_start - Port specific init/start routine.
+ * @ap: ATA channel to manipulate
+ *
+ * Allocate and point to DMA memory, init port private memory,
+ * zero indices.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ struct mv_port_priv *pp;
+ int tag;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+ ap->private_data = pp;
+
+ pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+ if (!pp->crqb)
+ return -ENOMEM;
+ memset(pp->crqb, 0, MV_CRQB_Q_SZ);
+
+ pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+ if (!pp->crpb)
+ goto out_port_free_dma_mem;
+ memset(pp->crpb, 0, MV_CRPB_Q_SZ);
+
+ /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
+ if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
+ ap->flags |= ATA_FLAG_AN;
+ /*
+ * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
+ * For later hardware, we need one unique sg_tbl per NCQ tag.
+ */
+ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+ if (tag == 0 || !IS_GEN_I(hpriv)) {
+ pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
+ GFP_KERNEL, &pp->sg_tbl_dma[tag]);
+ if (!pp->sg_tbl[tag])
+ goto out_port_free_dma_mem;
+ } else {
+ pp->sg_tbl[tag] = pp->sg_tbl[0];
+ pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
+ }
+ }
+ return 0;
+
+out_port_free_dma_mem:
+ mv_port_free_dma_mem(ap);
+ return -ENOMEM;
+}
+
+/**
+ * mv_port_stop - Port specific cleanup/stop routine.
+ * @ap: ATA channel to manipulate
+ *
+ * Stop DMA, cleanup port memory.
+ *
+ * LOCKING:
+ * This routine uses the host lock to protect the DMA stop.
+ */
+static void mv_port_stop(struct ata_port *ap)
+{
+ mv_stop_edma(ap);
+ mv_enable_port_irqs(ap, 0);
+ mv_port_free_dma_mem(ap);
+}
+
+/**
+ * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
+ * @qc: queued command whose SG list to source from
+ *
+ * Populate the SG list and mark the last entry.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct mv_port_priv *pp = qc->ap->private_data;
+ struct scatterlist *sg;
+ struct mv_sg *mv_sg, *last_sg = NULL;
+ unsigned int si;
+
+ mv_sg = pp->sg_tbl[qc->tag];
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ u32 offset = addr & 0xffff;
+ u32 len = sg_len;
+
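+ /*
+ * Split any segment that crosses a 64KB boundary;
+ * the ePRD byte-count field holds at most 64KB (a
+ * full 64KB is encoded as zero by the mask below).
+ */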
+ if (offset + sg_len > 0x10000)
+ len = 0x10000 - offset;
+
+ mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
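+ /*
+ * Shift twice by 16 rather than once by 32:
+ * dma_addr_t may be only 32 bits wide, and a
+ * single 32-bit shift would then be undefined.
+ */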
+ mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
+ mv_sg->flags_size = cpu_to_le32(len & 0xffff);
+
+ sg_len -= len;
+ addr += len;
+
+ last_sg = mv_sg;
+ mv_sg++;
+ }
+ }
+
+ if (likely(last_sg))
+ last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
+}
+
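+/* Pack one 16-bit CRQB command word: data byte, register address, flags */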
+static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
+{
+ u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
+ (last ? CRQB_CMD_LAST : 0);
+ *cmdw = cpu_to_le16(tmp);
+}
+
+/**
+ * mv_qc_prep - Host specific command preparation.
+ * @qc: queued command to prepare
+ *
+ * This routine simply redirects to the general purpose routine
+ * if command is not DMA. Else, it handles prep of the CRQB
+ * (command request block), does some sanity checking, and calls
+ * the SG load routine.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct mv_port_priv *pp = ap->private_data;
+ __le16 *cw;
+ struct ata_taskfile *tf;
+ u16 flags = 0;
+ unsigned in_index;
+
+ if ((qc->tf.protocol != ATA_PROT_DMA) &&
+ (qc->tf.protocol != ATA_PROT_NCQ))
+ return;
+
+ /* Fill in command request block
+ */
+ if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+ flags |= CRQB_FLAG_READ;
+ WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
+ flags |= qc->tag << CRQB_TAG_SHIFT;
+ flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
+
+ /* get current queue index from software */
+ in_index = pp->req_idx;
+
+ pp->crqb[in_index].sg_addr =
+ cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
+ pp->crqb[in_index].sg_addr_hi =
+ cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
+ pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
+
+ cw = &pp->crqb[in_index].ata_cmd[0];
+ tf = &qc->tf;
+
+ /* Sadly, the CRQB cannot accommodate all registers--there are
+ * only 11 bytes...so we must pick and choose required
+ * registers based on the command. So, we drop feature and
+ * hob_feature for [RW] DMA commands, but they are needed for
+ * NCQ. NCQ will drop hob_nsect.
+ */
+ switch (tf->command) {
+ case ATA_CMD_READ:
+ case ATA_CMD_READ_EXT:
+ case ATA_CMD_WRITE:
+ case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_FUA_EXT:
+ mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
+ break;
+ case ATA_CMD_FPDMA_READ:
+ case ATA_CMD_FPDMA_WRITE:
+ mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
+ mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
+ break;
+ default:
+ /* The only other commands EDMA supports in non-queued and
+ * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
+ * of which are defined/used by Linux. If we get here, this
+ * driver needs work.
+ *
+ * FIXME: modify libata to give qc_prep a return value and
+ * return error here.
+ */
+ BUG_ON(tf->command);
+ break;
+ }
+ mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
+ mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
+ mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
+ mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
+ mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
+ mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
+ mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
+ mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
+ mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+ mv_fill_sg(qc);
+}
+
+/**
+ * mv_qc_prep_iie - Host specific command preparation.
+ * @qc: queued command to prepare
+ *
+ * This routine simply redirects to the general purpose routine
+ * if command is not DMA. Else, it handles prep of the CRQB
+ * (command request block), does some sanity checking, and calls
+ * the SG load routine.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct mv_port_priv *pp = ap->private_data;
+ struct mv_crqb_iie *crqb;
+ struct ata_taskfile *tf;
+ unsigned in_index;
+ u32 flags = 0;
+
+ if ((qc->tf.protocol != ATA_PROT_DMA) &&
+ (qc->tf.protocol != ATA_PROT_NCQ))
+ return;
+
+ /* Fill in Gen IIE command request block */
+ if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+ flags |= CRQB_FLAG_READ;
+
+ WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
+ flags |= qc->tag << CRQB_TAG_SHIFT;
+ flags |= qc->tag << CRQB_HOSTQ_SHIFT;
+ flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
+
+ /* get current queue index from software */
+ in_index = pp->req_idx;
+
+ crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
+ crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
+ crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
+ crqb->flags = cpu_to_le32(flags);
+
+ tf = &qc->tf;
+ crqb->ata_cmd[0] = cpu_to_le32(
+ (tf->command << 16) |
+ (tf->feature << 24)
+ );
+ crqb->ata_cmd[1] = cpu_to_le32(
+ (tf->lbal << 0) |
+ (tf->lbam << 8) |
+ (tf->lbah << 16) |
+ (tf->device << 24)
+ );
+ crqb->ata_cmd[2] = cpu_to_le32(
+ (tf->hob_lbal << 0) |
+ (tf->hob_lbam << 8) |
+ (tf->hob_lbah << 16) |
+ (tf->hob_feature << 24)
+ );
+ crqb->ata_cmd[3] = cpu_to_le32(
+ (tf->nsect << 0) |
+ (tf->hob_nsect << 8)
+ );
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+ mv_fill_sg(qc);
+}
+
+/**
+ * mv_qc_issue - Initiate a command to the host
+ * @qc: queued command to start
+ *
+ * This routine simply redirects to the general purpose routine
+ * if command is not DMA. Else, it sanity checks our local
+ * caches of the request producer/consumer indices then enables
+ * DMA and bumps the request producer index.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *port_mmio = mv_ap_base(ap);
+ struct mv_port_priv *pp = ap->private_data;
+ u32 in_index;
+
+ if ((qc->tf.protocol != ATA_PROT_DMA) &&
+ (qc->tf.protocol != ATA_PROT_NCQ)) {
+ static int limit_warnings = 10;
+ /*
+ * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
+ *
+ * Someday, we might implement special polling workarounds
+ * for these, but it all seems rather unnecessary since we
+ * normally use only DMA for commands which transfer more
+ * than a single block of data.
+ *
+ * Much of the time, this could just work regardless.
+ * So for now, just log the incident, and allow the attempt.
+ */
+ if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
+ --limit_warnings;
+ ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
+ ": attempting PIO w/multiple DRQ: "
+ "this may fail due to h/w errata\n");
+ }
+ /*
+ * We're about to send a non-EDMA capable command to the
+ * port. Turn off EDMA so there won't be problems accessing
+ * shadow block, etc registers.
+ */
+ mv_stop_edma(ap);
+ mv_enable_port_irqs(ap, ERR_IRQ);
+ mv_pmp_select(ap, qc->dev->link->pmp);
+ return ata_sff_qc_issue(qc);
+ }
+
+ mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
+
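+ /* advance our software copy of the request queue in-pointer */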
+ pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
+ in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
+
+ /* and write the request queue in-pointer to kick the EDMA to life */
+ writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
+ port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+ return 0;
+}
+
+static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
+{
+ struct mv_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
+
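+ /* with NCQ, several commands may be active at once: no single qc */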
+ if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
+ return NULL;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
+ qc = NULL;
+ return qc;
+}
+
+static void mv_pmp_error_handler(struct ata_port *ap)
+{
+ unsigned int pmp, pmp_map;
+ struct mv_port_priv *pp = ap->private_data;
+
+ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
+ /*
+ * Perform NCQ error analysis on failed PMPs
+ * before we freeze the port entirely.
+ *
+ * The failed PMPs are marked earlier by mv_pmp_eh_prep().
+ */
+ pmp_map = pp->delayed_eh_pmp_map;
+ pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
+ for (pmp = 0; pmp_map != 0; pmp++) {
+ unsigned int this_pmp = (1 << pmp);
+ if (pmp_map & this_pmp) {
+ struct ata_link *link = &ap->pmp_link[pmp];
+ pmp_map &= ~this_pmp;
+ ata_eh_analyze_ncq_error(link);
+ }
+ }
+ ata_port_freeze(ap);
+ }
+ sata_pmp_error_handler(ap);
+}
+
+static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+
+ return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
+}
+
+static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
+{
+ struct ata_eh_info *ehi;
+ unsigned int pmp;
+
+ /*
+ * Initialize EH info for PMPs which saw device errors
+ */
+ ehi = &ap->link.eh_info;
+ for (pmp = 0; pmp_map != 0; pmp++) {
+ unsigned int this_pmp = (1 << pmp);
+ if (pmp_map & this_pmp) {
+ struct ata_link *link = &ap->pmp_link[pmp];
+
+ pmp_map &= ~this_pmp;
+ ehi = &link->eh_info;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "dev err");
+ ehi->err_mask |= AC_ERR_DEV;
+ ehi->action |= ATA_EH_RESET;
+ ata_link_abort(link);
+ }
+ }
+}
+
+static int mv_req_q_empty(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+ u32 in_ptr, out_ptr;
+
+ in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
+ >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+ out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
+ >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+ return (in_ptr == out_ptr); /* 1 == queue_is_empty */
+}
+
+static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
+{
+ struct mv_port_priv *pp = ap->private_data;
+ int failed_links;
+ unsigned int old_map, new_map;
+
+ /*
+ * Device error during FBS+NCQ operation:
+ *
+ * Set a port flag to prevent further I/O being enqueued.
+ * Leave the EDMA running to drain outstanding commands from this port.
+ * Perform the post-mortem/EH only when all responses are complete.
+ * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
+ */
+ if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
+ pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
+ pp->delayed_eh_pmp_map = 0;
+ }
+ old_map = pp->delayed_eh_pmp_map;
+ new_map = old_map | mv_get_err_pmp_map(ap);
+
+ if (old_map != new_map) {
+ pp->delayed_eh_pmp_map = new_map;
+ mv_pmp_eh_prep(ap, new_map & ~old_map);
+ }
+ failed_links = hweight16(new_map);
+
+ ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
+ "failed_links=%d nr_active_links=%d\n",
+ __func__, pp->delayed_eh_pmp_map,
+ ap->qc_active, failed_links,
+ ap->nr_active_links);
+
+ if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
+ mv_process_crpb_entries(ap, pp);
+ mv_stop_edma(ap);
+ mv_eh_freeze(ap);
+ ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
+ return 1; /* handled */
+ }
+ ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
+ return 1; /* handled */
+}
+
+static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
+{
+ /*
+ * Possible future enhancement:
+ *
+ * FBS+non-NCQ operation is not yet implemented.
+ * See related notes in mv_edma_cfg().
+ *
+ * Device error during FBS+non-NCQ operation:
+ *
+ * We need to snapshot the shadow registers for each failed command.
+ * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
+ */
+ return 0; /* not handled */
+}
+
+static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
+{
+ struct mv_port_priv *pp = ap->private_data;
+
+ if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+ return 0; /* EDMA was not active: not handled */
+ if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
+ return 0; /* FBS was not active: not handled */
+
+ if (!(edma_err_cause & EDMA_ERR_DEV))
+ return 0; /* non DEV error: not handled */
+ edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
+ if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
+ return 0; /* other problems: not handled */
+
+ if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
+ /*
+ * EDMA should NOT have self-disabled for this case.
+ * If it did, then something is wrong elsewhere,
+ * and we cannot handle it here.
+ */
+ if (edma_err_cause & EDMA_ERR_SELF_DIS) {
+ ata_port_printk(ap, KERN_WARNING,
+ "%s: err_cause=0x%x pp_flags=0x%x\n",
+ __func__, edma_err_cause, pp->pp_flags);
+ return 0; /* not handled */
+ }
+ return mv_handle_fbs_ncq_dev_err(ap);
+ } else {
+ /*
+ * EDMA should have self-disabled for this case.
+ * If it did not, then something is wrong elsewhere,
+ * and we cannot handle it here.
+ */
+ if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
+ ata_port_printk(ap, KERN_WARNING,
+ "%s: err_cause=0x%x pp_flags=0x%x\n",
+ __func__, edma_err_cause, pp->pp_flags);
+ return 0; /* not handled */
+ }
+ return mv_handle_fbs_non_ncq_dev_err(ap);
+ }
+ return 0; /* not handled */
+}
+
+static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
+{
+ struct ata_eh_info *ehi;
+ char *when = "idle";
+
+ /* mv_port_intr() may call us with a NULL ap; nothing to report then */
+ if (!ap)
+ return;
+ ehi = &ap->link.eh_info;
+ ata_ehi_clear_desc(ehi);
+ if (ap->flags & ATA_FLAG_DISABLED) {
+ when = "disabled";
+ } else if (edma_was_enabled) {
+ when = "EDMA enabled";
+ } else {
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
+ when = "polling";
+ }
+ ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
+ ehi->err_mask |= AC_ERR_OTHER;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+}
+
+/**
+ * mv_err_intr - Handle error interrupts on the port
+ * @ap: ATA channel to manipulate
+ * @qc: affected command (non-NCQ), or NULL
+ *
+ * Most cases require a full reset of the chip's state machine,
+ * which also performs a COMRESET.
+ * Also, if the port disabled DMA, update our cached copy to match.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_err_intr(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+ u32 edma_err_cause, eh_freeze_mask, serr = 0;
+ u32 fis_cause = 0;
+ struct mv_port_priv *pp = ap->private_data;
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ unsigned int action = 0, err_mask = 0;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ata_queued_cmd *qc;
+ int abort = 0;
+
+ /*
+ * Read and clear the SError and err_cause bits.
+ * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
+ * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
+ */
+ sata_scr_read(&ap->link, SCR_ERROR, &serr);
+ sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
+
+ edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+ if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+ fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+ writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+ }
+ writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+ if (edma_err_cause & EDMA_ERR_DEV) {
+ /*
+ * Device errors during FIS-based switching operation
+ * require special handling.
+ */
+ if (mv_handle_dev_err(ap, edma_err_cause))
+ return;
+ }
+
+ qc = mv_get_active_qc(ap);
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
+ edma_err_cause, pp->pp_flags);
+
+ if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+ ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
+ if (fis_cause & SATA_FIS_IRQ_AN) {
+ u32 ec = edma_err_cause &
+ ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
+ sata_async_notification(ap);
+ if (!ec)
+ return; /* Just an AN; no need for the nukes */
+ ata_ehi_push_desc(ehi, "SDB notify");
+ }
+ }
+ /*
+ * All generations share these EDMA error cause bits:
+ */
+ if (edma_err_cause & EDMA_ERR_DEV) {
+ err_mask |= AC_ERR_DEV;
+ action |= ATA_EH_RESET;
+ ata_ehi_push_desc(ehi, "dev error");
+ }
+ if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
+ EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
+ EDMA_ERR_INTRL_PAR)) {
+ err_mask |= AC_ERR_ATA_BUS;
+ action |= ATA_EH_RESET;
+ ata_ehi_push_desc(ehi, "parity error");
+ }
+ if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
+ "dev disconnect" : "dev connect");
+ action |= ATA_EH_RESET;
+ }
+
+ /*
+ * Gen-I has a different SELF_DIS bit,
+ * different FREEZE bits, and no SERR bit:
+ */
+ if (IS_GEN_I(hpriv)) {
+ eh_freeze_mask = EDMA_EH_FREEZE_5;
+ if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+ ata_ehi_push_desc(ehi, "EDMA self-disable");
+ }
+ } else {
+ eh_freeze_mask = EDMA_EH_FREEZE;
+ if (edma_err_cause & EDMA_ERR_SELF_DIS) {
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+ ata_ehi_push_desc(ehi, "EDMA self-disable");
+ }
+ if (edma_err_cause & EDMA_ERR_SERR) {
+ ata_ehi_push_desc(ehi, "SError=%08x", serr);
+ err_mask |= AC_ERR_ATA_BUS;
+ action |= ATA_EH_RESET;
+ }
+ }
+
+ if (!err_mask) {
+ err_mask = AC_ERR_OTHER;
+ action |= ATA_EH_RESET;
+ }
+
+ ehi->serror |= serr;
+ ehi->action |= action;
+
+ if (qc)
+ qc->err_mask |= err_mask;
+ else
+ ehi->err_mask |= err_mask;
+
+ if (err_mask == AC_ERR_DEV) {
+ /*
+ * Cannot do ata_port_freeze() here,
+ * because it would kill PIO access,
+ * which is needed for further diagnosis.
+ */
+ mv_eh_freeze(ap);
+ abort = 1;
+ } else if (edma_err_cause & eh_freeze_mask) {
+ /*
+ * Note to self: ata_port_freeze() calls ata_port_abort()
+ */
+ ata_port_freeze(ap);
+ } else {
+ abort = 1;
+ }
+
+ if (abort) {
+ if (qc)
+ ata_link_abort(qc->dev->link);
+ else
+ ata_port_abort(ap);
+ }
+}
+
+static void mv_process_crpb_response(struct ata_port *ap,
+ struct mv_crpb *response, unsigned int tag, int ncq_enabled)
+{
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+
+ if (qc) {
+ u8 ata_status;
+ u16 edma_status = le16_to_cpu(response->flags);
+ /*
+ * edma_status from a response queue entry:
+ * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
+ * MSB is saved ATA status from command completion.
+ */
+ if (!ncq_enabled) {
+ u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
+ if (err_cause) {
+ /*
+ * Error will be seen/handled by mv_err_intr().
+ * So do nothing at all here.
+ */
+ return;
+ }
+ }
+ ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
+ if (!ac_err_mask(ata_status))
+ ata_qc_complete(qc);
+ /* else: leave it for mv_err_intr() */
+ } else {
+ ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
+ __func__, tag);
+ }
+}
+
+static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ u32 in_index;
+ bool work_done = false;
+ int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
+
+ /* Get the hardware queue position index */
+ in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
+ >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+ /* Process new responses since the last time we looked */
+ while (in_index != pp->resp_idx) {
+ unsigned int tag;
+ struct mv_crpb *response = &pp->crpb[pp->resp_idx];
+
+ pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
+
+ if (IS_GEN_I(hpriv)) {
+ /* 50xx: no NCQ, only one command active at a time */
+ tag = ap->link.active_tag;
+ } else {
+ /* Gen II/IIE: get command tag from CRPB entry */
+ tag = le16_to_cpu(response->id) & 0x1f;
+ }
+ mv_process_crpb_response(ap, response, tag, ncq_enabled);
+ work_done = true;
+ }
+
+ /* Update the software queue position index in hardware */
+ if (work_done)
+ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
+ (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
+ port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+}
+
+static void mv_port_intr(struct ata_port *ap, u32 port_cause)
+{
+ struct mv_port_priv *pp;
+ int edma_was_enabled;
+
+ if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
+ mv_unexpected_intr(ap, 0);
+ return;
+ }
+ /*
+ * Grab a snapshot of the EDMA_EN flag setting,
+ * so that we have a consistent view for this port,
+ * even if one of the routines we call changes it.
+ */
+ pp = ap->private_data;
+ edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
+ /*
+ * Process completed CRPB response(s) before other events.
+ */
+ if (edma_was_enabled && (port_cause & DONE_IRQ)) {
+ mv_process_crpb_entries(ap, pp);
+ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
+ mv_handle_fbs_ncq_dev_err(ap);
+ }
+ /*
+ * Handle chip-reported errors, or continue on to handle PIO.
+ */
+ if (unlikely(port_cause & ERR_IRQ)) {
+ mv_err_intr(ap);
+ } else if (!edma_was_enabled) {
+ struct ata_queued_cmd *qc = mv_get_active_qc(ap);
+ if (qc)
+ ata_sff_host_intr(ap, qc);
+ else
+ mv_unexpected_intr(ap, edma_was_enabled);
+ }
+}
+
+/**
+ * mv_host_intr - Handle all interrupts on the given host controller
+ * @host: host specific structure
+ * @main_irq_cause: Main interrupt cause register for the chip.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->base, *hc_mmio;
+ unsigned int handled = 0, port;
+
+ for (port = 0; port < hpriv->n_ports; port++) {
+ struct ata_port *ap = host->ports[port];
+ unsigned int p, shift, hardport, port_cause;
+
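+ /*
+ * Get this port's bit offset within main_irq_cause (shift)
+ * and its index within its host controller (hardport).
+ */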
+ MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+ /*
+ * Each hc within the host has its own hc_irq_cause register,
+ * where the interrupting ports' bits get ack'd.
+ */
+ if (hardport == 0) { /* first port on this hc ? */
+ u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
+ u32 port_mask, ack_irqs;
+ /*
+ * Skip this entire hc if nothing pending for any ports
+ */
+ if (!hc_cause) {
+ port += MV_PORTS_PER_HC - 1;
+ continue;
+ }
+ /*
+ * We don't need/want to read the hc_irq_cause register,
+ * because doing so hurts performance, and
+ * main_irq_cause already gives us everything we need.
+ *
+ * But we do have to *write* to the hc_irq_cause to ack
+ * the ports that we are handling this time through.
+ *
+ * This requires that we create a bitmap for those
+ * ports which interrupted us, and use that bitmap
+ * to ack (only) those ports via hc_irq_cause.
+ */
+ ack_irqs = 0;
+ for (p = 0; p < MV_PORTS_PER_HC; ++p) {
+ if ((port + p) >= hpriv->n_ports)
+ break;
+ port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
+ if (hc_cause & port_mask)
+ ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
+ }
+ hc_mmio = mv_hc_base_from_port(mmio, port);
+ writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
+ handled = 1;
+ }
+ /*
+ * Handle interrupts signalled for this port:
+ */
+ port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
+ if (port_cause)
+ mv_port_intr(ap, port_cause);
+ }
+ return handled;
+}
+
+static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ struct ata_port *ap;
+ struct ata_queued_cmd *qc;
+ struct ata_eh_info *ehi;
+ unsigned int i, err_mask, printed = 0;
+ u32 err_cause;
+
+ err_cause = readl(mmio + hpriv->irq_cause_ofs);
+
+ dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
+ err_cause);
+
+ DPRINTK("All regs @ PCI error\n");
+ mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
+
+ writelfl(0, mmio + hpriv->irq_cause_ofs);
+
+ for (i = 0; i < host->n_ports; i++) {
+ ap = host->ports[i];
+ if (!ata_link_offline(&ap->link)) {
+ ehi = &ap->link.eh_info;
+ ata_ehi_clear_desc(ehi);
+ if (!printed++)
+ ata_ehi_push_desc(ehi,
+ "PCI err cause 0x%08x", err_cause);
+ err_mask = AC_ERR_HOST_BUS;
+ ehi->action = ATA_EH_RESET;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc)
+ qc->err_mask |= err_mask;
+ else
+ ehi->err_mask |= err_mask;
+
+ ata_port_freeze(ap);
+ }
+ }
+ return 1; /* handled */
+}
+
+/**
+ * mv_interrupt - Main interrupt event handler
+ * @irq: unused
+ * @dev_instance: private data; in this case the host structure
+ *
+ * Read the read only register to determine if any host
+ * controllers have pending interrupts. If so, call lower level
+ * routine to handle. Also check for PCI errors which are only
+ * reported here.
+ *
+ * LOCKING:
+ * This routine holds the host lock while processing pending
+ * interrupts.
+ */
+static irqreturn_t mv_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct mv_host_priv *hpriv = host->private_data;
+ unsigned int handled = 0;
+ u32 main_irq_cause, pending_irqs;
+
+ spin_lock(&host->lock);
+ main_irq_cause = readl(hpriv->main_irq_cause_addr);
+ pending_irqs = main_irq_cause & hpriv->main_irq_mask;
+ /*
+ * Deal with cases where we either have nothing pending, or have read
+ * a bogus register value which can indicate HW removal or PCI fault.
+ */
+ if (pending_irqs && main_irq_cause != 0xffffffffU) {
+ if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
+ handled = mv_pci_error(host, hpriv->base);
+ else
+ handled = mv_host_intr(host, pending_irqs);
+ }
+ spin_unlock(&host->lock);
+ return IRQ_RETVAL(handled);
+}
+
+static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
+{
+ unsigned int ofs;
+
+ switch (sc_reg_in) {
+ case SCR_STATUS:
+ case SCR_ERROR:
+ case SCR_CONTROL:
+ ofs = sc_reg_in * sizeof(u32);
+ break;
+ default:
+ ofs = 0xffffffffU;
+ break;
+ }
+ return ofs;
+}
+
+static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
+{
+ struct mv_host_priv *hpriv = link->ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
+ void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
+ unsigned int ofs = mv5_scr_offset(sc_reg_in);
+
+ if (ofs != 0xffffffffU) {
+ *val = readl(addr + ofs);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
+static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
+{
+ struct mv_host_priv *hpriv = link->ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
+ void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
+ unsigned int ofs = mv5_scr_offset(sc_reg_in);
+
+ if (ofs != 0xffffffffU) {
+ writelfl(val, addr + ofs);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ int early_5080;
+
+ early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
+
+ if (!early_5080) {
+ u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
+ tmp |= (1 << 0);
+ writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
+ }
+
+ mv_reset_pci_bus(host, mmio);
+}
+
+static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
+{
+ writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
+}
+
+static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio)
+{
+ void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
+ u32 tmp;
+
+ tmp = readl(phy_mmio + MV5_PHY_MODE);
+
+ hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
+ hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
+}
+
+static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
+{
+ u32 tmp;
+
+ writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
+
+ /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
+
+ tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
+ tmp |= ~(1 << 0);
+ writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
+}
+
+static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int port)
+{
+ void __iomem *phy_mmio = mv5_phy_base(mmio, port);
+ const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
+ u32 tmp;
+ int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
+
+ if (fix_apm_sq) {
+ tmp = readl(phy_mmio + MV5_LTMODE_OFS);
+ tmp |= (1 << 19);
+ writel(tmp, phy_mmio + MV5_LTMODE_OFS);
+
+ tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
+ tmp &= ~0x3;
+ tmp |= 0x1;
+ writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
+ }
+
+ tmp = readl(phy_mmio + MV5_PHY_MODE);
+ tmp &= ~mask;
+ tmp |= hpriv->signal[port].pre;
+ tmp |= hpriv->signal[port].amps;
+ writel(tmp, phy_mmio + MV5_PHY_MODE);
+}
+
+
+#undef ZERO
+#define ZERO(reg) writel(0, port_mmio + (reg))
+static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int port)
+{
+ void __iomem *port_mmio = mv_port_base(mmio, port);
+
+ mv_reset_channel(hpriv, mmio, port);
+
+ ZERO(0x028); /* command */
+ writel(0x11f, port_mmio + EDMA_CFG_OFS);
+ ZERO(0x004); /* timer */
+ ZERO(0x008); /* irq err cause */
+ ZERO(0x00c); /* irq err mask */
+ ZERO(0x010); /* rq bah */
+ ZERO(0x014); /* rq inp */
+ ZERO(0x018); /* rq outp */
+ ZERO(0x01c); /* respq bah */
+ ZERO(0x024); /* respq outp */
+ ZERO(0x020); /* respq inp */
+ ZERO(0x02c); /* test control */
+ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
+}
+#undef ZERO
+
+#define ZERO(reg) writel(0, hc_mmio + (reg))
+static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int hc)
+{
+ void __iomem *hc_mmio = mv_hc_base(mmio, hc);
+ u32 tmp;
+
+ ZERO(0x00c);
+ ZERO(0x010);
+ ZERO(0x014);
+ ZERO(0x018);
+
+ tmp = readl(hc_mmio + 0x20);
+ tmp &= 0x1c1c1c1c;
+ tmp |= 0x03030303;
+ writel(tmp, hc_mmio + 0x20);
+}
+#undef ZERO
+
+static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int n_hc)
+{
+ unsigned int hc, port;
+
+ for (hc = 0; hc < n_hc; hc++) {
+ for (port = 0; port < MV_PORTS_PER_HC; port++)
+ mv5_reset_hc_port(hpriv, mmio,
+ (hc * MV_PORTS_PER_HC) + port);
+
+ mv5_reset_one_hc(hpriv, mmio, hc);
+ }
+
+ return 0;
+}
+
+#undef ZERO
+#define ZERO(reg) writel(0, mmio + (reg))
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ u32 tmp;
+
+ tmp = readl(mmio + MV_PCI_MODE_OFS);
+ tmp &= 0xff00ffff;
+ writel(tmp, mmio + MV_PCI_MODE_OFS);
+
+ ZERO(MV_PCI_DISC_TIMER);
+ ZERO(MV_PCI_MSI_TRIGGER);
+ writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
+ ZERO(MV_PCI_SERR_MASK);
+ ZERO(hpriv->irq_cause_ofs);
+ ZERO(hpriv->irq_mask_ofs);
+ ZERO(MV_PCI_ERR_LOW_ADDRESS);
+ ZERO(MV_PCI_ERR_HIGH_ADDRESS);
+ ZERO(MV_PCI_ERR_ATTRIBUTE);
+ ZERO(MV_PCI_ERR_COMMAND);
+}
+#undef ZERO
+
+static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
+{
+ u32 tmp;
+
+ mv5_reset_flash(hpriv, mmio);
+
+ tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
+ tmp &= 0x3;
+ tmp |= (1 << 5) | (1 << 6);
+ writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
+}
+
+/**
+ * mv6_reset_hc - Perform the 6xxx global soft reset
+ * @mmio: base address of the HBA
+ *
+ * This routine only applies to 6xxx parts.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int n_hc)
+{
+ void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
+ int i, rc = 0;
+ u32 t;
+
+ /* Follow the procedure defined in the PCI "main command and status
+ * register" table.
+ */
+ t = readl(reg);
+ writel(t | STOP_PCI_MASTER, reg);
+
+ for (i = 0; i < 1000; i++) {
+ udelay(1);
+ t = readl(reg);
+ if (PCI_MASTER_EMPTY & t)
+ break;
+ }
+ if (!(PCI_MASTER_EMPTY & t)) {
+ printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
+ rc = 1;
+ goto done;
+ }
+
+ /* set reset */
+ i = 5;
+ do {
+ writel(t | GLOB_SFT_RST, reg);
+ t = readl(reg);
+ udelay(1);
+ } while (!(GLOB_SFT_RST & t) && (i-- > 0));
+
+ if (!(GLOB_SFT_RST & t)) {
+ printk(KERN_ERR DRV_NAME ": can't set global reset\n");
+ rc = 1;
+ goto done;
+ }
+
+ /* clear reset and *reenable the PCI master* (not mentioned in spec) */
+ i = 5;
+ do {
+ writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
+ t = readl(reg);
+ udelay(1);
+ } while ((GLOB_SFT_RST & t) && (i-- > 0));
+
+ if (GLOB_SFT_RST & t) {
+ printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
+ rc = 1;
+ }
+done:
+ return rc;
+}
+
+static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio)
+{
+ void __iomem *port_mmio;
+ u32 tmp;
+
+ tmp = readl(mmio + MV_RESET_CFG_OFS);
+ if ((tmp & (1 << 0)) == 0) {
+ hpriv->signal[idx].amps = 0x7 << 8;
+ hpriv->signal[idx].pre = 0x1 << 5;
+ return;
+ }
+
+ port_mmio = mv_port_base(mmio, idx);
+ tmp = readl(port_mmio + PHY_MODE2);
+
+ hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
+ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
+}
+
+static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
+{
+ writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
+}
+
+static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int port)
+{
+ void __iomem *port_mmio = mv_port_base(mmio, port);
+
+ u32 hp_flags = hpriv->hp_flags;
+ int fix_phy_mode2 =
+ hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
+ int fix_phy_mode4 =
+ hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
+ u32 m2, m3;
+
+ if (fix_phy_mode2) {
+ m2 = readl(port_mmio + PHY_MODE2);
+ m2 &= ~(1 << 16);
+ m2 |= (1 << 31);
+ writel(m2, port_mmio + PHY_MODE2);
+
+ udelay(200);
+
+ m2 = readl(port_mmio + PHY_MODE2);
+ m2 &= ~((1 << 16) | (1 << 31));
+ writel(m2, port_mmio + PHY_MODE2);
+
+ udelay(200);
+ }
+
+ /*
+ * Gen-II/IIe PHY_MODE3 errata RM#2:
+ * Achieves better receiver noise performance than the h/w default:
+ */
+ m3 = readl(port_mmio + PHY_MODE3);
+ m3 = (m3 & 0x1f) | (0x5555601 << 5);
+
+ /* Guideline 88F5182 (GL# SATA-S11) */
+ if (IS_SOC(hpriv))
+ m3 &= ~0x1c;
+
+ if (fix_phy_mode4) {
+ u32 m4 = readl(port_mmio + PHY_MODE4);
+ /*
+ * Enforce reserved-bit restrictions on GenIIe devices only.
+ * For earlier chipsets, force only the internal config field
+ * (workaround for errata FEr SATA#10 part 1).
+ */
+ if (IS_GEN_IIE(hpriv))
+ m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
+ else
+ m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
+ writel(m4, port_mmio + PHY_MODE4);
+ }
+ /*
+ * Workaround for 60x1-B2 errata SATA#13:
+ * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
+ * so we must always rewrite PHY_MODE3 after PHY_MODE4.
+ */
+ writel(m3, port_mmio + PHY_MODE3);
+
+ /* Revert values of pre-emphasis and signal amps to the saved ones */
+ m2 = readl(port_mmio + PHY_MODE2);
+
+ m2 &= ~MV_M2_PREAMP_MASK;
+ m2 |= hpriv->signal[port].amps;
+ m2 |= hpriv->signal[port].pre;
+ m2 &= ~(1 << 16);
+
+ /* according to mvSata 3.6.1, some IIE values are fixed */
+ if (IS_GEN_IIE(hpriv)) {
+ m2 &= ~0xC30FF01F;
+ m2 |= 0x0000900F;
+ }
+
+ writel(m2, port_mmio + PHY_MODE2);
+}
+
+/* TODO: use the generic LED interface to configure the SATA Presence */
+/* & Activity LEDs on the board */
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ return;
+}
+
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio)
+{
+ void __iomem *port_mmio;
+ u32 tmp;
+
+ port_mmio = mv_port_base(mmio, idx);
+ tmp = readl(port_mmio + PHY_MODE2);
+
+ hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
+ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
+}
+
+#undef ZERO
+#define ZERO(reg) writel(0, port_mmio + (reg))
+static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int port)
+{
+ void __iomem *port_mmio = mv_port_base(mmio, port);
+
+ mv_reset_channel(hpriv, mmio, port);
+
+ ZERO(0x028); /* command */
+ writel(0x101f, port_mmio + EDMA_CFG_OFS);
+ ZERO(0x004); /* timer */
+ ZERO(0x008); /* irq err cause */
+ ZERO(0x00c); /* irq err mask */
+ ZERO(0x010); /* rq bah */
+ ZERO(0x014); /* rq inp */
+ ZERO(0x018); /* rq outp */
+ ZERO(0x01c); /* respq bah */
+ ZERO(0x024); /* respq outp */
+ ZERO(0x020); /* respq inp */
+ ZERO(0x02c); /* test control */
+ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
+}
+
+#undef ZERO
+
+#define ZERO(reg) writel(0, hc_mmio + (reg))
+static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ void __iomem *hc_mmio = mv_hc_base(mmio, 0);
+
+ ZERO(0x00c);
+ ZERO(0x010);
+ ZERO(0x014);
+}
+
+#undef ZERO
+
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int n_hc)
+{
+ unsigned int port;
+
+ for (port = 0; port < hpriv->n_ports; port++)
+ mv_soc_reset_hc_port(hpriv, mmio, port);
+
+ mv_soc_reset_one_hc(hpriv, mmio);
+
+ return 0;
+}
+
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ return;
+}
+
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
+{
+ return;
+}
+
+static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
+{
+ u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
+
+ ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
+ if (want_gen2i)
+ ifcfg |= (1 << 7); /* enable gen2i speed */
+ writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
+}
+
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int port_no)
+{
+ void __iomem *port_mmio = mv_port_base(mmio, port_no);
+
+ /*
+ * The datasheet warns against setting EDMA_RESET when EDMA is active
+ * (but doesn't say what the problem might be). So we first try
+ * to disable the EDMA engine before doing the EDMA_RESET operation.
+ */
+ mv_stop_edma_engine(port_mmio);
+ writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
+
+ if (!IS_GEN_I(hpriv)) {
+ /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
+ mv_setup_ifcfg(port_mmio, 1);
+ }
+ /*
+ * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
+ * link, and physical layers. It resets all SATA interface registers
+ * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
+ */
+ writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
+ udelay(25); /* allow reset propagation */
+ writelfl(0, port_mmio + EDMA_CMD_OFS);
+
+ hpriv->ops->phy_errata(hpriv, mmio, port_no);
+
+ if (IS_GEN_I(hpriv))
+ mdelay(1);
+}
+
+static void mv_pmp_select(struct ata_port *ap, int pmp)
+{
+ if (sata_pmp_supported(ap)) {
+ void __iomem *port_mmio = mv_ap_base(ap);
+ u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
+ int old = reg & 0xf;
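+ /* the low nibble of SATA_IFCTL selects the target PMP port */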
+
+ if (old != pmp) {
+ reg = (reg & ~0xf) | pmp;
+ writelfl(reg, port_mmio + SATA_IFCTL_OFS);
+ }
+ }
+}
+
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ mv_pmp_select(link->ap, sata_srst_pmp(link));
+ return sata_std_hardreset(link, class, deadline);
+}
+
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ mv_pmp_select(link->ap, sata_srst_pmp(link));
+ return ata_sff_softreset(link, class, deadline);
+}
+
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ struct mv_port_priv *pp = ap->private_data;
+ void __iomem *mmio = hpriv->base;
+ int rc, attempts = 0, extra = 0;
+ u32 sstatus;
+ bool online;
+
+ mv_reset_channel(hpriv, mmio, ap->port_no);
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+
+ /* Workaround for errata FEr SATA#10 (part 2) */
+ do {
+ const unsigned long *timing =
+ sata_ehc_deb_timing(&link->eh_context);
+
+ rc = sata_link_hardreset(link, timing, deadline + extra,
+ &online, NULL);
+ rc = online ? -EAGAIN : rc;
+ if (rc)
+ return rc;
+ sata_scr_read(link, SCR_STATUS, &sstatus);
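+ /*
+ * SStatus 0x121: device presence detected (DET=1) at
+ * Gen2 speed (SPD=2), but no Phy communication yet.
+ */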
+ if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
+ /* Force 1.5gb/s link speed and try again */
+ mv_setup_ifcfg(mv_ap_base(ap), 0);
+ if (time_after(jiffies + HZ, deadline))
+ extra = HZ; /* only extend it once, max */
+ }
+ } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
+
+ return rc;
+}
+
+static void mv_eh_freeze(struct ata_port *ap)
+{
+ mv_stop_edma(ap);
+ mv_enable_port_irqs(ap, 0);
+}
+
+static void mv_eh_thaw(struct ata_port *ap)
+{
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ unsigned int port = ap->port_no;
+ unsigned int hardport = mv_hardport_from_port(port);
+ void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
+ void __iomem *port_mmio = mv_ap_base(ap);
+ u32 hc_irq_cause;
+
+ /* clear EDMA errors on this port */
+ writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+ /* clear pending irq events */
+ hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
+ hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
+ writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+
+ mv_enable_port_irqs(ap, ERR_IRQ);
+}
+
+/**
+ * mv_port_init - Perform some early initialization on a single port.
+ * @port: libata data structure storing shadow register addresses
+ * @port_mmio: base address of the port
+ *
+ * Initialize shadow register mmio addresses, clear outstanding
+ * interrupts on the port, and unmask interrupts for the future
+ * start of the port.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
+{
+ void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
+ unsigned serr_ofs;
+
+ /* PIO related setup
+ */
+ port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
+ port->error_addr =
+ port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
+ port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
+ port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
+ port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
+ port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
+ port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
+ port->status_addr =
+ port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
+ /* special case: control/altstatus doesn't have ATA_REG_ address */
+ port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
+
+ /* unused: */
+ port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
+
+ /* Clear any currently outstanding port interrupt conditions */
+ serr_ofs = mv_scr_offset(SCR_ERROR);
+ writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
+ writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+ /* unmask all non-transient EDMA error interrupts */
+ writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
+
+ VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
+ readl(port_mmio + EDMA_CFG_OFS),
+ readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
+ readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
+}
+
+static unsigned int mv_in_pcix_mode(struct ata_host *host)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->base;
+ u32 reg;
+
+ if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
+ return 0; /* not PCI-X capable */
+ reg = readl(mmio + MV_PCI_MODE_OFS);
+ if ((reg & MV_PCI_MODE_MASK) == 0)
+ return 0; /* conventional PCI mode */
+ return 1; /* chip is in PCI-X mode */
+}
+
+static int mv_pci_cut_through_okay(struct ata_host *host)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->base;
+ u32 reg;
+
+ if (!mv_in_pcix_mode(host)) {
+ reg = readl(mmio + PCI_COMMAND_OFS);
+ if (reg & PCI_COMMAND_MRDTRIG)
+ return 0; /* not okay */
+ }
+ return 1; /* okay */
+}
+
+static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ struct mv_host_priv *hpriv = host->private_data;
+ u32 hp_flags = hpriv->hp_flags;
+
+ switch (board_idx) {
+ case chip_5080:
+ hpriv->ops = &mv5xxx_ops;
+ hp_flags |= MV_HP_GEN_I;
+
+ switch (pdev->revision) {
+ case 0x1:
+ hp_flags |= MV_HP_ERRATA_50XXB0;
+ break;
+ case 0x3:
+ hp_flags |= MV_HP_ERRATA_50XXB2;
+ break;
+ default:
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "Applying 50XXB2 workarounds to unknown rev\n");
+ hp_flags |= MV_HP_ERRATA_50XXB2;
+ break;
+ }
+ break;
+
+ case chip_504x:
+ case chip_508x:
+ hpriv->ops = &mv5xxx_ops;
+ hp_flags |= MV_HP_GEN_I;
+
+ switch (pdev->revision) {
+ case 0x0:
+ hp_flags |= MV_HP_ERRATA_50XXB0;
+ break;
+ case 0x3:
+ hp_flags |= MV_HP_ERRATA_50XXB2;
+ break;
+ default:
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "Applying B2 workarounds to unknown rev\n");
+ hp_flags |= MV_HP_ERRATA_50XXB2;
+ break;
+ }
+ break;
+
+ case chip_604x:
+ case chip_608x:
+ hpriv->ops = &mv6xxx_ops;
+ hp_flags |= MV_HP_GEN_II;
+
+ switch (pdev->revision) {
+ case 0x7:
+ hp_flags |= MV_HP_ERRATA_60X1B2;
+ break;
+ case 0x9:
+ hp_flags |= MV_HP_ERRATA_60X1C0;
+ break;
+ default:
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "Applying B2 workarounds to unknown rev\n");
+ hp_flags |= MV_HP_ERRATA_60X1B2;
+ break;
+ }
+ break;
+
+ case chip_7042:
+ hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
+ if (pdev->vendor == PCI_VENDOR_ID_TTI &&
+ (pdev->device == 0x2300 || pdev->device == 0x2310))
+ {
+ /*
+ * Highpoint RocketRAID PCIe 23xx series cards:
+ *
+ * Unconfigured drives are treated as "Legacy"
+ * by the BIOS, and it overwrites sector 8 with
+ * a "Lgcy" metadata block prior to Linux boot.
+ *
+ * Configured drives (RAID or JBOD) leave sector 8
+ * alone, but instead overwrite a high numbered
+ * sector for the RAID metadata. This sector can
+ * be determined exactly, by truncating the physical
+ * drive capacity to a nice even GB value.
+ *
+ * RAID metadata is at: (dev->n_sectors & ~0xfffff)
+ *
+ * Warn the user, lest they think we're just buggy.
+ */
+ printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
+ " BIOS CORRUPTS DATA on all attached drives,"
+ " regardless of if/how they are configured."
+ " BEWARE!\n");
+ printk(KERN_WARNING DRV_NAME ": For data safety, do not"
+ " use sectors 8-9 on \"Legacy\" drives,"
+ " and avoid the final two gigabytes on"
+ " all RocketRAID BIOS initialized drives.\n");
+ }
+ /* fall through */
+ case chip_6042:
+ hpriv->ops = &mv6xxx_ops;
+ hp_flags |= MV_HP_GEN_IIE;
+ if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
+ hp_flags |= MV_HP_CUT_THROUGH;
+
+ switch (pdev->revision) {
+ case 0x2: /* Rev.B0: the first/only public release */
+ hp_flags |= MV_HP_ERRATA_60X1C0;
+ break;
+ default:
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "Applying 60X1C0 workarounds to unknown rev\n");
+ hp_flags |= MV_HP_ERRATA_60X1C0;
+ break;
+ }
+ break;
+ case chip_soc:
+ hpriv->ops = &mv_soc_ops;
+ hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
+ MV_HP_ERRATA_60X1C0;
+ break;
+
+ default:
+ dev_printk(KERN_ERR, host->dev,
+ "BUG: invalid board index %u\n", board_idx);
+ return 1;
+ }
+
+ hpriv->hp_flags = hp_flags;
+ if (hp_flags & MV_HP_PCIE) {
+ hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
+ hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
+ hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
+ } else {
+ hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
+ hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
+ hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
+ }
+
+ return 0;
+}
+
+/**
+ * mv_init_host - Perform some early initialization of the host.
+ * @host: ATA host to initialize
+ * @board_idx: controller index
+ *
+ * If possible, do an early global reset of the host. Then do
+ * our port init and clear/unmask all/relevant host interrupts.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_init_host(struct ata_host *host, unsigned int board_idx)
+{
+ int rc = 0, n_hc, port, hc;
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->base;
+
+ rc = mv_chip_id(host, board_idx);
+ if (rc)
+ goto done;
+
+ if (IS_SOC(hpriv)) {
+ hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
+ hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
+ } else {
+ hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
+ hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
+ }
+
+ /* global interrupt mask: 0 == mask everything */
+ mv_set_main_irq_mask(host, ~0, 0);
+
+ n_hc = mv_get_hc_count(host->ports[0]->flags);
+
+ for (port = 0; port < host->n_ports; port++)
+ hpriv->ops->read_preamp(hpriv, port, mmio);
+
+ rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
+ if (rc)
+ goto done;
+
+ hpriv->ops->reset_flash(hpriv, mmio);
+ hpriv->ops->reset_bus(host, mmio);
+ hpriv->ops->enable_leds(hpriv, mmio);
+
+ for (port = 0; port < host->n_ports; port++) {
+ struct ata_port *ap = host->ports[port];
+ void __iomem *port_mmio = mv_port_base(mmio, port);
+
+ mv_port_init(&ap->ioaddr, port_mmio);
+
+#ifdef CONFIG_PCI
+ if (!IS_SOC(hpriv)) {
+ unsigned int offset = port_mmio - mmio;
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+ }
+#endif
+ }
+
+ for (hc = 0; hc < n_hc; hc++) {
+ void __iomem *hc_mmio = mv_hc_base(mmio, hc);
+
+ VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
+ "(before clear)=0x%08x\n", hc,
+ readl(hc_mmio + HC_CFG_OFS),
+ readl(hc_mmio + HC_IRQ_CAUSE_OFS));
+
+ /* Clear any currently outstanding hc interrupt conditions */
+ writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
+ }
+
+ if (!IS_SOC(hpriv)) {
+ /* Clear any currently outstanding host interrupt conditions */
+ writelfl(0, mmio + hpriv->irq_cause_ofs);
+
+ /* and unmask interrupt generation for host regs */
+ writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
+
+ /*
+ * enable only global host interrupts for now.
+ * The per-port interrupts get done later as ports are set up.
+ */
+ mv_set_main_irq_mask(host, 0, PCI_ERR);
+ }
+done:
+ return rc;
+}
+
+static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
+{
+ hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
+ MV_CRQB_Q_SZ, 0);
+ if (!hpriv->crqb_pool)
+ return -ENOMEM;
+
+ hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
+ MV_CRPB_Q_SZ, 0);
+ if (!hpriv->crpb_pool)
+ return -ENOMEM;
+
+ hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
+ MV_SG_TBL_SZ, 0);
+ if (!hpriv->sg_tbl_pool)
+ return -ENOMEM;
+
+ return 0;
+}
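+
+/*
+ * Note: the dmam_pool_create() calls above are device-managed (devres)
+ * allocations, so any pools created before a failed allocation are
+ * released automatically when the driver detaches.  That is why the
+ * early -ENOMEM returns need no explicit unwinding.
+ */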
+
+static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
+ struct mbus_dram_target_info *dram)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ writel(0, hpriv->base + WINDOW_CTRL(i));
+ writel(0, hpriv->base + WINDOW_BASE(i));
+ }
+
+ for (i = 0; i < dram->num_cs; i++) {
+ struct mbus_dram_window *cs = dram->cs + i;
+
+ writel(((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ hpriv->base + WINDOW_CTRL(i));
+ writel(cs->base, hpriv->base + WINDOW_BASE(i));
+ }
+}
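+
+/*
+ * WINDOW_CTRL encoding, as programmed by the writel() above (field
+ * layout derived from the expression; the example values are made up):
+ *
+ *	bits 31:16	(cs->size - 1), upper 16 bits
+ *	bits 15:8	cs->mbus_attr
+ *	bits  7:4	dram->mbus_dram_target_id
+ *	bit   0		window enable
+ *
+ * e.g. a 256 MiB chip select with attr 0x0e on target 0 yields
+ * (0x0fff0000 | (0x0e << 8) | (0 << 4) | 1) == 0x0fff0e01.
+ */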
+
+/**
+ * mv_platform_probe - handle a positive probe of an SoC Marvell
+ * host
+ * @pdev: platform device found
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_platform_probe(struct platform_device *pdev)
+{
+ static int printed_version;
+ const struct mv_sata_platform_data *mv_platform_data;
+ const struct ata_port_info *ppi[] =
+ { &mv_port_info[chip_soc], NULL };
+ struct ata_host *host;
+ struct mv_host_priv *hpriv;
+ struct resource *res;
+ int n_ports, rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /*
+ * Simple resource validation ..
+ */
+ if (unlikely(pdev->num_resources != 2)) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the register base first
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+
+ /* allocate host */
+ mv_platform_data = pdev->dev.platform_data;
+ n_ports = mv_platform_data->n_ports;
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+
+ if (!host || !hpriv)
+ return -ENOMEM;
+ host->private_data = hpriv;
+ hpriv->n_ports = n_ports;
+
+ host->iomap = NULL;
+	hpriv->base = devm_ioremap(&pdev->dev, res->start,
+				   resource_size(res));
+	if (!hpriv->base)
+		return -ENOMEM;
+	hpriv->base -= MV_SATAHC0_REG_BASE;
+
+ /*
+ * (Re-)program MBUS remapping windows if we are asked to.
+ */
+ if (mv_platform_data->dram != NULL)
+ mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+
+ rc = mv_create_dma_pools(hpriv, &pdev->dev);
+ if (rc)
+ return rc;
+
+ /* initialize adapter */
+ rc = mv_init_host(host, chip_soc);
+ if (rc)
+ return rc;
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
+ host->n_ports);
+
+ return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
+ IRQF_SHARED, &mv6_sht);
+}
+
+/**
+ * mv_platform_remove - unplug a platform interface
+ * @pdev: platform device
+ *
+ * A platform bus SATA device has been unplugged. Perform the needed
+ * cleanup. Also called on module unload for any active devices.
+ */
+static int __devexit mv_platform_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+ return 0;
+}
+
+static struct platform_driver mv_platform_driver = {
+ .probe = mv_platform_probe,
+ .remove = __devexit_p(mv_platform_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+
+#ifdef CONFIG_PCI
+static int mv_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+
+static struct pci_driver mv_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = mv_pci_tbl,
+ .probe = mv_pci_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+/*
+ * module options
+ */
+static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
+
+
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+ int rc;
+
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "64-bit DMA enable failed\n");
+ return rc;
+ }
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
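+
+/*
+ * The fallback ladder above, summarized: prefer 64-bit masks for both
+ * streaming and coherent DMA; if only the 64-bit coherent mask fails,
+ * keep 64-bit streaming with a 32-bit coherent mask; if 64-bit
+ * streaming fails, fall back to 32-bit for both.  Only when even the
+ * 32-bit masks fail does the probe abort.
+ */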
+
+/**
+ * mv_print_info - Dump key info to kernel log for perusal.
+ * @host: ATA host to print info about
+ *
+ * FIXME: complete this.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_print_info(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ struct mv_host_priv *hpriv = host->private_data;
+ u8 scc;
+ const char *scc_s, *gen;
+
+	/* Read the PCI class code; it tells us whether the chip presents
+	 * itself as a SCSI or RAID controller, which is reported below.
+	 */
+ pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
+ if (scc == 0)
+ scc_s = "SCSI";
+ else if (scc == 0x01)
+ scc_s = "RAID";
+ else
+ scc_s = "?";
+
+ if (IS_GEN_I(hpriv))
+ gen = "I";
+ else if (IS_GEN_II(hpriv))
+ gen = "II";
+ else if (IS_GEN_IIE(hpriv))
+ gen = "IIE";
+ else
+ gen = "?";
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
+ gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
+ scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
+}
+
+/**
+ * mv_pci_init_one - handle a positive probe of a PCI Marvell host
+ * @pdev: PCI device found
+ * @ent: PCI device ID entry for the matched host
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ unsigned int board_idx = (unsigned int)ent->driver_data;
+ const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
+ struct ata_host *host;
+ struct mv_host_priv *hpriv;
+ int n_ports, rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* allocate host */
+ n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!host || !hpriv)
+ return -ENOMEM;
+ host->private_data = hpriv;
+ hpriv->n_ports = n_ports;
+
+ /* acquire resources */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+ hpriv->base = host->iomap[MV_PRIMARY_BAR];
+
+ rc = pci_go_64(pdev);
+ if (rc)
+ return rc;
+
+ rc = mv_create_dma_pools(hpriv, &pdev->dev);
+ if (rc)
+ return rc;
+
+ /* initialize adapter */
+ rc = mv_init_host(host, board_idx);
+ if (rc)
+ return rc;
+
+ /* Enable interrupts */
+ if (msi && pci_enable_msi(pdev))
+ pci_intx(pdev, 1);
+
+ mv_dump_pci_cfg(pdev, 0x68);
+ mv_print_info(host);
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+ return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
+ IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
+}
+#endif
+
+static int mv_platform_probe(struct platform_device *pdev);
+static int __devexit mv_platform_remove(struct platform_device *pdev);
+
+static int __init mv_init(void)
+{
+ int rc = -ENODEV;
+#ifdef CONFIG_PCI
+ rc = pci_register_driver(&mv_pci_driver);
+ if (rc < 0)
+ return rc;
+#endif
+ rc = platform_driver_register(&mv_platform_driver);
+
+#ifdef CONFIG_PCI
+ if (rc < 0)
+ pci_unregister_driver(&mv_pci_driver);
+#endif
+ return rc;
+}
+
+static void __exit mv_exit(void)
+{
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&mv_pci_driver);
+#endif
+ platform_driver_unregister(&mv_platform_driver);
+}
+
+MODULE_AUTHOR("Brett Russ");
+MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
+
+#ifdef CONFIG_PCI
+module_param(msi, int, 0444);
+MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
+#endif
+
+module_init(mv_init);
+module_exit(mv_exit);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
new file mode 100644
index 0000000..55a8eed
--- /dev/null
+++ b/drivers/ata/sata_nv.c
@@ -0,0 +1,2529 @@
+/*
+ * sata_nv.c - NVIDIA nForce SATA
+ *
+ * Copyright 2004 NVIDIA Corp. All rights reserved.
+ * Copyright 2004 Andrew Chew
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * No hardware documentation available outside of NVIDIA.
+ * This driver programs the NVIDIA SATA controller in a similar
+ * fashion as with other PCI IDE BMDMA controllers, with a few
+ * NV-specific details such as register offsets, SATA phy location,
+ * hotplug info, etc.
+ *
+ * CK804/MCP04 controllers support an alternate programming interface
+ * similar to the ADMA specification (with some modifications).
+ * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
+ * sent through the legacy interface.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_nv"
+#define DRV_VERSION "3.5"
+
+#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
+
+enum {
+ NV_MMIO_BAR = 5,
+
+ NV_PORTS = 2,
+ NV_PIO_MASK = 0x1f,
+ NV_MWDMA_MASK = 0x07,
+ NV_UDMA_MASK = 0x7f,
+ NV_PORT0_SCR_REG_OFFSET = 0x00,
+ NV_PORT1_SCR_REG_OFFSET = 0x40,
+
+ /* INT_STATUS/ENABLE */
+ NV_INT_STATUS = 0x10,
+ NV_INT_ENABLE = 0x11,
+ NV_INT_STATUS_CK804 = 0x440,
+ NV_INT_ENABLE_CK804 = 0x441,
+
+ /* INT_STATUS/ENABLE bits */
+ NV_INT_DEV = 0x01,
+ NV_INT_PM = 0x02,
+ NV_INT_ADDED = 0x04,
+ NV_INT_REMOVED = 0x08,
+
+ NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
+
+ NV_INT_ALL = 0x0f,
+ NV_INT_MASK = NV_INT_DEV |
+ NV_INT_ADDED | NV_INT_REMOVED,
+
+ /* INT_CONFIG */
+ NV_INT_CONFIG = 0x12,
+	NV_INT_CONFIG_METHD	= 0x01, /* 0 = INT, 1 = SMI */
+
+	/* For PCI config register 20 */
+ NV_MCP_SATA_CFG_20 = 0x50,
+ NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
+ NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
+ NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
+
+ NV_ADMA_MAX_CPBS = 32,
+ NV_ADMA_CPB_SZ = 128,
+ NV_ADMA_APRD_SZ = 16,
+ NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
+ NV_ADMA_APRD_SZ,
+ NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
+ NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
+ NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
+ (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
+
+ /* BAR5 offset to ADMA general registers */
+ NV_ADMA_GEN = 0x400,
+ NV_ADMA_GEN_CTL = 0x00,
+ NV_ADMA_NOTIFIER_CLEAR = 0x30,
+
+ /* BAR5 offset to ADMA ports */
+ NV_ADMA_PORT = 0x480,
+
+ /* size of ADMA port register space */
+ NV_ADMA_PORT_SIZE = 0x100,
+
+ /* ADMA port registers */
+ NV_ADMA_CTL = 0x40,
+ NV_ADMA_CPB_COUNT = 0x42,
+ NV_ADMA_NEXT_CPB_IDX = 0x43,
+ NV_ADMA_STAT = 0x44,
+ NV_ADMA_CPB_BASE_LOW = 0x48,
+ NV_ADMA_CPB_BASE_HIGH = 0x4C,
+ NV_ADMA_APPEND = 0x50,
+ NV_ADMA_NOTIFIER = 0x68,
+ NV_ADMA_NOTIFIER_ERROR = 0x6C,
+
+ /* NV_ADMA_CTL register bits */
+ NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
+ NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
+ NV_ADMA_CTL_GO = (1 << 7),
+ NV_ADMA_CTL_AIEN = (1 << 8),
+ NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
+ NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
+
+ /* CPB response flag bits */
+ NV_CPB_RESP_DONE = (1 << 0),
+ NV_CPB_RESP_ATA_ERR = (1 << 3),
+ NV_CPB_RESP_CMD_ERR = (1 << 4),
+ NV_CPB_RESP_CPB_ERR = (1 << 7),
+
+ /* CPB control flag bits */
+ NV_CPB_CTL_CPB_VALID = (1 << 0),
+ NV_CPB_CTL_QUEUE = (1 << 1),
+ NV_CPB_CTL_APRD_VALID = (1 << 2),
+ NV_CPB_CTL_IEN = (1 << 3),
+ NV_CPB_CTL_FPDMA = (1 << 4),
+
+ /* APRD flags */
+ NV_APRD_WRITE = (1 << 1),
+ NV_APRD_END = (1 << 2),
+ NV_APRD_CONT = (1 << 3),
+
+ /* NV_ADMA_STAT flags */
+ NV_ADMA_STAT_TIMEOUT = (1 << 0),
+ NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
+ NV_ADMA_STAT_HOTPLUG = (1 << 2),
+ NV_ADMA_STAT_CPBERR = (1 << 4),
+ NV_ADMA_STAT_SERROR = (1 << 5),
+ NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
+ NV_ADMA_STAT_IDLE = (1 << 8),
+ NV_ADMA_STAT_LEGACY = (1 << 9),
+ NV_ADMA_STAT_STOPPED = (1 << 10),
+ NV_ADMA_STAT_DONE = (1 << 12),
+ NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
+ NV_ADMA_STAT_TIMEOUT,
+
+ /* port flags */
+ NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
+ NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
+
+ /* MCP55 reg offset */
+ NV_CTL_MCP55 = 0x400,
+ NV_INT_STATUS_MCP55 = 0x440,
+ NV_INT_ENABLE_MCP55 = 0x444,
+ NV_NCQ_REG_MCP55 = 0x448,
+
+ /* MCP55 */
+ NV_INT_ALL_MCP55 = 0xffff,
+ NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
+ NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
+
+ /* SWNCQ ENABLE BITS*/
+ NV_CTL_PRI_SWNCQ = 0x02,
+ NV_CTL_SEC_SWNCQ = 0x04,
+
+ /* SW NCQ status bits*/
+ NV_SWNCQ_IRQ_DEV = (1 << 0),
+ NV_SWNCQ_IRQ_PM = (1 << 1),
+ NV_SWNCQ_IRQ_ADDED = (1 << 2),
+ NV_SWNCQ_IRQ_REMOVED = (1 << 3),
+
+ NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
+ NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
+ NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
+ NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
+
+ NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
+ NV_SWNCQ_IRQ_REMOVED,
+
+};
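+
+/*
+ * Worked sizing from the constants above: NV_ADMA_SGTBL_LEN =
+ * (1024 - 128) / 16 = 56 external APRDs per tag (56 * 16 = 896 bytes),
+ * so one CPB plus its spill area is exactly 128 + 896 = 1024 bytes and
+ * NV_ADMA_PORT_PRIV_DMA_SZ = 32 * 1024 = 32 KiB per port.
+ * NV_ADMA_SGTBL_TOTAL_LEN = 56 + 5 = 61 also counts the five APRDs
+ * embedded in the CPB itself.
+ */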
+
+/* ADMA Physical Region Descriptor - one SG segment */
+struct nv_adma_prd {
+ __le64 addr;
+ __le32 len;
+ u8 flags;
+ u8 packet_len;
+ __le16 reserved;
+};
+
+enum nv_adma_regbits {
+ CMDEND = (1 << 15), /* end of command list */
+ WNB = (1 << 14), /* wait-not-BSY */
+ IGN = (1 << 13), /* ignore this entry */
+ CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
+ DA2 = (1 << (2 + 8)),
+ DA1 = (1 << (1 + 8)),
+ DA0 = (1 << (0 + 8)),
+};
+
+/* ADMA Command Parameter Block.
+ * The first 5 SG segments are stored inside the Command Parameter Block
+ * itself.  If there are more than 5 segments, the remainder are stored
+ * in a separate memory area indicated by next_aprd. */
+struct nv_adma_cpb {
+ u8 resp_flags; /* 0 */
+ u8 reserved1; /* 1 */
+ u8 ctl_flags; /* 2 */
+	/* len is the length of the taskfile in 64-bit words */
+ u8 len; /* 3 */
+ u8 tag; /* 4 */
+ u8 next_cpb_idx; /* 5 */
+ __le16 reserved2; /* 6-7 */
+ __le16 tf[12]; /* 8-31 */
+ struct nv_adma_prd aprd[5]; /* 32-111 */
+ __le64 next_aprd; /* 112-119 */
+ __le64 reserved3; /* 120-127 */
+};
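+
+/*
+ * Layout check: the byte offsets in the comments above add up to
+ * exactly NV_ADMA_CPB_SZ: 8 bytes of header, 24 bytes of taskfile
+ * (12 * __le16), 80 bytes of inline APRDs (5 * 16), an 8-byte
+ * next_aprd pointer and 8 reserved bytes == 128.  A command with more
+ * than five scatter/gather segments keeps its first five here and
+ * chains the rest through next_aprd.
+ */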
+
+
+struct nv_adma_port_priv {
+ struct nv_adma_cpb *cpb;
+ dma_addr_t cpb_dma;
+ struct nv_adma_prd *aprd;
+ dma_addr_t aprd_dma;
+ void __iomem *ctl_block;
+ void __iomem *gen_block;
+ void __iomem *notifier_clear_block;
+ u64 adma_dma_mask;
+ u8 flags;
+ int last_issue_ncq;
+};
+
+struct nv_host_priv {
+ unsigned long type;
+};
+
+struct defer_queue {
+ u32 defer_bits;
+ unsigned int head;
+ unsigned int tail;
+ unsigned int tag[ATA_MAX_QUEUE];
+};
+
+enum ncq_saw_flag_list {
+ ncq_saw_d2h = (1U << 0),
+ ncq_saw_dmas = (1U << 1),
+ ncq_saw_sdb = (1U << 2),
+ ncq_saw_backout = (1U << 3),
+};
+
+struct nv_swncq_port_priv {
+ struct ata_prd *prd; /* our SG list */
+ dma_addr_t prd_dma; /* and its DMA mapping */
+ void __iomem *sactive_block;
+ void __iomem *irq_block;
+ void __iomem *tag_block;
+ u32 qc_active;
+
+ unsigned int last_issue_tag;
+
+	/* FIFO circular queue to store deferred commands */
+ struct defer_queue defer_queue;
+
+ /* for NCQ interrupt analysis */
+ u32 dhfis_bits;
+ u32 dmafis_bits;
+ u32 sdbfis_bits;
+
+ unsigned int ncq_flags;
+};
+
+
+#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
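+
+/*
+ * Example: for port 0 the macro above tests bit 19 of the general
+ * control/status word; for port 1 it tests bit 19 + 12 = 31.
+ */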
+
+static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM
+static int nv_pci_device_resume(struct pci_dev *pdev);
+#endif
+static void nv_ck804_host_stop(struct ata_host *host);
+static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
+static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
+static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
+static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+
+static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void nv_nf2_freeze(struct ata_port *ap);
+static void nv_nf2_thaw(struct ata_port *ap);
+static void nv_ck804_freeze(struct ata_port *ap);
+static void nv_ck804_thaw(struct ata_port *ap);
+static int nv_adma_slave_config(struct scsi_device *sdev);
+static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
+static void nv_adma_irq_clear(struct ata_port *ap);
+static int nv_adma_port_start(struct ata_port *ap);
+static void nv_adma_port_stop(struct ata_port *ap);
+#ifdef CONFIG_PM
+static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
+static int nv_adma_port_resume(struct ata_port *ap);
+#endif
+static void nv_adma_freeze(struct ata_port *ap);
+static void nv_adma_thaw(struct ata_port *ap);
+static void nv_adma_error_handler(struct ata_port *ap);
+static void nv_adma_host_stop(struct ata_host *host);
+static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
+static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+
+static void nv_mcp55_thaw(struct ata_port *ap);
+static void nv_mcp55_freeze(struct ata_port *ap);
+static void nv_swncq_error_handler(struct ata_port *ap);
+static int nv_swncq_slave_config(struct scsi_device *sdev);
+static int nv_swncq_port_start(struct ata_port *ap);
+static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
+static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
+static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
+static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
+#ifdef CONFIG_PM
+static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
+static int nv_swncq_port_resume(struct ata_port *ap);
+#endif
+
+enum nv_host_type
+{
+ GENERIC,
+ NFORCE2,
+ NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
+ CK804,
+ ADMA,
+ MCP5x,
+ SWNCQ,
+};
+
+static const struct pci_device_id nv_pci_tbl[] = {
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver nv_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = nv_pci_tbl,
+ .probe = nv_init_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = nv_pci_device_resume,
+#endif
+ .remove = ata_pci_remove_one,
+};
+
+static struct scsi_host_template nv_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct scsi_host_template nv_adma_sht = {
+ ATA_NCQ_SHT(DRV_NAME),
+ .can_queue = NV_ADMA_MAX_CPBS,
+ .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
+ .dma_boundary = NV_ADMA_DMA_BOUNDARY,
+ .slave_configure = nv_adma_slave_config,
+};
+
+static struct scsi_host_template nv_swncq_sht = {
+ ATA_NCQ_SHT(DRV_NAME),
+ .can_queue = ATA_MAX_QUEUE,
+ .sg_tablesize = LIBATA_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = nv_swncq_slave_config,
+};
+
+static struct ata_port_operations nv_common_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .scr_read = nv_scr_read,
+ .scr_write = nv_scr_write,
+};
+
+/* OSDL bz11195 reports that the link doesn't come online after
+ * hardreset on generic nv's, and there have been several other
+ * similar reports on linux-ide. Disable hardreset for generic nv's.
+ */
+static struct ata_port_operations nv_generic_ops = {
+ .inherits = &nv_common_ops,
+ .hardreset = ATA_OP_NULL,
+};
+
+/* nf2 is rife with hardreset-related problems.
+ *
+ * kernel bz#3352 reports nf2/3 controllers can't determine device
+ * signature reliably. The following thread reports detection failure
+ * on cold boot with the standard debouncing timing.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/34098
+ *
+ * And bz#12176 reports that hardreset simply doesn't work on nf2.
+ * Give up on it and just don't do hardreset.
+ */
+static struct ata_port_operations nv_nf2_ops = {
+ .inherits = &nv_generic_ops,
+ .freeze = nv_nf2_freeze,
+ .thaw = nv_nf2_thaw,
+};
+
+/* For initial probing after boot and hot plugging, hardreset mostly
+ * works fine on CK804.  Curiously though, reprobing the initial port by
+ * rescanning or rmmod/insmod fails to acquire the initial D2H Reg FIS
+ * in a somewhat nondeterministic way.  Use noclassify hardreset.
+ */
+static struct ata_port_operations nv_ck804_ops = {
+ .inherits = &nv_common_ops,
+ .freeze = nv_ck804_freeze,
+ .thaw = nv_ck804_thaw,
+ .hardreset = nv_noclassify_hardreset,
+ .host_stop = nv_ck804_host_stop,
+};
+
+static struct ata_port_operations nv_adma_ops = {
+ .inherits = &nv_ck804_ops,
+
+ .check_atapi_dma = nv_adma_check_atapi_dma,
+ .sff_tf_read = nv_adma_tf_read,
+ .qc_defer = ata_std_qc_defer,
+ .qc_prep = nv_adma_qc_prep,
+ .qc_issue = nv_adma_qc_issue,
+ .sff_irq_clear = nv_adma_irq_clear,
+
+ .freeze = nv_adma_freeze,
+ .thaw = nv_adma_thaw,
+ .error_handler = nv_adma_error_handler,
+ .post_internal_cmd = nv_adma_post_internal_cmd,
+
+ .port_start = nv_adma_port_start,
+ .port_stop = nv_adma_port_stop,
+#ifdef CONFIG_PM
+ .port_suspend = nv_adma_port_suspend,
+ .port_resume = nv_adma_port_resume,
+#endif
+ .host_stop = nv_adma_host_stop,
+};
+
+/* Kernel bz#12351 reports that when SWNCQ is enabled, hardreset is
+ * required for hotplug to work, yet hardreset can't report the proper
+ * signature, which suggests that mcp5x is closer to nf2 as far as
+ * reset quirkiness is concerned. Define separate ops for mcp5x with
+ * nv_noclassify_hardreset().
+ */
+static struct ata_port_operations nv_mcp5x_ops = {
+ .inherits = &nv_common_ops,
+ .hardreset = nv_noclassify_hardreset,
+};
+
+static struct ata_port_operations nv_swncq_ops = {
+ .inherits = &nv_mcp5x_ops,
+
+ .qc_defer = ata_std_qc_defer,
+ .qc_prep = nv_swncq_qc_prep,
+ .qc_issue = nv_swncq_qc_issue,
+
+ .freeze = nv_mcp55_freeze,
+ .thaw = nv_mcp55_thaw,
+ .error_handler = nv_swncq_error_handler,
+
+#ifdef CONFIG_PM
+ .port_suspend = nv_swncq_port_suspend,
+ .port_resume = nv_swncq_port_resume,
+#endif
+ .port_start = nv_swncq_port_start,
+};
+
+struct nv_pi_priv {
+ irq_handler_t irq_handler;
+ struct scsi_host_template *sht;
+};
+
+#define NV_PI_PRIV(_irq_handler, _sht) \
+ &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
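+
+/*
+ * NV_PI_PRIV expands to the address of a C99 compound literal; at file
+ * scope such a literal has static storage duration, so the pointer
+ * stays valid.  Sketch of the expansion (illustrative):
+ *
+ *	.private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht);
+ *
+ * becomes
+ *
+ *	.private_data = &(struct nv_pi_priv){
+ *		.irq_handler	= nv_generic_interrupt,
+ *		.sht		= &nv_sht,
+ *	};
+ */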
+
+static const struct ata_port_info nv_port_info[] = {
+ /* generic */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .pio_mask = NV_PIO_MASK,
+ .mwdma_mask = NV_MWDMA_MASK,
+ .udma_mask = NV_UDMA_MASK,
+ .port_ops = &nv_generic_ops,
+ .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
+ },
+ /* nforce2/3 */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .pio_mask = NV_PIO_MASK,
+ .mwdma_mask = NV_MWDMA_MASK,
+ .udma_mask = NV_UDMA_MASK,
+ .port_ops = &nv_nf2_ops,
+ .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
+ },
+ /* ck804 */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .pio_mask = NV_PIO_MASK,
+ .mwdma_mask = NV_MWDMA_MASK,
+ .udma_mask = NV_UDMA_MASK,
+ .port_ops = &nv_ck804_ops,
+ .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
+ },
+ /* ADMA */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+ .pio_mask = NV_PIO_MASK,
+ .mwdma_mask = NV_MWDMA_MASK,
+ .udma_mask = NV_UDMA_MASK,
+ .port_ops = &nv_adma_ops,
+ .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
+ },
+ /* MCP5x */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .pio_mask = NV_PIO_MASK,
+ .mwdma_mask = NV_MWDMA_MASK,
+ .udma_mask = NV_UDMA_MASK,
+ .port_ops = &nv_mcp5x_ops,
+ .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
+ },
+ /* SWNCQ */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_NCQ,
+ .pio_mask = NV_PIO_MASK,
+ .mwdma_mask = NV_MWDMA_MASK,
+ .udma_mask = NV_UDMA_MASK,
+ .port_ops = &nv_swncq_ops,
+ .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
+ },
+};
+
+MODULE_AUTHOR("NVIDIA");
+MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+static int adma_enabled;
+static int swncq_enabled = 1;
+
+static void nv_adma_register_mode(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 tmp, status;
+ int count = 0;
+
+ if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+ return;
+
+ status = readw(mmio + NV_ADMA_STAT);
+ while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
+ ndelay(50);
+ status = readw(mmio + NV_ADMA_STAT);
+ count++;
+ }
+ if (count == 20)
+ ata_port_printk(ap, KERN_WARNING,
+ "timeout waiting for ADMA IDLE, stat=0x%hx\n",
+ status);
+
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+ count = 0;
+ status = readw(mmio + NV_ADMA_STAT);
+ while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
+ ndelay(50);
+ status = readw(mmio + NV_ADMA_STAT);
+ count++;
+ }
+ if (count == 20)
+ ata_port_printk(ap, KERN_WARNING,
+ "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
+ status);
+
+ pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
+}
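+
+/*
+ * Polling budget note: each wait loop in nv_adma_register_mode() above
+ * and nv_adma_mode() below spins at most 20 iterations of ndelay(50),
+ * i.e. roughly one microsecond, before giving up with a warning rather
+ * than stalling the port indefinitely.
+ */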
+
+static void nv_adma_mode(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 tmp, status;
+ int count = 0;
+
+ if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
+ return;
+
+ WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
+
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+ status = readw(mmio + NV_ADMA_STAT);
+ while (((status & NV_ADMA_STAT_LEGACY) ||
+ !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
+ ndelay(50);
+ status = readw(mmio + NV_ADMA_STAT);
+ count++;
+ }
+ if (count == 20)
+ ata_port_printk(ap, KERN_WARNING,
+ "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
+ status);
+
+ pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
+}
+
+static int nv_adma_slave_config(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct nv_adma_port_priv *pp = ap->private_data;
+ struct nv_adma_port_priv *port0, *port1;
+ struct scsi_device *sdev0, *sdev1;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ unsigned long segment_boundary, flags;
+ unsigned short sg_tablesize;
+ int rc;
+ int adma_enable;
+ u32 current_reg, new_reg, config_mask;
+
+ rc = ata_scsi_slave_config(sdev);
+
+ if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
+ /* Not a proper libata device, ignore */
+ return rc;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
+ /*
+ * NVIDIA reports that ADMA mode does not support ATAPI commands.
+ * Therefore ATAPI commands are sent through the legacy interface.
+ * However, the legacy interface only supports 32-bit DMA.
+ * Restrict DMA parameters as required by the legacy interface
+ * when an ATAPI device is connected.
+ */
+ segment_boundary = ATA_DMA_BOUNDARY;
+ /* Subtract 1 since an extra entry may be needed for padding, see
+ libata-scsi.c */
+ sg_tablesize = LIBATA_MAX_PRD - 1;
+
+ /* Since the legacy DMA engine is in use, we need to disable ADMA
+ on the port. */
+ adma_enable = 0;
+ nv_adma_register_mode(ap);
+ } else {
+ segment_boundary = NV_ADMA_DMA_BOUNDARY;
+ sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
+ adma_enable = 1;
+ }
+
+ pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
+
+ if (ap->port_no == 1)
+ config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
+ else
+ config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
+
+ if (adma_enable) {
+ new_reg = current_reg | config_mask;
+ pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
+ } else {
+ new_reg = current_reg & ~config_mask;
+ pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
+ }
+
+ if (current_reg != new_reg)
+ pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
+
+ port0 = ap->host->ports[0]->private_data;
+ port1 = ap->host->ports[1]->private_data;
+ sdev0 = ap->host->ports[0]->link.device[0].sdev;
+ sdev1 = ap->host->ports[1]->link.device[0].sdev;
+ if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+ (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
+		/* We have to set the DMA mask to 32-bit if either port is in
+		 * ATAPI mode, since they are on the same PCI device which is
+		 * used for DMA mapping.  If we set the mask we also need to
+		 * set the bounce limit on both ports to ensure that the block
+		 * layer doesn't feed addresses that cause DMA mapping to
+		 * choke.  If either SCSI device is not allocated yet, it's OK
+		 * since that port will discover its correct setting when it
+		 * does get allocated.
+		 * Note: setting the 32-bit mask should not fail. */
+ if (sdev0)
+ blk_queue_bounce_limit(sdev0->request_queue,
+ ATA_DMA_MASK);
+ if (sdev1)
+ blk_queue_bounce_limit(sdev1->request_queue,
+ ATA_DMA_MASK);
+
+ pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ } else {
+		/* This shouldn't fail as it was set to this value before */
+ pci_set_dma_mask(pdev, pp->adma_dma_mask);
+ if (sdev0)
+ blk_queue_bounce_limit(sdev0->request_queue,
+ pp->adma_dma_mask);
+ if (sdev1)
+ blk_queue_bounce_limit(sdev1->request_queue,
+ pp->adma_dma_mask);
+ }
+
+ blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
+ blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+ ata_port_printk(ap, KERN_INFO,
+ "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+ (unsigned long long)*ap->host->dev->dma_mask,
+ segment_boundary, sg_tablesize);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc;
+}
+
+static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ struct nv_adma_port_priv *pp = qc->ap->private_data;
+ return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
+}
+
+static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ /* Other than when internal or pass-through commands are executed,
+ the only time this function will be called in ADMA mode will be
+ if a command fails. In the failure case we don't care about going
+ into register mode with ADMA commands pending, as the commands will
+	   all shortly be aborted anyway. We assume that NCQ commands are not
+	   issued via passthrough, which is the only case where switching out
+	   of ADMA mode could abort outstanding commands. */
+ nv_adma_register_mode(ap);
+
+ ata_sff_tf_read(ap, tf);
+}
+
+static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
+{
+ unsigned int idx = 0;
+
+ if (tf->flags & ATA_TFLAG_ISADDR) {
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
+ cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
+ cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
+ } else
+ cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
+
+ cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
+ cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE)
+ cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
+
+ cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
+
+ while (idx < 12)
+ cpb[idx++] = cpu_to_le16(IGN);
+
+ return idx;
+}
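+
+/*
+ * Encoding example (assumes the usual libata shadow register indices,
+ * e.g. ATA_REG_CMD == 0x07): each __le16 emitted above is
+ * (register index << 8) | data, optionally OR'd with control bits, so
+ * the final word for READ DMA EXT (command 0x25) is
+ * (0x07 << 8) | 0x25 | CMDEND == 0x8725.  Unused slots are padded with
+ * IGN up to the fixed 12-entry taskfile area of the CPB.
+ */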
+
+static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ u8 flags = pp->cpb[cpb_num].resp_flags;
+
+ VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
+
+ if (unlikely((force_err ||
+ flags & (NV_CPB_RESP_ATA_ERR |
+ NV_CPB_RESP_CMD_ERR |
+ NV_CPB_RESP_CPB_ERR)))) {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ int freeze = 0;
+
+ ata_ehi_clear_desc(ehi);
+ __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
+ if (flags & NV_CPB_RESP_ATA_ERR) {
+ ata_ehi_push_desc(ehi, "ATA error");
+ ehi->err_mask |= AC_ERR_DEV;
+ } else if (flags & NV_CPB_RESP_CMD_ERR) {
+ ata_ehi_push_desc(ehi, "CMD error");
+ ehi->err_mask |= AC_ERR_DEV;
+ } else if (flags & NV_CPB_RESP_CPB_ERR) {
+ ata_ehi_push_desc(ehi, "CPB error");
+ ehi->err_mask |= AC_ERR_SYSTEM;
+ freeze = 1;
+ } else {
+ /* notifier error, but no error in CPB flags? */
+ ata_ehi_push_desc(ehi, "unknown");
+ ehi->err_mask |= AC_ERR_OTHER;
+ freeze = 1;
+ }
+ /* Kill all commands. EH will determine what actually failed. */
+ if (freeze)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+ return 1;
+ }
+
+ if (likely(flags & NV_CPB_RESP_DONE)) {
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
+ VPRINTK("CPB flags done, flags=0x%x\n", flags);
+ if (likely(qc)) {
+ DPRINTK("Completing qc from tag %d\n", cpb_num);
+ ata_qc_complete(qc);
+ } else {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ /* Notifier bits set without a command may indicate the drive
+ is misbehaving. Raise host state machine violation on this
+ condition. */
+ ata_port_printk(ap, KERN_ERR,
+ "notifier for tag %d with no cmd?\n",
+ cpb_num);
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
+{
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+
+ /* freeze if hotplugged */
+ if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
+ ata_port_freeze(ap);
+ return 1;
+ }
+
+ /* bail out if not our interrupt */
+ if (!(irq_stat & NV_INT_DEV))
+ return 0;
+
+ /* DEV interrupt w/ no active qc? */
+ if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+ ata_sff_check_status(ap);
+ return 1;
+ }
+
+ /* handle interrupt */
+ return ata_sff_host_intr(ap, qc);
+}
+
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ int i, handled = 0;
+ u32 notifier_clears[2];
+
+ spin_lock(&host->lock);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ notifier_clears[i] = 0;
+
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 status;
+ u32 gen_ctl;
+ u32 notifier, notifier_error;
+
+ /* if ADMA is disabled, use standard ata interrupt handler */
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+ u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+ >> (NV_INT_PORT_SHIFT * i);
+ handled += nv_host_intr(ap, irq_stat);
+ continue;
+ }
+
+ /* if in ATA register mode, check for standard interrupts */
+ if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+ u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
+ >> (NV_INT_PORT_SHIFT * i);
+ if (ata_tag_valid(ap->link.active_tag))
+					/* NV_INT_DEV indication seems unreliable
+					 * at times, at least in ADMA mode.  Always
+					 * force it on when a command is active, to
+					 * prevent losing interrupts. */
+ irq_stat |= NV_INT_DEV;
+ handled += nv_host_intr(ap, irq_stat);
+ }
+
+ notifier = readl(mmio + NV_ADMA_NOTIFIER);
+ notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+ notifier_clears[i] = notifier | notifier_error;
+
+ gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+
+ if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+ !notifier_error)
+ /* Nothing to do */
+ continue;
+
+ status = readw(mmio + NV_ADMA_STAT);
+
+ /* Clear status. Ensure the controller sees the clearing before we start
+ looking at any of the CPB statuses, so that any CPB completions after
+ this point in the handler will raise another interrupt. */
+ writew(status, mmio + NV_ADMA_STAT);
+ readw(mmio + NV_ADMA_STAT); /* flush posted write */
+ rmb();
+
+ handled++; /* irq handled if we got here */
+
+ /* freeze if hotplugged or controller error */
+ if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
+ NV_ADMA_STAT_HOTUNPLUG |
+ NV_ADMA_STAT_TIMEOUT |
+ NV_ADMA_STAT_SERROR))) {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+
+ ata_ehi_clear_desc(ehi);
+ __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
+ if (status & NV_ADMA_STAT_TIMEOUT) {
+ ehi->err_mask |= AC_ERR_SYSTEM;
+ ata_ehi_push_desc(ehi, "timeout");
+ } else if (status & NV_ADMA_STAT_HOTPLUG) {
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "hotplug");
+ } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "hot unplug");
+ } else if (status & NV_ADMA_STAT_SERROR) {
+ /* let libata analyze SError and figure out the cause */
+ ata_ehi_push_desc(ehi, "SError");
+ } else
+ ata_ehi_push_desc(ehi, "unknown");
+ ata_port_freeze(ap);
+ continue;
+ }
+
+ if (status & (NV_ADMA_STAT_DONE |
+ NV_ADMA_STAT_CPBERR |
+ NV_ADMA_STAT_CMD_COMPLETE)) {
+ u32 check_commands = notifier_clears[i];
+ int pos, error = 0;
+
+ if (status & NV_ADMA_STAT_CPBERR) {
+ /* Check all active commands */
+ if (ata_tag_valid(ap->link.active_tag))
+ check_commands = 1 <<
+ ap->link.active_tag;
+ else
+ check_commands = ap->
+ link.sactive;
+ }
+
+				/* Check CPBs for completed commands */
+ while ((pos = ffs(check_commands)) && !error) {
+ pos--;
+ error = nv_adma_check_cpb(ap, pos,
+ notifier_error & (1 << pos));
+ check_commands &= ~(1 << pos);
+ }
+ }
+ }
+ }
+
+ if (notifier_clears[0] || notifier_clears[1]) {
+ /* Note: Both notifier clear registers must be written
+ if either is set, even if one is zero, according to NVIDIA. */
+ struct nv_adma_port_priv *pp = host->ports[0]->private_data;
+ writel(notifier_clears[0], pp->notifier_clear_block);
+ pp = host->ports[1]->private_data;
+ writel(notifier_clears[1], pp->notifier_clear_block);
+ }
+
+ spin_unlock(&host->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void nv_adma_freeze(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 tmp;
+
+ nv_ck804_freeze(ap);
+
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ return;
+
+ /* clear any outstanding CK804 notifications */
+ writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+ ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+
+ /* Disable interrupt */
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+ mmio + NV_ADMA_CTL);
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+}
+
+static void nv_adma_thaw(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 tmp;
+
+ nv_ck804_thaw(ap);
+
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ return;
+
+ /* Enable interrupt */
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+ mmio + NV_ADMA_CTL);
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+}
+
+static void nv_adma_irq_clear(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u32 notifier_clears[2];
+
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
+ ata_sff_irq_clear(ap);
+ return;
+ }
+
+ /* clear any outstanding CK804 notifications */
+ writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+ ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+
+ /* clear ADMA status */
+ writew(0xffff, mmio + NV_ADMA_STAT);
+
+	/* clear notifiers - note that both ports need to be written
+	 * with something, even though we are only clearing one */
+ if (ap->port_no == 0) {
+ notifier_clears[0] = 0xFFFFFFFF;
+ notifier_clears[1] = 0;
+ } else {
+ notifier_clears[0] = 0;
+ notifier_clears[1] = 0xFFFFFFFF;
+ }
+ pp = ap->host->ports[0]->private_data;
+ writel(notifier_clears[0], pp->notifier_clear_block);
+ pp = ap->host->ports[1]->private_data;
+ writel(notifier_clears[1], pp->notifier_clear_block);
+}
+
+static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct nv_adma_port_priv *pp = qc->ap->private_data;
+
+ if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+ ata_sff_post_internal_cmd(qc);
+}
+
+static int nv_adma_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct nv_adma_port_priv *pp;
+ int rc;
+ void *mem;
+ dma_addr_t mem_dma;
+ void __iomem *mmio;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 tmp;
+
+ VPRINTK("ENTER\n");
+
+ /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
+ pad buffers */
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
+ ap->port_no * NV_ADMA_PORT_SIZE;
+ pp->ctl_block = mmio;
+ pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
+ pp->notifier_clear_block = pp->gen_block +
+ NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
+
+	/* Now that the legacy PRD and padding buffer are allocated, we can
+	 * safely raise the DMA mask to allocate the CPB/APRD table.
+	 * These calls are allowed to fail since we store the value that
+	 * ends up being used as the bounce limit in slave_config later,
+	 * if needed. */
+ pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ pp->adma_dma_mask = *dev->dma_mask;
+
+ mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
+ &mem_dma, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
+
+ /*
+ * First item in chunk of DMA memory:
+ * 128-byte command parameter block (CPB)
+ * one for each command tag
+ */
+ pp->cpb = mem;
+ pp->cpb_dma = mem_dma;
+
+ writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
+ writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
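+	/*
+	 * The double 16-bit shift above is deliberate: if dma_addr_t is
+	 * only 32 bits wide, a single ">> 32" would be undefined behavior
+	 * (shifting by the full width of the type), whereas ">> 16 >> 16"
+	 * safely yields zero.
+	 */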
+
+ mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+ mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+
+ /*
+ * Second item: block of ADMA_SGTBL_LEN s/g entries
+ */
+ pp->aprd = mem;
+ pp->aprd_dma = mem_dma;
+
+ ap->private_data = pp;
+
+ /* clear any outstanding interrupt conditions */
+ writew(0xffff, mmio + NV_ADMA_STAT);
+
+ /* initialize port variables */
+ pp->flags = NV_ADMA_PORT_REGISTER_MODE;
+
+ /* clear CPB fetch count */
+ writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+ /* clear GO for register mode, enable interrupt */
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+ NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+ udelay(1);
+ writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+
+ return 0;
+}
+
+static void nv_adma_port_stop(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+
+ VPRINTK("ENTER\n");
+ writew(0, mmio + NV_ADMA_CTL);
+}
+
+#ifdef CONFIG_PM
+static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+
+ /* Go to register mode - clears GO */
+ nv_adma_register_mode(ap);
+
+ /* clear CPB fetch count */
+ writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+ /* disable interrupt, shut down port */
+ writew(0, mmio + NV_ADMA_CTL);
+
+ return 0;
+}
+
+static int nv_adma_port_resume(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ u16 tmp;
+
+ /* set CPB block location */
+ writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
+ writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
+
+ /* clear any outstanding interrupt conditions */
+ writew(0xffff, mmio + NV_ADMA_STAT);
+
+ /* initialize port variables */
+ pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
+
+ /* clear CPB fetch count */
+ writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+ /* clear GO for register mode, enable interrupt */
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+ NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+ udelay(1);
+ writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+
+ return 0;
+}
+#endif
+
+static void nv_adma_setup_port(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+ struct ata_ioports *ioport = &ap->ioaddr;
+
+ VPRINTK("ENTER\n");
+
+ mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
+
+ ioport->cmd_addr = mmio;
+ ioport->data_addr = mmio + (ATA_REG_DATA * 4);
+ ioport->error_addr =
+ ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
+ ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
+ ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
+ ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
+ ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
+ ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
+ ioport->status_addr =
+ ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
+ ioport->altstatus_addr =
+ ioport->ctl_addr = mmio + 0x20;
+}
+
+static int nv_adma_host_init(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ unsigned int i;
+ u32 tmp32;
+
+ VPRINTK("ENTER\n");
+
+ /* enable ADMA on the ports */
+ pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+ tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+ NV_MCP_SATA_CFG_20_PORT1_EN |
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
+
+ pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+
+ for (i = 0; i < host->n_ports; i++)
+ nv_adma_setup_port(host->ports[i]);
+
+ return 0;
+}
+
+static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
+ struct scatterlist *sg,
+ int idx,
+ struct nv_adma_prd *aprd)
+{
+ u8 flags = 0;
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ flags |= NV_APRD_WRITE;
+ if (idx == qc->n_elem - 1)
+ flags |= NV_APRD_END;
+ else if (idx != 4)
+ flags |= NV_APRD_CONT;
+
+ aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
+ aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
+ aprd->flags = flags;
+ aprd->packet_len = 0;
+}
+
+static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
+{
+ struct nv_adma_port_priv *pp = qc->ap->private_data;
+ struct nv_adma_prd *aprd;
+ struct scatterlist *sg;
+ unsigned int si;
+
+ VPRINTK("ENTER\n");
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ aprd = (si < 5) ? &cpb->aprd[si] :
+ &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
+ nv_adma_fill_aprd(qc, sg, si, aprd);
+ }
+ if (si > 5)
+ cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
+ else
+ cpb->next_aprd = cpu_to_le64(0);
+}
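+
+/*
+ * Indexing example: with NV_ADMA_SGTBL_LEN == 56, a command on tag 3
+ * with nine S/G segments keeps segments 0-4 in cpb->aprd[] and places
+ * segments 5-8 in pp->aprd[56 * 3] through pp->aprd[56 * 3 + 3], with
+ * next_aprd pointing at that per-tag spill block.
+ */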
+
+static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
+{
+ struct nv_adma_port_priv *pp = qc->ap->private_data;
+
+ /* ADMA engine can only be used for non-ATAPI DMA commands,
+ or interrupt-driven no-data commands. */
+ if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+ (qc->tf.flags & ATA_TFLAG_POLLING))
+ return 1;
+
+ if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
+ (qc->tf.protocol == ATA_PROT_NODATA))
+ return 0;
+
+ return 1;
+}
+
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct nv_adma_port_priv *pp = qc->ap->private_data;
+ struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
+ u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
+ NV_CPB_CTL_IEN;
+
+ if (nv_adma_use_reg_mode(qc)) {
+ BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+ (qc->flags & ATA_QCFLAG_DMAMAP));
+ nv_adma_register_mode(qc->ap);
+ ata_sff_qc_prep(qc);
+ return;
+ }
+
+ cpb->resp_flags = NV_CPB_RESP_DONE;
+ wmb();
+ cpb->ctl_flags = 0;
+ wmb();
+
+ cpb->len = 3;
+ cpb->tag = qc->tag;
+ cpb->next_cpb_idx = 0;
+
+ /* turn on NCQ flags for NCQ commands */
+ if (qc->tf.protocol == ATA_PROT_NCQ)
+ ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
+
+ VPRINTK("qc->flags = 0x%lx\n", qc->flags);
+
+ nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
+
+ if (qc->flags & ATA_QCFLAG_DMAMAP) {
+ nv_adma_fill_sg(qc, cpb);
+ ctl_flags |= NV_CPB_CTL_APRD_VALID;
+ } else
+ memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
+
+ /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
+ until we are finished filling in all of the contents */
+ wmb();
+ cpb->ctl_flags = ctl_flags;
+ wmb();
+ cpb->resp_flags = 0;
+}
+
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct nv_adma_port_priv *pp = qc->ap->private_data;
+ void __iomem *mmio = pp->ctl_block;
+ int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
+
+ VPRINTK("ENTER\n");
+
+ /* We can't handle result taskfile with NCQ commands, since
+ retrieving the taskfile switches us out of ADMA mode and would abort
+ existing commands. */
+ if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
+ (qc->flags & ATA_QCFLAG_RESULT_TF))) {
+ ata_dev_printk(qc->dev, KERN_ERR,
+ "NCQ w/ RESULT_TF not allowed\n");
+ return AC_ERR_SYSTEM;
+ }
+
+ if (nv_adma_use_reg_mode(qc)) {
+ /* use ATA register mode */
+ VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
+ BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
+ (qc->flags & ATA_QCFLAG_DMAMAP));
+ nv_adma_register_mode(qc->ap);
+ return ata_sff_qc_issue(qc);
+ } else
+ nv_adma_mode(qc->ap);
+
+ /* write append register, command tag in lower 8 bits
+ and (number of cpbs to append -1) in top 8 bits */
+ wmb();
+
+ if (curr_ncq != pp->last_issue_ncq) {
+ /* Seems to need some delay before switching between NCQ and
+ non-NCQ commands, else we get command timeouts and such. */
+ udelay(20);
+ pp->last_issue_ncq = curr_ncq;
+ }
+
+ writew(qc->tag, mmio + NV_ADMA_APPEND);
+
+ DPRINTK("Issued tag %u\n", qc->tag);
+
+ return 0;
+}
+
+static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ unsigned int i;
+ unsigned int handled = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ ap = host->ports[i];
+ if (ap &&
+ !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+ handled += ata_sff_host_intr(ap, qc);
+ else
+				/* No request pending?  Clear interrupt status
+				 * anyway, in case there's one pending. */
+ ap->ops->sff_check_status(ap);
+ }
+
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
+{
+ int i, handled = 0;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED))
+ handled += nv_host_intr(ap, irq_stat);
+
+ irq_stat >>= NV_INT_PORT_SHIFT;
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ u8 irq_stat;
+ irqreturn_t ret;
+
+ spin_lock(&host->lock);
+ irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
+ ret = nv_do_interrupt(host, irq_stat);
+ spin_unlock(&host->lock);
+
+ return ret;
+}
+
+static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ u8 irq_stat;
+ irqreturn_t ret;
+
+ spin_lock(&host->lock);
+ irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
+ ret = nv_do_interrupt(host, irq_stat);
+ spin_unlock(&host->lock);
+
+ return ret;
+}
+
+static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+
+ *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
+}
+
+static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+
+ iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
+}
+
+static int nv_noclassify_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ bool online;
+ int rc;
+
+ rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
+ &online, NULL);
+ return online ? -EAGAIN : rc;
+}
+
+static void nv_nf2_freeze(struct ata_port *ap)
+{
+ void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
+ int shift = ap->port_no * NV_INT_PORT_SHIFT;
+ u8 mask;
+
+ mask = ioread8(scr_addr + NV_INT_ENABLE);
+ mask &= ~(NV_INT_ALL << shift);
+ iowrite8(mask, scr_addr + NV_INT_ENABLE);
+}
+
+static void nv_nf2_thaw(struct ata_port *ap)
+{
+ void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
+ int shift = ap->port_no * NV_INT_PORT_SHIFT;
+ u8 mask;
+
+ iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
+
+ mask = ioread8(scr_addr + NV_INT_ENABLE);
+ mask |= (NV_INT_MASK << shift);
+ iowrite8(mask, scr_addr + NV_INT_ENABLE);
+}
+
+static void nv_ck804_freeze(struct ata_port *ap)
+{
+ void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+ int shift = ap->port_no * NV_INT_PORT_SHIFT;
+ u8 mask;
+
+ mask = readb(mmio_base + NV_INT_ENABLE_CK804);
+ mask &= ~(NV_INT_ALL << shift);
+ writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
+}
+
+static void nv_ck804_thaw(struct ata_port *ap)
+{
+ void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+ int shift = ap->port_no * NV_INT_PORT_SHIFT;
+ u8 mask;
+
+ writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
+
+ mask = readb(mmio_base + NV_INT_ENABLE_CK804);
+ mask |= (NV_INT_MASK << shift);
+ writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
+}
+
+static void nv_mcp55_freeze(struct ata_port *ap)
+{
+ void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+ int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
+ u32 mask;
+
+ writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
+
+ mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
+ mask &= ~(NV_INT_ALL_MCP55 << shift);
+ writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+ ata_sff_freeze(ap);
+}
+
+static void nv_mcp55_thaw(struct ata_port *ap)
+{
+ void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+ int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
+ u32 mask;
+
+ writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
+
+ mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
+ mask |= (NV_INT_MASK_MCP55 << shift);
+ writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+ ata_sff_thaw(ap);
+}
+
+static void nv_adma_error_handler(struct ata_port *ap)
+{
+ struct nv_adma_port_priv *pp = ap->private_data;
+ if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+ void __iomem *mmio = pp->ctl_block;
+ int i;
+ u16 tmp;
+
+ if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
+ u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
+ u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+ u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
+ u32 status = readw(mmio + NV_ADMA_STAT);
+ u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
+ u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
+
+ ata_port_printk(ap, KERN_ERR,
+ "EH in ADMA mode, notifier 0x%X "
+ "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
+ "next cpb count 0x%X next cpb idx 0x%x\n",
+ notifier, notifier_error, gen_ctl, status,
+ cpb_count, next_cpb_idx);
+
+ for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
+ struct nv_adma_cpb *cpb = &pp->cpb[i];
+ if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
+ ap->link.sactive & (1 << i))
+ ata_port_printk(ap, KERN_ERR,
+ "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
+ i, cpb->ctl_flags, cpb->resp_flags);
+ }
+ }
+
+ /* Push us back into port register mode for error handling. */
+ nv_adma_register_mode(ap);
+
+ /* Mark all of the CPBs as invalid to prevent them from
+ being executed */
+ for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
+ pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
+
+ /* clear CPB fetch count */
+ writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+ /* Reset channel */
+ tmp = readw(mmio + NV_ADMA_CTL);
+ writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+ udelay(1);
+ writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+ }
+
+ ata_sff_error_handler(ap);
+}
+
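+/*
+ * Defer queue: a small ring buffer of tags. head and tail are
+ * free-running counters masked with (ATA_MAX_QUEUE - 1) on access;
+ * defer_bits mirrors the queued tags as a bitmap so membership can be
+ * tested without walking the ring.
+ */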
+static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct defer_queue *dq = &pp->defer_queue;
+
+ /* queue is full */
+ WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
+ dq->defer_bits |= (1 << qc->tag);
+ dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
+}
+
+static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct defer_queue *dq = &pp->defer_queue;
+ unsigned int tag;
+
+ if (dq->head == dq->tail) /* null queue */
+ return NULL;
+
+ tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
+ dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
+ WARN_ON(!(dq->defer_bits & (1 << tag)));
+ dq->defer_bits &= ~(1 << tag);
+
+ return ata_qc_from_tag(ap, tag);
+}
+
+static void nv_swncq_fis_reinit(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ pp->dhfis_bits = 0;
+ pp->dmafis_bits = 0;
+ pp->sdbfis_bits = 0;
+ pp->ncq_flags = 0;
+}
+
+static void nv_swncq_pp_reinit(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct defer_queue *dq = &pp->defer_queue;
+
+ dq->head = 0;
+ dq->tail = 0;
+ dq->defer_bits = 0;
+ pp->qc_active = 0;
+ pp->last_issue_tag = ATA_TAG_POISON;
+ nv_swncq_fis_reinit(ap);
+}
+
+static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ writew(fis, pp->irq_block);
+}
+
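+/*
+ * ata_bmdma_stop() needs only qc->ap here, so a dummy qc on the stack
+ * is enough to stop the BMDMA engine when no real queued command is
+ * at hand.
+ */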
+static void __ata_bmdma_stop(struct ata_port *ap)
+{
+ struct ata_queued_cmd qc;
+
+ qc.ap = ap;
+ ata_bmdma_stop(&qc);
+}
+
+static void nv_swncq_ncq_stop(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ unsigned int i;
+ u32 sactive;
+ u32 done_mask;
+
+ ata_port_printk(ap, KERN_ERR,
+ "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
+ ap->qc_active, ap->link.sactive);
+ ata_port_printk(ap, KERN_ERR,
+ "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
+ "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
+ pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
+ pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
+
+ ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
+ ap->ops->sff_check_status(ap),
+ ioread8(ap->ioaddr.error_addr));
+
+ sactive = readl(pp->sactive_block);
+ done_mask = pp->qc_active ^ sactive;
+
+ ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
+ for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ u8 err = 0;
+ if (pp->qc_active & (1 << i))
+ err = 0;
+ else if (done_mask & (1 << i))
+ err = 1;
+ else
+ continue;
+
+ ata_port_printk(ap, KERN_ERR,
+ "tag 0x%x: %01x %01x %01x %01x %s\n", i,
+ (pp->dhfis_bits >> i) & 0x1,
+ (pp->dmafis_bits >> i) & 0x1,
+ (pp->sdbfis_bits >> i) & 0x1,
+ (sactive >> i) & 0x1,
+ (err ? "error! tag doesn't exit" : " "));
+ }
+
+ nv_swncq_pp_reinit(ap);
+ ap->ops->sff_irq_clear(ap);
+ __ata_bmdma_stop(ap);
+ nv_swncq_irq_clear(ap, 0xffff);
+}
+
+static void nv_swncq_error_handler(struct ata_port *ap)
+{
+ struct ata_eh_context *ehc = &ap->link.eh_context;
+
+ if (ap->link.sactive) {
+ nv_swncq_ncq_stop(ap);
+ ehc->i.action |= ATA_EH_RESET;
+ }
+
+ ata_sff_error_handler(ap);
+}
+
+#ifdef CONFIG_PM
+static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+ u32 tmp;
+
+ /* clear irq */
+ writel(~0, mmio + NV_INT_STATUS_MCP55);
+
+ /* disable irq */
+ writel(0, mmio + NV_INT_ENABLE_MCP55);
+
+ /* disable swncq */
+ tmp = readl(mmio + NV_CTL_MCP55);
+ tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
+ writel(tmp, mmio + NV_CTL_MCP55);
+
+ return 0;
+}
+
+static int nv_swncq_port_resume(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+ u32 tmp;
+
+ /* clear irq */
+ writel(~0, mmio + NV_INT_STATUS_MCP55);
+
+ /* enable irq */
+ writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
+
+ /* enable swncq */
+ tmp = readl(mmio + NV_CTL_MCP55);
+ writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
+
+ return 0;
+}
+#endif
+
+static void nv_swncq_host_init(struct ata_host *host)
+{
+ u32 tmp;
+ void __iomem *mmio = host->iomap[NV_MMIO_BAR];
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ u8 regval;
+
+ /* disable ECO 398 */
+ pci_read_config_byte(pdev, 0x7f, &regval);
+ regval &= ~(1 << 7);
+ pci_write_config_byte(pdev, 0x7f, regval);
+
+ /* enable swncq */
+ tmp = readl(mmio + NV_CTL_MCP55);
+ VPRINTK("HOST_CTL:0x%X\n", tmp);
+ writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
+
+ /* enable irq intr */
+ tmp = readl(mmio + NV_INT_ENABLE_MCP55);
+ VPRINTK("HOST_ENABLE:0x%X\n", tmp);
+ writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
+
+ /* clear port irq */
+ writel(~0x0, mmio + NV_INT_STATUS_MCP55);
+}
+
+static int nv_swncq_slave_config(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct ata_device *dev;
+ int rc;
+ u8 rev;
+ u8 check_maxtor = 0;
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+ rc = ata_scsi_slave_config(sdev);
+ if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
+ /* Not a proper libata device, ignore */
+ return rc;
+
+ dev = &ap->link.device[sdev->id];
+ if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
+ return rc;
+
+ /* if MCP51 and Maxtor, then disable ncq */
+ if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
+ pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
+ check_maxtor = 1;
+
+ /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
+ if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
+ pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
+ pci_read_config_byte(pdev, 0x8, &rev);
+ if (rev <= 0xa2)
+ check_maxtor = 1;
+ }
+
+ if (!check_maxtor)
+ return rc;
+
+ ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+ if (strncmp(model_num, "Maxtor", 6) == 0) {
+ ata_scsi_change_queue_depth(sdev, 1);
+ ata_dev_printk(dev, KERN_NOTICE,
+ "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
+ }
+
+ return rc;
+}
+
+static int nv_swncq_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+ struct nv_swncq_port_priv *pp;
+ int rc;
+
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
+ &pp->prd_dma, GFP_KERNEL);
+ if (!pp->prd)
+ return -ENOMEM;
+ memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
+
+ ap->private_data = pp;
+ pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
+ pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
+ pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
+
+ return 0;
+}
+
+static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (qc->tf.protocol != ATA_PROT_NCQ) {
+ ata_sff_qc_prep(qc);
+ return;
+ }
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ nv_swncq_fill_sg(qc);
+}
+
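+/*
+ * Build the PRD table for one NCQ command. Each tag gets its own
+ * ATA_MAX_PRD-entry table at pp->prd + ATA_MAX_PRD * tag, and any
+ * segment crossing a 64K boundary is split, as the standard BMDMA
+ * PRD format requires.
+ */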
+static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scatterlist *sg;
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct ata_prd *prd;
+ unsigned int si, idx;
+
+ prd = pp->prd + ATA_MAX_PRD * qc->tag;
+
+ idx = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ addr = (u32)sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ offset = addr & 0xffff;
+ len = sg_len;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ prd[idx].addr = cpu_to_le32(addr);
+ prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+
+ idx++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
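+/*
+ * Issue one NCQ command: set the tag's bit in SActive first, record
+ * the per-tag bookkeeping bits, then load and execute the taskfile --
+ * the software counterpart of what an NCQ-capable controller does in
+ * hardware.
+ */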
+static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
+ struct ata_queued_cmd *qc)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ if (qc == NULL)
+ return 0;
+
+ DPRINTK("Enter\n");
+
+ writel((1 << qc->tag), pp->sactive_block);
+ pp->last_issue_tag = qc->tag;
+ pp->dhfis_bits &= ~(1 << qc->tag);
+ pp->dmafis_bits &= ~(1 << qc->tag);
+ pp->qc_active |= (0x1 << qc->tag);
+
+ ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+
+ DPRINTK("Issued tag %u\n", qc->tag);
+
+ return 0;
+}
+
+static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ if (qc->tf.protocol != ATA_PROT_NCQ)
+ return ata_sff_qc_issue(qc);
+
+ DPRINTK("Enter\n");
+
+ if (!pp->qc_active)
+ nv_swncq_issue_atacmd(ap, qc);
+ else
+ nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
+
+ return 0;
+}
+
+static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
+{
+ u32 serror;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+
+ ata_ehi_clear_desc(ehi);
+
+ /* AHCI needs SError cleared; otherwise, it might lock up */
+ sata_scr_read(&ap->link, SCR_ERROR, &serror);
+ sata_scr_write(&ap->link, SCR_ERROR, serror);
+
+ /* analyze @irq_stat */
+ if (fis & NV_SWNCQ_IRQ_ADDED)
+ ata_ehi_push_desc(ehi, "hot plug");
+ else if (fis & NV_SWNCQ_IRQ_REMOVED)
+ ata_ehi_push_desc(ehi, "hot unplug");
+
+ ata_ehi_hotplugged(ehi);
+
+ /* okay, let's hand over to EH */
+ ehi->serror |= serror;
+
+ ata_port_freeze(ap);
+}
+
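+/*
+ * Handle a Set Device Bits FIS. done_mask = qc_active ^ sactive
+ * yields the tags the driver issued that the device has since cleared
+ * from SActive, i.e. completed commands; a bit set in both done_mask
+ * and sactive would mean a tag became active that was never issued,
+ * which is treated as an HSM violation below.
+ */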
+static int nv_swncq_sdbfis(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u32 sactive;
+ int nr_done = 0;
+ u32 done_mask;
+ int i;
+ u8 host_stat;
+ u8 lack_dhfis = 0;
+
+ host_stat = ap->ops->bmdma_status(ap);
+ if (unlikely(host_stat & ATA_DMA_ERR)) {
+		/* error when transferring data to/from memory */
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+ ehi->err_mask |= AC_ERR_HOST_BUS;
+ ehi->action |= ATA_EH_RESET;
+ return -EINVAL;
+ }
+
+ ap->ops->sff_irq_clear(ap);
+ __ata_bmdma_stop(ap);
+
+ sactive = readl(pp->sactive_block);
+ done_mask = pp->qc_active ^ sactive;
+
+ if (unlikely(done_mask & sactive)) {
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
+ "(%08x->%08x)", pp->qc_active, sactive);
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ return -EINVAL;
+ }
+ for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ if (!(done_mask & (1 << i)))
+ continue;
+
+ qc = ata_qc_from_tag(ap, i);
+ if (qc) {
+ ata_qc_complete(qc);
+ pp->qc_active &= ~(1 << i);
+ pp->dhfis_bits &= ~(1 << i);
+ pp->dmafis_bits &= ~(1 << i);
+ pp->sdbfis_bits |= (1 << i);
+ nr_done++;
+ }
+ }
+
+ if (!ap->qc_active) {
+ DPRINTK("over\n");
+ nv_swncq_pp_reinit(ap);
+ return nr_done;
+ }
+
+ if (pp->qc_active & pp->dhfis_bits)
+ return nr_done;
+
+ if ((pp->ncq_flags & ncq_saw_backout) ||
+ (pp->qc_active ^ pp->dhfis_bits))
+		/* if the controller can't get a Device-to-Host Register FIS,
+		 * the driver needs to reissue the command.
+		 */
+ lack_dhfis = 1;
+
+ DPRINTK("id 0x%x QC: qc_active 0x%x,"
+ "SWNCQ:qc_active 0x%X defer_bits %X "
+ "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
+ ap->print_id, ap->qc_active, pp->qc_active,
+ pp->defer_queue.defer_bits, pp->dhfis_bits,
+ pp->dmafis_bits, pp->last_issue_tag);
+
+ nv_swncq_fis_reinit(ap);
+
+ if (lack_dhfis) {
+ qc = ata_qc_from_tag(ap, pp->last_issue_tag);
+ nv_swncq_issue_atacmd(ap, qc);
+ return nr_done;
+ }
+
+ if (pp->defer_queue.defer_bits) {
+ /* send deferral queue command */
+ qc = nv_swncq_qc_from_dq(ap);
+ WARN_ON(qc == NULL);
+ nv_swncq_issue_atacmd(ap, qc);
+ }
+
+ return nr_done;
+}
+
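+/* The active tag is reported in bits 6:2 of the tag register. */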
+static inline u32 nv_swncq_tag(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ u32 tag;
+
+ tag = readb(pp->tag_block) >> 2;
+ return (tag & 0x1f);
+}
+
+static int nv_swncq_dmafis(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ unsigned int rw;
+ u8 dmactl;
+ u32 tag;
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ __ata_bmdma_stop(ap);
+ tag = nv_swncq_tag(ap);
+
+ DPRINTK("dma setup tag 0x%x\n", tag);
+ qc = ata_qc_from_tag(ap, tag);
+
+ if (unlikely(!qc))
+ return 0;
+
+ rw = qc->tf.flags & ATA_TFLAG_WRITE;
+
+ /* load PRD table addr. */
+ iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
+ ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ dmactl &= ~ATA_DMA_WR;
+ if (!rw)
+ dmactl |= ATA_DMA_WR;
+
+ iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ return 1;
+}
+
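+/*
+ * Per-tag FIS bookkeeping: dhfis_bits, dmafis_bits and sdbfis_bits
+ * record which tags have seen a Device-to-Host Register FIS, a DMA
+ * Setup FIS and a Set Device Bits FIS respectively; ncq_flags notes
+ * which FIS types occurred at all since the last reinit.
+ */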
+static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u32 serror;
+ u8 ata_stat;
+ int rc = 0;
+
+ ata_stat = ap->ops->sff_check_status(ap);
+ nv_swncq_irq_clear(ap, fis);
+ if (!fis)
+ return;
+
+ if (ap->pflags & ATA_PFLAG_FROZEN)
+ return;
+
+ if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
+ nv_swncq_hotplug(ap, fis);
+ return;
+ }
+
+ if (!pp->qc_active)
+ return;
+
+ if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
+ return;
+ ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
+
+ if (ata_stat & ATA_ERR) {
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
+ ehi->err_mask |= AC_ERR_DEV;
+ ehi->serror |= serror;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ return;
+ }
+
+ if (fis & NV_SWNCQ_IRQ_BACKOUT) {
+		/* On a backout IRQ the driver must reissue
+		 * the command again some time later.
+		 */
+ pp->ncq_flags |= ncq_saw_backout;
+ }
+
+ if (fis & NV_SWNCQ_IRQ_SDBFIS) {
+ pp->ncq_flags |= ncq_saw_sdb;
+ DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
+ "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
+ ap->print_id, pp->qc_active, pp->dhfis_bits,
+ pp->dmafis_bits, readl(pp->sactive_block));
+ rc = nv_swncq_sdbfis(ap);
+ if (rc < 0)
+ goto irq_error;
+ }
+
+ if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
+ /* The interrupt indicates the new command
+ * was transmitted correctly to the drive.
+ */
+ pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
+ pp->ncq_flags |= ncq_saw_d2h;
+ if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
+ ata_ehi_push_desc(ehi, "illegal fis transaction");
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ goto irq_error;
+ }
+
+ if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
+ !(pp->ncq_flags & ncq_saw_dmas)) {
+ ata_stat = ap->ops->sff_check_status(ap);
+ if (ata_stat & ATA_BUSY)
+ goto irq_exit;
+
+ if (pp->defer_queue.defer_bits) {
+ DPRINTK("send next command\n");
+ qc = nv_swncq_qc_from_dq(ap);
+ nv_swncq_issue_atacmd(ap, qc);
+ }
+ }
+ }
+
+ if (fis & NV_SWNCQ_IRQ_DMASETUP) {
+		/* program the DMA controller with the appropriate PRD buffers
+		 * and start the DMA transfer for the requested command.
+		 */
+ pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
+ pp->ncq_flags |= ncq_saw_dmas;
+ rc = nv_swncq_dmafis(ap);
+ }
+
+irq_exit:
+ return;
+irq_error:
+ ata_ehi_push_desc(ehi, "fis:0x%x", fis);
+ ata_port_freeze(ap);
+ return;
+}
+
+static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ unsigned int i;
+ unsigned int handled = 0;
+ unsigned long flags;
+ u32 irq_stat;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+ if (ap->link.sactive) {
+ nv_swncq_host_interrupt(ap, (u16)irq_stat);
+ handled = 1;
+ } else {
+				if (irq_stat)	/* preserve hotplug bits */
+ nv_swncq_irq_clear(ap, 0xfff0);
+
+ handled += nv_host_intr(ap, (u8)irq_stat);
+ }
+ }
+ irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ const struct ata_port_info *ppi[] = { NULL, NULL };
+ struct nv_pi_priv *ipriv;
+ struct ata_host *host;
+ struct nv_host_priv *hpriv;
+ int rc;
+ u32 bar;
+ void __iomem *base;
+ unsigned long type = ent->driver_data;
+
+	/* Make sure this is a SATA controller by counting the number of
+	 * BARs (NVIDIA SATA controllers will always have six BARs).
+	 * Otherwise, it's an IDE controller and we ignore it.
+	 */
+ for (bar = 0; bar < 6; bar++)
+ if (pci_resource_start(pdev, bar) == 0)
+ return -ENODEV;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* determine type and allocate host */
+ if (type == CK804 && adma_enabled) {
+ dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
+ type = ADMA;
+ } else if (type == MCP5x && swncq_enabled) {
+ dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
+ type = SWNCQ;
+ }
+
+ ppi[0] = &nv_port_info[type];
+ ipriv = ppi[0]->private_data;
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ return rc;
+
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+ hpriv->type = type;
+ host->private_data = hpriv;
+
+ /* request and iomap NV_MMIO_BAR */
+ rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
+ if (rc)
+ return rc;
+
+ /* configure SCR access */
+ base = host->iomap[NV_MMIO_BAR];
+ host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
+ host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
+
+ /* enable SATA space for CK804 */
+ if (type >= CK804) {
+ u8 regval;
+
+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
+ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
+ }
+
+ /* init ADMA */
+ if (type == ADMA) {
+ rc = nv_adma_host_init(host);
+ if (rc)
+ return rc;
+ } else if (type == SWNCQ)
+ nv_swncq_host_init(host);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
+ IRQF_SHARED, ipriv->sht);
+}
+
+#ifdef CONFIG_PM
+static int nv_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct nv_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+ if (hpriv->type >= CK804) {
+ u8 regval;
+
+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
+ regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
+ }
+ if (hpriv->type == ADMA) {
+ u32 tmp32;
+ struct nv_adma_port_priv *pp;
+ /* enable/disable ADMA on the ports appropriately */
+ pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+
+ pp = host->ports[0]->private_data;
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+ else
+ tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+ pp = host->ports[1]->private_data;
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+ else
+ tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+
+ pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+ }
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+#endif
+
+static void nv_ck804_host_stop(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ u8 regval;
+
+ /* disable SATA space for CK804 */
+ pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
+ regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
+ pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
+}
+
+static void nv_adma_host_stop(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ u32 tmp32;
+
+ /* disable ADMA on the ports */
+ pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+ tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+ NV_MCP_SATA_CFG_20_PORT1_EN |
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+
+ pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+
+ nv_ck804_host_stop(host);
+}
+
+static int __init nv_init(void)
+{
+ return pci_register_driver(&nv_pci_driver);
+}
+
+static void __exit nv_exit(void)
+{
+ pci_unregister_driver(&nv_pci_driver);
+}
+
+module_init(nv_init);
+module_exit(nv_exit);
+module_param_named(adma, adma_enabled, bool, 0444);
+MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
+module_param_named(swncq, swncq_enabled, bool, 0444);
+MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
+
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
new file mode 100644
index 0000000..ba9a257
--- /dev/null
+++ b/drivers/ata/sata_promise.c
@@ -0,0 +1,1151 @@
+/*
+ * sata_promise.c - Promise SATA
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Mikael Pettersson <mikpe@it.uu.se>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003-2004 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware information only available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "sata_promise.h"
+
+#define DRV_NAME "sata_promise"
+#define DRV_VERSION "2.12"
+
+enum {
+ PDC_MAX_PORTS = 4,
+ PDC_MMIO_BAR = 3,
+ PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
+
+ /* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
+ PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
+ PDC_FLASH_CTL = 0x44, /* Flash control register */
+ PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */
+ PDC2_SATA_PLUG_CSR = 0x60, /* SATAII Plug control/status reg */
+ PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */
+ PDC_SLEW_CTL = 0x470, /* slew rate control reg (not SATAII) */
+
+ /* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
+ PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */
+ PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */
+ PDC_SECTOR_NUMBER = 0x0C, /* Sector number reg (per port) */
+ PDC_CYLINDER_LOW = 0x10, /* Cylinder low reg (per port) */
+ PDC_CYLINDER_HIGH = 0x14, /* Cylinder high reg (per port) */
+ PDC_DEVICE = 0x18, /* Device/Head reg (per port) */
+ PDC_COMMAND = 0x1C, /* Command/status reg (per port) */
+ PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */
+ PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
+ PDC_GLOBAL_CTL = 0x48, /* Global control/status (per port) */
+ PDC_CTLSTAT = 0x60, /* IDE control and status (per port) */
+
+ /* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
+ PDC_PHYMODE4 = 0x14,
+
+ /* PDC_GLOBAL_CTL bit definitions */
+ PDC_PH_ERR = (1 << 8), /* PCI error while loading packet */
+ PDC_SH_ERR = (1 << 9), /* PCI error while loading S/G table */
+ PDC_DH_ERR = (1 << 10), /* PCI error while loading data */
+ PDC2_HTO_ERR = (1 << 12), /* host bus timeout */
+ PDC2_ATA_HBA_ERR = (1 << 13), /* error during SATA DATA FIS transmission */
+ PDC2_ATA_DMA_CNT_ERR = (1 << 14), /* DMA DATA FIS size differs from S/G count */
+ PDC_OVERRUN_ERR = (1 << 19), /* S/G byte count larger than HD requires */
+ PDC_UNDERRUN_ERR = (1 << 20), /* S/G byte count less than HD requires */
+ PDC_DRIVE_ERR = (1 << 21), /* drive error */
+ PDC_PCI_SYS_ERR = (1 << 22), /* PCI system error */
+ PDC1_PCI_PARITY_ERR = (1 << 23), /* PCI parity error (from SATA150 driver) */
+ PDC1_ERR_MASK = PDC1_PCI_PARITY_ERR,
+ PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
+ PDC2_ATA_DMA_CNT_ERR,
+ PDC_ERR_MASK = PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
+ PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
+ PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
+ PDC1_ERR_MASK | PDC2_ERR_MASK,
+
+ board_2037x = 0, /* FastTrak S150 TX2plus */
+ board_2037x_pata = 1, /* FastTrak S150 TX2plus PATA port */
+ board_20319 = 2, /* FastTrak S150 TX4 */
+ board_20619 = 3, /* FastTrak TX4000 */
+ board_2057x = 4, /* SATAII150 Tx2plus */
+ board_2057x_pata = 5, /* SATAII150 Tx2plus PATA port */
+ board_40518 = 6, /* SATAII150 Tx4 */
+
+ PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
+
+ /* Sequence counter control registers bit definitions */
+ PDC_SEQCNTRL_INT_MASK = (1 << 5), /* Sequence Interrupt Mask */
+
+ /* Feature register values */
+ PDC_FEATURE_ATAPI_PIO = 0x00, /* ATAPI data xfer by PIO */
+ PDC_FEATURE_ATAPI_DMA = 0x01, /* ATAPI data xfer by DMA */
+
+ /* Device/Head register values */
+ PDC_DEVICE_SATA = 0xE0, /* Device/Head value for SATA devices */
+
+ /* PDC_CTLSTAT bit definitions */
+ PDC_DMA_ENABLE = (1 << 7),
+ PDC_IRQ_DISABLE = (1 << 10),
+ PDC_RESET = (1 << 11), /* HDMA reset */
+
+ PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO |
+ ATA_FLAG_PIO_POLLING,
+
+ /* ap->flags bits */
+ PDC_FLAG_GEN_II = (1 << 24),
+ PDC_FLAG_SATA_PATA = (1 << 25), /* supports SATA + PATA */
+ PDC_FLAG_4_PORTS = (1 << 26), /* 4 ports */
+};
+
+struct pdc_port_priv {
+ u8 *pkt;
+ dma_addr_t pkt_dma;
+};
+
+static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int pdc_common_port_start(struct ata_port *ap);
+static int pdc_sata_port_start(struct ata_port *ap);
+static void pdc_qc_prep(struct ata_queued_cmd *qc);
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
+static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
+static void pdc_irq_clear(struct ata_port *ap);
+static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
+static void pdc_freeze(struct ata_port *ap);
+static void pdc_sata_freeze(struct ata_port *ap);
+static void pdc_thaw(struct ata_port *ap);
+static void pdc_sata_thaw(struct ata_port *ap);
+static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void pdc_error_handler(struct ata_port *ap);
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
+static int pdc_pata_cable_detect(struct ata_port *ap);
+static int pdc_sata_cable_detect(struct ata_port *ap);
+
+static struct scsi_host_template pdc_ata_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = PDC_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+static const struct ata_port_operations pdc_common_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .sff_tf_load = pdc_tf_load_mmio,
+ .sff_exec_command = pdc_exec_command_mmio,
+ .check_atapi_dma = pdc_check_atapi_dma,
+ .qc_prep = pdc_qc_prep,
+ .qc_issue = pdc_qc_issue,
+ .sff_irq_clear = pdc_irq_clear,
+
+ .post_internal_cmd = pdc_post_internal_cmd,
+ .error_handler = pdc_error_handler,
+};
+
+static struct ata_port_operations pdc_sata_ops = {
+ .inherits = &pdc_common_ops,
+ .cable_detect = pdc_sata_cable_detect,
+ .freeze = pdc_sata_freeze,
+ .thaw = pdc_sata_thaw,
+ .scr_read = pdc_sata_scr_read,
+ .scr_write = pdc_sata_scr_write,
+ .port_start = pdc_sata_port_start,
+ .hardreset = pdc_sata_hardreset,
+};
+
+/* First-generation chips need a more restrictive ->check_atapi_dma op */
+static struct ata_port_operations pdc_old_sata_ops = {
+ .inherits = &pdc_sata_ops,
+ .check_atapi_dma = pdc_old_sata_check_atapi_dma,
+};
+
+static struct ata_port_operations pdc_pata_ops = {
+ .inherits = &pdc_common_ops,
+ .cable_detect = pdc_pata_cable_detect,
+ .freeze = pdc_freeze,
+ .thaw = pdc_thaw,
+ .port_start = pdc_common_port_start,
+ .softreset = pdc_pata_softreset,
+};
+
+static const struct ata_port_info pdc_port_info[] = {
+ [board_2037x] =
+ {
+ .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+ PDC_FLAG_SATA_PATA,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &pdc_old_sata_ops,
+ },
+
+ [board_2037x_pata] =
+ {
+ .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &pdc_pata_ops,
+ },
+
+ [board_20319] =
+ {
+ .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+ PDC_FLAG_4_PORTS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &pdc_old_sata_ops,
+ },
+
+ [board_20619] =
+ {
+ .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
+ PDC_FLAG_4_PORTS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &pdc_pata_ops,
+ },
+
+ [board_2057x] =
+ {
+ .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+ PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &pdc_sata_ops,
+ },
+
+ [board_2057x_pata] =
+ {
+ .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
+ PDC_FLAG_GEN_II,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &pdc_pata_ops,
+ },
+
+ [board_40518] =
+ {
+ .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA |
+ PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &pdc_sata_ops,
+ },
+};
+
+static const struct pci_device_id pdc_ata_pci_tbl[] = {
+ { PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
+ { PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
+ { PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
+ { PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
+ { PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
+ { PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
+ { PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
+ { PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
+ { PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
+ { PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
+
+ { PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
+ { PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
+ { PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
+ { PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
+ { PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
+ { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
+
+ { PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver pdc_ata_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = pdc_ata_pci_tbl,
+ .probe = pdc_ata_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static int pdc_common_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct pdc_port_priv *pp;
+ int rc;
+
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
+ if (!pp->pkt)
+ return -ENOMEM;
+
+ ap->private_data = pp;
+
+ return 0;
+}
+
+static int pdc_sata_port_start(struct ata_port *ap)
+{
+ int rc;
+
+ rc = pdc_common_port_start(ap);
+ if (rc)
+ return rc;
+
+ /* fix up PHYMODE4 align timing */
+ if (ap->flags & PDC_FLAG_GEN_II) {
+ void __iomem *sata_mmio = ap->ioaddr.scr_addr;
+ unsigned int tmp;
+
+ tmp = readl(sata_mmio + PDC_PHYMODE4);
+ tmp = (tmp & ~3) | 1; /* set bits 1:0 = 0:1 */
+ writel(tmp, sata_mmio + PDC_PHYMODE4);
+ }
+
+ return 0;
+}
+
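+/*
+ * Repeatedly assert the HDMA reset bit (up to 11 attempts) until it
+ * reads back as set, then deassert it and flush the posted write.
+ */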
+static void pdc_reset_port(struct ata_port *ap)
+{
+ void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
+ unsigned int i;
+ u32 tmp;
+
+ for (i = 11; i > 0; i--) {
+ tmp = readl(ata_ctlstat_mmio);
+ if (tmp & PDC_RESET)
+ break;
+
+ udelay(100);
+
+ tmp |= PDC_RESET;
+ writel(tmp, ata_ctlstat_mmio);
+ }
+
+ tmp &= ~PDC_RESET;
+ writel(tmp, ata_ctlstat_mmio);
+ readl(ata_ctlstat_mmio); /* flush */
+}
+
+static int pdc_pata_cable_detect(struct ata_port *ap)
+{
+ u8 tmp;
+ void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+
+ tmp = readb(ata_mmio + PDC_CTLSTAT + 3);
+ if (tmp & 0x01)
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
+static int pdc_sata_cable_detect(struct ata_port *ap)
+{
+ return ATA_CBL_SATA;
+}
+
+static int pdc_sata_scr_read(struct ata_link *link,
+ unsigned int sc_reg, u32 *val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
+}
+
+static int pdc_sata_scr_write(struct ata_link *link,
+ unsigned int sc_reg, u32 val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
+}
+
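+/*
+ * Build a Promise ATAPI command packet. After the three header dwords
+ * the packet is a stream of (control byte, data byte) pairs: the
+ * control byte holds a register index, a count in its top three bits
+ * (the "(1 << 5)" idiom from sata_promise.h) and optional flags such
+ * as PDC_PKT_CLEAR_BSY; the data byte is the value to write.
+ */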
+static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ dma_addr_t sg_table = ap->prd_dma;
+ unsigned int cdb_len = qc->dev->cdb_len;
+ u8 *cdb = qc->cdb;
+ struct pdc_port_priv *pp = ap->private_data;
+ u8 *buf = pp->pkt;
+ __le32 *buf32 = (__le32 *) buf;
+ unsigned int dev_sel, feature;
+
+ /* set control bits (byte 0), zero delay seq id (byte 3),
+ * and seq id (byte 2)
+ */
+ switch (qc->tf.protocol) {
+ case ATAPI_PROT_DMA:
+ if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+ buf32[0] = cpu_to_le32(PDC_PKT_READ);
+ else
+ buf32[0] = 0;
+ break;
+ case ATAPI_PROT_NODATA:
+ buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
+ buf32[2] = 0; /* no next-packet */
+
+ /* select drive */
+ if (sata_scr_valid(&ap->link))
+ dev_sel = PDC_DEVICE_SATA;
+ else
+ dev_sel = qc->tf.device;
+
+ buf[12] = (1 << 5) | ATA_REG_DEVICE;
+ buf[13] = dev_sel;
+ buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
+ buf[15] = dev_sel; /* once more, waiting for BSY to clear */
+
+ buf[16] = (1 << 5) | ATA_REG_NSECT;
+ buf[17] = qc->tf.nsect;
+ buf[18] = (1 << 5) | ATA_REG_LBAL;
+ buf[19] = qc->tf.lbal;
+
+ /* set feature and byte counter registers */
+ if (qc->tf.protocol != ATAPI_PROT_DMA)
+ feature = PDC_FEATURE_ATAPI_PIO;
+ else
+ feature = PDC_FEATURE_ATAPI_DMA;
+
+ buf[20] = (1 << 5) | ATA_REG_FEATURE;
+ buf[21] = feature;
+ buf[22] = (1 << 5) | ATA_REG_BYTEL;
+ buf[23] = qc->tf.lbam;
+ buf[24] = (1 << 5) | ATA_REG_BYTEH;
+ buf[25] = qc->tf.lbah;
+
+ /* send ATAPI packet command 0xA0 */
+ buf[26] = (1 << 5) | ATA_REG_CMD;
+ buf[27] = qc->tf.command;
+
+ /* select drive and check DRQ */
+ buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
+ buf[29] = dev_sel;
+
+ /* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
+ BUG_ON(cdb_len & ~0x1E);
+
+ /* append the CDB as the final part */
+ buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
+ memcpy(buf+31, cdb, cdb_len);
+}
+
+/**
+ * pdc_fill_sg - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
+ *
+ * Fill PCI IDE PRD (scatter-gather) table with segments
+ * associated with the current disk command.
+ * Make sure hardware does not choke on it.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ */
+static void pdc_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scatterlist *sg;
+ const u32 SG_COUNT_ASIC_BUG = 41*4;
+ unsigned int si, idx;
+ u32 len;
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ idx = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, offset;
+ u32 sg_len;
+
+ /* determine if physical DMA addr spans 64K boundary.
+ * Note h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ offset = addr & 0xffff;
+ len = sg_len;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ ap->prd[idx].addr = cpu_to_le32(addr);
+ ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+ idx++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+
+ if (len > SG_COUNT_ASIC_BUG) {
+ u32 addr;
+
+ VPRINTK("Splitting last PRD.\n");
+
+ addr = le32_to_cpu(ap->prd[idx - 1].addr);
+ ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+
+ addr = addr + len - SG_COUNT_ASIC_BUG;
+ len = SG_COUNT_ASIC_BUG;
+ ap->prd[idx].addr = cpu_to_le32(addr);
+ ap->prd[idx].flags_len = cpu_to_le32(len);
+ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+ idx++;
+ }
+
+ ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+static void pdc_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct pdc_port_priv *pp = qc->ap->private_data;
+ unsigned int i;
+
+ VPRINTK("ENTER\n");
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ pdc_fill_sg(qc);
+ /*FALLTHROUGH*/
+ case ATA_PROT_NODATA:
+ i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
+ qc->dev->devno, pp->pkt);
+ if (qc->tf.flags & ATA_TFLAG_LBA48)
+ i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
+ else
+ i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
+ pdc_pkt_footer(&qc->tf, pp->pkt, i);
+ break;
+ case ATAPI_PROT_PIO:
+ pdc_fill_sg(qc);
+ break;
+ case ATAPI_PROT_DMA:
+ pdc_fill_sg(qc);
+ /*FALLTHROUGH*/
+ case ATAPI_PROT_NODATA:
+ pdc_atapi_pkt(qc);
+ break;
+ default:
+ break;
+ }
+}
+
+static int pdc_is_sataii_tx4(unsigned long flags)
+{
+ const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;
+ return (flags & mask) == mask;
+}
+
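+/*
+ * On SATAII150 TX4 boards the libata port numbers do not match the
+ * hardware ATA engine numbers, so remap through a fixed table.
+ */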
+static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
+ int is_sataii_tx4)
+{
+ static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
+ return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
+}
+
+static unsigned int pdc_sata_nr_ports(const struct ata_port *ap)
+{
+ return (ap->flags & PDC_FLAG_4_PORTS) ? 4 : 2;
+}
+
+static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
+{
+ const struct ata_host *host = ap->host;
+ unsigned int nr_ports = pdc_sata_nr_ports(ap);
+ unsigned int i;
+
+ for (i = 0; i < nr_ports && host->ports[i] != ap; ++i)
+ ;
+ BUG_ON(i >= nr_ports);
+ return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags));
+}
+
+static unsigned int pdc_sata_hotplug_offset(const struct ata_port *ap)
+{
+ return (ap->flags & PDC_FLAG_GEN_II) ? PDC2_SATA_PLUG_CSR : PDC_SATA_PLUG_CSR;
+}
+
+static void pdc_freeze(struct ata_port *ap)
+{
+ void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+ u32 tmp;
+
+ tmp = readl(ata_mmio + PDC_CTLSTAT);
+ tmp |= PDC_IRQ_DISABLE;
+ tmp &= ~PDC_DMA_ENABLE;
+ writel(tmp, ata_mmio + PDC_CTLSTAT);
+ readl(ata_mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_sata_freeze(struct ata_port *ap)
+{
+ struct ata_host *host = ap->host;
+ void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
+ unsigned int hotplug_offset = pdc_sata_hotplug_offset(ap);
+ unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
+ u32 hotplug_status;
+
+ /* Disable hotplug events on this port.
+ *
+ * Locking:
+ * 1) hotplug register accesses must be serialised via host->lock
+ * 2) ap->lock == &ap->host->lock
+ * 3) ->freeze() and ->thaw() are called with ap->lock held
+ */
+ hotplug_status = readl(host_mmio + hotplug_offset);
+ hotplug_status |= 0x11 << (ata_no + 16);
+ writel(hotplug_status, host_mmio + hotplug_offset);
+ readl(host_mmio + hotplug_offset); /* flush */
+
+ pdc_freeze(ap);
+}
+
+static void pdc_thaw(struct ata_port *ap)
+{
+ void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+ u32 tmp;
+
+ /* clear IRQ */
+ readl(ata_mmio + PDC_COMMAND);
+
+ /* turn IRQ back on */
+ tmp = readl(ata_mmio + PDC_CTLSTAT);
+ tmp &= ~PDC_IRQ_DISABLE;
+ writel(tmp, ata_mmio + PDC_CTLSTAT);
+ readl(ata_mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_sata_thaw(struct ata_port *ap)
+{
+ struct ata_host *host = ap->host;
+ void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
+ unsigned int hotplug_offset = pdc_sata_hotplug_offset(ap);
+ unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
+ u32 hotplug_status;
+
+ pdc_thaw(ap);
+
+ /* Enable hotplug events on this port.
+ * Locking: see pdc_sata_freeze().
+ */
+ hotplug_status = readl(host_mmio + hotplug_offset);
+ hotplug_status |= 0x11 << ata_no;
+ hotplug_status &= ~(0x11 << (ata_no + 16));
+ writel(hotplug_status, host_mmio + hotplug_offset);
+ readl(host_mmio + hotplug_offset); /* flush */
+}
+
+static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ pdc_reset_port(link->ap);
+ return ata_sff_softreset(link, class, deadline);
+}
+
+static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ pdc_reset_port(link->ap);
+ return sata_sff_hardreset(link, class, deadline);
+}
+
+static void pdc_error_handler(struct ata_port *ap)
+{
+ if (!(ap->pflags & ATA_PFLAG_FROZEN))
+ pdc_reset_port(ap);
+
+ ata_std_error_handler(ap);
+}
+
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* make DMA engine forget about the failed command */
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ pdc_reset_port(ap);
+}
+
+static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u32 port_status, u32 err_mask)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ unsigned int ac_err_mask = 0;
+
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status);
+ port_status &= err_mask;
+
+ if (port_status & PDC_DRIVE_ERR)
+ ac_err_mask |= AC_ERR_DEV;
+ if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
+ ac_err_mask |= AC_ERR_HSM;
+ if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
+ ac_err_mask |= AC_ERR_ATA_BUS;
+ if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
+ | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
+ ac_err_mask |= AC_ERR_HOST_BUS;
+
+ if (sata_scr_valid(&ap->link)) {
+ u32 serror;
+
+ pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
+ ehi->serror |= serror;
+ }
+
+ qc->err_mask |= ac_err_mask;
+
+ pdc_reset_port(ap);
+
+ ata_port_abort(ap);
+}
+
+static unsigned int pdc_host_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc)
+{
+ unsigned int handled = 0;
+ void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+ u32 port_status, err_mask;
+
+ err_mask = PDC_ERR_MASK;
+ if (ap->flags & PDC_FLAG_GEN_II)
+ err_mask &= ~PDC1_ERR_MASK;
+ else
+ err_mask &= ~PDC2_ERR_MASK;
+ port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
+ if (unlikely(port_status & err_mask)) {
+ pdc_error_intr(ap, qc, port_status, err_mask);
+ return 1;
+ }
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ case ATA_PROT_NODATA:
+ case ATAPI_PROT_DMA:
+ case ATAPI_PROT_NODATA:
+ qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
+ ata_qc_complete(qc);
+ handled = 1;
+ break;
+ default:
+ ap->stats.idle_irq++;
+ break;
+ }
+
+ return handled;
+}
+
+static void pdc_irq_clear(struct ata_port *ap)
+{
+ void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+
+ readl(ata_mmio + PDC_COMMAND);
+}
+
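+/*
+ * Each port owns sequence id (port_no + 1): pdc_packet_start() kicks
+ * that SEQ register, and a completed packet appears as bit
+ * (port_no + 1) in PDC_INT_SEQMASK, which the handler below reads,
+ * acknowledges and dispatches.
+ */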
+static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ata_port *ap;
+ u32 mask = 0;
+ unsigned int i, tmp;
+ unsigned int handled = 0;
+ void __iomem *host_mmio;
+ unsigned int hotplug_offset, ata_no;
+ u32 hotplug_status;
+ int is_sataii_tx4;
+
+ VPRINTK("ENTER\n");
+
+ if (!host || !host->iomap[PDC_MMIO_BAR]) {
+ VPRINTK("QUICK EXIT\n");
+ return IRQ_NONE;
+ }
+
+ host_mmio = host->iomap[PDC_MMIO_BAR];
+
+ spin_lock(&host->lock);
+
+ /* read and clear hotplug flags for all ports */
+ if (host->ports[0]->flags & PDC_FLAG_GEN_II)
+ hotplug_offset = PDC2_SATA_PLUG_CSR;
+ else
+ hotplug_offset = PDC_SATA_PLUG_CSR;
+ hotplug_status = readl(host_mmio + hotplug_offset);
+ if (hotplug_status & 0xff)
+ writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
+ hotplug_status &= 0xff; /* clear uninteresting bits */
+
+ /* reading should also clear interrupts */
+ mask = readl(host_mmio + PDC_INT_SEQMASK);
+
+ if (mask == 0xffffffff && hotplug_status == 0) {
+ VPRINTK("QUICK EXIT 2\n");
+ goto done_irq;
+ }
+
+ mask &= 0xffff; /* only 16 SEQIDs possible */
+ if (mask == 0 && hotplug_status == 0) {
+ VPRINTK("QUICK EXIT 3\n");
+ goto done_irq;
+ }
+
+ writel(mask, host_mmio + PDC_INT_SEQMASK);
+
+ is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
+
+ for (i = 0; i < host->n_ports; i++) {
+ VPRINTK("port %u\n", i);
+ ap = host->ports[i];
+
+ /* check for a plug or unplug event */
+ ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
+ tmp = hotplug_status & (0x11 << ata_no);
+ if (tmp && ap &&
+ !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
+ ata_port_freeze(ap);
+ ++handled;
+ continue;
+ }
+
+ /* check for a packet interrupt */
+ tmp = mask & (1 << (i + 1));
+ if (tmp && ap &&
+ !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+ handled += pdc_host_intr(ap, qc);
+ }
+ }
+
+ VPRINTK("EXIT\n");
+
+done_irq:
+ spin_unlock(&host->lock);
+ return IRQ_RETVAL(handled);
+}
+
+static void pdc_packet_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pdc_port_priv *pp = ap->private_data;
+ void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
+ void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
+ unsigned int port_no = ap->port_no;
+ u8 seq = (u8) (port_no + 1);
+
+ VPRINTK("ENTER, ap %p\n", ap);
+
+ writel(0x00000001, host_mmio + (seq * 4));
+ readl(host_mmio + (seq * 4)); /* flush */
+
+ pp->pkt[2] = seq;
+ wmb(); /* flush PRD, pkt writes */
+ writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
+ readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
+}
+
+static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
+{
+ switch (qc->tf.protocol) {
+ case ATAPI_PROT_NODATA:
+ if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
+ break;
+ /*FALLTHROUGH*/
+ case ATA_PROT_NODATA:
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ break;
+ /*FALLTHROUGH*/
+ case ATAPI_PROT_DMA:
+ case ATA_PROT_DMA:
+ pdc_packet_start(qc);
+ return 0;
+ default:
+ break;
+ }
+ return ata_sff_qc_issue(qc);
+}
+
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
+ ata_sff_tf_load(ap, tf);
+}
+
+static void pdc_exec_command_mmio(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
+ ata_sff_exec_command(ap, tf);
+}
+
+static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ u8 *scsicmd = qc->scsicmd->cmnd;
+ int pio = 1; /* atapi dma off by default */
+
+ /* Whitelist commands that may use DMA. */
+ switch (scsicmd[0]) {
+ case WRITE_12:
+ case WRITE_10:
+ case WRITE_6:
+ case READ_12:
+ case READ_10:
+ case READ_6:
+ case 0xad: /* READ_DVD_STRUCTURE */
+ case 0xbe: /* READ_CD */
+ pio = 0;
+ }
+	/* WRITE_10 commands with an LBA from -45150 (FFFF4FA2)
+	 * to -1 (FFFFFFFF) shall use PIO mode */
+ if (scsicmd[0] == WRITE_10) {
+ unsigned int lba =
+ (scsicmd[2] << 24) |
+ (scsicmd[3] << 16) |
+ (scsicmd[4] << 8) |
+ scsicmd[5];
+ if (lba >= 0xFFFF4FA2)
+ pio = 1;
+ }
+ return pio;
+}
+
+static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ /* First generation chips cannot use ATAPI DMA on SATA ports */
+ return 1;
+}
+
+static void pdc_ata_setup_port(struct ata_port *ap,
+ void __iomem *base, void __iomem *scr_addr)
+{
+ ap->ioaddr.cmd_addr = base;
+ ap->ioaddr.data_addr = base;
+ ap->ioaddr.feature_addr =
+ ap->ioaddr.error_addr = base + 0x4;
+ ap->ioaddr.nsect_addr = base + 0x8;
+ ap->ioaddr.lbal_addr = base + 0xc;
+ ap->ioaddr.lbam_addr = base + 0x10;
+ ap->ioaddr.lbah_addr = base + 0x14;
+ ap->ioaddr.device_addr = base + 0x18;
+ ap->ioaddr.command_addr =
+ ap->ioaddr.status_addr = base + 0x1c;
+ ap->ioaddr.altstatus_addr =
+ ap->ioaddr.ctl_addr = base + 0x38;
+ ap->ioaddr.scr_addr = scr_addr;
+}
+
+static void pdc_host_init(struct ata_host *host)
+{
+ void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
+ int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
+ int hotplug_offset;
+ u32 tmp;
+
+ if (is_gen2)
+ hotplug_offset = PDC2_SATA_PLUG_CSR;
+ else
+ hotplug_offset = PDC_SATA_PLUG_CSR;
+
+ /*
+ * Except for the hotplug stuff, this is voodoo from the
+ * Promise driver. Label this entire section
+ * "TODO: figure out why we do this"
+ */
+
+ /* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
+ tmp = readl(host_mmio + PDC_FLASH_CTL);
+ tmp |= 0x02000; /* bit 13 (enable bmr burst) */
+ if (!is_gen2)
+ tmp |= 0x10000; /* bit 16 (fifo threshold at 8 dw) */
+ writel(tmp, host_mmio + PDC_FLASH_CTL);
+
+ /* clear plug/unplug flags for all ports */
+ tmp = readl(host_mmio + hotplug_offset);
+ writel(tmp | 0xff, host_mmio + hotplug_offset);
+
+ /* unmask plug/unplug ints */
+ tmp = readl(host_mmio + hotplug_offset);
+ writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
+
+ /* don't initialise TBG or SLEW on 2nd generation chips */
+ if (is_gen2)
+ return;
+
+	/* reduce TBG clock to 133 MHz. */
+	tmp = readl(host_mmio + PDC_TBG_MODE);
+	tmp &= ~0x30000; /* clear bits 17:16 */
+ tmp |= 0x10000; /* set bit 17:16 = 0:1 */
+ writel(tmp, host_mmio + PDC_TBG_MODE);
+
+ readl(host_mmio + PDC_TBG_MODE); /* flush */
+ msleep(10);
+
+ /* adjust slew rate control register. */
+ tmp = readl(host_mmio + PDC_SLEW_CTL);
+	tmp &= 0xFFFFF03F; /* clear bits 11:6 */
+	tmp |= 0x00000900; /* set bits 11:9 = 100b, bits 8:6 = 100b */
+ writel(tmp, host_mmio + PDC_SLEW_CTL);
+}
+
+static int pdc_ata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
+ const struct ata_port_info *ppi[PDC_MAX_PORTS];
+ struct ata_host *host;
+ void __iomem *host_mmio;
+ int n_ports, i, rc;
+ int is_sataii_tx4;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* enable and acquire resources */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+ host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];
+
+ /* determine port configuration and setup host */
+ n_ports = 2;
+ if (pi->flags & PDC_FLAG_4_PORTS)
+ n_ports = 4;
+ for (i = 0; i < n_ports; i++)
+ ppi[i] = pi;
+
+ if (pi->flags & PDC_FLAG_SATA_PATA) {
+ u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
+ if (!(tmp & 0x80))
+ ppi[n_ports++] = pi + 1;
+ }
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ if (!host) {
+ dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
+ return -ENOMEM;
+ }
+ host->iomap = pcim_iomap_table(pdev);
+
+ is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
+ unsigned int ata_offset = 0x200 + ata_no * 0x80;
+ unsigned int scr_offset = 0x400 + ata_no * 0x100;
+
+ pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);
+
+ ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
+ }
+
+ /* initialize adapter */
+ pdc_host_init(host);
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ /* start host, request IRQ and attach */
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED,
+ &pdc_ata_sht);
+}
+
+static int __init pdc_ata_init(void)
+{
+ return pci_register_driver(&pdc_ata_pci_driver);
+}
+
+static void __exit pdc_ata_exit(void)
+{
+ pci_unregister_driver(&pdc_ata_pci_driver);
+}
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(pdc_ata_init);
+module_exit(pdc_ata_exit);
diff --git a/drivers/ata/sata_promise.h b/drivers/ata/sata_promise.h
new file mode 100644
index 0000000..00d6000
--- /dev/null
+++ b/drivers/ata/sata_promise.h
@@ -0,0 +1,157 @@
+/*
+ * sata_promise.h - Promise SATA common definitions and inline funcs
+ *
+ * Copyright 2003-2004 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ */
+
+#ifndef __SATA_PROMISE_H__
+#define __SATA_PROMISE_H__
+
+#include <linux/ata.h>
+
+enum pdc_packet_bits {
+ PDC_PKT_READ = (1 << 2),
+ PDC_PKT_NODATA = (1 << 3),
+
+ PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5),
+ PDC_PKT_CLEAR_BSY = (1 << 4),
+ PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4),
+ PDC_LAST_REG = (1 << 3),
+
+ PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1),
+};
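+
+/*
+ * A Promise command packet is a byte stream of (control, value) pairs:
+ * each control byte names a taskfile register in its low bits and
+ * carries a repeat count in bits 7:5, and is followed by that many data
+ * bytes.  The helpers below emit these pairs in order, each returning
+ * the offset of the next free byte in the packet buffer.
+ */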
+
+static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
+ dma_addr_t sg_table,
+ unsigned int devno, u8 *buf)
+{
+ u8 dev_reg;
+ __le32 *buf32 = (__le32 *) buf;
+
+ /* set control bits (byte 0), zero delay seq id (byte 3),
+ * and seq id (byte 2)
+ */
+ switch (tf->protocol) {
+ case ATA_PROT_DMA:
+ if (!(tf->flags & ATA_TFLAG_WRITE))
+ buf32[0] = cpu_to_le32(PDC_PKT_READ);
+ else
+ buf32[0] = 0;
+ break;
+
+ case ATA_PROT_NODATA:
+ buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
+ buf32[2] = 0; /* no next-packet */
+
+ if (devno == 0)
+ dev_reg = ATA_DEVICE_OBS;
+ else
+ dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
+
+ /* select device */
+ buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
+ buf[13] = dev_reg;
+
+ /* device control register */
+ buf[14] = (1 << 5) | PDC_REG_DEVCTL;
+ buf[15] = tf->ctl;
+
+ return 16; /* offset of next byte */
+}
+
+static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
+ unsigned int i)
+{
+ if (tf->flags & ATA_TFLAG_DEVICE) {
+ buf[i++] = (1 << 5) | ATA_REG_DEVICE;
+ buf[i++] = tf->device;
+ }
+
+ /* and finally the command itself; also includes end-of-pkt marker */
+ buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
+ buf[i++] = tf->command;
+
+ return i;
+}
+
+static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i)
+{
+ /* the "(1 << 5)" should be read "(count << 5)" */
+
+ /* ATA command block registers */
+ buf[i++] = (1 << 5) | ATA_REG_FEATURE;
+ buf[i++] = tf->feature;
+
+ buf[i++] = (1 << 5) | ATA_REG_NSECT;
+ buf[i++] = tf->nsect;
+
+ buf[i++] = (1 << 5) | ATA_REG_LBAL;
+ buf[i++] = tf->lbal;
+
+ buf[i++] = (1 << 5) | ATA_REG_LBAM;
+ buf[i++] = tf->lbam;
+
+ buf[i++] = (1 << 5) | ATA_REG_LBAH;
+ buf[i++] = tf->lbah;
+
+ return i;
+}
+
+static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i)
+{
+ /* the "(2 << 5)" should be read "(count << 5)" */
+
+ /* ATA command block registers */
+ buf[i++] = (2 << 5) | ATA_REG_FEATURE;
+ buf[i++] = tf->hob_feature;
+ buf[i++] = tf->feature;
+
+ buf[i++] = (2 << 5) | ATA_REG_NSECT;
+ buf[i++] = tf->hob_nsect;
+ buf[i++] = tf->nsect;
+
+ buf[i++] = (2 << 5) | ATA_REG_LBAL;
+ buf[i++] = tf->hob_lbal;
+ buf[i++] = tf->lbal;
+
+ buf[i++] = (2 << 5) | ATA_REG_LBAM;
+ buf[i++] = tf->hob_lbam;
+ buf[i++] = tf->lbam;
+
+ buf[i++] = (2 << 5) | ATA_REG_LBAH;
+ buf[i++] = tf->hob_lbah;
+ buf[i++] = tf->lbah;
+
+ return i;
+}
+
+
+#endif /* __SATA_PROMISE_H__ */
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
new file mode 100644
index 0000000..a000c86
--- /dev/null
+++ b/drivers/ata/sata_qstor.c
@@ -0,0 +1,683 @@
+/*
+ * sata_qstor.c - Pacific Digital Corporation QStor SATA
+ *
+ * Maintained by: Mark Lord <mlord@pobox.com>
+ *
+ * Copyright 2005 Pacific Digital Corporation.
+ * (OSL/GPL code release authorized by Jalil Fadavi).
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_qstor"
+#define DRV_VERSION "0.09"
+
+enum {
+ QS_MMIO_BAR = 4,
+
+ QS_PORTS = 4,
+ QS_MAX_PRD = LIBATA_MAX_PRD,
+ QS_CPB_ORDER = 6,
+ QS_CPB_BYTES = (1 << QS_CPB_ORDER),
+ QS_PRD_BYTES = QS_MAX_PRD * 16,
+ QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
+
+ /* global register offsets */
+ QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
+ QS_HID_HPHY = 0x0004, /* host physical interface info */
+ QS_HCT_CTRL = 0x00e4, /* global interrupt mask offset */
+ QS_HST_SFF = 0x0100, /* host status fifo offset */
+ QS_HVS_SERD3 = 0x0393, /* PHY enable offset */
+
+ /* global control bits */
+ QS_HPHY_64BIT = (1 << 1), /* 64-bit bus detected */
+ QS_CNFG3_GSRST = 0x01, /* global chip reset */
+ QS_SERD3_PHY_ENA = 0xf0, /* PHY detection ENAble */
+
+ /* per-channel register offsets */
+ QS_CCF_CPBA = 0x0710, /* chan CPB base address */
+ QS_CCF_CSEP = 0x0718, /* chan CPB separation factor */
+ QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */
+ QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */
+ QS_CFC_DUFT = 0x0808, /* dev upstream fifo threshold */
+ QS_CFC_DDFT = 0x080c, /* dev downstream fifo threshold */
+ QS_CCT_CTR0 = 0x0900, /* chan control-0 offset */
+ QS_CCT_CTR1 = 0x0901, /* chan control-1 offset */
+ QS_CCT_CFF = 0x0a00, /* chan command fifo offset */
+
+ /* channel control bits */
+ QS_CTR0_REG = (1 << 1), /* register mode (vs. pkt mode) */
+ QS_CTR0_CLER = (1 << 2), /* clear channel errors */
+ QS_CTR1_RDEV = (1 << 1), /* sata phy/comms reset */
+ QS_CTR1_RCHN = (1 << 4), /* reset channel logic */
+ QS_CCF_RUN_PKT = 0x107, /* RUN a new dma PKT */
+
+ /* pkt sub-field headers */
+ QS_HCB_HDR = 0x01, /* Host Control Block header */
+ QS_DCB_HDR = 0x02, /* Device Control Block header */
+
+ /* pkt HCB flag bits */
+ QS_HF_DIRO = (1 << 0), /* data DIRection Out */
+ QS_HF_DAT = (1 << 3), /* DATa pkt */
+ QS_HF_IEN = (1 << 4), /* Interrupt ENable */
+ QS_HF_VLD = (1 << 5), /* VaLiD pkt */
+
+ /* pkt DCB flag bits */
+ QS_DF_PORD = (1 << 2), /* Pio OR Dma */
+ QS_DF_ELBA = (1 << 3), /* Extended LBA (lba48) */
+
+ /* PCI device IDs */
+ board_2068_idx = 0, /* QStor 4-port SATA/RAID */
+};
+
+enum {
+ QS_DMA_BOUNDARY = ~0UL
+};
+
+typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;
+
+struct qs_port_priv {
+ u8 *pkt;
+ dma_addr_t pkt_dma;
+ qs_state_t state;
+};
+
+static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int qs_port_start(struct ata_port *ap);
+static void qs_host_stop(struct ata_host *host);
+static void qs_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
+static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
+static void qs_bmdma_stop(struct ata_queued_cmd *qc);
+static u8 qs_bmdma_status(struct ata_port *ap);
+static void qs_freeze(struct ata_port *ap);
+static void qs_thaw(struct ata_port *ap);
+static int qs_prereset(struct ata_link *link, unsigned long deadline);
+static void qs_error_handler(struct ata_port *ap);
+
+static struct scsi_host_template qs_ata_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = QS_MAX_PRD,
+ .dma_boundary = QS_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations qs_ata_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .check_atapi_dma = qs_check_atapi_dma,
+ .bmdma_stop = qs_bmdma_stop,
+ .bmdma_status = qs_bmdma_status,
+ .qc_prep = qs_qc_prep,
+ .qc_issue = qs_qc_issue,
+
+ .freeze = qs_freeze,
+ .thaw = qs_thaw,
+ .prereset = qs_prereset,
+ .softreset = ATA_OP_NULL,
+ .error_handler = qs_error_handler,
+ .post_internal_cmd = ATA_OP_NULL,
+
+ .scr_read = qs_scr_read,
+ .scr_write = qs_scr_write,
+
+ .port_start = qs_port_start,
+ .host_stop = qs_host_stop,
+};
+
+static const struct ata_port_info qs_port_info[] = {
+ /* board_2068_idx */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
+ .pio_mask = 0x10, /* pio4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &qs_ata_ops,
+ },
+};
+
+static const struct pci_device_id qs_ata_pci_tbl[] = {
+ { PCI_VDEVICE(PDC, 0x2068), board_2068_idx },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver qs_ata_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = qs_ata_pci_tbl,
+ .probe = qs_ata_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static void __iomem *qs_mmio_base(struct ata_host *host)
+{
+ return host->iomap[QS_MMIO_BAR];
+}
+
+static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ return 1; /* ATAPI DMA not supported */
+}
+
+static void qs_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ /* nothing */
+}
+
+static u8 qs_bmdma_status(struct ata_port *ap)
+{
+ return 0;
+}
+
+static inline void qs_enter_reg_mode(struct ata_port *ap)
+{
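+	/* per-channel register blocks are spaced 0x4000 bytes apart
+	 * within the MMIO BAR */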
+ u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
+ struct qs_port_priv *pp = ap->private_data;
+
+ pp->state = qs_state_mmio;
+ writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
+ readb(chan + QS_CCT_CTR0); /* flush */
+}
+
+static inline void qs_reset_channel_logic(struct ata_port *ap)
+{
+ u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
+
+ writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
+ readb(chan + QS_CCT_CTR0); /* flush */
+ qs_enter_reg_mode(ap);
+}
+
+static void qs_freeze(struct ata_port *ap)
+{
+ u8 __iomem *mmio_base = qs_mmio_base(ap->host);
+
+ writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
+ qs_enter_reg_mode(ap);
+}
+
+static void qs_thaw(struct ata_port *ap)
+{
+ u8 __iomem *mmio_base = qs_mmio_base(ap->host);
+
+ qs_enter_reg_mode(ap);
+ writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
+}
+
+static int qs_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+
+ qs_reset_channel_logic(ap);
+ return ata_sff_prereset(link, deadline);
+}
+
+static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
+ return 0;
+}
+
+static void qs_error_handler(struct ata_port *ap)
+{
+ qs_enter_reg_mode(ap);
+ ata_std_error_handler(ap);
+}
+
+static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
+ return 0;
+}
+
+static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct scatterlist *sg;
+ struct ata_port *ap = qc->ap;
+ struct qs_port_priv *pp = ap->private_data;
+ u8 *prd = pp->pkt + QS_CPB_BYTES;
+ unsigned int si;
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u64 addr;
+ u32 len;
+
+ addr = sg_dma_address(sg);
+ *(__le64 *)prd = cpu_to_le64(addr);
+ prd += sizeof(u64);
+
+ len = sg_dma_len(sg);
+ *(__le32 *)prd = cpu_to_le32(len);
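+		/* each hardware PRD entry occupies 16 bytes
+		 * (QS_PRD_BYTES / QS_MAX_PRD), so step a full u64 past
+		 * the 32-bit length to skip the remaining pad */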
+ prd += sizeof(u64);
+
+ VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
+ (unsigned long long)addr, len);
+ }
+
+ return si;
+}
+
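+/*
+ * Each port owns one QS_PKT_BYTES DMA buffer: the first QS_CPB_BYTES
+ * hold the host control block, device control block and command FIS,
+ * and the scatter/gather list begins immediately after, which is why
+ * the HCB's S/G address below points at pkt_dma + QS_CPB_BYTES.
+ */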
+static void qs_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct qs_port_priv *pp = qc->ap->private_data;
+ u8 dflags = QS_DF_PORD, *buf = pp->pkt;
+ u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
+ u64 addr;
+ unsigned int nelem;
+
+ VPRINTK("ENTER\n");
+
+ qs_enter_reg_mode(qc->ap);
+ if (qc->tf.protocol != ATA_PROT_DMA) {
+ ata_sff_qc_prep(qc);
+ return;
+ }
+
+ nelem = qs_fill_sg(qc);
+
+ if ((qc->tf.flags & ATA_TFLAG_WRITE))
+ hflags |= QS_HF_DIRO;
+ if ((qc->tf.flags & ATA_TFLAG_LBA48))
+ dflags |= QS_DF_ELBA;
+
+ /* host control block (HCB) */
+ buf[ 0] = QS_HCB_HDR;
+ buf[ 1] = hflags;
+ *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
+ *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
+ addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
+ *(__le64 *)(&buf[16]) = cpu_to_le64(addr);
+
+ /* device control block (DCB) */
+ buf[24] = QS_DCB_HDR;
+ buf[28] = dflags;
+
+ /* frame information structure (FIS) */
+ ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
+}
+
+static inline void qs_packet_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
+
+ VPRINTK("ENTER, ap %p\n", ap);
+
+ writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
+ wmb(); /* flush PRDs and pkt to memory */
+ writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
+ readl(chan + QS_CCT_CFF); /* flush */
+}
+
+static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct qs_port_priv *pp = qc->ap->private_data;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ pp->state = qs_state_pkt;
+ qs_packet_start(qc);
+ return 0;
+
+ case ATAPI_PROT_DMA:
+ BUG();
+ break;
+
+ default:
+ break;
+ }
+
+ pp->state = qs_state_mmio;
+ return ata_sff_qc_issue(qc);
+}
+
+static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
+{
+ qc->err_mask |= ac_err_mask(status);
+
+ if (!qc->err_mask) {
+ ata_qc_complete(qc);
+ } else {
+ struct ata_port *ap = qc->ap;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "status 0x%02X", status);
+
+ if (qc->err_mask == AC_ERR_DEV)
+ ata_port_abort(ap);
+ else
+ ata_port_freeze(ap);
+ }
+}
+
+static inline unsigned int qs_intr_pkt(struct ata_host *host)
+{
+ unsigned int handled = 0;
+ u8 sFFE;
+ u8 __iomem *mmio_base = qs_mmio_base(host);
+
+ do {
+ u32 sff0 = readl(mmio_base + QS_HST_SFF);
+ u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
+ u8 sEVLD = (sff1 >> 30) & 0x01; /* valid flag */
+ sFFE = sff1 >> 31; /* empty flag */
+
+ if (sEVLD) {
+ u8 sDST = sff0 >> 16; /* dev status */
+ u8 sHST = sff1 & 0x3f; /* host status */
+ unsigned int port_no = (sff1 >> 8) & 0x03;
+ struct ata_port *ap = host->ports[port_no];
+
+ DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
+ sff1, sff0, port_no, sHST, sDST);
+ handled = 1;
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct ata_queued_cmd *qc;
+ struct qs_port_priv *pp = ap->private_data;
+ if (!pp || pp->state != qs_state_pkt)
+ continue;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+ switch (sHST) {
+ case 0: /* successful CPB */
+ case 3: /* device error */
+ qs_enter_reg_mode(qc->ap);
+ qs_do_or_die(qc, sDST);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+ } while (!sFFE);
+ return handled;
+}
+
+static inline unsigned int qs_intr_mmio(struct ata_host *host)
+{
+ unsigned int handled = 0, port_no;
+
+ for (port_no = 0; port_no < host->n_ports; ++port_no) {
+ struct ata_port *ap;
+ ap = host->ports[port_no];
+ if (ap &&
+ !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct ata_queued_cmd *qc;
+ struct qs_port_priv *pp;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (!qc || !(qc->flags & ATA_QCFLAG_ACTIVE)) {
+ /*
+ * The qstor hardware generates spurious
+ * interrupts from time to time when switching
+ * in and out of packet mode.
+ * There's no obvious way to know if we're
+ * here now due to that, so just ack the irq
+ * and pretend we knew it was ours.. (ugh).
+ * This does not affect packet mode.
+ */
+ ata_sff_check_status(ap);
+ handled = 1;
+ continue;
+ }
+ pp = ap->private_data;
+ if (!pp || pp->state != qs_state_mmio)
+ continue;
+ if (!(qc->tf.flags & ATA_TFLAG_POLLING))
+ handled |= ata_sff_host_intr(ap, qc);
+ }
+ }
+ return handled;
+}
+
+static irqreturn_t qs_intr(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ unsigned int handled = 0;
+ unsigned long flags;
+
+ VPRINTK("ENTER\n");
+
+ spin_lock_irqsave(&host->lock, flags);
+ handled = qs_intr_pkt(host) | qs_intr_mmio(host);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+
+static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+ port->cmd_addr =
+ port->data_addr = base + 0x400;
+ port->error_addr =
+ port->feature_addr = base + 0x408; /* hob_feature = 0x409 */
+ port->nsect_addr = base + 0x410; /* hob_nsect = 0x411 */
+ port->lbal_addr = base + 0x418; /* hob_lbal = 0x419 */
+ port->lbam_addr = base + 0x420; /* hob_lbam = 0x421 */
+ port->lbah_addr = base + 0x428; /* hob_lbah = 0x429 */
+ port->device_addr = base + 0x430;
+ port->status_addr =
+ port->command_addr = base + 0x438;
+ port->altstatus_addr =
+ port->ctl_addr = base + 0x440;
+ port->scr_addr = base + 0xc00;
+}
+
+static int qs_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct qs_port_priv *pp;
+ void __iomem *mmio_base = qs_mmio_base(ap->host);
+ void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
+ u64 addr;
+ int rc;
+
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+ pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
+ GFP_KERNEL);
+ if (!pp->pkt)
+ return -ENOMEM;
+ memset(pp->pkt, 0, QS_PKT_BYTES);
+ ap->private_data = pp;
+
+ qs_enter_reg_mode(ap);
+ addr = (u64)pp->pkt_dma;
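+	/* program the 64-bit CPB base address as two 32-bit halves */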
+ writel((u32) addr, chan + QS_CCF_CPBA);
+ writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
+ return 0;
+}
+
+static void qs_host_stop(struct ata_host *host)
+{
+ void __iomem *mmio_base = qs_mmio_base(host);
+
+ writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
+ writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
+}
+
+static void qs_host_init(struct ata_host *host, unsigned int chip_id)
+{
+ void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
+ unsigned int port_no;
+
+ writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
+ writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
+
+ /* reset each channel in turn */
+ for (port_no = 0; port_no < host->n_ports; ++port_no) {
+ u8 __iomem *chan = mmio_base + (port_no * 0x4000);
+ writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
+ writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
+ readb(chan + QS_CCT_CTR0); /* flush */
+ }
+ writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */
+
+ for (port_no = 0; port_no < host->n_ports; ++port_no) {
+ u8 __iomem *chan = mmio_base + (port_no * 0x4000);
+ /* set FIFO depths to same settings as Windows driver */
+ writew(32, chan + QS_CFC_HUFT);
+ writew(32, chan + QS_CFC_HDFT);
+ writew(10, chan + QS_CFC_DUFT);
+ writew( 8, chan + QS_CFC_DDFT);
+ /* set CPB size in bytes, as a power of two */
+ writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
+ }
+ writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
+}
+
+/*
+ * The QStor understands 64-bit buses, and uses 64-bit fields
+ * for DMA pointers regardless of bus width. We just have to
+ * make sure our DMA masks are set appropriately for whatever
+ * bridge lies between us and the QStor, and then the DMA mapping
+ * code will ensure we only ever "see" appropriate buffer addresses.
+ * If we're 32-bit limited somewhere, then our 64-bit fields will
+ * just end up with zeros in the upper 32-bits, without any special
+ * logic required outside of this routine (below).
+ */
+static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
+{
+ u32 bus_info = readl(mmio_base + QS_HID_HPHY);
+ int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
+
+ if (have_64bit_bus &&
+ !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "64-bit DMA enable failed\n");
+ return rc;
+ }
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int qs_ata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ unsigned int board_idx = (unsigned int) ent->driver_data;
+ const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
+ struct ata_host *host;
+ int rc, port_no;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* alloc host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
+ if (!host)
+ return -ENOMEM;
+
+ /* acquire resources and fill host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
+ return -ENODEV;
+
+ rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+
+ rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
+ if (rc)
+ return rc;
+
+ for (port_no = 0; port_no < host->n_ports; ++port_no) {
+ struct ata_port *ap = host->ports[port_no];
+ unsigned int offset = port_no * 0x4000;
+ void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;
+
+ qs_ata_setup_port(&ap->ioaddr, chan);
+
+ ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
+ }
+
+ /* initialize adapter */
+ qs_host_init(host, board_idx);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
+ &qs_ata_sht);
+}
+
+static int __init qs_ata_init(void)
+{
+ return pci_register_driver(&qs_ata_pci_driver);
+}
+
+static void __exit qs_ata_exit(void)
+{
+ pci_unregister_driver(&qs_ata_pci_driver);
+}
+
+MODULE_AUTHOR("Mark Lord");
+MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(qs_ata_init);
+module_exit(qs_ata_exit);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
new file mode 100644
index 0000000..031d7b7
--- /dev/null
+++ b/drivers/ata/sata_sil.c
@@ -0,0 +1,701 @@
+/*
+ * sata_sil.c - Silicon Image SATA
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003-2005 Red Hat, Inc.
+ * Copyright 2003 Benjamin Herrenschmidt
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Documentation for SiI 3112:
+ * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
+ *
+ * Other errata and documentation available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_sil"
+#define DRV_VERSION "2.3"
+
+enum {
+ SIL_MMIO_BAR = 5,
+
+ /*
+ * host flags
+ */
+ SIL_FLAG_NO_SATA_IRQ = (1 << 28),
+ SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
+ SIL_FLAG_MOD15WRITE = (1 << 30),
+
+ SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO,
+
+ /*
+ * Controller IDs
+ */
+ sil_3112 = 0,
+ sil_3112_no_sata_irq = 1,
+ sil_3512 = 2,
+ sil_3114 = 3,
+
+ /*
+ * Register offsets
+ */
+ SIL_SYSCFG = 0x48,
+
+ /*
+ * Register bits
+ */
+ /* SYSCFG */
+ SIL_MASK_IDE0_INT = (1 << 22),
+ SIL_MASK_IDE1_INT = (1 << 23),
+ SIL_MASK_IDE2_INT = (1 << 24),
+ SIL_MASK_IDE3_INT = (1 << 25),
+ SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT,
+ SIL_MASK_4PORT = SIL_MASK_2PORT |
+ SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT,
+
+ /* BMDMA/BMDMA2 */
+ SIL_INTR_STEERING = (1 << 1),
+
+ SIL_DMA_ENABLE = (1 << 0), /* DMA run switch */
+ SIL_DMA_RDWR = (1 << 3), /* DMA Rd-Wr */
+ SIL_DMA_SATA_IRQ = (1 << 4), /* OR of all SATA IRQs */
+ SIL_DMA_ACTIVE = (1 << 16), /* DMA running */
+ SIL_DMA_ERROR = (1 << 17), /* PCI bus error */
+ SIL_DMA_COMPLETE = (1 << 18), /* cmd complete / IRQ pending */
+ SIL_DMA_N_SATA_IRQ = (1 << 6), /* SATA_IRQ for the next channel */
+ SIL_DMA_N_ACTIVE = (1 << 24), /* ACTIVE for the next channel */
+ SIL_DMA_N_ERROR = (1 << 25), /* ERROR for the next channel */
+ SIL_DMA_N_COMPLETE = (1 << 26), /* COMPLETE for the next channel */
+
+ /* SIEN */
+ SIL_SIEN_N = (1 << 16), /* triggered by SError.N */
+
+ /*
+ * Others
+ */
+ SIL_QUIRK_MOD15WRITE = (1 << 0),
+ SIL_QUIRK_UDMA5MAX = (1 << 1),
+};
+
+static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM
+static int sil_pci_device_resume(struct pci_dev *pdev);
+#endif
+static void sil_dev_config(struct ata_device *dev);
+static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
+static void sil_freeze(struct ata_port *ap);
+static void sil_thaw(struct ata_port *ap);
+
+
+static const struct pci_device_id sil_pci_tbl[] = {
+ { PCI_VDEVICE(CMD, 0x3112), sil_3112 },
+ { PCI_VDEVICE(CMD, 0x0240), sil_3112 },
+ { PCI_VDEVICE(CMD, 0x3512), sil_3512 },
+ { PCI_VDEVICE(CMD, 0x3114), sil_3114 },
+ { PCI_VDEVICE(ATI, 0x436e), sil_3112 },
+ { PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
+ { PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },
+
+ { } /* terminate list */
+};
+
+
+/* TODO firmware versions should be added - eric */
+static const struct sil_drivelist {
+ const char *product;
+ unsigned int quirk;
+} sil_blacklist[] = {
+ { "ST320012AS", SIL_QUIRK_MOD15WRITE },
+ { "ST330013AS", SIL_QUIRK_MOD15WRITE },
+ { "ST340017AS", SIL_QUIRK_MOD15WRITE },
+ { "ST360015AS", SIL_QUIRK_MOD15WRITE },
+ { "ST380023AS", SIL_QUIRK_MOD15WRITE },
+ { "ST3120023AS", SIL_QUIRK_MOD15WRITE },
+ { "ST340014ASL", SIL_QUIRK_MOD15WRITE },
+ { "ST360014ASL", SIL_QUIRK_MOD15WRITE },
+ { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
+ { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
+ { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
+ { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
+ { }
+};
+
+static struct pci_driver sil_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sil_pci_tbl,
+ .probe = sil_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = sil_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sil_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sil_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .dev_config = sil_dev_config,
+ .set_mode = sil_set_mode,
+ .freeze = sil_freeze,
+ .thaw = sil_thaw,
+ .scr_read = sil_scr_read,
+ .scr_write = sil_scr_write,
+};
+
+static const struct ata_port_info sil_port_info[] = {
+ /* sil_3112 */
+ {
+ .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &sil_ops,
+ },
+ /* sil_3112_no_sata_irq */
+ {
+ .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
+ SIL_FLAG_NO_SATA_IRQ,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &sil_ops,
+ },
+ /* sil_3512 */
+ {
+ .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &sil_ops,
+ },
+ /* sil_3114 */
+ {
+ .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &sil_ops,
+ },
+};
+
+/* per-port register offsets */
+/* TODO: we can probably calculate rather than use a table */
+static const struct {
+ unsigned long tf; /* ATA taskfile register block */
+ unsigned long ctl; /* ATA control/altstatus register block */
+ unsigned long bmdma; /* DMA register block */
+ unsigned long bmdma2; /* DMA register block #2 */
+ unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */
+ unsigned long scr; /* SATA control register block */
+ unsigned long sien; /* SATA Interrupt Enable register */
+ unsigned long xfer_mode;/* data transfer mode register */
+ unsigned long sfis_cfg; /* SATA FIS reception config register */
+} sil_port[] = {
+ /* port 0 ... */
+ /* tf ctl bmdma bmdma2 fifo scr sien mode sfis */
+ { 0x80, 0x8A, 0x0, 0x10, 0x40, 0x100, 0x148, 0xb4, 0x14c },
+ { 0xC0, 0xCA, 0x8, 0x18, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc },
+ { 0x280, 0x28A, 0x200, 0x210, 0x240, 0x300, 0x348, 0x2b4, 0x34c },
+ { 0x2C0, 0x2CA, 0x208, 0x218, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc },
+ /* ... port 3 */
+};
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+static int slow_down;
+module_param(slow_down, int, 0444);
+MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
+
+
+static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
+{
+ u8 cache_line = 0;
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line);
+ return cache_line;
+}
+
+/**
+ * sil_set_mode - wrap set_mode functions
+ * @link: link to set up
+ * @r_failed: returned device when we fail
+ *
+ * Wrap the libata method for device setup as after the setup we need
+ * to inspect the results and do some configuration work
+ */
+
+static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+ void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
+ struct ata_device *dev;
+ u32 tmp, dev_mode[2] = { };
+ int rc;
+
+ rc = ata_do_set_mode(link, r_failed);
+ if (rc)
+ return rc;
+
+ ata_link_for_each_dev(dev, link) {
+ if (!ata_dev_enabled(dev))
+ dev_mode[dev->devno] = 0; /* PIO0/1/2 */
+ else if (dev->flags & ATA_DFLAG_PIO)
+ dev_mode[dev->devno] = 1; /* PIO3/4 */
+ else
+ dev_mode[dev->devno] = 3; /* UDMA */
+ /* value 2 indicates MDMA */
+ }
+
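+	/* the xfer mode field sits at bits [1:0] for device 0 and
+	 * bits [5:4] for device 1 */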
+ tmp = readl(addr);
+ tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0));
+ tmp |= dev_mode[0];
+ tmp |= (dev_mode[1] << 4);
+ writel(tmp, addr);
+ readl(addr); /* flush */
+ return 0;
+}
+
+static inline void __iomem *sil_scr_addr(struct ata_port *ap,
+ unsigned int sc_reg)
+{
+ void __iomem *offset = ap->ioaddr.scr_addr;
+
+ switch (sc_reg) {
+ case SCR_STATUS:
+ return offset + 4;
+ case SCR_ERROR:
+ return offset + 8;
+ case SCR_CONTROL:
+ return offset;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return NULL;
+}
+
+static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
+
+ if (mmio) {
+ *val = readl(mmio);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
+
+ if (mmio) {
+ writel(val, mmio);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ u8 status;
+
+ if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
+ u32 serror;
+
+ /* SIEN doesn't mask SATA IRQs on some 3112s. Those
+ * controllers continue to assert IRQ as long as
+ * SError bits are pending. Clear SError immediately.
+ */
+ sil_scr_read(&ap->link, SCR_ERROR, &serror);
+ sil_scr_write(&ap->link, SCR_ERROR, serror);
+
+ /* Sometimes spurious interrupts occur, double check
+ * it's PHYRDY CHG.
+ */
+ if (serror & SERR_PHYRDY_CHG) {
+ ap->link.eh_info.serror |= serror;
+ goto freeze;
+ }
+
+ if (!(bmdma2 & SIL_DMA_COMPLETE))
+ return;
+ }
+
+ if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+ /* this sometimes happens, just clear IRQ */
+ ap->ops->sff_check_status(ap);
+ return;
+ }
+
+ /* Check whether we are expecting interrupt in this state */
+ switch (ap->hsm_task_state) {
+ case HSM_ST_FIRST:
+ /* Some pre-ATAPI-4 devices assert INTRQ
+ * at this state when ready to receive CDB.
+ */
+
+ /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
+ * The flag was turned on only for atapi devices. No
+ * need to check ata_is_atapi(qc->tf.protocol) again.
+ */
+ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+ goto err_hsm;
+ break;
+ case HSM_ST_LAST:
+ if (ata_is_dma(qc->tf.protocol)) {
+ /* clear DMA-Start bit */
+ ap->ops->bmdma_stop(qc);
+
+ if (bmdma2 & SIL_DMA_ERROR) {
+ qc->err_mask |= AC_ERR_HOST_BUS;
+ ap->hsm_task_state = HSM_ST_ERR;
+ }
+ }
+ break;
+ case HSM_ST:
+ break;
+ default:
+ goto err_hsm;
+ }
+
+ /* check main status, clearing INTRQ */
+ status = ap->ops->sff_check_status(ap);
+ if (unlikely(status & ATA_BUSY))
+ goto err_hsm;
+
+ /* ack bmdma irq events */
+ ata_sff_irq_clear(ap);
+
+ /* kick HSM in the ass */
+ ata_sff_hsm_move(ap, qc, status, 0);
+
+ if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+ ata_ehi_push_desc(ehi, "BMDMA2 stat 0x%x", bmdma2);
+
+ return;
+
+ err_hsm:
+ qc->err_mask |= AC_ERR_HSM;
+ freeze:
+ ata_port_freeze(ap);
+}
+
+static irqreturn_t sil_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
+ int handled = 0;
+ int i;
+
+ spin_lock(&host->lock);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
+
+ if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
+ continue;
+
+ /* turn off SATA_IRQ if not supported */
+ if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
+ bmdma2 &= ~SIL_DMA_SATA_IRQ;
+
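+		/* all-ones usually means the chip has dropped off the
+		 * bus; otherwise skip ports with nothing pending */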
+ if (bmdma2 == 0xffffffff ||
+ !(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
+ continue;
+
+ sil_host_intr(ap, bmdma2);
+ handled = 1;
+ }
+
+ spin_unlock(&host->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void sil_freeze(struct ata_port *ap)
+{
+ void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+ u32 tmp;
+
+ /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
+ writel(0, mmio_base + sil_port[ap->port_no].sien);
+
+ /* plug IRQ */
+ tmp = readl(mmio_base + SIL_SYSCFG);
+ tmp |= SIL_MASK_IDE0_INT << ap->port_no;
+ writel(tmp, mmio_base + SIL_SYSCFG);
+ readl(mmio_base + SIL_SYSCFG); /* flush */
+}
+
+static void sil_thaw(struct ata_port *ap)
+{
+ void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+ u32 tmp;
+
+ /* clear IRQ */
+ ap->ops->sff_check_status(ap);
+ ata_sff_irq_clear(ap);
+
+ /* turn on SATA IRQ if supported */
+ if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
+ writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
+
+ /* turn on IRQ */
+ tmp = readl(mmio_base + SIL_SYSCFG);
+ tmp &= ~(SIL_MASK_IDE0_INT << ap->port_no);
+ writel(tmp, mmio_base + SIL_SYSCFG);
+}
+
+/**
+ * sil_dev_config - Apply device/host-specific errata fixups
+ * @dev: Device to be examined
+ *
+ * After the IDENTIFY [PACKET] DEVICE step is complete, and a
+ * device is known to be present, this function is called.
+ * We apply two errata fixups which are specific to Silicon Image,
+ * a Seagate and a Maxtor fixup.
+ *
+ * For certain Seagate devices, we must limit the maximum sectors
+ * to under 8K.
+ *
+ * For certain Maxtor devices, we must not program the drive
+ * beyond udma5.
+ *
+ * Both fixups are unfairly pessimistic. As soon as I get more
+ * information on these errata, I will create a more exhaustive
+ * list, and apply the fixups to only the specific
+ * devices/hosts/firmwares that need it.
+ *
+ * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted
+ * The Maxtor quirk is in the blacklist, but I'm keeping the original
+ * pessimistic fix for the following reasons...
+ * - There seems to be less info on it, only one device gleaned off the
+ * Windows driver, maybe only one is affected. More info would be greatly
+ * appreciated.
+ * - But then again UDMA5 is hardly anything to complain about
+ */
+static void sil_dev_config(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ int print_info = ap->link.eh_context.i.flags & ATA_EHI_PRINTINFO;
+ unsigned int n, quirks = 0;
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+ ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+ for (n = 0; sil_blacklist[n].product; n++)
+ if (!strcmp(sil_blacklist[n].product, model_num)) {
+ quirks = sil_blacklist[n].quirk;
+ break;
+ }
+
+ /* limit requests to 15 sectors */
+ if (slow_down ||
+ ((ap->flags & SIL_FLAG_MOD15WRITE) &&
+ (quirks & SIL_QUIRK_MOD15WRITE))) {
+ if (print_info)
+ ata_dev_printk(dev, KERN_INFO, "applying Seagate "
+ "errata fix (mod15write workaround)\n");
+ dev->max_sectors = 15;
+ return;
+ }
+
+ /* limit to udma5 */
+ if (quirks & SIL_QUIRK_UDMA5MAX) {
+ if (print_info)
+ ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
+ "errata fix %s\n", model_num);
+ dev->udma_mask &= ATA_UDMA5;
+ return;
+ }
+}
+
+static void sil_init_controller(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
+ u8 cls;
+ u32 tmp;
+ int i;
+
+ /* Initialize FIFO PCI bus arbitration */
+ cls = sil_get_device_cache_line(pdev);
+ if (cls) {
+ cls >>= 3;
+ cls++; /* cls = (line_size/8)+1 */
+ for (i = 0; i < host->n_ports; i++)
+ writew(cls << 8 | cls,
+ mmio_base + sil_port[i].fifo_cfg);
+ } else
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "cache line size not set. Driver may not function\n");
+
+ /* Apply R_ERR on DMA activate FIS errata workaround */
+ if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
+ int cnt;
+
+ for (i = 0, cnt = 0; i < host->n_ports; i++) {
+ tmp = readl(mmio_base + sil_port[i].sfis_cfg);
+ if ((tmp & 0x3) != 0x01)
+ continue;
+ if (!cnt)
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Applying R_ERR on DMA activate "
+ "FIS errata fix\n");
+ writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
+ cnt++;
+ }
+ }
+
+ if (host->n_ports == 4) {
+ /* flip the magic "make 4 ports work" bit */
+ tmp = readl(mmio_base + sil_port[2].bmdma);
+ if ((tmp & SIL_INTR_STEERING) == 0)
+ writel(tmp | SIL_INTR_STEERING,
+ mmio_base + sil_port[2].bmdma);
+ }
+}
+
+static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ int board_id = ent->driver_data;
+ const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL };
+ struct ata_host *host;
+ void __iomem *mmio_base;
+ int n_ports, rc;
+ unsigned int i;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* allocate host */
+ n_ports = 2;
+ if (board_id == sil_3114)
+ n_ports = 4;
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ if (!host)
+ return -ENOMEM;
+
+ /* acquire resources and fill host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ mmio_base = host->iomap[SIL_MMIO_BAR];
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ ioaddr->cmd_addr = mmio_base + sil_port[i].tf;
+ ioaddr->altstatus_addr =
+ ioaddr->ctl_addr = mmio_base + sil_port[i].ctl;
+ ioaddr->bmdma_addr = mmio_base + sil_port[i].bmdma;
+ ioaddr->scr_addr = mmio_base + sil_port[i].scr;
+ ata_sff_std_ports(ioaddr);
+
+ ata_port_pbar_desc(ap, SIL_MMIO_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, SIL_MMIO_BAR, sil_port[i].tf, "tf");
+ }
+
+ /* initialize and activate */
+ sil_init_controller(host);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, sil_interrupt, IRQF_SHARED,
+ &sil_sht);
+}
+
+#ifdef CONFIG_PM
+static int sil_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ sil_init_controller(host);
+ ata_host_resume(host);
+
+ return 0;
+}
+#endif
+
+static int __init sil_init(void)
+{
+ return pci_register_driver(&sil_pci_driver);
+}
+
+static void __exit sil_exit(void)
+{
+ pci_unregister_driver(&sil_pci_driver);
+}
+
+
+module_init(sil_init);
+module_exit(sil_exit);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
new file mode 100644
index 0000000..ccee930
--- /dev/null
+++ b/drivers/ata/sata_sil24.c
@@ -0,0 +1,1388 @@
+/*
+ * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
+ *
+ * Copyright 2005 Tejun Heo
+ *
+ * Based on preview driver from Silicon Image.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_sil24"
+#define DRV_VERSION "1.1"
+
+/*
+ * Port request block (PRB) 32 bytes
+ */
+struct sil24_prb {
+ __le16 ctrl;
+ __le16 prot;
+ __le32 rx_cnt;
+ u8 fis[6 * 4];
+};
+
+/*
+ * Scatter gather entry (SGE) 16 bytes
+ */
+struct sil24_sge {
+ __le64 addr;
+ __le32 cnt;
+ __le32 flags;
+};
+
+/*
+ * Port multiplier
+ */
+struct sil24_port_multiplier {
+ __le32 diag;
+ __le32 sactive;
+};
+
+enum {
+ SIL24_HOST_BAR = 0,
+ SIL24_PORT_BAR = 2,
+
+ /* sil24 fetches in chunks of 64 bytes. The first block
+ * contains the PRB and two SGEs. Each subsequent block
+ * consists of four SGEs and is called an SGT. Calculate the
+ * number of SGTs that fit into one page.
+ */
+ SIL24_PRB_SZ = sizeof(struct sil24_prb)
+ + 2 * sizeof(struct sil24_sge),
+ SIL24_MAX_SGT = (PAGE_SIZE - SIL24_PRB_SZ)
+ / (4 * sizeof(struct sil24_sge)),
+
+ /* This leaves one unused SGE for ATA commands. That extra SGE
+ * is used to store the CDB for ATAPI devices.
+ */
+ SIL24_MAX_SGE = 4 * SIL24_MAX_SGT + 1,
+
+ /*
+ * Global controller registers (128 bytes @ BAR0)
+ */
+ /* 32 bit regs */
+ HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
+ HOST_CTRL = 0x40,
+ HOST_IRQ_STAT = 0x44,
+ HOST_PHY_CFG = 0x48,
+ HOST_BIST_CTRL = 0x50,
+ HOST_BIST_PTRN = 0x54,
+ HOST_BIST_STAT = 0x58,
+ HOST_MEM_BIST_STAT = 0x5c,
+ HOST_FLASH_CMD = 0x70,
+ /* 8 bit regs */
+ HOST_FLASH_DATA = 0x74,
+ HOST_TRANSITION_DETECT = 0x75,
+ HOST_GPIO_CTRL = 0x76,
+ HOST_I2C_ADDR = 0x78, /* 32 bit */
+ HOST_I2C_DATA = 0x7c,
+ HOST_I2C_XFER_CNT = 0x7e,
+ HOST_I2C_CTRL = 0x7f,
+
+ /* HOST_SLOT_STAT bits */
+ HOST_SSTAT_ATTN = (1 << 31),
+
+ /* HOST_CTRL bits */
+ HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
+ HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
+ HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
+ HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
+ HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
+ HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */
+
+ /*
+ * Port registers
+ * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
+ */
+ PORT_REGS_SIZE = 0x2000,
+
+ PORT_LRAM = 0x0000, /* 31 LRAM slots and PMP regs */
+ PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */
+
+ PORT_PMP = 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
+ PORT_PMP_STATUS = 0x0000, /* port device status offset */
+ PORT_PMP_QACTIVE = 0x0004, /* port device QActive offset */
+ PORT_PMP_SIZE = 0x0008, /* 8 bytes per PMP */
+
+ /* 32 bit regs */
+ PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
+ PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
+ PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
+ PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
+ PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
+ PORT_ACTIVATE_UPPER_ADDR= 0x101c,
+ PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
+ PORT_CMD_ERR = 0x1024, /* command error number */
+ PORT_FIS_CFG = 0x1028,
+ PORT_FIFO_THRES = 0x102c,
+ /* 16 bit regs */
+ PORT_DECODE_ERR_CNT = 0x1040,
+ PORT_DECODE_ERR_THRESH = 0x1042,
+ PORT_CRC_ERR_CNT = 0x1044,
+ PORT_CRC_ERR_THRESH = 0x1046,
+ PORT_HSHK_ERR_CNT = 0x1048,
+ PORT_HSHK_ERR_THRESH = 0x104a,
+ /* 32 bit regs */
+ PORT_PHY_CFG = 0x1050,
+ PORT_SLOT_STAT = 0x1800,
+ PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
+ PORT_CONTEXT = 0x1e04,
+ PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
+ PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
+ PORT_SCONTROL = 0x1f00,
+ PORT_SSTATUS = 0x1f04,
+ PORT_SERROR = 0x1f08,
+ PORT_SACTIVE = 0x1f0c,
+
+ /* PORT_CTRL_STAT bits */
+ PORT_CS_PORT_RST = (1 << 0), /* port reset */
+ PORT_CS_DEV_RST = (1 << 1), /* device reset */
+ PORT_CS_INIT = (1 << 2), /* port initialize */
+ PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
+ PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
+ PORT_CS_PMP_RESUME = (1 << 6), /* PMP resume */
+ PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
+ PORT_CS_PMP_EN = (1 << 13), /* port multiplier enable */
+ PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
+
+ /* PORT_IRQ_STAT/ENABLE_SET/CLR */
+ /* bits[11:0] are masked */
+ PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
+ PORT_IRQ_ERROR = (1 << 1), /* command execution error */
+ PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
+ PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
+ PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
+ PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
+ PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
+ PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
+ PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
+ PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
+ PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
+ PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */
+
+ DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
+ PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
+ PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,
+
+ /* bits[27:16] are unmasked (raw) */
+ PORT_IRQ_RAW_SHIFT = 16,
+ PORT_IRQ_MASKED_MASK = 0x7ff,
+ PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
+
+ /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
+ PORT_IRQ_STEER_SHIFT = 30,
+ PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
+
+ /* PORT_CMD_ERR constants */
+ PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
+ PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
+ PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
+ PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
+ PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
+ PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
+ PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
+ PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
+ PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
+ PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
+ PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
+ PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
+ PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
+ PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
+ PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
+ PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
+ PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
+ PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
+ PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
+ PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
+ PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
+ PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
+
+ /* bits of PRB control field */
+ PRB_CTRL_PROTOCOL = (1 << 0), /* override def. ATA protocol */
+ PRB_CTRL_PACKET_READ = (1 << 4), /* PACKET cmd read */
+ PRB_CTRL_PACKET_WRITE = (1 << 5), /* PACKET cmd write */
+ PRB_CTRL_NIEN = (1 << 6), /* Mask completion irq */
+ PRB_CTRL_SRST = (1 << 7), /* Soft reset request (ign BSY?) */
+
+ /* PRB protocol field */
+ PRB_PROT_PACKET = (1 << 0),
+ PRB_PROT_TCQ = (1 << 1),
+ PRB_PROT_NCQ = (1 << 2),
+ PRB_PROT_READ = (1 << 3),
+ PRB_PROT_WRITE = (1 << 4),
+ PRB_PROT_TRANSPARENT = (1 << 5),
+
+ /*
+ * Other constants
+ */
+ SGE_TRM = (1 << 31), /* Last SGE in chain */
+ SGE_LNK = (1 << 30), /* linked list
+ Points to SGT, not SGE */
+ SGE_DRD = (1 << 29), /* discard data read (/dev/null)
+ data address ignored */
+
+ SIL24_MAX_CMDS = 31,
+
+ /* board id */
+ BID_SIL3124 = 0,
+ BID_SIL3132 = 1,
+ BID_SIL3131 = 2,
+
+ /* host flags */
+ SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_AN | ATA_FLAG_PMP,
+ SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
+
+ IRQ_STAT_4PORTS = 0xf,
+};
+
+struct sil24_ata_block {
+ struct sil24_prb prb;
+ struct sil24_sge sge[SIL24_MAX_SGE];
+};
+
+struct sil24_atapi_block {
+ struct sil24_prb prb;
+ u8 cdb[16];
+ struct sil24_sge sge[SIL24_MAX_SGE];
+};
+
+union sil24_cmd_block {
+ struct sil24_ata_block ata;
+ struct sil24_atapi_block atapi;
+};
+
+static struct sil24_cerr_info {
+ unsigned int err_mask, action;
+ const char *desc;
+} sil24_cerr_db[] = {
+ [0] = { AC_ERR_DEV, 0,
+ "device error" },
+ [PORT_CERR_DEV] = { AC_ERR_DEV, 0,
+ "device error via D2H FIS" },
+ [PORT_CERR_SDB] = { AC_ERR_DEV, 0,
+ "device error via SDB FIS" },
+ [PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_RESET,
+ "error in data FIS" },
+ [PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_RESET,
+ "failed to transmit command FIS" },
+ [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
+ "protocol mismatch" },
+ [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET,
+ "data directon mismatch" },
+ [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
+ "ran out of SGEs while writing" },
+ [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
+ "ran out of SGEs while reading" },
+ [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET,
+ "invalid data directon for ATAPI CDB" },
+ [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
+ "SGT not on qword boundary" },
+ [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "PCI target abort while fetching SGT" },
+ [PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "PCI master abort while fetching SGT" },
+ [PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "PCI parity error while fetching SGT" },
+ [PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
+ "PRB not on qword boundary" },
+ [PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "PCI target abort while fetching PRB" },
+ [PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "PCI master abort while fetching PRB" },
+ [PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "PCI parity error while fetching PRB" },
+ [PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "undefined error while transferring data" },
+ [PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "PCI target abort while transferring data" },
+ [PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "PCI master abort while transferring data" },
+ [PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
+ "PCI parity error while transferring data" },
+ [PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_RESET,
+ "FIS received while sending service FIS" },
+};
+
+/*
+ * ap->private_data
+ *
+ * The preview driver always returned 0 for status. We emulate it
+ * here from the previous interrupt.
+ */
+struct sil24_port_priv {
+ union sil24_cmd_block *cmd_block; /* 32 cmd blocks */
+ dma_addr_t cmd_block_dma; /* DMA base addr for them */
+ int do_port_rst;
+};
+
+static void sil24_dev_config(struct ata_device *dev);
+static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
+static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
+static int sil24_qc_defer(struct ata_queued_cmd *qc);
+static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
+static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void sil24_pmp_attach(struct ata_port *ap);
+static void sil24_pmp_detach(struct ata_port *ap);
+static void sil24_freeze(struct ata_port *ap);
+static void sil24_thaw(struct ata_port *ap);
+static int sil24_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int sil24_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void sil24_error_handler(struct ata_port *ap);
+static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
+static int sil24_port_start(struct ata_port *ap);
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM
+static int sil24_pci_device_resume(struct pci_dev *pdev);
+static int sil24_port_resume(struct ata_port *ap);
+#endif
+
+static const struct pci_device_id sil24_pci_tbl[] = {
+ { PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
+ { PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
+ { PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
+ { PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
+ { PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 },
+ { PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
+ { PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver sil24_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sil24_pci_tbl,
+ .probe = sil24_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = sil24_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sil24_sht = {
+ ATA_NCQ_SHT(DRV_NAME),
+ .can_queue = SIL24_MAX_CMDS,
+ .sg_tablesize = SIL24_MAX_SGE,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+static struct ata_port_operations sil24_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = sil24_qc_defer,
+ .qc_prep = sil24_qc_prep,
+ .qc_issue = sil24_qc_issue,
+ .qc_fill_rtf = sil24_qc_fill_rtf,
+
+ .freeze = sil24_freeze,
+ .thaw = sil24_thaw,
+ .softreset = sil24_softreset,
+ .hardreset = sil24_hardreset,
+ .pmp_softreset = sil24_softreset,
+ .pmp_hardreset = sil24_pmp_hardreset,
+ .error_handler = sil24_error_handler,
+ .post_internal_cmd = sil24_post_internal_cmd,
+ .dev_config = sil24_dev_config,
+
+ .scr_read = sil24_scr_read,
+ .scr_write = sil24_scr_write,
+ .pmp_attach = sil24_pmp_attach,
+ .pmp_detach = sil24_pmp_detach,
+
+ .port_start = sil24_port_start,
+#ifdef CONFIG_PM
+ .port_resume = sil24_port_resume,
+#endif
+};
+
+/*
+ * Use bits 30-31 of port_flags to encode available port numbers.
+ * Current maximum is 4.
+ */
+#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
+#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)
+
+static const struct ata_port_info sil24_port_info[] = {
+ /* sil_3124 */
+ {
+ .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
+ SIL24_FLAG_PCIX_IRQ_WOC,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5, /* udma0-5 */
+ .port_ops = &sil24_ops,
+ },
+ /* sil_3132 */
+ {
+ .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5, /* udma0-5 */
+ .port_ops = &sil24_ops,
+ },
+ /* sil_3131/sil_3531 */
+ {
+ .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5, /* udma0-5 */
+ .port_ops = &sil24_ops,
+ },
+};
+
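+/* Map a libata tag to a hardware command slot.  Hardware slots run
+ * 0..SIL24_MAX_CMDS-1; libata's internal tag falls outside that range
+ * and is folded onto slot 0, which is safe since libata issues
+ * internal commands only while the port is otherwise idle.
+ */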
+static int sil24_tag(int tag)
+{
+ if (unlikely(ata_tag_internal(tag)))
+ return 0;
+ return tag;
+}
+
+static unsigned long sil24_port_offset(struct ata_port *ap)
+{
+ return ap->port_no * PORT_REGS_SIZE;
+}
+
+static void __iomem *sil24_port_base(struct ata_port *ap)
+{
+ return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
+}
+
+static void sil24_dev_config(struct ata_device *dev)
+{
+ void __iomem *port = sil24_port_base(dev->link->ap);
+
+ if (dev->cdb_len == 16)
+ writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
+ else
+ writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
+}
+
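+/* Read the result taskfile back from the port's LRAM: each command
+ * slot keeps a copy of the received FIS, so the D2H register FIS of
+ * slot sil24_tag(tag) can be decoded with ata_tf_from_fis().
+ */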
+static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
+{
+ void __iomem *port = sil24_port_base(ap);
+ struct sil24_prb __iomem *prb;
+ u8 fis[6 * 4];
+
+ prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
+ memcpy_fromio(fis, prb->fis, sizeof(fis));
+ ata_tf_from_fis(fis, tf);
+}
+
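+/* Offsets (in 32-bit words) of the SCR registers from PORT_SCONTROL;
+ * e.g. SCR_STATUS is read from PORT_SCONTROL + 1 * 4.
+ */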
+static int sil24_scr_map[] = {
+ [SCR_CONTROL] = 0,
+ [SCR_STATUS] = 1,
+ [SCR_ERROR] = 2,
+ [SCR_ACTIVE] = 3,
+};
+
+static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
+{
+ void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
+
+ if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
+		*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
+{
+ void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
+
+ if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
+		writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void sil24_config_port(struct ata_port *ap)
+{
+ void __iomem *port = sil24_port_base(ap);
+
+ /* configure IRQ WoC */
+ if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+ writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
+ else
+ writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
+
+ /* zero error counters. */
+ writel(0x8000, port + PORT_DECODE_ERR_THRESH);
+ writel(0x8000, port + PORT_CRC_ERR_THRESH);
+ writel(0x8000, port + PORT_HSHK_ERR_THRESH);
+ writel(0x0000, port + PORT_DECODE_ERR_CNT);
+ writel(0x0000, port + PORT_CRC_ERR_CNT);
+ writel(0x0000, port + PORT_HSHK_ERR_CNT);
+
+ /* always use 64bit activation */
+ writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
+
+ /* clear port multiplier enable and resume bits */
+ writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
+}
+
+static void sil24_config_pmp(struct ata_port *ap, int attached)
+{
+ void __iomem *port = sil24_port_base(ap);
+
+ if (attached)
+ writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
+ else
+ writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
+}
+
+static void sil24_clear_pmp(struct ata_port *ap)
+{
+ void __iomem *port = sil24_port_base(ap);
+ int i;
+
+ writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
+
+ for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
+ void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;
+
+ writel(0, pmp_base + PORT_PMP_STATUS);
+ writel(0, pmp_base + PORT_PMP_QACTIVE);
+ }
+}
+
+static int sil24_init_port(struct ata_port *ap)
+{
+ void __iomem *port = sil24_port_base(ap);
+ struct sil24_port_priv *pp = ap->private_data;
+ u32 tmp;
+
+ /* clear PMP error status */
+ if (sata_pmp_attached(ap))
+ sil24_clear_pmp(ap);
+
+ writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
+ ata_wait_register(port + PORT_CTRL_STAT,
+ PORT_CS_INIT, PORT_CS_INIT, 10, 100);
+ tmp = ata_wait_register(port + PORT_CTRL_STAT,
+ PORT_CS_RDY, 0, 10, 100);
+
+ if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
+ pp->do_port_rst = 1;
+ ap->link.eh_context.i.action |= ATA_EH_RESET;
+ return -EIO;
+ }
+
+ return 0;
+}
+
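+/* Execute a taskfile by polling: build a PRB in command slot 0, mask
+ * the completion/error interrupts, activate slot 0 and spin on the raw
+ * bits of PORT_IRQ_STAT until completion, error or timeout.
+ */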
+static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
+ const struct ata_taskfile *tf,
+ int is_cmd, u32 ctrl,
+ unsigned long timeout_msec)
+{
+ void __iomem *port = sil24_port_base(ap);
+ struct sil24_port_priv *pp = ap->private_data;
+ struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
+ dma_addr_t paddr = pp->cmd_block_dma;
+ u32 irq_enabled, irq_mask, irq_stat;
+ int rc;
+
+ prb->ctrl = cpu_to_le16(ctrl);
+ ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);
+
+ /* temporarily plug completion and error interrupts */
+ irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
+ writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
+
+ writel((u32)paddr, port + PORT_CMD_ACTIVATE);
+ writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
+
+ irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
+ irq_stat = ata_wait_register(port + PORT_IRQ_STAT, irq_mask, 0x0,
+ 10, timeout_msec);
+
+ writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
+ irq_stat >>= PORT_IRQ_RAW_SHIFT;
+
+ if (irq_stat & PORT_IRQ_COMPLETE)
+ rc = 0;
+ else {
+ /* force port into known state */
+ sil24_init_port(ap);
+
+ if (irq_stat & PORT_IRQ_ERROR)
+ rc = -EIO;
+ else
+ rc = -EBUSY;
+ }
+
+ /* restore IRQ enabled */
+ writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);
+
+ return rc;
+}
+
+static int sil24_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ int pmp = sata_srst_pmp(link);
+ unsigned long timeout_msec = 0;
+ struct ata_taskfile tf;
+ const char *reason;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ /* put the port into known state */
+ if (sil24_init_port(ap)) {
+ reason = "port not ready";
+ goto err;
+ }
+
+ /* do SRST */
+ if (time_after(deadline, jiffies))
+ timeout_msec = jiffies_to_msecs(deadline - jiffies);
+
+ ata_tf_init(link->device, &tf); /* doesn't really matter */
+ rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
+ timeout_msec);
+ if (rc == -EBUSY) {
+ reason = "timeout";
+ goto err;
+ } else if (rc) {
+ reason = "SRST command error";
+ goto err;
+ }
+
+ sil24_read_tf(ap, 0, &tf);
+ *class = ata_dev_classify(&tf);
+
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+
+ err:
+ ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ return -EIO;
+}
+
+static int sil24_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port = sil24_port_base(ap);
+ struct sil24_port_priv *pp = ap->private_data;
+ int did_port_rst = 0;
+ const char *reason;
+ int tout_msec, rc;
+ u32 tmp;
+
+ retry:
+	/* Sometimes DEV_RST is not enough to recover the controller.
+	 * This often happens after the PMP DMA CS errata condition
+	 * has been hit.
+	 */
+ if (pp->do_port_rst) {
+ ata_port_printk(ap, KERN_WARNING, "controller in dubious "
+ "state, performing PORT_RST\n");
+
+ writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
+ msleep(10);
+ writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
+ ata_wait_register(port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
+ 10, 5000);
+
+ /* restore port configuration */
+ sil24_config_port(ap);
+ sil24_config_pmp(ap, ap->nr_pmp_links);
+
+ pp->do_port_rst = 0;
+ did_port_rst = 1;
+ }
+
+ /* sil24 does the right thing(tm) without any protection */
+ sata_set_spd(link);
+
+ tout_msec = 100;
+ if (ata_link_online(link))
+ tout_msec = 5000;
+
+ writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
+ tmp = ata_wait_register(port + PORT_CTRL_STAT,
+ PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
+ tout_msec);
+
+ /* SStatus oscillates between zero and valid status after
+ * DEV_RST, debounce it.
+ */
+ rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
+ if (rc) {
+ reason = "PHY debouncing failed";
+ goto err;
+ }
+
+ if (tmp & PORT_CS_DEV_RST) {
+ if (ata_link_offline(link))
+ return 0;
+ reason = "link not ready";
+ goto err;
+ }
+
+ /* Sil24 doesn't store signature FIS after hardreset, so we
+ * can't wait for BSY to clear. Some devices take a long time
+ * to get ready and those devices will choke if we don't wait
+ * for BSY clearance here. Tell libata to perform follow-up
+ * softreset.
+ */
+ return -EAGAIN;
+
+ err:
+ if (!did_port_rst) {
+ pp->do_port_rst = 1;
+ goto retry;
+ }
+
+ ata_link_printk(link, KERN_ERR, "hardreset failed (%s)\n", reason);
+ return -EIO;
+}
+
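+/* Fill in the PRB scatter/gather table and mark the final entry with
+ * SGE_TRM.  Callers only reach this for commands with ATA_QCFLAG_DMAMAP
+ * set, so at least one S/G element exists and last_sge is never NULL.
+ */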
+static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
+ struct sil24_sge *sge)
+{
+ struct scatterlist *sg;
+ struct sil24_sge *last_sge = NULL;
+ unsigned int si;
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->cnt = cpu_to_le32(sg_dma_len(sg));
+ sge->flags = 0;
+
+ last_sge = sge;
+ sge++;
+ }
+
+ last_sge->flags = cpu_to_le32(SGE_TRM);
+}
+
+static int sil24_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_link *link = qc->dev->link;
+ struct ata_port *ap = link->ap;
+ u8 prot = qc->tf.protocol;
+
+ /*
+ * There is a bug in the chip:
+ * Port LRAM Causes the PRB/SGT Data to be Corrupted
+ * If the host issues a read request for LRAM and SActive registers
+ * while active commands are available in the port, PRB/SGT data in
+ * the LRAM can become corrupted. This issue applies only when
+ * reading from, but not writing to, the LRAM.
+ *
+ * Therefore, reading LRAM when there is no particular error [and
+ * other commands may be outstanding] is prohibited.
+ *
+ * To avoid this bug there are two situations where a command must run
+ * exclusive of any other commands on the port:
+ *
+ * - ATAPI commands which check the sense data
+ * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
+ * set.
+ *
+ */
+ int is_excl = (ata_is_atapi(prot) ||
+ (qc->flags & ATA_QCFLAG_RESULT_TF));
+
+ if (unlikely(ap->excl_link)) {
+ if (link == ap->excl_link) {
+ if (ap->nr_active_links)
+ return ATA_DEFER_PORT;
+ qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+ } else
+ return ATA_DEFER_PORT;
+ } else if (unlikely(is_excl)) {
+ ap->excl_link = link;
+ if (ap->nr_active_links)
+ return ATA_DEFER_PORT;
+ qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+ }
+
+ return ata_std_qc_defer(qc);
+}
+
+static void sil24_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct sil24_port_priv *pp = ap->private_data;
+ union sil24_cmd_block *cb;
+ struct sil24_prb *prb;
+ struct sil24_sge *sge;
+ u16 ctrl = 0;
+
+ cb = &pp->cmd_block[sil24_tag(qc->tag)];
+
+ if (!ata_is_atapi(qc->tf.protocol)) {
+ prb = &cb->ata.prb;
+ sge = cb->ata.sge;
+ } else {
+ prb = &cb->atapi.prb;
+ sge = cb->atapi.sge;
+ memset(cb->atapi.cdb, 0, 32);
+ memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
+
+ if (ata_is_data(qc->tf.protocol)) {
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ ctrl = PRB_CTRL_PACKET_WRITE;
+ else
+ ctrl = PRB_CTRL_PACKET_READ;
+ }
+ }
+
+ prb->ctrl = cpu_to_le16(ctrl);
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);
+
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ sil24_fill_sg(qc, sge);
+}
+
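+/* Issue a prepared command: every tag has its own 8-byte activation
+ * register pair, so writing the 64-bit DMA address of the tag's command
+ * block to PORT_CMD_ACTIVATE + tag * 8 kicks off that slot.
+ */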
+static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct sil24_port_priv *pp = ap->private_data;
+ void __iomem *port = sil24_port_base(ap);
+ unsigned int tag = sil24_tag(qc->tag);
+ dma_addr_t paddr;
+ void __iomem *activate;
+
+ paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
+ activate = port + PORT_CMD_ACTIVATE + tag * 8;
+
+ writel((u32)paddr, activate);
+ writel((u64)paddr >> 32, activate + 4);
+
+ return 0;
+}
+
+static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ sil24_read_tf(qc->ap, qc->tag, &qc->result_tf);
+ return true;
+}
+
+static void sil24_pmp_attach(struct ata_port *ap)
+{
+ u32 *gscr = ap->link.device->gscr;
+
+ sil24_config_pmp(ap, 1);
+ sil24_init_port(ap);
+
+ if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
+ sata_pmp_gscr_devid(gscr) == 0x4140) {
+ ata_port_printk(ap, KERN_INFO,
+ "disabling NCQ support due to sil24-mv4140 quirk\n");
+ ap->flags &= ~ATA_FLAG_NCQ;
+ }
+}
+
+static void sil24_pmp_detach(struct ata_port *ap)
+{
+ sil24_init_port(ap);
+ sil24_config_pmp(ap, 0);
+
+ ap->flags |= ATA_FLAG_NCQ;
+}
+
+static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int rc;
+
+ rc = sil24_init_port(link->ap);
+ if (rc) {
+ ata_link_printk(link, KERN_ERR,
+ "hardreset failed (port not ready)\n");
+ return rc;
+ }
+
+ return sata_std_hardreset(link, class, deadline);
+}
+
+static void sil24_freeze(struct ata_port *ap)
+{
+ void __iomem *port = sil24_port_base(ap);
+
+ /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
+ * PORT_IRQ_ENABLE instead.
+ */
+ writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
+}
+
+static void sil24_thaw(struct ata_port *ap)
+{
+ void __iomem *port = sil24_port_base(ap);
+ u32 tmp;
+
+ /* clear IRQ */
+ tmp = readl(port + PORT_IRQ_STAT);
+ writel(tmp, port + PORT_IRQ_STAT);
+
+ /* turn IRQ back on */
+ writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
+}
+
+static void sil24_error_intr(struct ata_port *ap)
+{
+ void __iomem *port = sil24_port_base(ap);
+ struct sil24_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc = NULL;
+ struct ata_link *link;
+ struct ata_eh_info *ehi;
+ int abort = 0, freeze = 0;
+ u32 irq_stat;
+
+ /* on error, we need to clear IRQ explicitly */
+ irq_stat = readl(port + PORT_IRQ_STAT);
+ writel(irq_stat, port + PORT_IRQ_STAT);
+
+ /* first, analyze and record host port events */
+ link = &ap->link;
+ ehi = &link->eh_info;
+ ata_ehi_clear_desc(ehi);
+
+ ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+
+ if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
+ ata_ehi_push_desc(ehi, "SDB notify");
+ sata_async_notification(ap);
+ }
+
+ if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "%s",
+ irq_stat & PORT_IRQ_PHYRDY_CHG ?
+ "PHY RDY changed" : "device exchanged");
+ freeze = 1;
+ }
+
+ if (irq_stat & PORT_IRQ_UNK_FIS) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(ehi, "unknown FIS");
+ freeze = 1;
+ }
+
+ /* deal with command error */
+ if (irq_stat & PORT_IRQ_ERROR) {
+ struct sil24_cerr_info *ci = NULL;
+ unsigned int err_mask = 0, action = 0;
+ u32 context, cerr;
+ int pmp;
+
+ abort = 1;
+
+ /* DMA Context Switch Failure in Port Multiplier Mode
+ * errata. If we have active commands to 3 or more
+ * devices, any error condition on active devices can
+ * corrupt DMA context switching.
+ */
+ if (ap->nr_active_links >= 3) {
+ ehi->err_mask |= AC_ERR_OTHER;
+ ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(ehi, "PMP DMA CS errata");
+ pp->do_port_rst = 1;
+ freeze = 1;
+ }
+
+ /* find out the offending link and qc */
+ if (sata_pmp_attached(ap)) {
+ context = readl(port + PORT_CONTEXT);
+ pmp = (context >> 5) & 0xf;
+
+ if (pmp < ap->nr_pmp_links) {
+ link = &ap->pmp_link[pmp];
+ ehi = &link->eh_info;
+ qc = ata_qc_from_tag(ap, link->active_tag);
+
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
+ irq_stat);
+ } else {
+ err_mask |= AC_ERR_HSM;
+ action |= ATA_EH_RESET;
+ freeze = 1;
+ }
+ } else
+ qc = ata_qc_from_tag(ap, link->active_tag);
+
+ /* analyze CMD_ERR */
+ cerr = readl(port + PORT_CMD_ERR);
+ if (cerr < ARRAY_SIZE(sil24_cerr_db))
+ ci = &sil24_cerr_db[cerr];
+
+ if (ci && ci->desc) {
+ err_mask |= ci->err_mask;
+ action |= ci->action;
+ if (action & ATA_EH_RESET)
+ freeze = 1;
+ ata_ehi_push_desc(ehi, "%s", ci->desc);
+ } else {
+ err_mask |= AC_ERR_OTHER;
+ action |= ATA_EH_RESET;
+ freeze = 1;
+ ata_ehi_push_desc(ehi, "unknown command error %d",
+ cerr);
+ }
+
+ /* record error info */
+ if (qc)
+ qc->err_mask |= err_mask;
+ else
+ ehi->err_mask |= err_mask;
+
+ ehi->action |= action;
+
+ /* if PMP, resume */
+ if (sata_pmp_attached(ap))
+ writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
+ }
+
+ /* freeze or abort */
+ if (freeze)
+ ata_port_freeze(ap);
+ else if (abort) {
+ if (qc)
+ ata_link_abort(qc->dev->link);
+ else
+ ata_port_abort(ap);
+ }
+}
+
+static inline void sil24_host_intr(struct ata_port *ap)
+{
+ void __iomem *port = sil24_port_base(ap);
+ u32 slot_stat, qc_active;
+ int rc;
+
+ /* If PCIX_IRQ_WOC, there's an inherent race window between
+ * clearing IRQ pending status and reading PORT_SLOT_STAT
+ * which may cause spurious interrupts afterwards. This is
+ * unavoidable and much better than losing interrupts which
+ * happens if IRQ pending is cleared after reading
+ * PORT_SLOT_STAT.
+ */
+ if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+ writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
+
+ slot_stat = readl(port + PORT_SLOT_STAT);
+
+ if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
+ sil24_error_intr(ap);
+ return;
+ }
+
+ qc_active = slot_stat & ~HOST_SSTAT_ATTN;
+ rc = ata_qc_complete_multiple(ap, qc_active);
+ if (rc > 0)
+ return;
+ if (rc < 0) {
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ return;
+ }
+
+ /* spurious interrupts are expected if PCIX_IRQ_WOC */
+ if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
+ ata_port_printk(ap, KERN_INFO, "spurious interrupt "
+ "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
+ slot_stat, ap->link.active_tag, ap->link.sactive);
+}
+
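+/* Top-level interrupt handler.  HOST_IRQ_STAT carries one pending bit
+ * per port in its low bits; an all-ones value means reads are failing
+ * (PCI fault or hot unplug), so bail out without touching the hardware
+ * further.
+ */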
+static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
+ unsigned handled = 0;
+ u32 status;
+ int i;
+
+ status = readl(host_base + HOST_IRQ_STAT);
+
+ if (status == 0xffffffff) {
+ printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
+ "PCI fault or device removal?\n");
+ goto out;
+ }
+
+ if (!(status & IRQ_STAT_4PORTS))
+ goto out;
+
+ spin_lock(&host->lock);
+
+ for (i = 0; i < host->n_ports; i++)
+ if (status & (1 << i)) {
+ struct ata_port *ap = host->ports[i];
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+ sil24_host_intr(ap);
+ handled++;
+ } else
+ printk(KERN_ERR DRV_NAME
+ ": interrupt from disabled port %d\n", i);
+ }
+
+ spin_unlock(&host->lock);
+ out:
+ return IRQ_RETVAL(handled);
+}
+
+static void sil24_error_handler(struct ata_port *ap)
+{
+ struct sil24_port_priv *pp = ap->private_data;
+
+ if (sil24_init_port(ap))
+ ata_eh_freeze_port(ap);
+
+ sata_pmp_error_handler(ap);
+
+ pp->do_port_rst = 0;
+}
+
+static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* make DMA engine forget about the failed command */
+ if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
+ ata_eh_freeze_port(ap);
+}
+
+static int sil24_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct sil24_port_priv *pp;
+ union sil24_cmd_block *cb;
+ size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
+ dma_addr_t cb_dma;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
+ if (!cb)
+ return -ENOMEM;
+ memset(cb, 0, cb_size);
+
+ pp->cmd_block = cb;
+ pp->cmd_block_dma = cb_dma;
+
+ ap->private_data = pp;
+
+ ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
+ ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");
+
+ return 0;
+}
+
+static void sil24_init_controller(struct ata_host *host)
+{
+ void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
+ u32 tmp;
+ int i;
+
+ /* GPIO off */
+ writel(0, host_base + HOST_FLASH_CMD);
+
+ /* clear global reset & mask interrupts during initialization */
+ writel(0, host_base + HOST_CTRL);
+
+ /* init ports */
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ void __iomem *port = sil24_port_base(ap);
+
+ /* Initial PHY setting */
+ writel(0x20c, port + PORT_PHY_CFG);
+
+ /* Clear port RST */
+ tmp = readl(port + PORT_CTRL_STAT);
+ if (tmp & PORT_CS_PORT_RST) {
+ writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
+ tmp = ata_wait_register(port + PORT_CTRL_STAT,
+ PORT_CS_PORT_RST,
+ PORT_CS_PORT_RST, 10, 100);
+ if (tmp & PORT_CS_PORT_RST)
+ dev_printk(KERN_ERR, host->dev,
+ "failed to clear port RST\n");
+ }
+
+ /* configure port */
+ sil24_config_port(ap);
+ }
+
+ /* Turn on interrupts */
+ writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
+}
+
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
+ static int printed_version;
+ struct ata_port_info pi = sil24_port_info[ent->driver_data];
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ void __iomem * const *iomap;
+ struct ata_host *host;
+ int rc;
+ u32 tmp;
+
+ /* cause link error if sil24_cmd_block is sized wrongly */
+ if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
+ __MARKER__sil24_cmd_block_is_sized_wrongly = 1;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* acquire resources */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev,
+ (1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
+ DRV_NAME);
+ if (rc)
+ return rc;
+ iomap = pcim_iomap_table(pdev);
+
+ /* apply workaround for completion IRQ loss on PCI-X errata */
+ if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
+ tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
+ if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Applying completion IRQ loss on PCI-X "
+ "errata fix\n");
+ else
+ pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
+ }
+
+ /* allocate and fill host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi,
+ SIL24_FLAG2NPORTS(ppi[0]->flags));
+ if (!host)
+ return -ENOMEM;
+ host->iomap = iomap;
+
+ /* configure and activate the device */
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "64-bit DMA enable failed\n");
+ return rc;
+ }
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ }
+
+ /* Set max read request size to 4096. This slightly increases
+ * write throughput for pci-e variants.
+ */
+ pcie_set_readrq(pdev, 4096);
+
+ sil24_init_controller(host);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
+ &sil24_sht);
+}
+
+#ifdef CONFIG_PM
+static int sil24_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
+ writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);
+
+ sil24_init_controller(host);
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static int sil24_port_resume(struct ata_port *ap)
+{
+ sil24_config_pmp(ap, ap->nr_pmp_links);
+ return 0;
+}
+#endif
+
+static int __init sil24_init(void)
+{
+ return pci_register_driver(&sil24_pci_driver);
+}
+
+static void __exit sil24_exit(void)
+{
+ pci_unregister_driver(&sil24_pci_driver);
+}
+
+MODULE_AUTHOR("Tejun Heo");
+MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
+
+module_init(sil24_init);
+module_exit(sil24_exit);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
new file mode 100644
index 0000000..9c43b4e
--- /dev/null
+++ b/drivers/ata/sata_sis.c
@@ -0,0 +1,351 @@
+/*
+ * sata_sis.c - Silicon Integrated Systems SATA
+ *
+ * Maintained by: Uwe Koziolek
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004 Uwe Koziolek
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include "sis.h"
+
+#define DRV_NAME "sata_sis"
+#define DRV_VERSION "1.0"
+
+enum {
+ sis_180 = 0,
+ SIS_SCR_PCI_BAR = 5,
+
+ /* PCI configuration registers */
+ SIS_GENCTL = 0x54, /* IDE General Control register */
+ SIS_SCR_BASE = 0xc0, /* sata0 phy SCR registers */
+ SIS180_SATA1_OFS = 0x10, /* offset from sata0->sata1 phy regs */
+ SIS182_SATA1_OFS = 0x20, /* offset from sata0->sata1 phy regs */
+ SIS_PMR = 0x90, /* port mapping register */
+ SIS_PMR_COMBINED = 0x30,
+
+ /* random bits */
+ SIS_FLAG_CFGSCR = (1 << 30), /* host flag: SCRs via PCI cfg */
+
+ GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */
+};
+
+static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+
+static const struct pci_device_id sis_pci_tbl[] = {
+ { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */
+ { PCI_VDEVICE(SI, 0x0181), sis_180 }, /* SiS 964/180 */
+ { PCI_VDEVICE(SI, 0x0182), sis_180 }, /* SiS 965/965L */
+ { PCI_VDEVICE(SI, 0x0183), sis_180 }, /* SiS 965/965L */
+ { PCI_VDEVICE(SI, 0x1182), sis_180 }, /* SiS 966/680 */
+ { PCI_VDEVICE(SI, 0x1183), sis_180 }, /* SiS 966/966L/968/680 */
+
+ { } /* terminate list */
+};
+
+static struct pci_driver sis_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sis_pci_tbl,
+ .probe = sis_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static struct scsi_host_template sis_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sis_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .scr_read = sis_scr_read,
+ .scr_write = sis_scr_write,
+};
+
+static const struct ata_port_info sis_port_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x7,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sis_ops,
+};
+
+MODULE_AUTHOR("Uwe Koziolek");
+MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
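+/* Compute the PCI config space address of an SCR register.  Port 0
+ * registers start at SIS_SCR_BASE; port 1 is offset by 0x10 on the
+ * SiS 180/181 (unless in combined mode) and by 0x20 on the
+ * 182/183/1182.
+ */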
+static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
+ u8 pmr;
+
+ if (ap->port_no) {
+ switch (pdev->device) {
+ case 0x0180:
+ case 0x0181:
+ pci_read_config_byte(pdev, SIS_PMR, &pmr);
+ if ((pmr & SIS_PMR_COMBINED) == 0)
+ addr += SIS180_SATA1_OFS;
+ break;
+
+ case 0x0182:
+ case 0x0183:
+ case 0x1182:
+ addr += SIS182_SATA1_OFS;
+ break;
+ }
+ }
+ return addr;
+}
+
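+/* On the SiS 182/183/1182 (and the 180/181 in combined mode) each SCR
+ * apparently has a second copy 0x10 bytes above the first; the driver
+ * reads both copies and ORs the results together.
+ */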
+static int sis_scr_cfg_read(struct ata_link *link,
+ unsigned int sc_reg, u32 *val)
+{
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
+ u32 val2 = 0;
+ u8 pmr;
+
+ if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
+ return -EINVAL;
+
+ pci_read_config_byte(pdev, SIS_PMR, &pmr);
+
+ pci_read_config_dword(pdev, cfg_addr, val);
+
+ if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
+ (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
+ pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
+
+ *val |= val2;
+	*val &= 0xfffffffb;	/* avoid problems with powered-down ports */
+
+ return 0;
+}
+
+static int sis_scr_cfg_write(struct ata_link *link,
+ unsigned int sc_reg, u32 val)
+{
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
+ u8 pmr;
+
+ if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
+ return -EINVAL;
+
+ pci_read_config_byte(pdev, SIS_PMR, &pmr);
+
+ pci_write_config_dword(pdev, cfg_addr, val);
+
+ if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
+ (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
+ pci_write_config_dword(pdev, cfg_addr+0x10, val);
+
+ return 0;
+}
+
+static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 pmr;
+
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+
+ if (ap->flags & SIS_FLAG_CFGSCR)
+ return sis_scr_cfg_read(link, sc_reg, val);
+
+ pci_read_config_byte(pdev, SIS_PMR, &pmr);
+
+ *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
+
+ if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
+ (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
+ *val |= ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
+
+ *val &= 0xfffffffb;
+
+ return 0;
+}
+
+static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 pmr;
+
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+
+ pci_read_config_byte(pdev, SIS_PMR, &pmr);
+
+	if (ap->flags & SIS_FLAG_CFGSCR)
+		return sis_scr_cfg_write(link, sc_reg, val);
+
+	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+	if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
+	    (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
+		iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
+	return 0;
+}
+
+static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ struct ata_port_info pi = sis_port_info;
+ const struct ata_port_info *ppi[] = { &pi, &pi };
+ struct ata_host *host;
+ u32 genctl, val;
+ u8 pmr;
+ u8 port2_start = 0x20;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* check and see if the SCRs are in IO space or PCI cfg space */
+ pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
+ if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
+ pi.flags |= SIS_FLAG_CFGSCR;
+
+ /* if hardware thinks SCRs are in IO space, but there are
+ * no IO resources assigned, change to PCI cfg space.
+ */
+ if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
+ ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
+ (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
+ genctl &= ~GENCTL_IOMAPPED_SCR;
+ pci_write_config_dword(pdev, SIS_GENCTL, genctl);
+ pi.flags |= SIS_FLAG_CFGSCR;
+ }
+
+ pci_read_config_byte(pdev, SIS_PMR, &pmr);
+ switch (ent->device) {
+ case 0x0180:
+ case 0x0181:
+
+ /* The PATA-handling is provided by pata_sis */
+ switch (pmr & 0x30) {
+ case 0x10:
+ ppi[1] = &sis_info133_for_sata;
+ break;
+
+ case 0x30:
+ ppi[0] = &sis_info133_for_sata;
+ break;
+ }
+ if ((pmr & SIS_PMR_COMBINED) == 0) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 180/181/964 chipset in SATA mode\n");
+ port2_start = 64;
+ } else {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 180/181 chipset in combined mode\n");
+ port2_start = 0;
+ pi.flags |= ATA_FLAG_SLAVE_POSS;
+ }
+ break;
+
+ case 0x0182:
+ case 0x0183:
+ pci_read_config_dword(pdev, 0x6C, &val);
+ if (val & (1L << 31)) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 182/965 chipset\n");
+ pi.flags |= ATA_FLAG_SLAVE_POSS;
+ } else {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 182/965L chipset\n");
+ }
+ break;
+
+ case 0x1182:
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 1182/966/680 SATA controller\n");
+ pi.flags |= ATA_FLAG_SLAVE_POSS;
+ break;
+
+ case 0x1183:
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
+ ppi[0] = &sis_info133_for_sata;
+ ppi[1] = &sis_info133_for_sata;
+ break;
+ }
+
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ return rc;
+
+ if (!(pi.flags & SIS_FLAG_CFGSCR)) {
+ void __iomem *mmio;
+
+ rc = pcim_iomap_regions(pdev, 1 << SIS_SCR_PCI_BAR, DRV_NAME);
+ if (rc)
+ return rc;
+ mmio = host->iomap[SIS_SCR_PCI_BAR];
+
+ host->ports[0]->ioaddr.scr_addr = mmio;
+ host->ports[1]->ioaddr.scr_addr = mmio + port2_start;
+ }
+
+ pci_set_master(pdev);
+ pci_intx(pdev, 1);
+ return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ IRQF_SHARED, &sis_sht);
+}
+
+static int __init sis_init(void)
+{
+ return pci_register_driver(&sis_pci_driver);
+}
+
+static void __exit sis_exit(void)
+{
+ pci_unregister_driver(&sis_pci_driver);
+}
+
+module_init(sis_init);
+module_exit(sis_exit);
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
new file mode 100644
index 0000000..609d147
--- /dev/null
+++ b/drivers/ata/sata_svw.c
@@ -0,0 +1,551 @@
+/*
+ * sata_svw.c - ServerWorks / Apple K2 SATA
+ *
+ * Maintained by: Benjamin Herrenschmidt <benh@kernel.crashing.org> and
+ * Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *
+ * Bits from Jeff Garzik, Copyright RedHat, Inc.
+ *
+ * This driver probably works with non-Apple versions of the
+ * Broadcom chipset...
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi.h>
+#include <linux/libata.h>
+
+#ifdef CONFIG_PPC_OF
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#endif /* CONFIG_PPC_OF */
+
+#define DRV_NAME "sata_svw"
+#define DRV_VERSION "2.3"
+
+enum {
+ /* ap->flags bits */
+ K2_FLAG_SATA_8_PORTS = (1 << 24),
+ K2_FLAG_NO_ATAPI_DMA = (1 << 25),
+ K2_FLAG_BAR_POS_3 = (1 << 26),
+
+ /* Taskfile registers offsets */
+ K2_SATA_TF_CMD_OFFSET = 0x00,
+ K2_SATA_TF_DATA_OFFSET = 0x00,
+ K2_SATA_TF_ERROR_OFFSET = 0x04,
+ K2_SATA_TF_NSECT_OFFSET = 0x08,
+ K2_SATA_TF_LBAL_OFFSET = 0x0c,
+ K2_SATA_TF_LBAM_OFFSET = 0x10,
+ K2_SATA_TF_LBAH_OFFSET = 0x14,
+ K2_SATA_TF_DEVICE_OFFSET = 0x18,
+ K2_SATA_TF_CMDSTAT_OFFSET = 0x1c,
+ K2_SATA_TF_CTL_OFFSET = 0x20,
+
+ /* DMA base */
+ K2_SATA_DMA_CMD_OFFSET = 0x30,
+
+ /* SCRs base */
+ K2_SATA_SCR_STATUS_OFFSET = 0x40,
+ K2_SATA_SCR_ERROR_OFFSET = 0x44,
+ K2_SATA_SCR_CONTROL_OFFSET = 0x48,
+
+ /* Others */
+ K2_SATA_SICR1_OFFSET = 0x80,
+ K2_SATA_SICR2_OFFSET = 0x84,
+ K2_SATA_SIM_OFFSET = 0x88,
+
+ /* Port stride */
+ K2_SATA_PORT_OFFSET = 0x100,
+
+ chip_svw4 = 0,
+ chip_svw8 = 1,
+ chip_svw42 = 2, /* bar 3 */
+ chip_svw43 = 3, /* bar 5 */
+};
+
+static u8 k2_stat_check_status(struct ata_port *ap);
+
+
+static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ u8 cmnd = qc->scsicmd->cmnd[0];
+
+ if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA)
+ return -1; /* ATAPI DMA not supported */
+ else {
+ switch (cmnd) {
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ return 0;
+
+ default:
+ return -1;
+ }
+
+ }
+}
+
+static int k2_sata_scr_read(struct ata_link *link,
+ unsigned int sc_reg, u32 *val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
+}
+
+
+static int k2_sata_scr_write(struct ata_link *link,
+ unsigned int sc_reg, u32 val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
+}
+
+
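+/* The K2 taskfile registers are 16 bits wide, so for LBA48 commands the
+ * hob (high order byte) of each field is packed into the upper byte of
+ * the same write, e.g. writew(tf->nsect | tf->hob_nsect << 8, ...).
+ */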
+static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ writeb(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ writew(tf->feature | (((u16)tf->hob_feature) << 8),
+ ioaddr->feature_addr);
+ writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
+ ioaddr->nsect_addr);
+ writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
+ ioaddr->lbal_addr);
+ writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
+ ioaddr->lbam_addr);
+ writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
+ ioaddr->lbah_addr);
+ } else if (is_addr) {
+ writew(tf->feature, ioaddr->feature_addr);
+ writew(tf->nsect, ioaddr->nsect_addr);
+ writew(tf->lbal, ioaddr->lbal_addr);
+ writew(tf->lbam, ioaddr->lbam_addr);
+ writew(tf->lbah, ioaddr->lbah_addr);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE)
+ writeb(tf->device, ioaddr->device_addr);
+
+ ata_wait_idle(ap);
+}
+
+
+static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u16 nsect, lbal, lbam, lbah, feature;
+
+ tf->command = k2_stat_check_status(ap);
+ tf->device = readw(ioaddr->device_addr);
+ feature = readw(ioaddr->error_addr);
+ nsect = readw(ioaddr->nsect_addr);
+ lbal = readw(ioaddr->lbal_addr);
+ lbam = readw(ioaddr->lbam_addr);
+ lbah = readw(ioaddr->lbah_addr);
+
+ tf->feature = feature;
+ tf->nsect = nsect;
+ tf->lbal = lbal;
+ tf->lbam = lbam;
+ tf->lbah = lbah;
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ tf->hob_feature = feature >> 8;
+ tf->hob_nsect = nsect >> 8;
+ tf->hob_lbal = lbal >> 8;
+ tf->hob_lbam = lbam >> 8;
+ tf->hob_lbah = lbah >> 8;
+ }
+}
+
+/**
+ * k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+
+static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+ u8 dmactl;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+
+ /* load PRD table addr. */
+ mb(); /* make sure PRD table writes are visible to controller */
+ writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = readb(mmio + ATA_DMA_CMD);
+ dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+ if (!rw)
+ dmactl |= ATA_DMA_WR;
+ writeb(dmactl, mmio + ATA_DMA_CMD);
+
+	/* issue the r/w command now if this is not an ATA DMA command */
+ if (qc->tf.protocol != ATA_PROT_DMA)
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+/**
+ * k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
+ * @qc: Info associated with this ATA transaction.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+
+static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *mmio = ap->ioaddr.bmdma_addr;
+ u8 dmactl;
+
+ /* start host DMA transaction */
+ dmactl = readb(mmio + ATA_DMA_CMD);
+ writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
+	/* This works around possible data corruption seen on certain
+	 * SATA controllers when the r/w command is given to the
+	 * controller before host DMA is started.
+	 *
+	 * On a READ command, the controller initiates the command to
+	 * the drive even before it sees the DMA start.  With a very
+	 * fast drive, or when the request hits in the drive cache,
+	 * the drive may return part or all of the requested data
+	 * before the DMA start is issued.  The controller then does
+	 * not know what to do with the data.  In the worst case, when
+	 * all the data is returned, the controller can hang; in other
+	 * cases it returns partial data, resulting in data corruption.
+	 * This problem has been seen on PPC systems and can also
+	 * appear on a system with very fast disks, where the SATA
+	 * controller sits behind a number of bridges, and hence there
+	 * is significant latency between the r/w command and the
+	 * start command.
+	 */
+	/* issue the r/w command now (deferred from bmdma_setup) if
+	 * this is an ATA DMA command */
+ if (qc->tf.protocol == ATA_PROT_DMA)
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+
+static u8 k2_stat_check_status(struct ata_port *ap)
+{
+ return readl(ap->ioaddr.status_addr);
+}
+
+#ifdef CONFIG_PPC_OF
+/*
+ * k2_sata_proc_info
+ * inout : decides the direction of the data flow and the meaning of the
+ *		variables
+ * page  : if inout == FALSE, data is written to it, else read from it
+ * *start: if inout == FALSE, start of the valid data in the buffer
+ * offset: if inout == FALSE, offset from the beginning of the imaginary
+ *		file from which we start writing into the buffer
+ * count : if inout == FALSE, max number of bytes to be written into the
+ *		buffer, else number of bytes in the buffer
+ */
+static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
+ off_t offset, int count, int inout)
+{
+ struct ata_port *ap;
+ struct device_node *np;
+ int len, index;
+
+ /* Find the ata_port */
+ ap = ata_shost_to_port(shost);
+ if (ap == NULL)
+ return 0;
+
+ /* Find the OF node for the PCI device proper */
+ np = pci_device_to_OF_node(to_pci_dev(ap->host->dev));
+ if (np == NULL)
+ return 0;
+
+ /* Match it to a port node */
+ index = (ap == ap->host->ports[0]) ? 0 : 1;
+ for (np = np->child; np != NULL; np = np->sibling) {
+ const u32 *reg = of_get_property(np, "reg", NULL);
+ if (!reg)
+ continue;
+ if (index == *reg)
+ break;
+ }
+ if (np == NULL)
+ return 0;
+
+ len = sprintf(page, "devspec: %s\n", np->full_name);
+
+ return len;
+}
+#endif /* CONFIG_PPC_OF */
+
+
+static struct scsi_host_template k2_sata_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+#ifdef CONFIG_PPC_OF
+ .proc_info = k2_sata_proc_info,
+#endif
+};
+
+
+static struct ata_port_operations k2_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_tf_load = k2_sata_tf_load,
+ .sff_tf_read = k2_sata_tf_read,
+ .sff_check_status = k2_stat_check_status,
+ .check_atapi_dma = k2_sata_check_atapi_dma,
+ .bmdma_setup = k2_bmdma_setup_mmio,
+ .bmdma_start = k2_bmdma_start_mmio,
+ .scr_read = k2_sata_scr_read,
+ .scr_write = k2_sata_scr_write,
+};
+
+static const struct ata_port_info k2_port_info[] = {
+ /* chip_svw4 */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &k2_sata_ops,
+ },
+ /* chip_svw8 */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA |
+ K2_FLAG_SATA_8_PORTS,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &k2_sata_ops,
+ },
+ /* chip_svw42 */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &k2_sata_ops,
+ },
+ /* chip_svw43 */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &k2_sata_ops,
+ },
+};
+
+static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+ port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET;
+ port->data_addr = base + K2_SATA_TF_DATA_OFFSET;
+ port->feature_addr =
+ port->error_addr = base + K2_SATA_TF_ERROR_OFFSET;
+ port->nsect_addr = base + K2_SATA_TF_NSECT_OFFSET;
+ port->lbal_addr = base + K2_SATA_TF_LBAL_OFFSET;
+ port->lbam_addr = base + K2_SATA_TF_LBAM_OFFSET;
+ port->lbah_addr = base + K2_SATA_TF_LBAH_OFFSET;
+ port->device_addr = base + K2_SATA_TF_DEVICE_OFFSET;
+ port->command_addr =
+ port->status_addr = base + K2_SATA_TF_CMDSTAT_OFFSET;
+ port->altstatus_addr =
+ port->ctl_addr = base + K2_SATA_TF_CTL_OFFSET;
+ port->bmdma_addr = base + K2_SATA_DMA_CMD_OFFSET;
+ port->scr_addr = base + K2_SATA_SCR_STATUS_OFFSET;
+}
+
+
+static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ const struct ata_port_info *ppi[] =
+ { &k2_port_info[ent->driver_data], NULL };
+ struct ata_host *host;
+ void __iomem *mmio_base;
+ int n_ports, i, rc, bar_pos;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* allocate host */
+ n_ports = 4;
+ if (ppi[0]->flags & K2_FLAG_SATA_8_PORTS)
+ n_ports = 8;
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ if (!host)
+ return -ENOMEM;
+
+ bar_pos = 5;
+ if (ppi[0]->flags & K2_FLAG_BAR_POS_3)
+ bar_pos = 3;
+ /*
+ * If this driver happens to only be useful on Apple's K2, then
+ * we should check that here as it has a normal Serverworks ID
+ */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /*
+ * Check if we have resources mapped at all (second function may
+ * have been disabled by firmware)
+ */
+ if (pci_resource_len(pdev, bar_pos) == 0) {
+ /* In IDE mode we need to pin the device to ensure that
+ pcim_release does not clear the busmaster bit in config
+ space, clearing causes busmaster DMA to fail on
+ ports 3 & 4 */
+ pcim_pin_device(pdev);
+ return -ENODEV;
+ }
+
+ /* Request and iomap PCI regions */
+ rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+ mmio_base = host->iomap[bar_pos];
+
+	/* Different controllers have different numbers of ports -
+	 * currently 4 or 8.  All ports are on the same PCI function;
+	 * the multi-function variant is no longer available and should
+	 * not be seen on any system.
+	 */
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ unsigned int offset = i * K2_SATA_PORT_OFFSET;
+
+ k2_sata_setup_port(&ap->ioaddr, mmio_base + offset);
+
+		ata_port_pbar_desc(ap, bar_pos, -1, "mmio");
+		ata_port_pbar_desc(ap, bar_pos, offset, "port");
+ }
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ /* Clear a magic bit in SCR1 according to Darwin, those help
+ * some funky seagate drives (though so far, those were already
+ * set by the firmware on the machines I had access to)
+ */
+ writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
+ mmio_base + K2_SATA_SICR1_OFFSET);
+
+ /* Clear SATA error & interrupts we don't use */
+ writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
+ writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ IRQF_SHARED, &k2_sata_sht);
+}
+
+/* 0x240 is device ID for Apple K2 device
+ * 0x241 is device ID for Serverworks Frodo4
+ * 0x242 is device ID for Serverworks Frodo8
+ * 0x24a is device ID for BCM5785 (aka HT1000) HT southbridge integrated SATA
+ * controller
+ */
+static const struct pci_device_id k2_sata_pci_tbl[] = {
+ { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 },
+ { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw8 },
+ { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw4 },
+ { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 },
+ { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 },
+ { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 },
+ { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 },
+
+ { }
+};
+
+static struct pci_driver k2_sata_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = k2_sata_pci_tbl,
+ .probe = k2_sata_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static int __init k2_sata_init(void)
+{
+ return pci_register_driver(&k2_sata_pci_driver);
+}
+
+static void __exit k2_sata_exit(void)
+{
+ pci_unregister_driver(&k2_sata_pci_driver);
+}
+
+MODULE_AUTHOR("Benjamin Herrenschmidt");
+MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(k2_sata_init);
+module_exit(k2_sata_exit);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
new file mode 100644
index 0000000..ec04b8d
--- /dev/null
+++ b/drivers/ata/sata_sx4.c
@@ -0,0 +1,1454 @@
+/*
+ * sata_sx4.c - Promise SATA
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003-2004 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
+ *
+ */
+
+/*
+ Theory of operation
+ -------------------
+
+ The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
+ engine, DIMM memory, and four ATA engines (one per SATA port).
+ Data is copied to/from DIMM memory by the HDMA engine, before
+ handing off to one (or more) of the ATA engines. The ATA
+ engines operate solely on DIMM memory.
+
+ The SX4 behaves like a PATA chip, with no SATA controls or
+ knowledge whatsoever, leading to the presumption that
+ PATA<->SATA bridges exist on SX4 boards, external to the
+ PDC20621 chip itself.
+
+ The chip is quite capable, supporting an XOR engine and linked
+  hardware commands (permits a string of transactions to be
+ submitted and waited-on as a single unit), and an optional
+ microprocessor.
+
+ The limiting factor is largely software. This Linux driver was
+ written to multiplex the single HDMA engine to copy disk
+ transactions into a fixed DIMM memory space, from where an ATA
+ engine takes over. As a result, each WRITE looks like this:
+
+ submit HDMA packet to hardware
+ hardware copies data from system memory to DIMM
+ hardware raises interrupt
+
+ submit ATA packet to hardware
+ hardware executes ATA WRITE command, w/ data in DIMM
+ hardware raises interrupt
+
+ and each READ looks like this:
+
+ submit ATA packet to hardware
+ hardware executes ATA READ command, w/ data in DIMM
+ hardware raises interrupt
+
+ submit HDMA packet to hardware
+ hardware copies data from DIMM to system memory
+ hardware raises interrupt
+
+ This is a very slow, lock-step way of doing things that can
+ certainly be improved by motivated kernel hackers.
+
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "sata_promise.h"
+
+#define DRV_NAME "sata_sx4"
+#define DRV_VERSION "0.12"
+
+
+enum {
+ PDC_MMIO_BAR = 3,
+ PDC_DIMM_BAR = 4,
+
+ PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
+
+ PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
+ PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
+ PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
+ PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
+
+ PDC_CTLSTAT = 0x60, /* IDEn control / status */
+
+ PDC_20621_SEQCTL = 0x400,
+ PDC_20621_SEQMASK = 0x480,
+ PDC_20621_GENERAL_CTL = 0x484,
+ PDC_20621_PAGE_SIZE = (32 * 1024),
+
+ /* chosen, not constant, values; we design our own DIMM mem map */
+ PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
+ PDC_20621_DIMM_BASE = 0x00200000,
+ PDC_20621_DIMM_DATA = (64 * 1024),
+ PDC_DIMM_DATA_STEP = (256 * 1024),
+ PDC_DIMM_WINDOW_STEP = (8 * 1024),
+ PDC_DIMM_HOST_PRD = (6 * 1024),
+ PDC_DIMM_HOST_PKT = (128 * 0),
+ PDC_DIMM_HPKT_PRD = (128 * 1),
+ PDC_DIMM_ATA_PKT = (128 * 2),
+ PDC_DIMM_APKT_PRD = (128 * 3),
+ PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
+ PDC_PAGE_WINDOW = 0x40,
+ PDC_PAGE_DATA = PDC_PAGE_WINDOW +
+ (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
+ PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
+
+ PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
+
+ PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
+ (1<<23),
+
+ board_20621 = 0, /* FastTrak S150 SX4 */
+
+ PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
+ PDC_RESET = (1 << 11), /* HDMA/ATA reset */
+ PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
+
+ PDC_MAX_HDMA = 32,
+ PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
+
+ PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
+ PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
+ PDC_I2C_CONTROL = 0x48,
+ PDC_I2C_ADDR_DATA = 0x4C,
+ PDC_DIMM0_CONTROL = 0x80,
+ PDC_DIMM1_CONTROL = 0x84,
+ PDC_SDRAM_CONTROL = 0x88,
+ PDC_I2C_WRITE = 0, /* master -> slave */
+ PDC_I2C_READ = (1 << 6), /* master <- slave */
+ PDC_I2C_START = (1 << 7), /* start I2C proto */
+ PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
+ PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
+ PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
+ PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
+ PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
+ PDC_DIMM_SPD_ROW_NUM = 3,
+ PDC_DIMM_SPD_COLUMN_NUM = 4,
+ PDC_DIMM_SPD_MODULE_ROW = 5,
+ PDC_DIMM_SPD_TYPE = 11,
+ PDC_DIMM_SPD_FRESH_RATE = 12,
+ PDC_DIMM_SPD_BANK_NUM = 17,
+ PDC_DIMM_SPD_CAS_LATENCY = 18,
+ PDC_DIMM_SPD_ATTRIBUTE = 21,
+ PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
+ PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
+ PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
+ PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
+ PDC_DIMM_SPD_SYSTEM_FREQ = 126,
+ PDC_CTL_STATUS = 0x08,
+ PDC_DIMM_WINDOW_CTLR = 0x0C,
+ PDC_TIME_CONTROL = 0x3C,
+ PDC_TIME_PERIOD = 0x40,
+ PDC_TIME_COUNTER = 0x44,
+ PDC_GENERAL_CTLR = 0x484,
+ PCI_PLL_INIT = 0x8A531824,
+ PCI_X_TCOUNT = 0xEE1E5CFF,
+
+ /* PDC_TIME_CONTROL bits */
+ PDC_TIMER_BUZZER = (1 << 10),
+ PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
+ PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
+ PDC_TIMER_ENABLE = (1 << 7),
+ PDC_TIMER_MASK_INT = (1 << 5),
+ PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
+ PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
+ PDC_TIMER_ENABLE |
+ PDC_TIMER_MASK_INT,
+};
+
+
+struct pdc_port_priv {
+ u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
+ u8 *pkt;
+ dma_addr_t pkt_dma;
+};
+
+struct pdc_host_priv {
+ unsigned int doing_hdma;
+ unsigned int hdma_prod;
+ unsigned int hdma_cons;
+ struct {
+ struct ata_queued_cmd *qc;
+ unsigned int seq;
+ unsigned long pkt_ofs;
+ } hdma[32];
+};
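+/* The hdma[] ring buffers up to PDC_MAX_HDMA pending Host DMA
+ * transactions; hdma_prod and hdma_cons act as producer/consumer
+ * indices, reduced with PDC_HDMA_Q_MASK when indexing the ring.
+ */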
+
+
+static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void pdc_eng_timeout(struct ata_port *ap);
+static void pdc_20621_phy_reset(struct ata_port *ap);
+static int pdc_port_start(struct ata_port *ap);
+static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+static unsigned int pdc20621_dimm_init(struct ata_host *host);
+static int pdc20621_detect_dimm(struct ata_host *host);
+static unsigned int pdc20621_i2c_read(struct ata_host *host,
+ u32 device, u32 subaddr, u32 *pdata);
+static int pdc20621_prog_dimm0(struct ata_host *host);
+static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
+#ifdef ATA_VERBOSE_DEBUG
+static void pdc20621_get_from_dimm(struct ata_host *host,
+ void *psource, u32 offset, u32 size);
+#endif
+static void pdc20621_put_to_dimm(struct ata_host *host,
+ void *psource, u32 offset, u32 size);
+static void pdc20621_irq_clear(struct ata_port *ap);
+static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
+
+
+static struct scsi_host_template pdc_sata_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = LIBATA_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+/* TODO: inherit from base port_ops after converting to new EH */
+static struct ata_port_operations pdc_20621_ops = {
+ .sff_tf_load = pdc_tf_load_mmio,
+ .sff_tf_read = ata_sff_tf_read,
+ .sff_check_status = ata_sff_check_status,
+ .sff_exec_command = pdc_exec_command_mmio,
+ .sff_dev_select = ata_sff_dev_select,
+ .phy_reset = pdc_20621_phy_reset,
+ .qc_prep = pdc20621_qc_prep,
+ .qc_issue = pdc20621_qc_issue,
+ .qc_fill_rtf = ata_sff_qc_fill_rtf,
+ .sff_data_xfer = ata_sff_data_xfer,
+ .eng_timeout = pdc_eng_timeout,
+ .sff_irq_clear = pdc20621_irq_clear,
+ .sff_irq_on = ata_sff_irq_on,
+ .port_start = pdc_port_start,
+};
+
+static const struct ata_port_info pdc_port_info[] = {
+ /* board_20621 */
+ {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_SRST | ATA_FLAG_MMIO |
+ ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &pdc_20621_ops,
+ },
+
+};
+
+static const struct pci_device_id pdc_sata_pci_tbl[] = {
+ { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver pdc_sata_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = pdc_sata_pci_tbl,
+ .probe = pdc_sata_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+
+static int pdc_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct pdc_port_priv *pp;
+ int rc;
+
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
+ if (!pp->pkt)
+ return -ENOMEM;
+
+ ap->private_data = pp;
+
+ return 0;
+}
+
+static void pdc_20621_phy_reset(struct ata_port *ap)
+{
+ VPRINTK("ENTER\n");
+ ap->cbl = ATA_CBL_SATA;
+ ata_port_probe(ap);
+ ata_bus_reset(ap);
+}
+
+static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
+ unsigned int portno,
+ unsigned int total_len)
+{
+ u32 addr;
+ unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
+ __le32 *buf32 = (__le32 *) buf;
+
+ /* output ATA packet S/G table */
+ addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
+ (PDC_DIMM_DATA_STEP * portno);
+ VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
+ buf32[dw] = cpu_to_le32(addr);
+ buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
+
+ VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
+ PDC_20621_DIMM_BASE +
+ (PDC_DIMM_WINDOW_STEP * portno) +
+ PDC_DIMM_APKT_PRD,
+ buf32[dw], buf32[dw + 1]);
+}
+
+static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
+ unsigned int portno,
+ unsigned int total_len)
+{
+ u32 addr;
+ unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
+ __le32 *buf32 = (__le32 *) buf;
+
+ /* output Host DMA packet S/G table */
+ addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
+ (PDC_DIMM_DATA_STEP * portno);
+
+ buf32[dw] = cpu_to_le32(addr);
+ buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
+
+ VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
+ PDC_20621_DIMM_BASE +
+ (PDC_DIMM_WINDOW_STEP * portno) +
+ PDC_DIMM_HPKT_PRD,
+ buf32[dw], buf32[dw + 1]);
+}
+
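+/*
+ * Build the ATA packet in dimm_buf: a 4-byte header (opcode, reserved
+ * byte, sequence ID, delay sequence ID), the address of the
+ * DIMM-resident S/G table plus a null next-packet pointer, then
+ * (control byte, value) pairs for the taskfile registers.
+ */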
+static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
+ unsigned int devno, u8 *buf,
+ unsigned int portno)
+{
+ unsigned int i, dw;
+ __le32 *buf32 = (__le32 *) buf;
+ u8 dev_reg;
+
+ unsigned int dimm_sg = PDC_20621_DIMM_BASE +
+ (PDC_DIMM_WINDOW_STEP * portno) +
+ PDC_DIMM_APKT_PRD;
+ VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
+
+ i = PDC_DIMM_ATA_PKT;
+
+ /*
+ * Set up ATA packet
+ */
+ if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
+ buf[i++] = PDC_PKT_READ;
+ else if (tf->protocol == ATA_PROT_NODATA)
+ buf[i++] = PDC_PKT_NODATA;
+ else
+ buf[i++] = 0;
+ buf[i++] = 0; /* reserved */
+ buf[i++] = portno + 1; /* seq. id */
+ buf[i++] = 0xff; /* delay seq. id */
+
+ /* dimm dma S/G, and next-pkt */
+ dw = i >> 2;
+ if (tf->protocol == ATA_PROT_NODATA)
+ buf32[dw] = 0;
+ else
+ buf32[dw] = cpu_to_le32(dimm_sg);
+ buf32[dw + 1] = 0;
+ i += 8;
+
+ if (devno == 0)
+ dev_reg = ATA_DEVICE_OBS;
+ else
+ dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
+
+ /* select device */
+ buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
+ buf[i++] = dev_reg;
+
+ /* device control register */
+ buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
+ buf[i++] = tf->ctl;
+
+ return i;
+}
+
+static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
+ unsigned int portno)
+{
+ unsigned int dw;
+ u32 tmp;
+ __le32 *buf32 = (__le32 *) buf;
+
+ unsigned int host_sg = PDC_20621_DIMM_BASE +
+ (PDC_DIMM_WINDOW_STEP * portno) +
+ PDC_DIMM_HOST_PRD;
+ unsigned int dimm_sg = PDC_20621_DIMM_BASE +
+ (PDC_DIMM_WINDOW_STEP * portno) +
+ PDC_DIMM_HPKT_PRD;
+ VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
+ VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
+
+ dw = PDC_DIMM_HOST_PKT >> 2;
+
+ /*
+ * Set up Host DMA packet
+ */
+ if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
+ tmp = PDC_PKT_READ;
+ else
+ tmp = 0;
+ tmp |= ((portno + 1 + 4) << 16); /* seq. id */
+ tmp |= (0xff << 24); /* delay seq. id */
+ buf32[dw + 0] = cpu_to_le32(tmp);
+ buf32[dw + 1] = cpu_to_le32(host_sg);
+ buf32[dw + 2] = cpu_to_le32(dimm_sg);
+ buf32[dw + 3] = 0;
+
+ VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
+ PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
+ PDC_DIMM_HOST_PKT,
+ buf32[dw + 0],
+ buf32[dw + 1],
+ buf32[dw + 2],
+ buf32[dw + 3]);
+}
+
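+/*
+ * Everything the DMA engines consume lives in the DIMM, not in host
+ * memory: build the host PRD table, both packets and their one-entry
+ * S/G stubs in dimm_buf, then copy the lot into this port's DIMM
+ * region through the MMIO window.
+ */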
+static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
+{
+ struct scatterlist *sg;
+ struct ata_port *ap = qc->ap;
+ struct pdc_port_priv *pp = ap->private_data;
+ void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
+ void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
+ unsigned int portno = ap->port_no;
+ unsigned int i, si, idx, total_len = 0, sgt_len;
+ __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
+
+ WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
+
+ VPRINTK("ata%u: ENTER\n", ap->print_id);
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ /*
+ * Build S/G table
+ */
+ idx = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ buf[idx++] = cpu_to_le32(sg_dma_address(sg));
+ buf[idx++] = cpu_to_le32(sg_dma_len(sg));
+ total_len += sg_dma_len(sg);
+ }
+ buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
+ sgt_len = idx * 4;
+
+ /*
+ * Build ATA, host DMA packets
+ */
+ pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
+ pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
+
+ pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
+ i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
+
+ if (qc->tf.flags & ATA_TFLAG_LBA48)
+ i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
+ else
+ i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
+
+ pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
+
+ /* copy three S/G tables and two packets to DIMM MMIO window */
+ memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
+ &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
+ memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
+ PDC_DIMM_HOST_PRD,
+ &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
+
+ /* force host FIFO dump */
+ writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
+
+ readl(dimm_mmio); /* MMIO PCI posting flush */
+
+ VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
+}
+
+static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pdc_port_priv *pp = ap->private_data;
+ void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
+ void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
+ unsigned int portno = ap->port_no;
+ unsigned int i;
+
+ VPRINTK("ata%u: ENTER\n", ap->print_id);
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
+
+ if (qc->tf.flags & ATA_TFLAG_LBA48)
+ i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
+ else
+ i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
+
+ pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
+
+	/* no data transfer: only the packet header goes to the DIMM window */
+ memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
+ &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
+
+ /* force host FIFO dump */
+ writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
+
+ readl(dimm_mmio); /* MMIO PCI posting flush */
+
+ VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
+}
+
+static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+{
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ pdc20621_dma_prep(qc);
+ break;
+ case ATA_PROT_NODATA:
+ pdc20621_nodata_prep(qc);
+ break;
+ default:
+ break;
+ }
+}
+
+static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
+ unsigned int seq,
+ u32 pkt_ofs)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_host *host = ap->host;
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
+ readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
+
+ writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
+ readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
+}
+
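+/*
+ * Submit or queue a host DMA packet.  If the engine is idle the packet
+ * is written to the hardware immediately; otherwise it waits in the
+ * ring until pdc20621_pop_hdma() re-submits it from the completion
+ * interrupt of the transfer ahead of it.
+ */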
+static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
+ unsigned int seq,
+ u32 pkt_ofs)
+{
+ struct ata_port *ap = qc->ap;
+ struct pdc_host_priv *pp = ap->host->private_data;
+ unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
+
+ if (!pp->doing_hdma) {
+ __pdc20621_push_hdma(qc, seq, pkt_ofs);
+ pp->doing_hdma = 1;
+ return;
+ }
+
+ pp->hdma[idx].qc = qc;
+ pp->hdma[idx].seq = seq;
+ pp->hdma[idx].pkt_ofs = pkt_ofs;
+ pp->hdma_prod++;
+}
+
+static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pdc_host_priv *pp = ap->host->private_data;
+ unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
+
+ /* if nothing on queue, we're done */
+ if (pp->hdma_prod == pp->hdma_cons) {
+ pp->doing_hdma = 0;
+ return;
+ }
+
+ __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
+ pp->hdma[idx].pkt_ofs);
+ pp->hdma_cons++;
+}
+
+#ifdef ATA_VERBOSE_DEBUG
+static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int port_no = ap->port_no;
+ void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
+
+ dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
+ dimm_mmio += PDC_DIMM_HOST_PKT;
+
+ printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
+ printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
+ printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
+ printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
+}
+#else
+static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
+#endif /* ATA_VERBOSE_DEBUG */
+
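+/*
+ * Kick off a prepared command.  Sequence IDs 1-4 address the per-port
+ * ATA packet engines and IDs 5-8 (seq + 4) the matching host DMA
+ * engines.  Writes must stage data into the DIMM first, so they start
+ * with the HDMA packet; reads and non-data commands start with the
+ * ATA packet.
+ */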
+static void pdc20621_packet_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_host *host = ap->host;
+ unsigned int port_no = ap->port_no;
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+ unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+ u8 seq = (u8) (port_no + 1);
+ unsigned int port_ofs;
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ VPRINTK("ata%u: ENTER\n", ap->print_id);
+
+ wmb(); /* flush PRD, pkt writes */
+
+ port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
+
+ /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
+ if (rw && qc->tf.protocol == ATA_PROT_DMA) {
+ seq += 4;
+
+ pdc20621_dump_hdma(qc);
+ pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
+ VPRINTK("queued ofs 0x%x (%u), seq %u\n",
+ port_ofs + PDC_DIMM_HOST_PKT,
+ port_ofs + PDC_DIMM_HOST_PKT,
+ seq);
+ } else {
+ writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
+ readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
+
+ writel(port_ofs + PDC_DIMM_ATA_PKT,
+ ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+ readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+ VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
+ port_ofs + PDC_DIMM_ATA_PKT,
+ port_ofs + PDC_DIMM_ATA_PKT,
+ seq);
+ }
+}
+
+static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
+{
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ case ATA_PROT_NODATA:
+ pdc20621_packet_start(qc);
+ return 0;
+
+ case ATAPI_PROT_DMA:
+ BUG();
+ break;
+
+ default:
+ break;
+ }
+
+ return ata_sff_qc_issue(qc);
+}
+
+static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc,
+ unsigned int doing_hdma,
+ void __iomem *mmio)
+{
+ unsigned int port_no = ap->port_no;
+ unsigned int port_ofs =
+ PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
+ u8 status;
+ unsigned int handled = 0;
+
+ VPRINTK("ENTER\n");
+
+ if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
+ (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
+
+ /* step two - DMA from DIMM to host */
+ if (doing_hdma) {
+ VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
+ readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
+ /* get drive status; clear intr; complete txn */
+ qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
+ ata_qc_complete(qc);
+ pdc20621_pop_hdma(qc);
+ }
+
+ /* step one - exec ATA command */
+ else {
+ u8 seq = (u8) (port_no + 1 + 4);
+ VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
+ readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
+
+ /* submit hdma pkt */
+ pdc20621_dump_hdma(qc);
+ pdc20621_push_hdma(qc, seq,
+ port_ofs + PDC_DIMM_HOST_PKT);
+ }
+ handled = 1;
+
+ } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
+
+ /* step one - DMA from host to DIMM */
+ if (doing_hdma) {
+ u8 seq = (u8) (port_no + 1);
+ VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
+ readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
+
+ /* submit ata pkt */
+ writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
+ readl(mmio + PDC_20621_SEQCTL + (seq * 4));
+ writel(port_ofs + PDC_DIMM_ATA_PKT,
+ ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+ readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
+ }
+
+ /* step two - execute ATA command */
+ else {
+ VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
+ readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
+ /* get drive status; clear intr; complete txn */
+ qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
+ ata_qc_complete(qc);
+ pdc20621_pop_hdma(qc);
+ }
+ handled = 1;
+
+ /* command completion, but no data xfer */
+ } else if (qc->tf.protocol == ATA_PROT_NODATA) {
+
+ status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+ DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
+ qc->err_mask |= ac_err_mask(status);
+ ata_qc_complete(qc);
+ handled = 1;
+
+ } else {
+ ap->stats.idle_irq++;
+ }
+
+ return handled;
+}
+
+static void pdc20621_irq_clear(struct ata_port *ap)
+{
+ struct ata_host *host = ap->host;
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+
+ mmio += PDC_CHIP0_OFS;
+
+ readl(mmio + PDC_20621_SEQMASK);
+}
+
+static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ata_port *ap;
+ u32 mask = 0;
+ unsigned int i, tmp, port_no;
+ unsigned int handled = 0;
+ void __iomem *mmio_base;
+
+ VPRINTK("ENTER\n");
+
+ if (!host || !host->iomap[PDC_MMIO_BAR]) {
+ VPRINTK("QUICK EXIT\n");
+ return IRQ_NONE;
+ }
+
+ mmio_base = host->iomap[PDC_MMIO_BAR];
+
+ /* reading should also clear interrupts */
+ mmio_base += PDC_CHIP0_OFS;
+ mask = readl(mmio_base + PDC_20621_SEQMASK);
+ VPRINTK("mask == 0x%x\n", mask);
+
+ if (mask == 0xffffffff) {
+ VPRINTK("QUICK EXIT 2\n");
+ return IRQ_NONE;
+ }
+ mask &= 0xffff; /* only 16 tags possible */
+ if (!mask) {
+ VPRINTK("QUICK EXIT 3\n");
+ return IRQ_NONE;
+ }
+
+ spin_lock(&host->lock);
+
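+	/* SEQMASK bit N flags completion of sequence ID N: bits 1-4 are
+	 * the ATA engines of ports 0-3, bits 5-8 the matching host DMA
+	 * engines (hence the 'i > 4' doing_hdma test below).
+	 */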
+ for (i = 1; i < 9; i++) {
+ port_no = i - 1;
+ if (port_no > 3)
+ port_no -= 4;
+ if (port_no >= host->n_ports)
+ ap = NULL;
+ else
+ ap = host->ports[port_no];
+ tmp = mask & (1 << i);
+ VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
+ if (tmp && ap &&
+ !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+ handled += pdc20621_host_intr(ap, qc, (i > 4),
+ mmio_base);
+ }
+ }
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("mask == 0x%x\n", mask);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+
+static void pdc_eng_timeout(struct ata_port *ap)
+{
+ u8 drv_stat;
+ struct ata_host *host = ap->host;
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+
+ DPRINTK("ENTER\n");
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ case ATA_PROT_NODATA:
+ ata_port_printk(ap, KERN_ERR, "command timeout\n");
+ qc->err_mask |= __ac_err_mask(ata_wait_idle(ap));
+ break;
+
+ default:
+ drv_stat = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+
+ ata_port_printk(ap, KERN_ERR,
+ "unknown timeout, cmd 0x%x stat 0x%x\n",
+ qc->tf.command, drv_stat);
+
+ qc->err_mask |= ac_err_mask(drv_stat);
+ break;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ ata_eh_qc_complete(qc);
+ DPRINTK("EXIT\n");
+}
+
+static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ WARN_ON(tf->protocol == ATA_PROT_DMA ||
+ tf->protocol == ATA_PROT_NODATA);
+ ata_sff_tf_load(ap, tf);
+}
+
+
+static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ WARN_ON(tf->protocol == ATA_PROT_DMA ||
+ tf->protocol == ATA_PROT_NODATA);
+ ata_sff_exec_command(ap, tf);
+}
+
+
+static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
+{
+ port->cmd_addr = base;
+ port->data_addr = base;
+ port->feature_addr =
+ port->error_addr = base + 0x4;
+ port->nsect_addr = base + 0x8;
+ port->lbal_addr = base + 0xc;
+ port->lbam_addr = base + 0x10;
+ port->lbah_addr = base + 0x14;
+ port->device_addr = base + 0x18;
+ port->command_addr =
+ port->status_addr = base + 0x1c;
+ port->altstatus_addr =
+ port->ctl_addr = base + 0x38;
+}
+
+
+#ifdef ATA_VERBOSE_DEBUG
+static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
+ u32 offset, u32 size)
+{
+ u32 window_size;
+ u16 idx;
+ u8 page_mask;
+ long dist;
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+ void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ page_mask = 0x00;
+	window_size = 0x2000 * 4; /* 32 KB window */
+ idx = (u16) (offset / window_size);
+
+ writel(0x01, mmio + PDC_GENERAL_CTLR);
+ readl(mmio + PDC_GENERAL_CTLR);
+ writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+ readl(mmio + PDC_DIMM_WINDOW_CTLR);
+
+ offset -= (idx * window_size);
+ idx++;
+ dist = ((long) (window_size - (offset + size))) >= 0 ? size :
+ (long) (window_size - offset);
+ memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
+ dist);
+
+ psource += dist;
+ size -= dist;
+ for (; (long) size >= (long) window_size ;) {
+ writel(0x01, mmio + PDC_GENERAL_CTLR);
+ readl(mmio + PDC_GENERAL_CTLR);
+ writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+ readl(mmio + PDC_DIMM_WINDOW_CTLR);
+ memcpy_fromio((char *) psource, (char *) (dimm_mmio),
+ window_size / 4);
+ psource += window_size;
+ size -= window_size;
+ idx++;
+ }
+
+ if (size) {
+ writel(0x01, mmio + PDC_GENERAL_CTLR);
+ readl(mmio + PDC_GENERAL_CTLR);
+ writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+ readl(mmio + PDC_DIMM_WINDOW_CTLR);
+ memcpy_fromio((char *) psource, (char *) (dimm_mmio),
+ size / 4);
+ }
+}
+#endif
+
+
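+/*
+ * The DIMM is reached through a single 32 KB MMIO window; select the
+ * page with PDC_DIMM_WINDOW_CTLR and copy chunk by chunk, advancing
+ * the page index until the whole transfer is done.
+ */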
+static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
+ u32 offset, u32 size)
+{
+ u32 window_size;
+ u16 idx;
+ u8 page_mask;
+ long dist;
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+ void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ page_mask = 0x00;
+	window_size = 0x2000 * 4; /* 32 KB window */
+ idx = (u16) (offset / window_size);
+
+ writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+ readl(mmio + PDC_DIMM_WINDOW_CTLR);
+ offset -= (idx * window_size);
+ idx++;
+ dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
+ (long) (window_size - offset);
+ memcpy_toio(dimm_mmio + offset / 4, psource, dist);
+ writel(0x01, mmio + PDC_GENERAL_CTLR);
+ readl(mmio + PDC_GENERAL_CTLR);
+
+ psource += dist;
+ size -= dist;
+ for (; (long) size >= (long) window_size ;) {
+ writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+ readl(mmio + PDC_DIMM_WINDOW_CTLR);
+ memcpy_toio(dimm_mmio, psource, window_size / 4);
+ writel(0x01, mmio + PDC_GENERAL_CTLR);
+ readl(mmio + PDC_GENERAL_CTLR);
+ psource += window_size;
+ size -= window_size;
+ idx++;
+ }
+
+ if (size) {
+ writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
+ readl(mmio + PDC_DIMM_WINDOW_CTLR);
+ memcpy_toio(dimm_mmio, psource, size / 4);
+ writel(0x01, mmio + PDC_GENERAL_CTLR);
+ readl(mmio + PDC_GENERAL_CTLR);
+ }
+}
+
+
+static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
+ u32 subaddr, u32 *pdata)
+{
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+ u32 i2creg = 0;
+ u32 status;
+ u32 count = 0;
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ i2creg |= device << 24;
+ i2creg |= subaddr << 16;
+
+ /* Set the device and subaddress */
+ writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
+ readl(mmio + PDC_I2C_ADDR_DATA);
+
+ /* Write Control to perform read operation, mask int */
+ writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
+ mmio + PDC_I2C_CONTROL);
+
+	for (count = 0; count <= 1000; count++) {
+ status = readl(mmio + PDC_I2C_CONTROL);
+ if (status & PDC_I2C_COMPLETE) {
+ status = readl(mmio + PDC_I2C_ADDR_DATA);
+ break;
+ } else if (count == 1000)
+ return 0;
+ }
+
+ *pdata = (status >> 8) & 0x000000ff;
+ return 1;
+}
+
+
+static int pdc20621_detect_dimm(struct ata_host *host)
+{
+ u32 data = 0;
+ if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
+ if (data == 100)
+ return 100;
+ } else
+ return 0;
+
+ if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
+ if (data <= 0x75)
+ return 133;
+ } else
+ return 0;
+
+ return 0;
+}
+
+
+static int pdc20621_prog_dimm0(struct ata_host *host)
+{
+ u32 spd0[50];
+ u32 data = 0;
+ int size, i;
+ u8 bdimmsize;
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+ static const struct {
+ unsigned int reg;
+ unsigned int ofs;
+ } pdc_i2c_read_data [] = {
+ { PDC_DIMM_SPD_TYPE, 11 },
+		{ PDC_DIMM_SPD_REFRESH_RATE, 12 },
+ { PDC_DIMM_SPD_COLUMN_NUM, 4 },
+ { PDC_DIMM_SPD_ATTRIBUTE, 21 },
+ { PDC_DIMM_SPD_ROW_NUM, 3 },
+ { PDC_DIMM_SPD_BANK_NUM, 17 },
+ { PDC_DIMM_SPD_MODULE_ROW, 5 },
+ { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
+ { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
+ { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
+ { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
+ { PDC_DIMM_SPD_CAS_LATENCY, 18 },
+ };
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
+ pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ pdc_i2c_read_data[i].reg,
+ &spd0[pdc_i2c_read_data[i].ofs]);
+
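+	/*
+	 * Pack the SDRAM geometry (column/row/bank and module row
+	 * counts) and timings (precharge, RAS/CAS and active/precharge
+	 * delays, rounded up from ns to 10 ns ticks, plus CAS latency)
+	 * into the DIMM0 module control register layout.
+	 */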
+ data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
+ data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
+ ((((spd0[27] + 9) / 10) - 1) << 8) ;
+ data |= (((((spd0[29] > spd0[28])
+ ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
+ data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
+
+ if (spd0[18] & 0x08)
+ data |= ((0x03) << 14);
+ else if (spd0[18] & 0x04)
+ data |= ((0x02) << 14);
+ else if (spd0[18] & 0x01)
+ data |= ((0x01) << 14);
+ else
+ data |= (0 << 14);
+
+	/*
+	   Calculate the DIMM size as a power of two (bdimmsize is the
+	   exponent, size the result in MB) and program it, in 16 MB
+	   units, into the module start/end address field.
+	*/
+
+ bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
+ size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
+ data |= (((size / 16) - 1) << 16);
+ data |= (0 << 23);
+ data |= 8;
+ writel(data, mmio + PDC_DIMM0_CONTROL);
+ readl(mmio + PDC_DIMM0_CONTROL);
+ return size;
+}
+
+
+static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
+{
+ u32 data, spd0;
+ int error, i;
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ /*
+ Set To Default : DIMM Module Global Control Register (0x022259F1)
+ DIMM Arbitration Disable (bit 20)
+ DIMM Data/Control Output Driving Selection (bit12 - bit15)
+ Refresh Enable (bit 17)
+ */
+
+ data = 0x022259F1;
+ writel(data, mmio + PDC_SDRAM_CONTROL);
+ readl(mmio + PDC_SDRAM_CONTROL);
+
+	/* Turn on ECC if the DIMM provides it (SPD type byte == 0x02) */
+ pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0);
+ if (spd0 == 0x02) {
+ data |= (0x01 << 16);
+ writel(data, mmio + PDC_SDRAM_CONTROL);
+ readl(mmio + PDC_SDRAM_CONTROL);
+ printk(KERN_ERR "Local DIMM ECC Enabled\n");
+ }
+
+ /* DIMM Initialization Select/Enable (bit 18/19) */
+ data &= (~(1<<18));
+ data |= (1<<19);
+ writel(data, mmio + PDC_SDRAM_CONTROL);
+
+ error = 1;
+ for (i = 1; i <= 10; i++) { /* polling ~5 secs */
+ data = readl(mmio + PDC_SDRAM_CONTROL);
+ if (!(data & (1<<19))) {
+ error = 0;
+ break;
+ }
+ msleep(i*100);
+ }
+ return error;
+}
+
+
+static unsigned int pdc20621_dimm_init(struct ata_host *host)
+{
+ int speed, size, length;
+ u32 addr, spd0, pci_status;
+ u32 tmp = 0;
+ u32 time_period = 0;
+ u32 tcount = 0;
+ u32 ticks = 0;
+ u32 clock = 0;
+ u32 fparam = 0;
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ /* Initialize PLL based upon PCI Bus Frequency */
+
+ /* Initialize Time Period Register */
+ writel(0xffffffff, mmio + PDC_TIME_PERIOD);
+ time_period = readl(mmio + PDC_TIME_PERIOD);
+ VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
+
+ /* Enable timer */
+ writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
+ readl(mmio + PDC_TIME_CONTROL);
+
+ /* Wait 3 seconds */
+ msleep(3000);
+
+ /*
+	   When the timer is enabled, the counter is decremented on
+	   every internal clock cycle.
+ */
+
+ tcount = readl(mmio + PDC_TIME_COUNTER);
+ VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
+
+ /*
+	   If the SX4 is on a PCI-X bus, then after 3 seconds the time
+	   counter register should still be >= (0xffffffff - 3*10^8).
+ */
+ if (tcount >= PCI_X_TCOUNT) {
+ ticks = (time_period - tcount);
+ VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
+
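+		/*
+		 * The counter ran for 3 s, so ticks / 300000 is ten
+		 * times the internal clock in MHz (~330 on a 33 MHz
+		 * PCI bus).
+		 */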
+ clock = (ticks / 300000);
+ VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
+
+ clock = (clock * 33);
+ VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
+
+ /* PLL F Param (bit 22:16) */
+ fparam = (1400000 / clock) - 2;
+ VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
+
+ /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
+ pci_status = (0x8a001824 | (fparam << 16));
+ } else
+ pci_status = PCI_PLL_INIT;
+
+ /* Initialize PLL. */
+ VPRINTK("pci_status: 0x%x\n", pci_status);
+ writel(pci_status, mmio + PDC_CTL_STATUS);
+ readl(mmio + PDC_CTL_STATUS);
+
+ /*
+	   Read the DIMM's SPD data over the I2C interface and program
+	   the DIMM module controller accordingly.
+ */
+ if (!(speed = pdc20621_detect_dimm(host))) {
+ printk(KERN_ERR "Detect Local DIMM Fail\n");
+ return 1; /* DIMM error */
+ }
+ VPRINTK("Local DIMM Speed = %d\n", speed);
+
+ /* Programming DIMM0 Module Control Register (index_CID0:80h) */
+ size = pdc20621_prog_dimm0(host);
+ VPRINTK("Local DIMM Size = %dMB\n", size);
+
+ /* Programming DIMM Module Global Control Register (index_CID0:88h) */
+ if (pdc20621_prog_dimm_global(host)) {
+ printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
+ return 1;
+ }
+
+#ifdef ATA_VERBOSE_DEBUG
+ {
+		u8 test_pattern1[40] =
+			{0x55,0xAA,'P','r','o','m','i','s','e',' ',
+			 'N','o','t',' ','Y','e','t',' ',
+			 'D','e','f','i','n','e','d',' ',
+			 '1','.','1','0',
+			 '9','8','0','3','1','6','1','2',0,0};
+		u8 test_pattern2[40] = {0};
+
+		pdc20621_put_to_dimm(host, test_pattern2, 0x10040, 40);
+		pdc20621_put_to_dimm(host, test_pattern2, 0x40, 40);
+
+		pdc20621_put_to_dimm(host, test_pattern1, 0x10040, 40);
+		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
+		       test_pattern2[1], &(test_pattern2[2]));
+		pdc20621_get_from_dimm(host, test_pattern2, 0x10040,
+				       40);
+		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
+		       test_pattern2[1], &(test_pattern2[2]));
+
+		pdc20621_put_to_dimm(host, test_pattern1, 0x40, 40);
+		pdc20621_get_from_dimm(host, test_pattern2, 0x40, 40);
+		printk(KERN_ERR "%x, %x, %s\n", test_pattern2[0],
+		       test_pattern2[1], &(test_pattern2[2]));
+ }
+#endif
+
+	/* ECC initialization. */
+
+ pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0);
+ if (spd0 == 0x02) {
+ VPRINTK("Start ECC initialization\n");
+ addr = 0;
+ length = size * 1024 * 1024;
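+		/* zero-fill the DIMM so every word has valid ECC check bits */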
+ while (addr < length) {
+ pdc20621_put_to_dimm(host, (void *) &tmp, addr,
+ sizeof(u32));
+ addr += sizeof(u32);
+ }
+ VPRINTK("Finish ECC initialization\n");
+ }
+ return 0;
+}
+
+
+static void pdc_20621_init(struct ata_host *host)
+{
+ u32 tmp;
+ void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
+
+ /* hard-code chip #0 */
+ mmio += PDC_CHIP0_OFS;
+
+ /*
+ * Select page 0x40 for our 32k DIMM window
+ */
+ tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
+ tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
+ writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
+
+ /*
+ * Reset Host DMA
+ */
+ tmp = readl(mmio + PDC_HDMA_CTLSTAT);
+ tmp |= PDC_RESET;
+ writel(tmp, mmio + PDC_HDMA_CTLSTAT);
+ readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
+
+ udelay(10);
+
+ tmp = readl(mmio + PDC_HDMA_CTLSTAT);
+ tmp &= ~PDC_RESET;
+ writel(tmp, mmio + PDC_HDMA_CTLSTAT);
+ readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
+}
+
+static int pdc_sata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ const struct ata_port_info *ppi[] =
+ { &pdc_port_info[ent->driver_data], NULL };
+ struct ata_host *host;
+ struct pdc_host_priv *hpriv;
+ int i, rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* allocate host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!host || !hpriv)
+ return -ENOMEM;
+
+ host->private_data = hpriv;
+
+ /* acquire resources and fill host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
+ DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+
+ for (i = 0; i < 4; i++) {
+ struct ata_port *ap = host->ports[i];
+ void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
+ unsigned int offset = 0x200 + i * 0x80;
+
+ pdc_sata_setup_port(&ap->ioaddr, base + offset);
+
+ ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
+ ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
+ }
+
+ /* configure and activate */
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ if (pdc20621_dimm_init(host))
+ return -ENOMEM;
+ pdc_20621_init(host);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
+ IRQF_SHARED, &pdc_sata_sht);
+}
+
+
+static int __init pdc_sata_init(void)
+{
+ return pci_register_driver(&pdc_sata_pci_driver);
+}
+
+
+static void __exit pdc_sata_exit(void)
+{
+ pci_unregister_driver(&pdc_sata_pci_driver);
+}
+
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Promise SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(pdc_sata_init);
+module_exit(pdc_sata_exit);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
new file mode 100644
index 0000000..019575b
--- /dev/null
+++ b/drivers/ata/sata_uli.c
@@ -0,0 +1,262 @@
+/*
+ * sata_uli.c - ULi Electronics SATA
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_uli"
+#define DRV_VERSION "1.3"
+
+enum {
+ uli_5289 = 0,
+ uli_5287 = 1,
+ uli_5281 = 2,
+
+ uli_max_ports = 4,
+
+ /* PCI configuration registers */
+ ULI5287_BASE = 0x90, /* sata0 phy SCR registers */
+ ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */
+ ULI5281_BASE = 0x60, /* sata0 phy SCR registers */
+ ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */
+};
+
+struct uli_priv {
+ unsigned int scr_cfg_addr[uli_max_ports];
+};
+
+static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+
+static const struct pci_device_id uli_pci_tbl[] = {
+ { PCI_VDEVICE(AL, 0x5289), uli_5289 },
+ { PCI_VDEVICE(AL, 0x5287), uli_5287 },
+ { PCI_VDEVICE(AL, 0x5281), uli_5281 },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver uli_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = uli_pci_tbl,
+ .probe = uli_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static struct scsi_host_template uli_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations uli_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .scr_read = uli_scr_read,
+ .scr_write = uli_scr_write,
+ .hardreset = ATA_OP_NULL,
+};
+
+static const struct ata_port_info uli_port_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_IGN_SIMPLEX,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &uli_ops,
+};
+
+
+MODULE_AUTHOR("Peer Chen");
+MODULE_DESCRIPTION("low-level driver for ULi Electronics SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, uli_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
+{
+ struct uli_priv *hpriv = ap->host->private_data;
+ return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
+}
+
+static u32 uli_scr_cfg_read(struct ata_link *link, unsigned int sc_reg)
+{
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
+ u32 val;
+
+ pci_read_config_dword(pdev, cfg_addr, &val);
+ return val;
+}
+
+static void uli_scr_cfg_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr);
+
+ pci_write_config_dword(pdev, cfg_addr, val);
+}
+
+static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+
+ *val = uli_scr_cfg_read(link, sc_reg);
+ return 0;
+}
+
+static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+	if (sc_reg > SCR_CONTROL) /* SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 */
+ return -EINVAL;
+
+ uli_scr_cfg_write(link, sc_reg, val);
+ return 0;
+}
+
+static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ const struct ata_port_info *ppi[] = { &uli_port_info, NULL };
+ unsigned int board_idx = (unsigned int) ent->driver_data;
+ struct ata_host *host;
+ struct uli_priv *hpriv;
+ void __iomem * const *iomap;
+ struct ata_ioports *ioaddr;
+ int n_ports, rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ n_ports = 2;
+ if (board_idx == uli_5287)
+ n_ports = 4;
+
+ /* allocate the host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ if (!host)
+ return -ENOMEM;
+
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+ host->private_data = hpriv;
+
+ /* the first two ports are standard SFF */
+ rc = ata_pci_sff_init_host(host);
+ if (rc)
+ return rc;
+
+ rc = ata_pci_bmdma_init(host);
+ if (rc)
+ return rc;
+
+ iomap = host->iomap;
+
+ switch (board_idx) {
+ case uli_5287:
+ /* If there are four, the last two live right after
+ * the standard SFF ports.
+ */
+ hpriv->scr_cfg_addr[0] = ULI5287_BASE;
+ hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
+
+ ioaddr = &host->ports[2]->ioaddr;
+ ioaddr->cmd_addr = iomap[0] + 8;
+ ioaddr->altstatus_addr =
+ ioaddr->ctl_addr = (void __iomem *)
+ ((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4;
+ ioaddr->bmdma_addr = iomap[4] + 16;
+ hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
+ ata_sff_std_ports(ioaddr);
+
+ ata_port_desc(host->ports[2],
+ "cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
+ (unsigned long long)pci_resource_start(pdev, 0) + 8,
+ ((unsigned long long)pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4,
+ (unsigned long long)pci_resource_start(pdev, 4) + 16);
+
+ ioaddr = &host->ports[3]->ioaddr;
+ ioaddr->cmd_addr = iomap[2] + 8;
+ ioaddr->altstatus_addr =
+ ioaddr->ctl_addr = (void __iomem *)
+ ((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4;
+ ioaddr->bmdma_addr = iomap[4] + 24;
+ hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
+ ata_sff_std_ports(ioaddr);
+
+		ata_port_desc(host->ports[3],
+ "cmd 0x%llx ctl 0x%llx bmdma 0x%llx",
+			(unsigned long long)pci_resource_start(pdev, 2) + 8,
+ ((unsigned long long)pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4,
+ (unsigned long long)pci_resource_start(pdev, 4) + 24);
+
+ break;
+
+ case uli_5289:
+ hpriv->scr_cfg_addr[0] = ULI5287_BASE;
+ hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
+ break;
+
+ case uli_5281:
+ hpriv->scr_cfg_addr[0] = ULI5281_BASE;
+ hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ pci_set_master(pdev);
+ pci_intx(pdev, 1);
+ return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ IRQF_SHARED, &uli_sht);
+}
+
+static int __init uli_init(void)
+{
+ return pci_register_driver(&uli_pci_driver);
+}
+
+static void __exit uli_exit(void)
+{
+ pci_unregister_driver(&uli_pci_driver);
+}
+
+
+module_init(uli_init);
+module_exit(uli_exit);
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
new file mode 100644
index 0000000..5c62da9
--- /dev/null
+++ b/drivers/ata/sata_via.c
@@ -0,0 +1,639 @@
+/*
+ * sata_via.c - VIA Serial ATA controllers
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
+ *
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_via"
+#define DRV_VERSION "2.4"
+
+/*
+ * The vt8251 differs from VIA's other SATA controllers: it has two
+ * channels, and each channel has both a Master and a Slave slot.
+ */
+enum board_ids_enum {
+ vt6420,
+ vt6421,
+ vt8251,
+};
+
+enum {
+ SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
+ SATA_INT_GATE = 0x41, /* SATA interrupt gating */
+ SATA_NATIVE_MODE = 0x42, /* Native mode enable */
+	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/cable detect */
+ PATA_PIO_TIMING = 0xAB, /* PATA timing register */
+
+ PORT0 = (1 << 1),
+ PORT1 = (1 << 0),
+ ALL_PORTS = PORT0 | PORT1,
+
+ NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
+
+ SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
+};
+
+static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
+static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
+static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
+static void svia_noop_freeze(struct ata_port *ap);
+static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
+static int vt6421_pata_cable_detect(struct ata_port *ap);
+static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
+static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
+
+static const struct pci_device_id svia_pci_tbl[] = {
+ { PCI_VDEVICE(VIA, 0x5337), vt6420 },
+ { PCI_VDEVICE(VIA, 0x0591), vt6420 }, /* 2 sata chnls (Master) */
+ { PCI_VDEVICE(VIA, 0x3149), vt6420 }, /* 2 sata chnls (Master) */
+ { PCI_VDEVICE(VIA, 0x3249), vt6421 }, /* 2 sata chnls, 1 pata chnl */
+ { PCI_VDEVICE(VIA, 0x5372), vt6420 },
+ { PCI_VDEVICE(VIA, 0x7372), vt6420 },
+ { PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */
+ { PCI_VDEVICE(VIA, 0x9000), vt8251 },
+ { PCI_VDEVICE(VIA, 0x9040), vt8251 },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver svia_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = svia_pci_tbl,
+ .probe = svia_init_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+ .remove = ata_pci_remove_one,
+};
+
+static struct scsi_host_template svia_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations svia_base_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_tf_load = svia_tf_load,
+};
+
+static struct ata_port_operations vt6420_sata_ops = {
+ .inherits = &svia_base_ops,
+ .freeze = svia_noop_freeze,
+ .prereset = vt6420_prereset,
+};
+
+static struct ata_port_operations vt6421_pata_ops = {
+ .inherits = &svia_base_ops,
+ .cable_detect = vt6421_pata_cable_detect,
+ .set_piomode = vt6421_set_pio_mode,
+ .set_dmamode = vt6421_set_dma_mode,
+};
+
+static struct ata_port_operations vt6421_sata_ops = {
+ .inherits = &svia_base_ops,
+ .scr_read = svia_scr_read,
+ .scr_write = svia_scr_write,
+};
+
+static struct ata_port_operations vt8251_ops = {
+ .inherits = &svia_base_ops,
+ .hardreset = sata_std_hardreset,
+ .scr_read = vt8251_scr_read,
+ .scr_write = vt8251_scr_write,
+};
+
+static const struct ata_port_info vt6420_port_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &vt6420_sata_ops,
+};
+
+static struct ata_port_info vt6421_sport_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &vt6421_sata_ops,
+};
+
+static struct ata_port_info vt6421_pport_info = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &vt6421_pata_ops,
+};
+
+static struct ata_port_info vt8251_port_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS |
+ ATA_FLAG_NO_LEGACY,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &vt8251_ops,
+};
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ *val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
+ return 0;
+}
+
+static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
+ return 0;
+}
+
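+/*
+ * The vt8251 exposes no memory-mapped SCRs: SStatus, SError and
+ * SControl are synthesized from per-slot PCI configuration registers
+ * (the 0xA0/0xA4 byte and 0xB0 dword families, one slot per device).
+ */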
+static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+{
+ static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ int slot = 2 * link->ap->port_no + link->pmp;
+ u32 v = 0;
+ u8 raw;
+
+ switch (scr) {
+ case SCR_STATUS:
+ pci_read_config_byte(pdev, 0xA0 + slot, &raw);
+
+ /* read the DET field, bit0 and 1 of the config byte */
+ v |= raw & 0x03;
+
+		/* read the SPD field, bit 4 of the config byte */
+ if (raw & (1 << 4))
+ v |= 0x02 << 4;
+ else
+ v |= 0x01 << 4;
+
+ /* read the IPM field, bit2 and 3 of the config byte */
+ v |= ipm_tbl[(raw >> 2) & 0x3];
+ break;
+
+ case SCR_ERROR:
+		/* devices other than the 5287 use 0xA8 as the base */
+ WARN_ON(pdev->device != 0x5287);
+ pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
+ break;
+
+ case SCR_CONTROL:
+ pci_read_config_byte(pdev, 0xA4 + slot, &raw);
+
+ /* read the DET field, bit0 and bit1 */
+ v |= ((raw & 0x02) << 1) | (raw & 0x01);
+
+ /* read the IPM field, bit2 and bit3 */
+ v |= ((raw >> 2) & 0x03) << 8;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *val = v;
+ return 0;
+}
+
+static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ int slot = 2 * link->ap->port_no + link->pmp;
+ u32 v = 0;
+
+ switch (scr) {
+ case SCR_ERROR:
+		/* devices other than the 5287 use 0xA8 as the base */
+ WARN_ON(pdev->device != 0x5287);
+ pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
+ return 0;
+
+ case SCR_CONTROL:
+ /* set the DET field */
+ v |= ((val & 0x4) >> 1) | (val & 0x1);
+
+ /* set the IPM field */
+ v |= ((val >> 8) & 0x3) << 2;
+
+ pci_write_config_byte(pdev, 0xA4 + slot, v);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * svia_tf_load - send taskfile registers to host controller
+ * @ap: Port to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Outputs ATA taskfile to standard ATA host controller.
+ *
+ * This is to fix the internal bug of via chipsets, which will
+ * reset the device register after changing the IEN bit on ctl
+ * register.
+ */
+static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ struct ata_taskfile ttf;
+
+ if (tf->ctl != ap->last_ctl) {
+ ttf = *tf;
+ ttf.flags |= ATA_TFLAG_DEVICE;
+ tf = &ttf;
+ }
+ ata_sff_tf_load(ap, tf);
+}
+
+static void svia_noop_freeze(struct ata_port *ap)
+{
+	/* Some VIA controllers choke if ATA_NIEN is manipulated in a
+	 * certain way.  Leave it alone and just clear the pending IRQ.
+ */
+ ap->ops->sff_check_status(ap);
+ ata_sff_irq_clear(ap);
+}
+
+/**
+ * vt6420_prereset - prereset for vt6420
+ * @link: target ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * SCR registers on vt6420 are pieces of shit and may hang the
+ * whole machine completely if accessed with the wrong timing.
+ * To avoid such a catastrophe, vt6420 doesn't provide generic SCR
+ * access operations; it uses SStatus and SControl only during boot
+ * probing, in a controlled way.
+ *
+ * As the old (pre-EH-update) probing code is proven to work, we
+ * strictly follow its access pattern.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct ata_eh_context *ehc = &ap->link.eh_context;
+ unsigned long timeout = jiffies + (HZ * 5);
+ u32 sstatus, scontrol;
+ int online;
+
+ /* don't do any SCR stuff if we're not loading */
+ if (!(ap->pflags & ATA_PFLAG_LOADING))
+ goto skip_scr;
+
+ /* Resume phy. This is the old SATA resume sequence */
+ svia_scr_write(link, SCR_CONTROL, 0x300);
+ svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */
+
+ /* wait for phy to become ready, if necessary */
+ do {
+ msleep(200);
+ svia_scr_read(link, SCR_STATUS, &sstatus);
+ if ((sstatus & 0xf) != 1)
+ break;
+ } while (time_before(jiffies, timeout));
+
+ /* open code sata_print_link_status() */
+ svia_scr_read(link, SCR_STATUS, &sstatus);
+ svia_scr_read(link, SCR_CONTROL, &scontrol);
+
+ online = (sstatus & 0xf) == 0x3;
+
+ ata_port_printk(ap, KERN_INFO,
+ "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
+ online ? "up" : "down", sstatus, scontrol);
+
+ /* SStatus is read one more time */
+ svia_scr_read(link, SCR_STATUS, &sstatus);
+
+ if (!online) {
+ /* tell EH to bail */
+ ehc->i.action &= ~ATA_EH_RESET;
+ return 0;
+ }
+
+ skip_scr:
+ /* wait for !BSY */
+ ata_sff_wait_ready(link, deadline);
+
+ return 0;
+}
+
+static int vt6421_pata_cable_detect(struct ata_port *ap)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u8 tmp;
+
+ pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
+ if (tmp & 0x10)
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
+
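+/*
+ * Raw timing bytes for the vt6421 PATA channel, indexed by PIO mode
+ * 0-4 and UDMA mode 0-6 (the final UDMA entry is repeated).
+ */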
+static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
+ pci_write_config_byte(pdev, PATA_PIO_TIMING, pio_bits[adev->pio_mode - XFER_PIO_0]);
+}
+
+static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
+ pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->dma_mode - XFER_UDMA_0]);
+}
+
+static const unsigned int svia_bar_sizes[] = {
+ 8, 4, 8, 4, 16, 256
+};
+
+static const unsigned int vt6421_bar_sizes[] = {
+ 16, 16, 16, 16, 32, 128
+};
+
+static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
+{
+ return addr + (port * 128);
+}
+
+static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
+{
+ return addr + (port * 64);
+}
+
+static void vt6421_init_addrs(struct ata_port *ap)
+{
+ void __iomem * const * iomap = ap->host->iomap;
+ void __iomem *reg_addr = iomap[ap->port_no];
+ void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ ioaddr->cmd_addr = reg_addr;
+ ioaddr->altstatus_addr =
+ ioaddr->ctl_addr = (void __iomem *)
+ ((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
+ ioaddr->bmdma_addr = bmdma_addr;
+ ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);
+
+ ata_sff_std_ports(ioaddr);
+
+ ata_port_pbar_desc(ap, ap->port_no, -1, "port");
+ ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
+}
+
+static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
+{
+ const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
+ struct ata_host *host;
+ int rc;
+
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ return rc;
+ *r_host = host;
+
+ rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
+ return rc;
+ }
+
+ host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
+ host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);
+
+ return 0;
+}
+
+static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
+{
+ const struct ata_port_info *ppi[] =
+ { &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
+ struct ata_host *host;
+ int i, rc;
+
+ *r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
+ if (!host) {
+ dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
+ return -ENOMEM;
+ }
+
+ rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
+ "PCI BARs (errno=%d)\n", rc);
+ return rc;
+ }
+ host->iomap = pcim_iomap_table(pdev);
+
+ for (i = 0; i < host->n_ports; i++)
+ vt6421_init_addrs(host->ports[i]);
+
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
+{
+ const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
+ struct ata_host *host;
+ int i, rc;
+
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ return rc;
+ *r_host = host;
+
+ rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
+ return rc;
+ }
+
+ /* 8251 hosts four sata ports as M/S of the two channels */
+ for (i = 0; i < host->n_ports; i++)
+ ata_slave_link_init(host->ports[i]);
+
+ return 0;
+}
+
+static void svia_configure(struct pci_dev *pdev)
+{
+ u8 tmp8;
+
+ pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
+ dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
+ (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
+
+ /* make sure SATA channels are enabled */
+ pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
+ if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "enabling SATA channels (0x%x)\n",
+ (int) tmp8);
+ tmp8 |= ALL_PORTS;
+ pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
+ }
+
+ /* make sure interrupts for each channel sent to us */
+ pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
+ if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "enabling SATA channel interrupts (0x%x)\n",
+ (int) tmp8);
+ tmp8 |= ALL_PORTS;
+ pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
+ }
+
+ /* make sure native mode is enabled */
+ pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
+ if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "enabling SATA channel native mode (0x%x)\n",
+ (int) tmp8);
+ tmp8 |= NATIVE_MODE_ALL;
+ pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
+ }
+}
+
+static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
+ unsigned int i;
+ int rc;
+ struct ata_host *host;
+ int board_id = (int) ent->driver_data;
+ const unsigned *bar_sizes;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ if (board_id == vt6421)
+ bar_sizes = &vt6421_bar_sizes[0];
+ else
+ bar_sizes = &svia_bar_sizes[0];
+
+ for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
+ if ((pci_resource_start(pdev, i) == 0) ||
+ (pci_resource_len(pdev, i) < bar_sizes[i])) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
+ i,
+ (unsigned long long)pci_resource_start(pdev, i),
+ (unsigned long long)pci_resource_len(pdev, i));
+ return -ENODEV;
+ }
+
+ switch (board_id) {
+ case vt6420:
+ rc = vt6420_prepare_host(pdev, &host);
+ break;
+ case vt6421:
+ rc = vt6421_prepare_host(pdev, &host);
+ break;
+ case vt8251:
+ rc = vt8251_prepare_host(pdev, &host);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ if (rc)
+ return rc;
+
+ svia_configure(pdev);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+ IRQF_SHARED, &svia_sht);
+}
+
+static int __init svia_init(void)
+{
+ return pci_register_driver(&svia_pci_driver);
+}
+
+static void __exit svia_exit(void)
+{
+ pci_unregister_driver(&svia_pci_driver);
+}
+
+module_init(svia_init);
+module_exit(svia_exit);
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
new file mode 100644
index 0000000..c57cdff
--- /dev/null
+++ b/drivers/ata/sata_vsc.c
@@ -0,0 +1,491 @@
+/*
+ * sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
+ *
+ * Maintained by: Jeremy Higdon @ SGI
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004 SGI
+ *
+ * Bits from Jeff Garzik, Copyright RedHat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Vitesse hardware documentation presumably available under NDA.
+ * Intel 31244 (same hardware interface) documentation presumably
+ * available from http://developer.intel.com/
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+
+#define DRV_NAME "sata_vsc"
+#define DRV_VERSION "2.3"
+
+enum {
+ VSC_MMIO_BAR = 0,
+
+ /* Interrupt register offsets (from chip base address) */
+ VSC_SATA_INT_STAT_OFFSET = 0x00,
+ VSC_SATA_INT_MASK_OFFSET = 0x04,
+
+ /* Taskfile register offsets */
+ VSC_SATA_TF_CMD_OFFSET = 0x00,
+ VSC_SATA_TF_DATA_OFFSET = 0x00,
+ VSC_SATA_TF_ERROR_OFFSET = 0x04,
+ VSC_SATA_TF_FEATURE_OFFSET = 0x06,
+ VSC_SATA_TF_NSECT_OFFSET = 0x08,
+ VSC_SATA_TF_LBAL_OFFSET = 0x0c,
+ VSC_SATA_TF_LBAM_OFFSET = 0x10,
+ VSC_SATA_TF_LBAH_OFFSET = 0x14,
+ VSC_SATA_TF_DEVICE_OFFSET = 0x18,
+ VSC_SATA_TF_STATUS_OFFSET = 0x1c,
+ VSC_SATA_TF_COMMAND_OFFSET = 0x1d,
+ VSC_SATA_TF_ALTSTATUS_OFFSET = 0x28,
+ VSC_SATA_TF_CTL_OFFSET = 0x29,
+
+ /* DMA base */
+ VSC_SATA_UP_DESCRIPTOR_OFFSET = 0x64,
+ VSC_SATA_UP_DATA_BUFFER_OFFSET = 0x6C,
+ VSC_SATA_DMA_CMD_OFFSET = 0x70,
+
+ /* SCRs base */
+ VSC_SATA_SCR_STATUS_OFFSET = 0x100,
+ VSC_SATA_SCR_ERROR_OFFSET = 0x104,
+ VSC_SATA_SCR_CONTROL_OFFSET = 0x108,
+
+ /* Port stride */
+ VSC_SATA_PORT_OFFSET = 0x200,
+
+ /* Error interrupt status bit masks */
+ VSC_SATA_INT_ERROR_CRC = 0x40,
+ VSC_SATA_INT_ERROR_T = 0x20,
+ VSC_SATA_INT_ERROR_P = 0x10,
+ VSC_SATA_INT_ERROR_R = 0x8,
+ VSC_SATA_INT_ERROR_E = 0x4,
+ VSC_SATA_INT_ERROR_M = 0x2,
+ VSC_SATA_INT_PHY_CHANGE = 0x1,
+ VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC | VSC_SATA_INT_ERROR_T |
+ VSC_SATA_INT_ERROR_P | VSC_SATA_INT_ERROR_R |
+ VSC_SATA_INT_ERROR_E | VSC_SATA_INT_ERROR_M |
+ VSC_SATA_INT_PHY_CHANGE),
+};
+
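+/*
+ * The SCRs (SStatus, SError, SControl) are memory-mapped with a
+ * 4-byte stride starting at scr_addr, so sc_reg indexes them directly.
+ */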
+static int vsc_sata_scr_read(struct ata_link *link,
+ unsigned int sc_reg, u32 *val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
+}
+
+
+static int vsc_sata_scr_write(struct ata_link *link,
+ unsigned int sc_reg, u32 val)
+{
+ if (sc_reg > SCR_CONTROL)
+ return -EINVAL;
+ writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
+}
+
+
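+/*
+ * Each port has an 8-bit interrupt mask byte at
+ * VSC_SATA_INT_MASK_OFFSET + port number.  Writing 0 masks all of
+ * the port's interrupt sources (freeze); 0xff unmasks them (thaw).
+ */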
+static void vsc_freeze(struct ata_port *ap)
+{
+ void __iomem *mask_addr;
+
+ mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+ VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+
+ writeb(0, mask_addr);
+}
+
+
+static void vsc_thaw(struct ata_port *ap)
+{
+ void __iomem *mask_addr;
+
+ mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+ VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+
+ writeb(0xff, mask_addr);
+}
+
+
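+/*
+ * Bit 7 of the per-port interrupt mask byte tracks the state of the
+ * ATA_NIEN bit of the taskfile control register.
+ */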
+static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
+{
+ void __iomem *mask_addr;
+ u8 mask;
+
+ mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
+ VSC_SATA_INT_MASK_OFFSET + ap->port_no;
+ mask = readb(mask_addr);
+ if (ctl & ATA_NIEN)
+ mask |= 0x80;
+ else
+ mask &= 0x7F;
+ writeb(mask, mask_addr);
+}
+
+
+static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ /*
+ * The only thing the ctl register is used for is SRST.
+ * That is not enabled or disabled via tf_load.
+ * However, if ATA_NIEN is changed, then we need to change
+ * the interrupt register.
+ */
+ if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
+ ap->last_ctl = tf->ctl;
+ vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
+ }
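+ /*
+ * Taskfile registers on this chip are 16 bits wide, so an
+ * LBA48 taskfile can be loaded with one writew() per register:
+ * current value in the low byte, HOB value in the high byte.
+ */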
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ writew(tf->feature | (((u16)tf->hob_feature) << 8),
+ ioaddr->feature_addr);
+ writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
+ ioaddr->nsect_addr);
+ writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
+ ioaddr->lbal_addr);
+ writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
+ ioaddr->lbam_addr);
+ writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
+ ioaddr->lbah_addr);
+ } else if (is_addr) {
+ writew(tf->feature, ioaddr->feature_addr);
+ writew(tf->nsect, ioaddr->nsect_addr);
+ writew(tf->lbal, ioaddr->lbal_addr);
+ writew(tf->lbam, ioaddr->lbam_addr);
+ writew(tf->lbah, ioaddr->lbah_addr);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE)
+ writeb(tf->device, ioaddr->device_addr);
+
+ ata_wait_idle(ap);
+}
+
+
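+/*
+ * Mirror image of vsc_sata_tf_load(): each 16-bit register read
+ * returns the current value in the low byte and the HOB value in
+ * the high byte.
+ */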
+static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ u16 nsect, lbal, lbam, lbah, feature;
+
+ tf->command = ata_sff_check_status(ap);
+ tf->device = readw(ioaddr->device_addr);
+ feature = readw(ioaddr->error_addr);
+ nsect = readw(ioaddr->nsect_addr);
+ lbal = readw(ioaddr->lbal_addr);
+ lbam = readw(ioaddr->lbam_addr);
+ lbah = readw(ioaddr->lbah_addr);
+
+ tf->feature = feature;
+ tf->nsect = nsect;
+ tf->lbal = lbal;
+ tf->lbam = lbam;
+ tf->lbah = lbah;
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ tf->hob_feature = feature >> 8;
+ tf->hob_nsect = nsect >> 8;
+ tf->hob_lbal = lbal >> 8;
+ tf->hob_lbam = lbam >> 8;
+ tf->hob_lbah = lbah >> 8;
+ }
+}
+
+static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
+{
+ if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M))
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+}
+
+static void vsc_port_intr(u8 port_status, struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ int handled = 0;
+
+ if (unlikely(port_status & VSC_SATA_INT_ERROR)) {
+ vsc_error_intr(port_status, ap);
+ return;
+ }
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
+ handled = ata_sff_host_intr(ap, qc);
+
+ /* We received an interrupt during a polled command,
+ * or some other spurious condition. Interrupt reporting
+ * with this hardware is fairly reliable, so it is safe to
+ * simply clear the interrupt.
+ */
+ if (unlikely(!handled))
+ ap->ops->sff_check_status(ap);
+}
+
+/*
+ * vsc_sata_interrupt
+ *
+ * Read the interrupt status register and service the ports that
+ * have interrupts pending.
+ */
+static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ unsigned int i;
+ unsigned int handled = 0;
+ u32 status;
+
+ status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET);
+
+ if (unlikely(status == 0xffffffff || status == 0)) {
+ if (status)
+ dev_printk(KERN_ERR, host->dev,
+ ": IRQ status == 0xffffffff, "
+ "PCI fault or device removal?\n");
+ goto out;
+ }
+
+ spin_lock(&host->lock);
+
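+ /* one 8-bit status byte per port, packed into the 32-bit register */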
+ for (i = 0; i < host->n_ports; i++) {
+ u8 port_status = (status >> (8 * i)) & 0xff;
+ if (port_status) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+ vsc_port_intr(port_status, ap);
+ handled++;
+ } else
+ dev_printk(KERN_ERR, host->dev,
+ "interrupt from disabled port %d\n", i);
+ }
+ }
+
+ spin_unlock(&host->lock);
+out:
+ return IRQ_RETVAL(handled);
+}
+
+
+static struct scsi_host_template vsc_sata_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+
+static struct ata_port_operations vsc_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .sff_tf_load = vsc_sata_tf_load,
+ .sff_tf_read = vsc_sata_tf_read,
+ .freeze = vsc_freeze,
+ .thaw = vsc_thaw,
+ .scr_read = vsc_sata_scr_read,
+ .scr_write = vsc_sata_scr_write,
+};
+
+static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
+ void __iomem *base)
+{
+ port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
+ port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
+ port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET;
+ port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET;
+ port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET;
+ port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET;
+ port->lbam_addr = base + VSC_SATA_TF_LBAM_OFFSET;
+ port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET;
+ port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET;
+ port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET;
+ port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET;
+ port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET;
+ port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
+ port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
+ port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
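+ /* presumably the upper 32 bits of the DMA descriptor and data
+ * buffer addresses; the driver uses a 32-bit DMA mask, so clear
+ * them.
+ */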
+ writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
+ writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
+}
+
+
+static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static const struct ata_port_info pi = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO,
+ .pio_mask = 0x1f,
+ .mwdma_mask = 0x07,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &vsc_sata_ops,
+ };
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ static int printed_version;
+ struct ata_host *host;
+ void __iomem *mmio_base;
+ int i, rc;
+ u8 cls;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /* allocate host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
+ if (!host)
+ return -ENOMEM;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* check if we have needed resource mapped */
+ if (pci_resource_len(pdev, 0) == 0)
+ return -ENODEV;
+
+ /* map IO regions and initialize host accordingly */
+ rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+
+ mmio_base = host->iomap[VSC_MMIO_BAR];
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET;
+
+ vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset);
+
+ ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port");
+ }
+
+ /*
+ * Use a 32-bit DMA mask, because 64-bit address support is poor.
+ */
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc)
+ return rc;
+
+ /*
+ * Due to a bug in the chip, a cache line size of zero can't be
+ * used, so program 0x80 if the register was left at zero.
+ */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
+ if (cls == 0x00)
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
+
+ if (pci_enable_msi(pdev) == 0)
+ pci_intx(pdev, 0);
+
+ /*
+ * Config offset 0x98 is "Extended Control and Status Register 0"
+ * Default value is (1 << 28). All bits except bit 28 are reserved in
+ * DPA mode. If bit 28 is set, LED 0 reflects all ports' activity.
+ * If bit 28 is clear, each port has its own LED.
+ */
+ pci_write_config_dword(pdev, 0x98, 0);
+
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, vsc_sata_interrupt,
+ IRQF_SHARED, &vsc_sata_sht);
+}
+
+static const struct pci_device_id vsc_sata_pci_tbl[] = {
+ { PCI_VENDOR_ID_VITESSE, 0x7174,
+ PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
+ { PCI_VENDOR_ID_INTEL, 0x3200,
+ PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
+
+ { } /* terminate list */
+};
+
+static struct pci_driver vsc_sata_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = vsc_sata_pci_tbl,
+ .probe = vsc_sata_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+static int __init vsc_sata_init(void)
+{
+ return pci_register_driver(&vsc_sata_pci_driver);
+}
+
+static void __exit vsc_sata_exit(void)
+{
+ pci_unregister_driver(&vsc_sata_pci_driver);
+}
+
+MODULE_AUTHOR("Jeremy Higdon");
+MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(vsc_sata_init);
+module_exit(vsc_sata_exit);
diff --git a/drivers/ata/sis.h b/drivers/ata/sis.h
new file mode 100644
index 0000000..f7f3eeb
--- /dev/null
+++ b/drivers/ata/sis.h
@@ -0,0 +1,5 @@
+
+struct ata_port_info;
+
+/* pata_sis.c */
+extern const struct ata_port_info sis_info133_for_sata;