Diffstat (limited to 'drivers/ata')
79 files changed, 4934 insertions, 4406 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 01c52c415bd..aa85a98d3a4 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -57,6 +57,8 @@ config SATA_PMP This option adds support for SATA Port Multipliers (the SATA version of an ethernet hub, or SAS expander). +comment "Controllers with non-SFF native interface" + config SATA_AHCI tristate "AHCI SATA support" depends on PCI @@ -65,11 +67,11 @@ config SATA_AHCI If unsure, say N. -config SATA_SIL24 - tristate "Silicon Image 3124/3132 SATA support" - depends on PCI +config SATA_AHCI_PLATFORM + tristate "Platform AHCI SATA support" help - This option enables support for Silicon Image 3124/3132 Serial ATA. + This option enables support for Platform AHCI Serial ATA + controllers. If unsure, say N. @@ -82,6 +84,20 @@ config SATA_FSL If unsure, say N. +config SATA_INIC162X + tristate "Initio 162x SATA support" + depends on PCI + help + This option enables support for Initio 162x Serial ATA. + +config SATA_SIL24 + tristate "Silicon Image 3124/3132 SATA support" + depends on PCI + help + This option enables support for Silicon Image 3124/3132 Serial ATA. + + If unsure, say N. + config ATA_SFF bool "ATA SFF support" default y @@ -102,15 +118,65 @@ config ATA_SFF if ATA_SFF -config SATA_SVW - tristate "ServerWorks Frodo / Apple K2 SATA support" +comment "SFF controllers with custom DMA interface" + +config PDC_ADMA + tristate "Pacific Digital ADMA support" depends on PCI help - This option enables support for Broadcom/Serverworks/Apple K2 - SATA support. + This option enables support for Pacific Digital ADMA controllers + + If unsure, say N. + +config PATA_MPC52xx + tristate "Freescale MPC52xx SoC internal IDE" + depends on PPC_MPC52xx && PPC_BESTCOMM + select PPC_BESTCOMM_ATA + help + This option enables support for integrated IDE controller + of the Freescale MPC52xx SoC. + + If unsure, say N. + +config PATA_OCTEON_CF + tristate "OCTEON Boot Bus Compact Flash support" + depends on CPU_CAVIUM_OCTEON + help + This option enables a polled compact flash driver for use with + compact flash cards attached to the OCTEON boot bus. + + If unsure, say N. + +config SATA_QSTOR + tristate "Pacific Digital SATA QStor support" + depends on PCI + help + This option enables support for Pacific Digital Serial ATA QStor. + + If unsure, say N. + +config SATA_SX4 + tristate "Promise SATA SX4 support (Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables support for Promise Serial ATA SX4. If unsure, say N. +config ATA_BMDMA + bool "ATA BMDMA support" + default y + help + This option adds support for SFF ATA controllers with BMDMA + capability. BMDMA stands for bus-master DMA and is the + de facto DMA interface for SFF controllers. + + If unsure, say Y. + +if ATA_BMDMA + +comment "SATA SFF controllers with BMDMA" + config ATA_PIIX tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support" depends on PCI @@ -138,22 +204,6 @@ config SATA_NV If unsure, say N. -config PDC_ADMA - tristate "Pacific Digital ADMA support" - depends on PCI - help - This option enables support for Pacific Digital ADMA controllers - - If unsure, say N. - -config SATA_QSTOR - tristate "Pacific Digital SATA QStor support" - depends on PCI - help - This option enables support for Pacific Digital Serial ATA QStor. - - If unsure, say N. - config SATA_PROMISE tristate "Promise SATA TX2/TX4 support" depends on PCI @@ -162,14 +212,6 @@ config SATA_PROMISE If unsure, say N. 
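The new ATA_BMDMA option above splits the SFF drivers into a bus-master-DMA group (everything under "if ATA_BMDMA") and a PIO-only group added further down. At the driver level the distinction is simply which libata operations a driver inherits; a minimal sketch, with invented example_* names rather than code from this patch:

#include <linux/libata.h>

/* driver grouped under ATA_BMDMA: PIO plus bus-master DMA helpers */
static struct ata_port_operations example_bmdma_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_40wire,
};

/* driver from the PIO-only group: plain SFF (taskfile/PIO) helpers only */
static struct ata_port_operations example_pio_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,
};

Keeping the PIO-only drivers outside "if ATA_BMDMA" lets a kernel be built without the BMDMA code entirely while those drivers stay selectable.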
-config SATA_SX4 - tristate "Promise SATA SX4 support (Experimental)" - depends on PCI && EXPERIMENTAL - help - This option enables support for Promise Serial ATA SX4. - - If unsure, say N. - config SATA_SIL tristate "Silicon Image SATA support" depends on PCI @@ -189,6 +231,15 @@ config SATA_SIS enable the PATA_SIS driver in the config. If unsure, say N. +config SATA_SVW + tristate "ServerWorks Frodo / Apple K2 SATA support" + depends on PCI + help + This option enables support for Broadcom/Serverworks/Apple K2 + SATA support. + + If unsure, say N. + config SATA_ULI tristate "ULi Electronics SATA support" depends on PCI @@ -213,20 +264,7 @@ config SATA_VITESSE If unsure, say N. -config SATA_INIC162X - tristate "Initio 162x SATA support" - depends on PCI - help - This option enables support for Initio 162x Serial ATA. - -config PATA_ACPI - tristate "ACPI firmware driver for PATA" - depends on ATA_ACPI - help - This option enables an ACPI method driver which drives - motherboard PATA controller interfaces through the ACPI - firmware in the BIOS. This driver can sometimes handle - otherwise unsupported hardware. +comment "PATA SFF controllers with BMDMA" config PATA_ALI tristate "ALi PATA support" @@ -254,40 +292,30 @@ config PATA_ARTOP If unsure, say N. -config PATA_ATP867X - tristate "ARTOP/Acard ATP867X PATA support" +config PATA_ATIIXP + tristate "ATI PATA support" depends on PCI help - This option enables support for ARTOP/Acard ATP867X PATA - controllers. - - If unsure, say N. - -config PATA_AT32 - tristate "Atmel AVR32 PATA support (Experimental)" - depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL - help - This option enables support for the IDE devices on the - Atmel AT32AP platform. + This option enables support for the ATI ATA interfaces + found on the many ATI chipsets. If unsure, say N. -config PATA_ATIIXP - tristate "ATI PATA support" +config PATA_ATP867X + tristate "ARTOP/Acard ATP867X PATA support" depends on PCI help - This option enables support for the ATI ATA interfaces - found on the many ATI chipsets. + This option enables support for ARTOP/Acard ATP867X PATA + controllers. If unsure, say N. -config PATA_CMD640_PCI - tristate "CMD640 PCI PATA support (Experimental)" - depends on PCI && EXPERIMENTAL +config PATA_BF54X + tristate "Blackfin 54x ATAPI support" + depends on BF542 || BF548 || BF549 help - This option enables support for the CMD640 PCI IDE - interface chip. Only the primary channel is currently - supported. + This option enables support for the built-in ATAPI controller on + Blackfin 54x family chips. If unsure, say N. @@ -354,15 +382,6 @@ config PATA_EFAR If unsure, say N. -config ATA_GENERIC - tristate "Generic ATA support" - depends on PCI - help - This option enables support for generic BIOS configured - ATA controllers via the new ATA layer - - If unsure, say N. - config PATA_HPT366 tristate "HPT 366/368 PATA support" depends on PCI @@ -407,12 +426,20 @@ config PATA_HPT3X3_DMA controllers. Enable with care as there are still some problems with DMA on this chipset. -config PATA_ISAPNP - tristate "ISA Plug and Play PATA support" - depends on ISAPNP +config PATA_ICSIDE + tristate "Acorn ICS PATA support" + depends on ARM && ARCH_ACORN help - This option enables support for ISA plug & play ATA - controllers such as those found on old soundcards. + On Acorn systems, say Y here if you wish to use the ICS PATA + interface card. This is not required for ICS partition support. + If you are unsure, say N to this. 
+ +config PATA_IT8213 + tristate "IT8213 PATA support (Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables support for the ITE 821 PATA + controllers via the new ATA layer. If unsure, say N. @@ -426,15 +453,6 @@ config PATA_IT821X If unsure, say N. -config PATA_IT8213 - tristate "IT8213 PATA support (Experimental)" - depends on PCI && EXPERIMENTAL - help - This option enables support for the ITE 821 PATA - controllers via the new ATA layer. - - If unsure, say N. - config PATA_JMICRON tristate "JMicron PATA support" depends on PCI @@ -444,23 +462,14 @@ config PATA_JMICRON If unsure, say N. -config PATA_LEGACY - tristate "Legacy ISA PATA support (Experimental)" - depends on (ISA || PCI) && EXPERIMENTAL - help - This option enables support for ISA/VLB/PCI bus legacy PATA - ports and allows them to be accessed via the new ATA layer. - - If unsure, say N. - -config PATA_TRIFLEX - tristate "Compaq Triflex PATA support" - depends on PCI +config PATA_MACIO + tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" + depends on PPC_PMAC help - Enable support for the Compaq 'Triflex' IDE controller as found - on many Compaq Pentium-Pro systems, via the new ATA layer. - - If unsure, say N. + Most IDE capable PowerMacs have IDE busses driven by a variant + of this controller which is part of the Apple chipset used on + most PowerMac models. Some models have multiple busses using + different chipsets, though generally, MacIO is one of them. config PATA_MARVELL tristate "Marvell PATA support via legacy mode" @@ -473,32 +482,6 @@ config PATA_MARVELL If unsure, say N. -config PATA_MPC52xx - tristate "Freescale MPC52xx SoC internal IDE" - depends on PPC_MPC52xx && PPC_BESTCOMM - select PPC_BESTCOMM_ATA - help - This option enables support for integrated IDE controller - of the Freescale MPC52xx SoC. - - If unsure, say N. - -config PATA_MPIIX - tristate "Intel PATA MPIIX support" - depends on PCI - help - This option enables support for MPIIX PATA support. - - If unsure, say N. - -config PATA_OLDPIIX - tristate "Intel PATA old PIIX support" - depends on PCI - help - This option enables support for early PIIX PATA support. - - If unsure, say N. - config PATA_NETCELL tristate "NETCELL Revolution RAID support" depends on PCI @@ -517,15 +500,6 @@ config PATA_NINJA32 If unsure, say N. -config PATA_NS87410 - tristate "Nat Semi NS87410 PATA support" - depends on PCI - help - This option enables support for the National Semiconductor - NS87410 PCI-IDE controller. - - If unsure, say N. - config PATA_NS87415 tristate "Nat Semi NS87415 PATA support" depends on PCI @@ -535,12 +509,11 @@ config PATA_NS87415 If unsure, say N. -config PATA_OPTI - tristate "OPTI621/6215 PATA support (Very Experimental)" - depends on PCI && EXPERIMENTAL +config PATA_OLDPIIX + tristate "Intel PATA old PIIX support" + depends on PCI help - This option enables full PIO support for the early Opti ATA - controllers found on some old motherboards. + This option enables support for early PIIX PATA support. If unsure, say N. @@ -554,24 +527,6 @@ config PATA_OPTIDMA If unsure, say N. -config PATA_PALMLD - tristate "Palm LifeDrive PATA support" - depends on MACH_PALMLD - help - This option enables support for Palm LifeDrive's internal ATA - port via the new ATA layer. - - If unsure, say N. - -config PATA_PCMCIA - tristate "PCMCIA PATA support" - depends on PCMCIA - help - This option enables support for PCMCIA ATA interfaces, including - compact flash card adapters via the new ATA layer. - - If unsure, say N. 
- config PATA_PDC2027X tristate "Promise PATA 2027x support" depends on PCI @@ -589,12 +544,6 @@ config PATA_PDC_OLD If unsure, say N. -config PATA_QDI - tristate "QDI VLB PATA support" - depends on ISA - help - Support for QDI 6500 and 6580 PATA controllers on VESA local bus. - config PATA_RADISYS tristate "RADISYS 82600 PATA support (Experimental)" depends on PCI && EXPERIMENTAL @@ -604,15 +553,6 @@ config PATA_RADISYS If unsure, say N. -config PATA_RB532 - tristate "RouterBoard 532 PATA CompactFlash support" - depends on MIKROTIK_RB532 - help - This option enables support for the RouterBoard 532 - PATA CompactFlash controller. - - If unsure, say N. - config PATA_RDC tristate "RDC PATA support" depends on PCI @@ -623,21 +563,30 @@ config PATA_RDC If unsure, say N. -config PATA_RZ1000 - tristate "PC Tech RZ1000 PATA support" +config PATA_SC1200 + tristate "SC1200 PATA support" depends on PCI help - This option enables basic support for the PC Tech RZ1000/1 - PATA controllers via the new ATA layer + This option enables support for the NatSemi/AMD SC1200 SoC + companion chip used with the Geode processor family. If unsure, say N. -config PATA_SC1200 - tristate "SC1200 PATA support" +config PATA_SCC + tristate "Toshiba's Cell Reference Set IDE support" + depends on PCI && PPC_CELLEB + help + This option enables support for the built-in IDE controller on + Toshiba Cell Reference Board. + + If unsure, say N. + +config PATA_SCH + tristate "Intel SCH PATA support" depends on PCI help - This option enables support for the NatSemi/AMD SC1200 SoC - companion chip used with the Geode processor family. + This option enables support for Intel SCH PATA on the Intel + SCH (US15W, US15L, UL11L) series host controllers. If unsure, say N. @@ -675,6 +624,15 @@ config PATA_TOSHIBA If unsure, say N. +config PATA_TRIFLEX + tristate "Compaq Triflex PATA support" + depends on PCI + help + Enable support for the Compaq 'Triflex' IDE controller as found + on many Compaq Pentium-Pro systems, via the new ATA layer. + + If unsure, say N. + config PATA_VIA tristate "VIA PATA support" depends on PCI @@ -693,12 +651,99 @@ config PATA_WINBOND If unsure, say N. -config PATA_WINBOND_VLB - tristate "Winbond W83759A VLB PATA support (Experimental)" - depends on ISA && EXPERIMENTAL +endif # ATA_BMDMA + +comment "PIO-only SFF controllers" + +config PATA_AT32 + tristate "Atmel AVR32 PATA support (Experimental)" + depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL help - Support for the Winbond W83759A controller on Vesa Local Bus - systems. + This option enables support for the IDE devices on the + Atmel AT32AP platform. + + If unsure, say N. + +config PATA_AT91 + tristate "PATA support for AT91SAM9260" + depends on ARM && ARCH_AT91 + help + This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. + + If unsure, say N. + +config PATA_CMD640_PCI + tristate "CMD640 PCI PATA support (Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables support for the CMD640 PCI IDE + interface chip. Only the primary channel is currently + supported. + + If unsure, say N. + +config PATA_ISAPNP + tristate "ISA Plug and Play PATA support" + depends on ISAPNP + help + This option enables support for ISA plug & play ATA + controllers such as those found on old soundcards. + + If unsure, say N. + +config PATA_IXP4XX_CF + tristate "IXP4XX Compact Flash support" + depends on ARCH_IXP4XX + help + This option enables support for a Compact Flash connected on + the ixp4xx expansion bus. 
This driver had been written for + Loft/Avila boards in mind but can work with others. + + If unsure, say N. + +config PATA_MPIIX + tristate "Intel PATA MPIIX support" + depends on PCI + help + This option enables support for MPIIX PATA support. + + If unsure, say N. + +config PATA_NS87410 + tristate "Nat Semi NS87410 PATA support" + depends on PCI + help + This option enables support for the National Semiconductor + NS87410 PCI-IDE controller. + + If unsure, say N. + +config PATA_OPTI + tristate "OPTI621/6215 PATA support (Very Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables full PIO support for the early Opti ATA + controllers found on some old motherboards. + + If unsure, say N. + +config PATA_PALMLD + tristate "Palm LifeDrive PATA support" + depends on MACH_PALMLD + help + This option enables support for Palm LifeDrive's internal ATA + port via the new ATA layer. + + If unsure, say N. + +config PATA_PCMCIA + tristate "PCMCIA PATA support" + depends on PCMCIA + help + This option enables support for PCMCIA ATA interfaces, including + compact flash card adapters via the new ATA layer. + + If unsure, say N. config HAVE_PATA_PLATFORM bool @@ -717,14 +762,6 @@ config PATA_PLATFORM If unsure, say N. -config PATA_AT91 - tristate "PATA support for AT91SAM9260" - depends on ARM && ARCH_AT91 - help - This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. - - If unsure, say N. - config PATA_OF_PLATFORM tristate "OpenFirmware platform device PATA support" depends on PATA_PLATFORM && PPC_OF @@ -735,69 +772,65 @@ config PATA_OF_PLATFORM If unsure, say N. -config PATA_ICSIDE - tristate "Acorn ICS PATA support" - depends on ARM && ARCH_ACORN +config PATA_QDI + tristate "QDI VLB PATA support" + depends on ISA help - On Acorn systems, say Y here if you wish to use the ICS PATA - interface card. This is not required for ICS partition support. - If you are unsure, say N to this. + Support for QDI 6500 and 6580 PATA controllers on VESA local bus. -config PATA_IXP4XX_CF - tristate "IXP4XX Compact Flash support" - depends on ARCH_IXP4XX +config PATA_RB532 + tristate "RouterBoard 532 PATA CompactFlash support" + depends on MIKROTIK_RB532 help - This option enables support for a Compact Flash connected on - the ixp4xx expansion bus. This driver had been written for - Loft/Avila boards in mind but can work with others. + This option enables support for the RouterBoard 532 + PATA CompactFlash controller. If unsure, say N. -config PATA_OCTEON_CF - tristate "OCTEON Boot Bus Compact Flash support" - depends on CPU_CAVIUM_OCTEON +config PATA_RZ1000 + tristate "PC Tech RZ1000 PATA support" + depends on PCI help - This option enables a polled compact flash driver for use with - compact flash cards attached to the OCTEON boot bus. + This option enables basic support for the PC Tech RZ1000/1 + PATA controllers via the new ATA layer If unsure, say N. -config PATA_SCC - tristate "Toshiba's Cell Reference Set IDE support" - depends on PCI && PPC_CELLEB +config PATA_WINBOND_VLB + tristate "Winbond W83759A VLB PATA support (Experimental)" + depends on ISA && EXPERIMENTAL help - This option enables support for the built-in IDE controller on - Toshiba Cell Reference Board. + Support for the Winbond W83759A controller on Vesa Local Bus + systems. - If unsure, say N. 
+comment "Generic fallback / legacy drivers" -config PATA_SCH - tristate "Intel SCH PATA support" - depends on PCI +config PATA_ACPI + tristate "ACPI firmware driver for PATA" + depends on ATA_ACPI && ATA_BMDMA help - This option enables support for Intel SCH PATA on the Intel - SCH (US15W, US15L, UL11L) series host controllers. - - If unsure, say N. + This option enables an ACPI method driver which drives + motherboard PATA controller interfaces through the ACPI + firmware in the BIOS. This driver can sometimes handle + otherwise unsupported hardware. -config PATA_BF54X - tristate "Blackfin 54x ATAPI support" - depends on BF542 || BF548 || BF549 +config ATA_GENERIC + tristate "Generic ATA support" + depends on PCI && ATA_BMDMA help - This option enables support for the built-in ATAPI controller on - Blackfin 54x family chips. + This option enables support for generic BIOS configured + ATA controllers via the new ATA layer If unsure, say N. -config PATA_MACIO - tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" - depends on PPC_PMAC +config PATA_LEGACY + tristate "Legacy ISA PATA support (Experimental)" + depends on (ISA || PCI) && EXPERIMENTAL help - Most IDE capable PowerMacs have IDE busses driven by a variant - of this controller which is part of the Apple chipset used on - most PowerMac models. Some models have multiple busses using - different chipsets, though generally, MacIO is one of them. + This option enables support for ISA/VLB/PCI bus legacy PATA + ports and allows them to be accessed via the new ATA layer. + If unsure, say N. endif # ATA_SFF endif # ATA diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index fc936d4471d..7ef89d73df6 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -1,32 +1,39 @@ obj-$(CONFIG_ATA) += libata.o -obj-$(CONFIG_SATA_AHCI) += ahci.o -obj-$(CONFIG_SATA_SVW) += sata_svw.o +# non-SFF interface +obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o +obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o +obj-$(CONFIG_SATA_FSL) += sata_fsl.o +obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o +obj-$(CONFIG_SATA_SIL24) += sata_sil24.o + +# SFF w/ custom DMA +obj-$(CONFIG_PDC_ADMA) += pdc_adma.o +obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o +obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o +obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o +obj-$(CONFIG_SATA_SX4) += sata_sx4.o + +# SFF SATA w/ BMDMA obj-$(CONFIG_ATA_PIIX) += ata_piix.o +obj-$(CONFIG_SATA_MV) += sata_mv.o +obj-$(CONFIG_SATA_NV) += sata_nv.o obj-$(CONFIG_SATA_PROMISE) += sata_promise.o -obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o obj-$(CONFIG_SATA_SIL) += sata_sil.o -obj-$(CONFIG_SATA_SIL24) += sata_sil24.o -obj-$(CONFIG_SATA_VIA) += sata_via.o -obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o obj-$(CONFIG_SATA_SIS) += sata_sis.o -obj-$(CONFIG_SATA_SX4) += sata_sx4.o -obj-$(CONFIG_SATA_NV) += sata_nv.o +obj-$(CONFIG_SATA_SVW) += sata_svw.o obj-$(CONFIG_SATA_ULI) += sata_uli.o -obj-$(CONFIG_SATA_MV) += sata_mv.o -obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o -obj-$(CONFIG_PDC_ADMA) += pdc_adma.o -obj-$(CONFIG_SATA_FSL) += sata_fsl.o -obj-$(CONFIG_PATA_MACIO) += pata_macio.o +obj-$(CONFIG_SATA_VIA) += sata_via.o +obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o +# SFF PATA w/ BMDMA obj-$(CONFIG_PATA_ALI) += pata_ali.o obj-$(CONFIG_PATA_AMD) += pata_amd.o obj-$(CONFIG_PATA_ARTOP) += pata_artop.o -obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o -obj-$(CONFIG_PATA_AT32) += pata_at32.o obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o -obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o +obj-$(CONFIG_PATA_ATP867X) += 
pata_atp867x.o +obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o @@ -38,47 +45,50 @@ obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o -obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o -obj-$(CONFIG_PATA_IT821X) += pata_it821x.o +obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o obj-$(CONFIG_PATA_IT8213) += pata_it8213.o +obj-$(CONFIG_PATA_IT821X) += pata_it821x.o obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o +obj-$(CONFIG_PATA_MACIO) += pata_macio.o +obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o -obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o -obj-$(CONFIG_PATA_OPTI) += pata_opti.o -obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o -obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o -obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o -obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o -obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o -obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o +obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o -obj-$(CONFIG_PATA_QDI) += pata_qdi.o obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o -obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o obj-$(CONFIG_PATA_RDC) += pata_rdc.o -obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o +obj-$(CONFIG_PATA_SCC) += pata_scc.o +obj-$(CONFIG_PATA_SCH) += pata_sch.o obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o obj-$(CONFIG_PATA_SIL680) += pata_sil680.o +obj-$(CONFIG_PATA_SIS) += pata_sis.o obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o +obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o obj-$(CONFIG_PATA_VIA) += pata_via.o obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o -obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o -obj-$(CONFIG_PATA_SIS) += pata_sis.o -obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o + +# SFF PIO only +obj-$(CONFIG_PATA_AT32) += pata_at32.o +obj-$(CONFIG_PATA_AT91) += pata_at91.o +obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o +obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o -obj-$(CONFIG_PATA_SCC) += pata_scc.o -obj-$(CONFIG_PATA_SCH) += pata_sch.o -obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o -obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o +obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o +obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o +obj-$(CONFIG_PATA_OPTI) += pata_opti.o +obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o +obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o -obj-$(CONFIG_PATA_AT91) += pata_at91.o obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o -obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o +obj-$(CONFIG_PATA_QDI) += pata_qdi.o +obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o +obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o +obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o + # Should be last but two libata driver obj-$(CONFIG_PATA_ACPI) += pata_acpi.o # Should be last but one libata driver diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 5326af28a41..f2522534ae6 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -46,403 +46,48 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <linux/libata.h> +#include "ahci.h" #define 
DRV_NAME "ahci" #define DRV_VERSION "3.0" -/* Enclosure Management Control */ -#define EM_CTRL_MSG_TYPE 0x000f0000 - -/* Enclosure Management LED Message Type */ -#define EM_MSG_LED_HBA_PORT 0x0000000f -#define EM_MSG_LED_PMP_SLOT 0x0000ff00 -#define EM_MSG_LED_VALUE 0xffff0000 -#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000 -#define EM_MSG_LED_VALUE_OFF 0xfff80000 -#define EM_MSG_LED_VALUE_ON 0x00010000 - -static int ahci_skip_host_reset; -static int ahci_ignore_sss; - -module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); -MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); - -module_param_named(ignore_sss, ahci_ignore_sss, int, 0444); -MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)"); - -static int ahci_enable_alpm(struct ata_port *ap, - enum link_pm policy); -static void ahci_disable_alpm(struct ata_port *ap); -static ssize_t ahci_led_show(struct ata_port *ap, char *buf); -static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, - size_t size); -static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, - ssize_t size); - enum { AHCI_PCI_BAR = 5, - AHCI_MAX_PORTS = 32, - AHCI_MAX_SG = 168, /* hardware max is 64K */ - AHCI_DMA_BOUNDARY = 0xffffffff, - AHCI_MAX_CMDS = 32, - AHCI_CMD_SZ = 32, - AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ, - AHCI_RX_FIS_SZ = 256, - AHCI_CMD_TBL_CDB = 0x40, - AHCI_CMD_TBL_HDR_SZ = 0x80, - AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16), - AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS, - AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + - AHCI_RX_FIS_SZ, - AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ + - AHCI_CMD_TBL_AR_SZ + - (AHCI_RX_FIS_SZ * 16), - AHCI_IRQ_ON_SG = (1 << 31), - AHCI_CMD_ATAPI = (1 << 5), - AHCI_CMD_WRITE = (1 << 6), - AHCI_CMD_PREFETCH = (1 << 7), - AHCI_CMD_RESET = (1 << 8), - AHCI_CMD_CLR_BUSY = (1 << 10), - - RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ - RX_FIS_SDB = 0x58, /* offset of SDB FIS data */ - RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ - - board_ahci = 0, - board_ahci_vt8251 = 1, - board_ahci_ign_iferr = 2, - board_ahci_sb600 = 3, - board_ahci_mv = 4, - board_ahci_sb700 = 5, /* for SB700 and SB800 */ - board_ahci_mcp65 = 6, - board_ahci_nopmp = 7, - board_ahci_yesncq = 8, - board_ahci_nosntf = 9, - - /* global controller registers */ - HOST_CAP = 0x00, /* host capabilities */ - HOST_CTL = 0x04, /* global host control */ - HOST_IRQ_STAT = 0x08, /* interrupt status */ - HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */ - HOST_VERSION = 0x10, /* AHCI spec. 
version compliancy */ - HOST_EM_LOC = 0x1c, /* Enclosure Management location */ - HOST_EM_CTL = 0x20, /* Enclosure Management Control */ - HOST_CAP2 = 0x24, /* host capabilities, extended */ - - /* HOST_CTL bits */ - HOST_RESET = (1 << 0), /* reset controller; self-clear */ - HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ - HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ - - /* HOST_CAP bits */ - HOST_CAP_SXS = (1 << 5), /* Supports External SATA */ - HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ - HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */ - HOST_CAP_PART = (1 << 13), /* Partial state capable */ - HOST_CAP_SSC = (1 << 14), /* Slumber state capable */ - HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */ - HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */ - HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ - HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */ - HOST_CAP_CLO = (1 << 24), /* Command List Override support */ - HOST_CAP_LED = (1 << 25), /* Supports activity LED */ - HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ - HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ - HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */ - HOST_CAP_SNTF = (1 << 29), /* SNotification register */ - HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ - HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ - - /* HOST_CAP2 bits */ - HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */ - HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */ - HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */ - - /* registers for each SATA port */ - PORT_LST_ADDR = 0x00, /* command list DMA addr */ - PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */ - PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */ - PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */ - PORT_IRQ_STAT = 0x10, /* interrupt status */ - PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */ - PORT_CMD = 0x18, /* port command */ - PORT_TFDATA = 0x20, /* taskfile data */ - PORT_SIG = 0x24, /* device TF signature */ - PORT_CMD_ISSUE = 0x38, /* command issue */ - PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */ - PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */ - PORT_SCR_ERR = 0x30, /* SATA phy register: SError */ - PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */ - PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */ - PORT_FBS = 0x40, /* FIS-based Switching */ - - /* PORT_IRQ_{STAT,MASK} bits */ - PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ - PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ - PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ - PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ - PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ - PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ - PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ - PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ - - PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ - PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */ - PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ - PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ - PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ - PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */ - PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ - PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ - PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ - - PORT_IRQ_FREEZE = 
PORT_IRQ_HBUS_ERR | - PORT_IRQ_IF_ERR | - PORT_IRQ_CONNECT | - PORT_IRQ_PHYRDY | - PORT_IRQ_UNK_FIS | - PORT_IRQ_BAD_PMP, - PORT_IRQ_ERROR = PORT_IRQ_FREEZE | - PORT_IRQ_TF_ERR | - PORT_IRQ_HBUS_DATA_ERR, - DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | - PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | - PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, - - /* PORT_CMD bits */ - PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ - PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ - PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ - PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */ - PORT_CMD_PMP = (1 << 17), /* PMP attached */ - PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ - PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ - PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ - PORT_CMD_CLO = (1 << 3), /* Command list override */ - PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ - PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ - PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ - - PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ - PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ - PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ - PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ - - PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */ - PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */ - PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */ - PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */ - PORT_FBS_SDE = (1 << 2), /* FBS single device error */ - PORT_FBS_DEC = (1 << 1), /* FBS device error clear */ - PORT_FBS_EN = (1 << 0), /* Enable FBS */ - - /* hpriv->flags bits */ - AHCI_HFLAG_NO_NCQ = (1 << 0), - AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ - AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ - AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ - AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ - AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ - AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ - AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ - AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ - AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ - AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ - AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as - link offline */ - AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ - - /* ap->flags bits */ - - AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | - ATA_FLAG_ACPI_SATA | ATA_FLAG_AN | - ATA_FLAG_IPM, - - ICH_MAP = 0x90, /* ICH MAP register */ - - /* em constants */ - EM_MAX_SLOTS = 8, - EM_MAX_RETRY = 5, - - /* em_ctl bits */ - EM_CTL_RST = (1 << 9), /* Reset */ - EM_CTL_TM = (1 << 8), /* Transmit Message */ - EM_CTL_ALHD = (1 << 26), /* Activity LED */ -}; - -struct ahci_cmd_hdr { - __le32 opts; - __le32 status; - __le32 tbl_addr; - __le32 tbl_addr_hi; - __le32 reserved[4]; -}; - -struct ahci_sg { - __le32 addr; - __le32 addr_hi; - __le32 reserved; - __le32 flags_size; -}; - -struct ahci_em_priv { - enum sw_activity blink_policy; - struct timer_list timer; - unsigned long saved_activity; - unsigned long activity; - unsigned long led_state; -}; - -struct ahci_host_priv { - unsigned int flags; /* AHCI_HFLAG_* */ - u32 cap; /* cap to use */ - u32 cap2; /* cap2 to use */ - u32 port_map; /* port map to use */ - u32 saved_cap; /* saved initial cap */ - u32 saved_cap2; /* 
saved initial cap2 */ - u32 saved_port_map; /* saved initial port_map */ - u32 em_loc; /* enclosure management location */ }; -struct ahci_port_priv { - struct ata_link *active_link; - struct ahci_cmd_hdr *cmd_slot; - dma_addr_t cmd_slot_dma; - void *cmd_tbl; - dma_addr_t cmd_tbl_dma; - void *rx_fis; - dma_addr_t rx_fis_dma; - /* for NCQ spurious interrupt analysis */ - unsigned int ncq_saw_d2h:1; - unsigned int ncq_saw_dmas:1; - unsigned int ncq_saw_sdb:1; - u32 intr_mask; /* interrupts to enable */ - bool fbs_supported; /* set iff FBS is supported */ - bool fbs_enabled; /* set iff FBS is enabled */ - int fbs_last_dev; /* save FBS.DEV of last FIS */ - /* enclosure management info per PM slot */ - struct ahci_em_priv em_priv[EM_MAX_SLOTS]; +enum board_ids { + /* board IDs by feature in alphabetical order */ + board_ahci, + board_ahci_ign_iferr, + board_ahci_nosntf, + + /* board IDs for specific chipsets in alphabetical order */ + board_ahci_mcp65, + board_ahci_mcp77, + board_ahci_mcp89, + board_ahci_mv, + board_ahci_sb600, + board_ahci_sb700, /* for SB700 and SB800 */ + board_ahci_vt8251, + + /* aliases */ + board_ahci_mcp_linux = board_ahci_mcp65, + board_ahci_mcp67 = board_ahci_mcp65, + board_ahci_mcp73 = board_ahci_mcp65, + board_ahci_mcp79 = board_ahci_mcp77, }; -static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); -static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); -static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); -static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); -static int ahci_port_start(struct ata_port *ap); -static void ahci_port_stop(struct ata_port *ap); -static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); -static void ahci_qc_prep(struct ata_queued_cmd *qc); -static void ahci_freeze(struct ata_port *ap); -static void ahci_thaw(struct ata_port *ap); -static void ahci_enable_fbs(struct ata_port *ap); -static void ahci_disable_fbs(struct ata_port *ap); -static void ahci_pmp_attach(struct ata_port *ap); -static void ahci_pmp_detach(struct ata_port *ap); -static int ahci_softreset(struct ata_link *link, unsigned int *class, - unsigned long deadline); static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline); -static int ahci_hardreset(struct ata_link *link, unsigned int *class, - unsigned long deadline); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); -static void ahci_postreset(struct ata_link *link, unsigned int *class); -static void ahci_error_handler(struct ata_port *ap); -static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); -static int ahci_port_resume(struct ata_port *ap); -static void ahci_dev_config(struct ata_device *dev); -static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, - u32 opts); #ifdef CONFIG_PM -static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); static int ahci_pci_device_resume(struct pci_dev *pdev); #endif -static ssize_t ahci_activity_show(struct ata_device *dev, char *buf); -static ssize_t ahci_activity_store(struct ata_device *dev, - enum sw_activity val); -static void ahci_init_sw_activity(struct ata_link *link); - -static ssize_t ahci_show_host_caps(struct device *dev, - 
struct device_attribute *attr, char *buf); -static ssize_t ahci_show_host_cap2(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t ahci_show_host_version(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t ahci_show_port_cmd(struct device *dev, - struct device_attribute *attr, char *buf); - -static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); -static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); -static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL); -static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); - -static struct device_attribute *ahci_shost_attrs[] = { - &dev_attr_link_power_management_policy, - &dev_attr_em_message_type, - &dev_attr_em_message, - &dev_attr_ahci_host_caps, - &dev_attr_ahci_host_cap2, - &dev_attr_ahci_host_version, - &dev_attr_ahci_port_cmd, - NULL -}; - -static struct device_attribute *ahci_sdev_attrs[] = { - &dev_attr_sw_activity, - &dev_attr_unload_heads, - NULL -}; - -static struct scsi_host_template ahci_sht = { - ATA_NCQ_SHT(DRV_NAME), - .can_queue = AHCI_MAX_CMDS - 1, - .sg_tablesize = AHCI_MAX_SG, - .dma_boundary = AHCI_DMA_BOUNDARY, - .shost_attrs = ahci_shost_attrs, - .sdev_attrs = ahci_sdev_attrs, -}; - -static struct ata_port_operations ahci_ops = { - .inherits = &sata_pmp_port_ops, - - .qc_defer = ahci_pmp_qc_defer, - .qc_prep = ahci_qc_prep, - .qc_issue = ahci_qc_issue, - .qc_fill_rtf = ahci_qc_fill_rtf, - - .freeze = ahci_freeze, - .thaw = ahci_thaw, - .softreset = ahci_softreset, - .hardreset = ahci_hardreset, - .postreset = ahci_postreset, - .pmp_softreset = ahci_softreset, - .error_handler = ahci_error_handler, - .post_internal_cmd = ahci_post_internal_cmd, - .dev_config = ahci_dev_config, - - .scr_read = ahci_scr_read, - .scr_write = ahci_scr_write, - .pmp_attach = ahci_pmp_attach, - .pmp_detach = ahci_pmp_detach, - - .enable_pm = ahci_enable_alpm, - .disable_pm = ahci_disable_alpm, - .em_show = ahci_led_show, - .em_store = ahci_led_store, - .sw_activity_show = ahci_activity_show, - .sw_activity_store = ahci_activity_store, -#ifdef CONFIG_PM - .port_suspend = ahci_port_suspend, - .port_resume = ahci_port_resume, -#endif - .port_start = ahci_port_start, - .port_stop = ahci_port_stop, -}; static struct ata_port_operations ahci_vt8251_ops = { .inherits = &ahci_ops, @@ -463,6 +108,7 @@ static struct ata_port_operations ahci_sb600_ops = { #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) static const struct ata_port_info ahci_port_info[] = { + /* by features */ [board_ahci] = { .flags = AHCI_FLAG_COMMON, @@ -470,81 +116,83 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, - [board_ahci_vt8251] = + [board_ahci_ign_iferr] = { - AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), + AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, - .port_ops = &ahci_vt8251_ops, + .port_ops = &ahci_ops, }, - [board_ahci_ign_iferr] = + [board_ahci_nosntf] = { - AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), + AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, - [board_ahci_sb600] = + /* by chipsets */ + [board_ahci_mcp65] = { - AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | - AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 | - AHCI_HFLAG_32BIT_ONLY), + AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | + AHCI_HFLAG_YES_NCQ), .flags = 
AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, - .port_ops = &ahci_sb600_ops, + .port_ops = &ahci_ops, }, - [board_ahci_mv] = + [board_ahci_mcp77] = { - AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | - AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), - .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, + AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP), + .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, - [board_ahci_sb700] = /* for SB700 and SB800 */ + [board_ahci_mcp89] = { - AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), + AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, - .port_ops = &ahci_sb600_ops, + .port_ops = &ahci_ops, }, - [board_ahci_mcp65] = + [board_ahci_mv] = { - AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), - .flags = AHCI_FLAG_COMMON, + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | + AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, - [board_ahci_nopmp] = + [board_ahci_sb600] = { - AHCI_HFLAGS (AHCI_HFLAG_NO_PMP), + AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | + AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 | + AHCI_HFLAG_32BIT_ONLY), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, - .port_ops = &ahci_ops, + .port_ops = &ahci_sb600_ops, }, - [board_ahci_yesncq] = + [board_ahci_sb700] = /* for SB700 and SB800 */ { - AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), + AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, - .port_ops = &ahci_ops, + .port_ops = &ahci_sb600_ops, }, - [board_ahci_nosntf] = + [board_ahci_vt8251] = { - AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF), + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, - .port_ops = &ahci_ops, + .port_ops = &ahci_vt8251_ops, }, }; @@ -629,82 +277,82 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */ - { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */ - { 
PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */ - { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ - { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ - { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */ - { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */ - { 
PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */ + { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */ + { 
PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */ + { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */ + { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */ /* SiS */ { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ @@ -737,12 +385,6 @@ static struct pci_driver ahci_pci_driver = { #endif }; -static int ahci_em_messages = 1; -module_param(ahci_em_messages, int, 0444); -/* add other LED protocol types when they become supported */ -MODULE_PARM_DESC(ahci_em_messages, - "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED"); - #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) static int marvell_enable; #else @@ -752,166 +394,15 @@ module_param(marvell_enable, int, 0644); MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); -static inline int ahci_nr_ports(u32 cap) +static void ahci_pci_save_initial_config(struct pci_dev *pdev, + struct ahci_host_priv *hpriv) { - return (cap & 0x1f) + 1; -} + unsigned int force_port_map = 0; + unsigned int mask_port_map = 0; -static inline void __iomem *__ahci_port_base(struct ata_host *host, - unsigned int port_no) -{ - void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; - - return mmio + 0x100 + (port_no * 0x80); -} - -static inline void __iomem *ahci_port_base(struct ata_port *ap) -{ - return __ahci_port_base(ap->host, ap->port_no); -} - -static void ahci_enable_ahci(void __iomem *mmio) -{ - int i; - u32 tmp; - - /* turn on AHCI_EN */ - tmp = readl(mmio + HOST_CTL); - if (tmp & HOST_AHCI_EN) - return; - - /* Some controllers need AHCI_EN to be written multiple times. - * Try a few times before giving up. 
- */ - for (i = 0; i < 5; i++) { - tmp |= HOST_AHCI_EN; - writel(tmp, mmio + HOST_CTL); - tmp = readl(mmio + HOST_CTL); /* flush && sanity check */ - if (tmp & HOST_AHCI_EN) - return; - msleep(10); - } - - WARN_ON(1); -} - -static ssize_t ahci_show_host_caps(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ata_port *ap = ata_shost_to_port(shost); - struct ahci_host_priv *hpriv = ap->host->private_data; - - return sprintf(buf, "%x\n", hpriv->cap); -} - -static ssize_t ahci_show_host_cap2(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ata_port *ap = ata_shost_to_port(shost); - struct ahci_host_priv *hpriv = ap->host->private_data; - - return sprintf(buf, "%x\n", hpriv->cap2); -} - -static ssize_t ahci_show_host_version(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ata_port *ap = ata_shost_to_port(shost); - void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; - - return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION)); -} - -static ssize_t ahci_show_port_cmd(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ata_port *ap = ata_shost_to_port(shost); - void __iomem *port_mmio = ahci_port_base(ap); - - return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD)); -} - -/** - * ahci_save_initial_config - Save and fixup initial config values - * @pdev: target PCI device - * @hpriv: host private area to store config values - * - * Some registers containing configuration info might be setup by - * BIOS and might be cleared on reset. This function saves the - * initial values of those registers into @hpriv such that they - * can be restored after controller reset. - * - * If inconsistent, config values are fixed up by this function. - * - * LOCKING: - * None. - */ -static void ahci_save_initial_config(struct pci_dev *pdev, - struct ahci_host_priv *hpriv) -{ - void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; - u32 cap, cap2, vers, port_map; - int i; - int mv; - - /* make sure AHCI mode is enabled before accessing CAP */ - ahci_enable_ahci(mmio); - - /* Values prefixed with saved_ are written back to host after - * reset. Values without are used for driver operation. 
- */ - hpriv->saved_cap = cap = readl(mmio + HOST_CAP); - hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); - - /* CAP2 register is only defined for AHCI 1.2 and later */ - vers = readl(mmio + HOST_VERSION); - if ((vers >> 16) > 1 || - ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200)) - hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2); - else - hpriv->saved_cap2 = cap2 = 0; - - /* some chips have errata preventing 64bit use */ - if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) { - dev_printk(KERN_INFO, &pdev->dev, - "controller can't do 64bit DMA, forcing 32bit\n"); - cap &= ~HOST_CAP_64; - } - - if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) { - dev_printk(KERN_INFO, &pdev->dev, - "controller can't do NCQ, turning off CAP_NCQ\n"); - cap &= ~HOST_CAP_NCQ; - } - - if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) { - dev_printk(KERN_INFO, &pdev->dev, - "controller can do NCQ, turning on CAP_NCQ\n"); - cap |= HOST_CAP_NCQ; - } - - if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { - dev_printk(KERN_INFO, &pdev->dev, - "controller can't do PMP, turning off CAP_PMP\n"); - cap &= ~HOST_CAP_PMP; - } - - if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) { - dev_printk(KERN_INFO, &pdev->dev, - "controller can't do SNTF, turning off CAP_SNTF\n"); - cap &= ~HOST_CAP_SNTF; - } - - if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 && - port_map != 1) { - dev_printk(KERN_INFO, &pdev->dev, - "JMB361 has only one port, port_map 0x%x -> 0x%x\n", - port_map, 1); - port_map = 1; + if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { + dev_info(&pdev->dev, "JMB361 has only one port\n"); + force_port_map = 1; } /* @@ -921,469 +412,25 @@ static void ahci_save_initial_config(struct pci_dev *pdev, */ if (hpriv->flags & AHCI_HFLAG_MV_PATA) { if (pdev->device == 0x6121) - mv = 0x3; + mask_port_map = 0x3; else - mv = 0xf; - dev_printk(KERN_ERR, &pdev->dev, - "MV_AHCI HACK: port_map %x -> %x\n", - port_map, - port_map & mv); - dev_printk(KERN_ERR, &pdev->dev, + mask_port_map = 0xf; + dev_info(&pdev->dev, "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); - - port_map &= mv; } - /* cross check port_map and cap.n_ports */ - if (port_map) { - int map_ports = 0; - - for (i = 0; i < AHCI_MAX_PORTS; i++) - if (port_map & (1 << i)) - map_ports++; - - /* If PI has more ports than n_ports, whine, clear - * port_map and let it be generated from n_ports. - */ - if (map_ports > ahci_nr_ports(cap)) { - dev_printk(KERN_WARNING, &pdev->dev, - "implemented port map (0x%x) contains more " - "ports than nr_ports (%u), using nr_ports\n", - port_map, ahci_nr_ports(cap)); - port_map = 0; - } - } - - /* fabricate port_map from cap.nr_ports */ - if (!port_map) { - port_map = (1 << ahci_nr_ports(cap)) - 1; - dev_printk(KERN_WARNING, &pdev->dev, - "forcing PORTS_IMPL to 0x%x\n", port_map); - - /* write the fixed up value to the PI register */ - hpriv->saved_port_map = port_map; - } - - /* record values to use during operation */ - hpriv->cap = cap; - hpriv->cap2 = cap2; - hpriv->port_map = port_map; + ahci_save_initial_config(&pdev->dev, hpriv, force_port_map, + mask_port_map); } -/** - * ahci_restore_initial_config - Restore initial config - * @host: target ATA host - * - * Restore initial config stored by ahci_save_initial_config(). - * - * LOCKING: - * None. 
- */ -static void ahci_restore_initial_config(struct ata_host *host) -{ - struct ahci_host_priv *hpriv = host->private_data; - void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; - - writel(hpriv->saved_cap, mmio + HOST_CAP); - if (hpriv->saved_cap2) - writel(hpriv->saved_cap2, mmio + HOST_CAP2); - writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL); - (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ -} - -static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg) -{ - static const int offset[] = { - [SCR_STATUS] = PORT_SCR_STAT, - [SCR_CONTROL] = PORT_SCR_CTL, - [SCR_ERROR] = PORT_SCR_ERR, - [SCR_ACTIVE] = PORT_SCR_ACT, - [SCR_NOTIFICATION] = PORT_SCR_NTF, - }; - struct ahci_host_priv *hpriv = ap->host->private_data; - - if (sc_reg < ARRAY_SIZE(offset) && - (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF))) - return offset[sc_reg]; - return 0; -} - -static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) -{ - void __iomem *port_mmio = ahci_port_base(link->ap); - int offset = ahci_scr_offset(link->ap, sc_reg); - - if (offset) { - *val = readl(port_mmio + offset); - return 0; - } - return -EINVAL; -} - -static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) -{ - void __iomem *port_mmio = ahci_port_base(link->ap); - int offset = ahci_scr_offset(link->ap, sc_reg); - - if (offset) { - writel(val, port_mmio + offset); - return 0; - } - return -EINVAL; -} - -static void ahci_start_engine(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - u32 tmp; - - /* start DMA */ - tmp = readl(port_mmio + PORT_CMD); - tmp |= PORT_CMD_START; - writel(tmp, port_mmio + PORT_CMD); - readl(port_mmio + PORT_CMD); /* flush */ -} - -static int ahci_stop_engine(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - u32 tmp; - - tmp = readl(port_mmio + PORT_CMD); - - /* check if the HBA is idle */ - if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) - return 0; - - /* setting HBA to idle */ - tmp &= ~PORT_CMD_START; - writel(tmp, port_mmio + PORT_CMD); - - /* wait for engine to stop. 
This could be as long as 500 msec */ - tmp = ata_wait_register(port_mmio + PORT_CMD, - PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); - if (tmp & PORT_CMD_LIST_ON) - return -EIO; - - return 0; -} - -static void ahci_start_fis_rx(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - struct ahci_host_priv *hpriv = ap->host->private_data; - struct ahci_port_priv *pp = ap->private_data; - u32 tmp; - - /* set FIS registers */ - if (hpriv->cap & HOST_CAP_64) - writel((pp->cmd_slot_dma >> 16) >> 16, - port_mmio + PORT_LST_ADDR_HI); - writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR); - - if (hpriv->cap & HOST_CAP_64) - writel((pp->rx_fis_dma >> 16) >> 16, - port_mmio + PORT_FIS_ADDR_HI); - writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR); - - /* enable FIS reception */ - tmp = readl(port_mmio + PORT_CMD); - tmp |= PORT_CMD_FIS_RX; - writel(tmp, port_mmio + PORT_CMD); - - /* flush */ - readl(port_mmio + PORT_CMD); -} - -static int ahci_stop_fis_rx(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - u32 tmp; - - /* disable FIS reception */ - tmp = readl(port_mmio + PORT_CMD); - tmp &= ~PORT_CMD_FIS_RX; - writel(tmp, port_mmio + PORT_CMD); - - /* wait for completion, spec says 500ms, give it 1000 */ - tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON, - PORT_CMD_FIS_ON, 10, 1000); - if (tmp & PORT_CMD_FIS_ON) - return -EBUSY; - - return 0; -} - -static void ahci_power_up(struct ata_port *ap) -{ - struct ahci_host_priv *hpriv = ap->host->private_data; - void __iomem *port_mmio = ahci_port_base(ap); - u32 cmd; - - cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; - - /* spin up device */ - if (hpriv->cap & HOST_CAP_SSS) { - cmd |= PORT_CMD_SPIN_UP; - writel(cmd, port_mmio + PORT_CMD); - } - - /* wake up link */ - writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); -} - -static void ahci_disable_alpm(struct ata_port *ap) -{ - struct ahci_host_priv *hpriv = ap->host->private_data; - void __iomem *port_mmio = ahci_port_base(ap); - u32 cmd; - struct ahci_port_priv *pp = ap->private_data; - - /* IPM bits should be disabled by libata-core */ - /* get the existing command bits */ - cmd = readl(port_mmio + PORT_CMD); - - /* disable ALPM and ASP */ - cmd &= ~PORT_CMD_ASP; - cmd &= ~PORT_CMD_ALPE; - - /* force the interface back to active */ - cmd |= PORT_CMD_ICC_ACTIVE; - - /* write out new cmd value */ - writel(cmd, port_mmio + PORT_CMD); - cmd = readl(port_mmio + PORT_CMD); - - /* wait 10ms to be sure we've come out of any low power state */ - msleep(10); - - /* clear out any PhyRdy stuff from interrupt status */ - writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT); - - /* go ahead and clean out PhyRdy Change from Serror too */ - ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); - - /* - * Clear flag to indicate that we should ignore all PhyRdy - * state changes - */ - hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG; - - /* - * Enable interrupts on Phy Ready. 
- */ - pp->intr_mask |= PORT_IRQ_PHYRDY; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); - - /* - * don't change the link pm policy - we can be called - * just to turn of link pm temporarily - */ -} - -static int ahci_enable_alpm(struct ata_port *ap, - enum link_pm policy) -{ - struct ahci_host_priv *hpriv = ap->host->private_data; - void __iomem *port_mmio = ahci_port_base(ap); - u32 cmd; - struct ahci_port_priv *pp = ap->private_data; - u32 asp; - - /* Make sure the host is capable of link power management */ - if (!(hpriv->cap & HOST_CAP_ALPM)) - return -EINVAL; - - switch (policy) { - case MAX_PERFORMANCE: - case NOT_AVAILABLE: - /* - * if we came here with NOT_AVAILABLE, - * it just means this is the first time we - * have tried to enable - default to max performance, - * and let the user go to lower power modes on request. - */ - ahci_disable_alpm(ap); - return 0; - case MIN_POWER: - /* configure HBA to enter SLUMBER */ - asp = PORT_CMD_ASP; - break; - case MEDIUM_POWER: - /* configure HBA to enter PARTIAL */ - asp = 0; - break; - default: - return -EINVAL; - } - - /* - * Disable interrupts on Phy Ready. This keeps us from - * getting woken up due to spurious phy ready interrupts - * TBD - Hot plug should be done via polling now, is - * that even supported? - */ - pp->intr_mask &= ~PORT_IRQ_PHYRDY; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); - - /* - * Set a flag to indicate that we should ignore all PhyRdy - * state changes since these can happen now whenever we - * change link state - */ - hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG; - - /* get the existing command bits */ - cmd = readl(port_mmio + PORT_CMD); - - /* - * Set ASP based on Policy - */ - cmd |= asp; - - /* - * Setting this bit will instruct the HBA to aggressively - * enter a lower power link state when it's appropriate and - * based on the value set above for ASP - */ - cmd |= PORT_CMD_ALPE; - - /* write out new cmd value */ - writel(cmd, port_mmio + PORT_CMD); - cmd = readl(port_mmio + PORT_CMD); - - /* IPM bits should be set by libata-core */ - return 0; -} - -#ifdef CONFIG_PM -static void ahci_power_down(struct ata_port *ap) -{ - struct ahci_host_priv *hpriv = ap->host->private_data; - void __iomem *port_mmio = ahci_port_base(ap); - u32 cmd, scontrol; - - if (!(hpriv->cap & HOST_CAP_SSS)) - return; - - /* put device into listen mode, first set PxSCTL.DET to 0 */ - scontrol = readl(port_mmio + PORT_SCR_CTL); - scontrol &= ~0xf; - writel(scontrol, port_mmio + PORT_SCR_CTL); - - /* then set PxCMD.SUD to 0 */ - cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; - cmd &= ~PORT_CMD_SPIN_UP; - writel(cmd, port_mmio + PORT_CMD); -} -#endif - -static void ahci_start_port(struct ata_port *ap) -{ - struct ahci_port_priv *pp = ap->private_data; - struct ata_link *link; - struct ahci_em_priv *emp; - ssize_t rc; - int i; - - /* enable FIS reception */ - ahci_start_fis_rx(ap); - - /* enable DMA */ - ahci_start_engine(ap); - - /* turn on LEDs */ - if (ap->flags & ATA_FLAG_EM) { - ata_for_each_link(link, ap, EDGE) { - emp = &pp->em_priv[link->pmp]; - - /* EM Transmit bit maybe busy during init */ - for (i = 0; i < EM_MAX_RETRY; i++) { - rc = ahci_transmit_led_message(ap, - emp->led_state, - 4); - if (rc == -EBUSY) - msleep(1); - else - break; - } - } - } - - if (ap->flags & ATA_FLAG_SW_ACTIVITY) - ata_for_each_link(link, ap, EDGE) - ahci_init_sw_activity(link); - -} - -static int ahci_deinit_port(struct ata_port *ap, const char **emsg) -{ - int rc; - - /* disable DMA */ - rc = ahci_stop_engine(ap); - if (rc) { - *emsg = 
"failed to stop engine"; - return rc; - } - - /* disable FIS reception */ - rc = ahci_stop_fis_rx(ap); - if (rc) { - *emsg = "failed stop FIS RX"; - return rc; - } - - return 0; -} - -static int ahci_reset_controller(struct ata_host *host) +static int ahci_pci_reset_controller(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); - struct ahci_host_priv *hpriv = host->private_data; - void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; - u32 tmp; - - /* we must be in AHCI mode, before using anything - * AHCI-specific, such as HOST_RESET. - */ - ahci_enable_ahci(mmio); - - /* global controller reset */ - if (!ahci_skip_host_reset) { - tmp = readl(mmio + HOST_CTL); - if ((tmp & HOST_RESET) == 0) { - writel(tmp | HOST_RESET, mmio + HOST_CTL); - readl(mmio + HOST_CTL); /* flush */ - } - - /* - * to perform host reset, OS should set HOST_RESET - * and poll until this bit is read to be "0". - * reset must complete within 1 second, or - * the hardware should be considered fried. - */ - tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET, - HOST_RESET, 10, 1000); - - if (tmp & HOST_RESET) { - dev_printk(KERN_ERR, host->dev, - "controller reset failed (0x%x)\n", tmp); - return -EIO; - } - - /* turn on AHCI mode */ - ahci_enable_ahci(mmio); - /* Some registers might be cleared on reset. Restore - * initial values. - */ - ahci_restore_initial_config(host); - } else - dev_printk(KERN_INFO, host->dev, - "skipping global host reset\n"); + ahci_reset_controller(host); if (pdev->vendor == PCI_VENDOR_ID_INTEL) { + struct ahci_host_priv *hpriv = host->private_data; u16 tmp16; /* configure PCS */ @@ -1397,267 +444,10 @@ static int ahci_reset_controller(struct ata_host *host) return 0; } -static void ahci_sw_activity(struct ata_link *link) -{ - struct ata_port *ap = link->ap; - struct ahci_port_priv *pp = ap->private_data; - struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; - - if (!(link->flags & ATA_LFLAG_SW_ACTIVITY)) - return; - - emp->activity++; - if (!timer_pending(&emp->timer)) - mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10)); -} - -static void ahci_sw_activity_blink(unsigned long arg) -{ - struct ata_link *link = (struct ata_link *)arg; - struct ata_port *ap = link->ap; - struct ahci_port_priv *pp = ap->private_data; - struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; - unsigned long led_message = emp->led_state; - u32 activity_led_state; - unsigned long flags; - - led_message &= EM_MSG_LED_VALUE; - led_message |= ap->port_no | (link->pmp << 8); - - /* check to see if we've had activity. If so, - * toggle state of LED and reset timer. If not, - * turn LED to desired idle state. 
- */ - spin_lock_irqsave(ap->lock, flags); - if (emp->saved_activity != emp->activity) { - emp->saved_activity = emp->activity; - /* get the current LED state */ - activity_led_state = led_message & EM_MSG_LED_VALUE_ON; - - if (activity_led_state) - activity_led_state = 0; - else - activity_led_state = 1; - - /* clear old state */ - led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; - - /* toggle state */ - led_message |= (activity_led_state << 16); - mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100)); - } else { - /* switch to idle */ - led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; - if (emp->blink_policy == BLINK_OFF) - led_message |= (1 << 16); - } - spin_unlock_irqrestore(ap->lock, flags); - ahci_transmit_led_message(ap, led_message, 4); -} - -static void ahci_init_sw_activity(struct ata_link *link) -{ - struct ata_port *ap = link->ap; - struct ahci_port_priv *pp = ap->private_data; - struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; - - /* init activity stats, setup timer */ - emp->saved_activity = emp->activity = 0; - setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link); - - /* check our blink policy and set flag for link if it's enabled */ - if (emp->blink_policy) - link->flags |= ATA_LFLAG_SW_ACTIVITY; -} - -static int ahci_reset_em(struct ata_host *host) -{ - void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; - u32 em_ctl; - - em_ctl = readl(mmio + HOST_EM_CTL); - if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST)) - return -EINVAL; - - writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL); - return 0; -} - -static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, - ssize_t size) -{ - struct ahci_host_priv *hpriv = ap->host->private_data; - struct ahci_port_priv *pp = ap->private_data; - void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; - u32 em_ctl; - u32 message[] = {0, 0}; - unsigned long flags; - int pmp; - struct ahci_em_priv *emp; - - /* get the slot number from the message */ - pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; - if (pmp < EM_MAX_SLOTS) - emp = &pp->em_priv[pmp]; - else - return -EINVAL; - - spin_lock_irqsave(ap->lock, flags); - - /* - * if we are still busy transmitting a previous message, - * do not allow - */ - em_ctl = readl(mmio + HOST_EM_CTL); - if (em_ctl & EM_CTL_TM) { - spin_unlock_irqrestore(ap->lock, flags); - return -EBUSY; - } - - /* - * create message header - this is all zero except for - * the message size, which is 4 bytes. 
- */ - message[0] |= (4 << 8); - - /* ignore 0:4 of byte zero, fill in port info yourself */ - message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no); - - /* write message to EM_LOC */ - writel(message[0], mmio + hpriv->em_loc); - writel(message[1], mmio + hpriv->em_loc+4); - - /* save off new led state for port/slot */ - emp->led_state = state; - - /* - * tell hardware to transmit the message - */ - writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); - - spin_unlock_irqrestore(ap->lock, flags); - return size; -} - -static ssize_t ahci_led_show(struct ata_port *ap, char *buf) -{ - struct ahci_port_priv *pp = ap->private_data; - struct ata_link *link; - struct ahci_em_priv *emp; - int rc = 0; - - ata_for_each_link(link, ap, EDGE) { - emp = &pp->em_priv[link->pmp]; - rc += sprintf(buf, "%lx\n", emp->led_state); - } - return rc; -} - -static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, - size_t size) -{ - int state; - int pmp; - struct ahci_port_priv *pp = ap->private_data; - struct ahci_em_priv *emp; - - state = simple_strtoul(buf, NULL, 0); - - /* get the slot number from the message */ - pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; - if (pmp < EM_MAX_SLOTS) - emp = &pp->em_priv[pmp]; - else - return -EINVAL; - - /* mask off the activity bits if we are in sw_activity - * mode, user should turn off sw_activity before setting - * activity led through em_message - */ - if (emp->blink_policy) - state &= ~EM_MSG_LED_VALUE_ACTIVITY; - - return ahci_transmit_led_message(ap, state, size); -} - -static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val) -{ - struct ata_link *link = dev->link; - struct ata_port *ap = link->ap; - struct ahci_port_priv *pp = ap->private_data; - struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; - u32 port_led_state = emp->led_state; - - /* save the desired Activity LED behavior */ - if (val == OFF) { - /* clear LFLAG */ - link->flags &= ~(ATA_LFLAG_SW_ACTIVITY); - - /* set the LED to OFF */ - port_led_state &= EM_MSG_LED_VALUE_OFF; - port_led_state |= (ap->port_no | (link->pmp << 8)); - ahci_transmit_led_message(ap, port_led_state, 4); - } else { - link->flags |= ATA_LFLAG_SW_ACTIVITY; - if (val == BLINK_OFF) { - /* set LED to ON for idle */ - port_led_state &= EM_MSG_LED_VALUE_OFF; - port_led_state |= (ap->port_no | (link->pmp << 8)); - port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */ - ahci_transmit_led_message(ap, port_led_state, 4); - } - } - emp->blink_policy = val; - return 0; -} - -static ssize_t ahci_activity_show(struct ata_device *dev, char *buf) -{ - struct ata_link *link = dev->link; - struct ata_port *ap = link->ap; - struct ahci_port_priv *pp = ap->private_data; - struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; - - /* display the saved value of activity behavior for this - * disk. 
- */ - return sprintf(buf, "%d\n", emp->blink_policy); -} - -static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap, - int port_no, void __iomem *mmio, - void __iomem *port_mmio) -{ - const char *emsg = NULL; - int rc; - u32 tmp; - - /* make sure port is not active */ - rc = ahci_deinit_port(ap, &emsg); - if (rc) - dev_printk(KERN_WARNING, &pdev->dev, - "%s (%d)\n", emsg, rc); - - /* clear SError */ - tmp = readl(port_mmio + PORT_SCR_ERR); - VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); - writel(tmp, port_mmio + PORT_SCR_ERR); - - /* clear port IRQ */ - tmp = readl(port_mmio + PORT_IRQ_STAT); - VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); - if (tmp) - writel(tmp, port_mmio + PORT_IRQ_STAT); - - writel(1 << port_no, mmio + HOST_IRQ_STAT); -} - -static void ahci_init_controller(struct ata_host *host) +static void ahci_pci_init_controller(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; struct pci_dev *pdev = to_pci_dev(host->dev); - void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; - int i; void __iomem *port_mmio; u32 tmp; int mv; @@ -1678,220 +468,7 @@ static void ahci_init_controller(struct ata_host *host) writel(tmp, port_mmio + PORT_IRQ_STAT); } - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; - - port_mmio = ahci_port_base(ap); - if (ata_port_is_dummy(ap)) - continue; - - ahci_port_init(pdev, ap, i, mmio, port_mmio); - } - - tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); - writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); - tmp = readl(mmio + HOST_CTL); - VPRINTK("HOST_CTL 0x%x\n", tmp); -} - -static void ahci_dev_config(struct ata_device *dev) -{ - struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; - - if (hpriv->flags & AHCI_HFLAG_SECT255) { - dev->max_sectors = 255; - ata_dev_printk(dev, KERN_INFO, - "SB600 AHCI: limiting to 255 sectors per cmd\n"); - } -} - -static unsigned int ahci_dev_classify(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - struct ata_taskfile tf; - u32 tmp; - - tmp = readl(port_mmio + PORT_SIG); - tf.lbah = (tmp >> 24) & 0xff; - tf.lbam = (tmp >> 16) & 0xff; - tf.lbal = (tmp >> 8) & 0xff; - tf.nsect = (tmp) & 0xff; - - return ata_dev_classify(&tf); -} - -static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, - u32 opts) -{ - dma_addr_t cmd_tbl_dma; - - cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ; - - pp->cmd_slot[tag].opts = cpu_to_le32(opts); - pp->cmd_slot[tag].status = 0; - pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff); - pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); -} - -static int ahci_kick_engine(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - struct ahci_host_priv *hpriv = ap->host->private_data; - u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; - u32 tmp; - int busy, rc; - - /* stop engine */ - rc = ahci_stop_engine(ap); - if (rc) - goto out_restart; - - /* need to do CLO? 
- * always do CLO if PMP is attached (AHCI-1.3 9.2) - */ - busy = status & (ATA_BUSY | ATA_DRQ); - if (!busy && !sata_pmp_attached(ap)) { - rc = 0; - goto out_restart; - } - - if (!(hpriv->cap & HOST_CAP_CLO)) { - rc = -EOPNOTSUPP; - goto out_restart; - } - - /* perform CLO */ - tmp = readl(port_mmio + PORT_CMD); - tmp |= PORT_CMD_CLO; - writel(tmp, port_mmio + PORT_CMD); - - rc = 0; - tmp = ata_wait_register(port_mmio + PORT_CMD, - PORT_CMD_CLO, PORT_CMD_CLO, 1, 500); - if (tmp & PORT_CMD_CLO) - rc = -EIO; - - /* restart engine */ - out_restart: - ahci_start_engine(ap); - return rc; -} - -static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, - struct ata_taskfile *tf, int is_cmd, u16 flags, - unsigned long timeout_msec) -{ - const u32 cmd_fis_len = 5; /* five dwords */ - struct ahci_port_priv *pp = ap->private_data; - void __iomem *port_mmio = ahci_port_base(ap); - u8 *fis = pp->cmd_tbl; - u32 tmp; - - /* prep the command */ - ata_tf_to_fis(tf, pmp, is_cmd, fis); - ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12)); - - /* issue & wait */ - writel(1, port_mmio + PORT_CMD_ISSUE); - - if (timeout_msec) { - tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, - 1, timeout_msec); - if (tmp & 0x1) { - ahci_kick_engine(ap); - return -EBUSY; - } - } else - readl(port_mmio + PORT_CMD_ISSUE); /* flush */ - - return 0; -} - -static int ahci_do_softreset(struct ata_link *link, unsigned int *class, - int pmp, unsigned long deadline, - int (*check_ready)(struct ata_link *link)) -{ - struct ata_port *ap = link->ap; - struct ahci_host_priv *hpriv = ap->host->private_data; - const char *reason = NULL; - unsigned long now, msecs; - struct ata_taskfile tf; - int rc; - - DPRINTK("ENTER\n"); - - /* prepare for SRST (AHCI-1.1 10.4.1) */ - rc = ahci_kick_engine(ap); - if (rc && rc != -EOPNOTSUPP) - ata_link_printk(link, KERN_WARNING, - "failed to reset engine (errno=%d)\n", rc); - - ata_tf_init(link->device, &tf); - - /* issue the first D2H Register FIS */ - msecs = 0; - now = jiffies; - if (time_after(now, deadline)) - msecs = jiffies_to_msecs(deadline - now); - - tf.ctl |= ATA_SRST; - if (ahci_exec_polled_cmd(ap, pmp, &tf, 0, - AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) { - rc = -EIO; - reason = "1st FIS failed"; - goto fail; - } - - /* spec says at least 5us, but be generous and sleep for 1ms */ - msleep(1); - - /* issue the second D2H Register FIS */ - tf.ctl &= ~ATA_SRST; - ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); - - /* wait for link to become ready */ - rc = ata_wait_after_reset(link, deadline, check_ready); - if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) { - /* - * Workaround for cases where link online status can't - * be trusted. Treat device readiness timeout as link - * offline. 
- */ - ata_link_printk(link, KERN_INFO, - "device not ready, treating as offline\n"); - *class = ATA_DEV_NONE; - } else if (rc) { - /* link occupied, -ENODEV too is an error */ - reason = "device not ready"; - goto fail; - } else - *class = ahci_dev_classify(ap); - - DPRINTK("EXIT, class=%u\n", *class); - return 0; - - fail: - ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason); - return rc; -} - -static int ahci_check_ready(struct ata_link *link) -{ - void __iomem *port_mmio = ahci_port_base(link->ap); - u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; - - return ata_check_ready(status); -} - -static int ahci_softreset(struct ata_link *link, unsigned int *class, - unsigned long deadline) -{ - int pmp = sata_srst_pmp(link); - - DPRINTK("ENTER\n"); - - return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready); + ahci_init_controller(host); } static int ahci_sb600_check_ready(struct ata_link *link) @@ -1943,38 +520,6 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, return rc; } -static int ahci_hardreset(struct ata_link *link, unsigned int *class, - unsigned long deadline) -{ - const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); - struct ata_port *ap = link->ap; - struct ahci_port_priv *pp = ap->private_data; - u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; - struct ata_taskfile tf; - bool online; - int rc; - - DPRINTK("ENTER\n"); - - ahci_stop_engine(ap); - - /* clear D2H reception area to properly wait for D2H FIS */ - ata_tf_init(link->device, &tf); - tf.command = 0x80; - ata_tf_to_fis(&tf, 0, 0, d2h_fis); - - rc = sata_link_hardreset(link, timing, deadline, &online, - ahci_check_ready); - - ahci_start_engine(ap); - - if (online) - *class = ahci_dev_classify(ap); - - DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); - return rc; -} - static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { @@ -2043,605 +588,12 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, return rc; } -static void ahci_postreset(struct ata_link *link, unsigned int *class) -{ - struct ata_port *ap = link->ap; - void __iomem *port_mmio = ahci_port_base(ap); - u32 new_tmp, tmp; - - ata_std_postreset(link, class); - - /* Make sure port's ATAPI bit is set appropriately */ - new_tmp = tmp = readl(port_mmio + PORT_CMD); - if (*class == ATA_DEV_ATAPI) - new_tmp |= PORT_CMD_ATAPI; - else - new_tmp &= ~PORT_CMD_ATAPI; - if (new_tmp != tmp) { - writel(new_tmp, port_mmio + PORT_CMD); - readl(port_mmio + PORT_CMD); /* flush */ - } -} - -static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) -{ - struct scatterlist *sg; - struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; - unsigned int si; - - VPRINTK("ENTER\n"); - - /* - * Next, the S/G list. 
- */ - for_each_sg(qc->sg, sg, qc->n_elem, si) { - dma_addr_t addr = sg_dma_address(sg); - u32 sg_len = sg_dma_len(sg); - - ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff); - ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16); - ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1); - } - - return si; -} - -static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - struct ahci_port_priv *pp = ap->private_data; - - if (!sata_pmp_attached(ap) || pp->fbs_enabled) - return ata_std_qc_defer(qc); - else - return sata_pmp_qc_defer_cmd_switch(qc); -} - -static void ahci_qc_prep(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - struct ahci_port_priv *pp = ap->private_data; - int is_atapi = ata_is_atapi(qc->tf.protocol); - void *cmd_tbl; - u32 opts; - const u32 cmd_fis_len = 5; /* five dwords */ - unsigned int n_elem; - - /* - * Fill in command table information. First, the header, - * a SATA Register - Host to Device command FIS. - */ - cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; - - ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); - if (is_atapi) { - memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); - memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); - } - - n_elem = 0; - if (qc->flags & ATA_QCFLAG_DMAMAP) - n_elem = ahci_fill_sg(qc, cmd_tbl); - - /* - * Fill in command slot information. - */ - opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12); - if (qc->tf.flags & ATA_TFLAG_WRITE) - opts |= AHCI_CMD_WRITE; - if (is_atapi) - opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; - - ahci_fill_cmd_slot(pp, qc->tag, opts); -} - -static void ahci_fbs_dec_intr(struct ata_port *ap) -{ - struct ahci_port_priv *pp = ap->private_data; - void __iomem *port_mmio = ahci_port_base(ap); - u32 fbs = readl(port_mmio + PORT_FBS); - int retries = 3; - - DPRINTK("ENTER\n"); - BUG_ON(!pp->fbs_enabled); - - /* time to wait for DEC is not specified by AHCI spec, - * add a retry loop for safety. 
- */ - writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS); - fbs = readl(port_mmio + PORT_FBS); - while ((fbs & PORT_FBS_DEC) && retries--) { - udelay(1); - fbs = readl(port_mmio + PORT_FBS); - } - - if (fbs & PORT_FBS_DEC) - dev_printk(KERN_ERR, ap->host->dev, - "failed to clear device error\n"); -} - -static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) -{ - struct ahci_host_priv *hpriv = ap->host->private_data; - struct ahci_port_priv *pp = ap->private_data; - struct ata_eh_info *host_ehi = &ap->link.eh_info; - struct ata_link *link = NULL; - struct ata_queued_cmd *active_qc; - struct ata_eh_info *active_ehi; - bool fbs_need_dec = false; - u32 serror; - - /* determine active link with error */ - if (pp->fbs_enabled) { - void __iomem *port_mmio = ahci_port_base(ap); - u32 fbs = readl(port_mmio + PORT_FBS); - int pmp = fbs >> PORT_FBS_DWE_OFFSET; - - if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) && - ata_link_online(&ap->pmp_link[pmp])) { - link = &ap->pmp_link[pmp]; - fbs_need_dec = true; - } - - } else - ata_for_each_link(link, ap, EDGE) - if (ata_link_active(link)) - break; - - if (!link) - link = &ap->link; - - active_qc = ata_qc_from_tag(ap, link->active_tag); - active_ehi = &link->eh_info; - - /* record irq stat */ - ata_ehi_clear_desc(host_ehi); - ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); - - /* AHCI needs SError cleared; otherwise, it might lock up */ - ahci_scr_read(&ap->link, SCR_ERROR, &serror); - ahci_scr_write(&ap->link, SCR_ERROR, serror); - host_ehi->serror |= serror; - - /* some controllers set IRQ_IF_ERR on device errors, ignore it */ - if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR) - irq_stat &= ~PORT_IRQ_IF_ERR; - - if (irq_stat & PORT_IRQ_TF_ERR) { - /* If qc is active, charge it; otherwise, the active - * link. There's no active qc on NCQ errors. It will - * be determined by EH by reading log page 10h. - */ - if (active_qc) - active_qc->err_mask |= AC_ERR_DEV; - else - active_ehi->err_mask |= AC_ERR_DEV; - - if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL) - host_ehi->serror &= ~SERR_INTERNAL; - } - - if (irq_stat & PORT_IRQ_UNK_FIS) { - u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); - - active_ehi->err_mask |= AC_ERR_HSM; - active_ehi->action |= ATA_EH_RESET; - ata_ehi_push_desc(active_ehi, - "unknown FIS %08x %08x %08x %08x" , - unk[0], unk[1], unk[2], unk[3]); - } - - if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) { - active_ehi->err_mask |= AC_ERR_HSM; - active_ehi->action |= ATA_EH_RESET; - ata_ehi_push_desc(active_ehi, "incorrect PMP"); - } - - if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { - host_ehi->err_mask |= AC_ERR_HOST_BUS; - host_ehi->action |= ATA_EH_RESET; - ata_ehi_push_desc(host_ehi, "host bus error"); - } - - if (irq_stat & PORT_IRQ_IF_ERR) { - if (fbs_need_dec) - active_ehi->err_mask |= AC_ERR_DEV; - else { - host_ehi->err_mask |= AC_ERR_ATA_BUS; - host_ehi->action |= ATA_EH_RESET; - } - - ata_ehi_push_desc(host_ehi, "interface fatal error"); - } - - if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { - ata_ehi_hotplugged(host_ehi); - ata_ehi_push_desc(host_ehi, "%s", - irq_stat & PORT_IRQ_CONNECT ? 
- "connection status changed" : "PHY RDY changed"); - } - - /* okay, let's hand over to EH */ - - if (irq_stat & PORT_IRQ_FREEZE) - ata_port_freeze(ap); - else if (fbs_need_dec) { - ata_link_abort(link); - ahci_fbs_dec_intr(ap); - } else - ata_port_abort(ap); -} - -static void ahci_port_intr(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - struct ata_eh_info *ehi = &ap->link.eh_info; - struct ahci_port_priv *pp = ap->private_data; - struct ahci_host_priv *hpriv = ap->host->private_data; - int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); - u32 status, qc_active = 0; - int rc; - - status = readl(port_mmio + PORT_IRQ_STAT); - writel(status, port_mmio + PORT_IRQ_STAT); - - /* ignore BAD_PMP while resetting */ - if (unlikely(resetting)) - status &= ~PORT_IRQ_BAD_PMP; - - /* If we are getting PhyRdy, this is - * just a power state change, we should - * clear out this, plus the PhyRdy/Comm - * Wake bits from Serror - */ - if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) && - (status & PORT_IRQ_PHYRDY)) { - status &= ~PORT_IRQ_PHYRDY; - ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); - } - - if (unlikely(status & PORT_IRQ_ERROR)) { - ahci_error_intr(ap, status); - return; - } - - if (status & PORT_IRQ_SDB_FIS) { - /* If SNotification is available, leave notification - * handling to sata_async_notification(). If not, - * emulate it by snooping SDB FIS RX area. - * - * Snooping FIS RX area is probably cheaper than - * poking SNotification but some constrollers which - * implement SNotification, ICH9 for example, don't - * store AN SDB FIS into receive area. - */ - if (hpriv->cap & HOST_CAP_SNTF) - sata_async_notification(ap); - else { - /* If the 'N' bit in word 0 of the FIS is set, - * we just received asynchronous notification. - * Tell libata about it. - * - * Lack of SNotification should not appear in - * ahci 1.2, so the workaround is unnecessary - * when FBS is enabled. - */ - if (pp->fbs_enabled) - WARN_ON_ONCE(1); - else { - const __le32 *f = pp->rx_fis + RX_FIS_SDB; - u32 f0 = le32_to_cpu(f[0]); - if (f0 & (1 << 15)) - sata_async_notification(ap); - } - } - } - - /* pp->active_link is not reliable once FBS is enabled, both - * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because - * NCQ and non-NCQ commands may be in flight at the same time. - */ - if (pp->fbs_enabled) { - if (ap->qc_active) { - qc_active = readl(port_mmio + PORT_SCR_ACT); - qc_active |= readl(port_mmio + PORT_CMD_ISSUE); - } - } else { - /* pp->active_link is valid iff any command is in flight */ - if (ap->qc_active && pp->active_link->sactive) - qc_active = readl(port_mmio + PORT_SCR_ACT); - else - qc_active = readl(port_mmio + PORT_CMD_ISSUE); - } - - rc = ata_qc_complete_multiple(ap, qc_active); - - /* while resetting, invalid completions are expected */ - if (unlikely(rc < 0 && !resetting)) { - ehi->err_mask |= AC_ERR_HSM; - ehi->action |= ATA_EH_RESET; - ata_port_freeze(ap); - } -} - -static irqreturn_t ahci_interrupt(int irq, void *dev_instance) -{ - struct ata_host *host = dev_instance; - struct ahci_host_priv *hpriv; - unsigned int i, handled = 0; - void __iomem *mmio; - u32 irq_stat, irq_masked; - - VPRINTK("ENTER\n"); - - hpriv = host->private_data; - mmio = host->iomap[AHCI_PCI_BAR]; - - /* sigh. 
0xffffffff is a valid return from h/w */ - irq_stat = readl(mmio + HOST_IRQ_STAT); - if (!irq_stat) - return IRQ_NONE; - - irq_masked = irq_stat & hpriv->port_map; - - spin_lock(&host->lock); - - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; - - if (!(irq_masked & (1 << i))) - continue; - - ap = host->ports[i]; - if (ap) { - ahci_port_intr(ap); - VPRINTK("port %u\n", i); - } else { - VPRINTK("port %u (no irq)\n", i); - if (ata_ratelimit()) - dev_printk(KERN_WARNING, host->dev, - "interrupt on disabled port %u\n", i); - } - - handled = 1; - } - - /* HOST_IRQ_STAT behaves as level triggered latch meaning that - * it should be cleared after all the port events are cleared; - * otherwise, it will raise a spurious interrupt after each - * valid one. Please read section 10.6.2 of ahci 1.1 for more - * information. - * - * Also, use the unmasked value to clear interrupt as spurious - * pending event on a dummy port might cause screaming IRQ. - */ - writel(irq_stat, mmio + HOST_IRQ_STAT); - - spin_unlock(&host->lock); - - VPRINTK("EXIT\n"); - - return IRQ_RETVAL(handled); -} - -static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - void __iomem *port_mmio = ahci_port_base(ap); - struct ahci_port_priv *pp = ap->private_data; - - /* Keep track of the currently active link. It will be used - * in completion path to determine whether NCQ phase is in - * progress. - */ - pp->active_link = qc->dev->link; - - if (qc->tf.protocol == ATA_PROT_NCQ) - writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); - - if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) { - u32 fbs = readl(port_mmio + PORT_FBS); - fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC); - fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET; - writel(fbs, port_mmio + PORT_FBS); - pp->fbs_last_dev = qc->dev->link->pmp; - } - - writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); - - ahci_sw_activity(qc->dev->link); - - return 0; -} - -static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) -{ - struct ahci_port_priv *pp = qc->ap->private_data; - u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; - - if (pp->fbs_enabled) - d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; - - ata_tf_from_fis(d2h_fis, &qc->result_tf); - return true; -} - -static void ahci_freeze(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - - /* turn IRQ off */ - writel(0, port_mmio + PORT_IRQ_MASK); -} - -static void ahci_thaw(struct ata_port *ap) -{ - void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; - void __iomem *port_mmio = ahci_port_base(ap); - u32 tmp; - struct ahci_port_priv *pp = ap->private_data; - - /* clear IRQ */ - tmp = readl(port_mmio + PORT_IRQ_STAT); - writel(tmp, port_mmio + PORT_IRQ_STAT); - writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); - - /* turn IRQ back on */ - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); -} - -static void ahci_error_handler(struct ata_port *ap) -{ - if (!(ap->pflags & ATA_PFLAG_FROZEN)) { - /* restart engine */ - ahci_stop_engine(ap); - ahci_start_engine(ap); - } - - sata_pmp_error_handler(ap); -} - -static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - - /* make DMA engine forget about the failed command */ - if (qc->flags & ATA_QCFLAG_FAILED) - ahci_kick_engine(ap); -} - -static void ahci_enable_fbs(struct ata_port *ap) -{ - struct ahci_port_priv *pp = ap->private_data; - void __iomem *port_mmio = ahci_port_base(ap); - u32 fbs; - int rc; - - if (!pp->fbs_supported) - return; - - fbs = readl(port_mmio + PORT_FBS); - 
if (fbs & PORT_FBS_EN) { - pp->fbs_enabled = true; - pp->fbs_last_dev = -1; /* initialization */ - return; - } - - rc = ahci_stop_engine(ap); - if (rc) - return; - - writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS); - fbs = readl(port_mmio + PORT_FBS); - if (fbs & PORT_FBS_EN) { - dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n"); - pp->fbs_enabled = true; - pp->fbs_last_dev = -1; /* initialization */ - } else - dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n"); - - ahci_start_engine(ap); -} - -static void ahci_disable_fbs(struct ata_port *ap) -{ - struct ahci_port_priv *pp = ap->private_data; - void __iomem *port_mmio = ahci_port_base(ap); - u32 fbs; - int rc; - - if (!pp->fbs_supported) - return; - - fbs = readl(port_mmio + PORT_FBS); - if ((fbs & PORT_FBS_EN) == 0) { - pp->fbs_enabled = false; - return; - } - - rc = ahci_stop_engine(ap); - if (rc) - return; - - writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS); - fbs = readl(port_mmio + PORT_FBS); - if (fbs & PORT_FBS_EN) - dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n"); - else { - dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n"); - pp->fbs_enabled = false; - } - - ahci_start_engine(ap); -} - -static void ahci_pmp_attach(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - struct ahci_port_priv *pp = ap->private_data; - u32 cmd; - - cmd = readl(port_mmio + PORT_CMD); - cmd |= PORT_CMD_PMP; - writel(cmd, port_mmio + PORT_CMD); - - ahci_enable_fbs(ap); - - pp->intr_mask |= PORT_IRQ_BAD_PMP; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); -} - -static void ahci_pmp_detach(struct ata_port *ap) -{ - void __iomem *port_mmio = ahci_port_base(ap); - struct ahci_port_priv *pp = ap->private_data; - u32 cmd; - - ahci_disable_fbs(ap); - - cmd = readl(port_mmio + PORT_CMD); - cmd &= ~PORT_CMD_PMP; - writel(cmd, port_mmio + PORT_CMD); - - pp->intr_mask &= ~PORT_IRQ_BAD_PMP; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); -} - -static int ahci_port_resume(struct ata_port *ap) -{ - ahci_power_up(ap); - ahci_start_port(ap); - - if (sata_pmp_attached(ap)) - ahci_pmp_attach(ap); - else - ahci_pmp_detach(ap); - - return 0; -} - #ifdef CONFIG_PM -static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) -{ - const char *emsg = NULL; - int rc; - - rc = ahci_deinit_port(ap, &emsg); - if (rc == 0) - ahci_power_down(ap); - else { - ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc); - ahci_start_port(ap); - } - - return rc; -} - static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = dev_get_drvdata(&pdev->dev); struct ahci_host_priv *hpriv = host->private_data; - void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; + void __iomem *mmio = hpriv->mmio; u32 ctl; if (mesg.event & PM_EVENT_SUSPEND && @@ -2675,11 +627,11 @@ static int ahci_pci_device_resume(struct pci_dev *pdev) return rc; if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; - ahci_init_controller(host); + ahci_pci_init_controller(host); } ata_host_resume(host); @@ -2688,92 +640,6 @@ static int ahci_pci_device_resume(struct pci_dev *pdev) } #endif -static int ahci_port_start(struct ata_port *ap) -{ - struct ahci_host_priv *hpriv = ap->host->private_data; - struct device *dev = ap->host->dev; - struct ahci_port_priv *pp; - void *mem; - dma_addr_t mem_dma; - size_t dma_sz, rx_fis_sz; - - pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); - if (!pp) - return -ENOMEM; - - /* 
check FBS capability */ - if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) { - void __iomem *port_mmio = ahci_port_base(ap); - u32 cmd = readl(port_mmio + PORT_CMD); - if (cmd & PORT_CMD_FBSCP) - pp->fbs_supported = true; - else - dev_printk(KERN_WARNING, dev, - "The port is not capable of FBS\n"); - } - - if (pp->fbs_supported) { - dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ; - rx_fis_sz = AHCI_RX_FIS_SZ * 16; - } else { - dma_sz = AHCI_PORT_PRIV_DMA_SZ; - rx_fis_sz = AHCI_RX_FIS_SZ; - } - - mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL); - if (!mem) - return -ENOMEM; - memset(mem, 0, dma_sz); - - /* - * First item in chunk of DMA memory: 32-slot command table, - * 32 bytes each in size - */ - pp->cmd_slot = mem; - pp->cmd_slot_dma = mem_dma; - - mem += AHCI_CMD_SLOT_SZ; - mem_dma += AHCI_CMD_SLOT_SZ; - - /* - * Second item: Received-FIS area - */ - pp->rx_fis = mem; - pp->rx_fis_dma = mem_dma; - - mem += rx_fis_sz; - mem_dma += rx_fis_sz; - - /* - * Third item: data area for storing a single command - * and its scatter-gather table - */ - pp->cmd_tbl = mem; - pp->cmd_tbl_dma = mem_dma; - - /* - * Save off initial list of interrupts to be enabled. - * This could be changed later - */ - pp->intr_mask = DEF_PORT_IRQ; - - ap->private_data = pp; - - /* engage engines, captain */ - return ahci_port_resume(ap); -} - -static void ahci_port_stop(struct ata_port *ap) -{ - const char *emsg = NULL; - int rc; - - /* de-initialize port */ - rc = ahci_deinit_port(ap, &emsg); - if (rc) - ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc); -} - static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) { int rc; @@ -2806,31 +672,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) return 0; } -static void ahci_print_info(struct ata_host *host) +static void ahci_pci_print_info(struct ata_host *host) { - struct ahci_host_priv *hpriv = host->private_data; struct pci_dev *pdev = to_pci_dev(host->dev); - void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; - u32 vers, cap, cap2, impl, speed; - const char *speed_s; u16 cc; const char *scc_s; - vers = readl(mmio + HOST_VERSION); - cap = hpriv->cap; - cap2 = hpriv->cap2; - impl = hpriv->port_map; - - speed = (cap >> 20) & 0xf; - if (speed == 1) - speed_s = "1.5"; - else if (speed == 2) - speed_s = "3"; - else if (speed == 3) - speed_s = "6"; - else - speed_s = "?"; - pci_read_config_word(pdev, 0x0a, &cc); if (cc == PCI_CLASS_STORAGE_IDE) scc_s = "IDE"; @@ -2841,50 +688,7 @@ static void ahci_print_info(struct ata_host *host) else scc_s = "unknown"; - dev_printk(KERN_INFO, &pdev->dev, - "AHCI %02x%02x.%02x%02x " - "%u slots %u ports %s Gbps 0x%x impl %s mode\n" - , - - (vers >> 24) & 0xff, - (vers >> 16) & 0xff, - (vers >> 8) & 0xff, - vers & 0xff, - - ((cap >> 8) & 0x1f) + 1, - (cap & 0x1f) + 1, - speed_s, - impl, - scc_s); - - dev_printk(KERN_INFO, &pdev->dev, - "flags: " - "%s%s%s%s%s%s%s" - "%s%s%s%s%s%s%s" - "%s%s%s%s%s%s\n" - , - - cap & HOST_CAP_64 ? "64bit " : "", - cap & HOST_CAP_NCQ ? "ncq " : "", - cap & HOST_CAP_SNTF ? "sntf " : "", - cap & HOST_CAP_MPS ? "ilck " : "", - cap & HOST_CAP_SSS ? "stag " : "", - cap & HOST_CAP_ALPM ? "pm " : "", - cap & HOST_CAP_LED ? "led " : "", - cap & HOST_CAP_CLO ? "clo " : "", - cap & HOST_CAP_ONLY ? "only " : "", - cap & HOST_CAP_PMP ? "pmp " : "", - cap & HOST_CAP_FBS ? "fbs " : "", - cap & HOST_CAP_PIO_MULTI ? "pio " : "", - cap & HOST_CAP_SSC ? "slum " : "", - cap & HOST_CAP_PART ? "part " : "", - cap & HOST_CAP_CCC ? "ccc " : "", - cap & HOST_CAP_EMS ? 
"ems " : "", - cap & HOST_CAP_SXS ? "sxs " : "", - cap2 & HOST_CAP2_APST ? "apst " : "", - cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "", - cap2 & HOST_CAP2_BOH ? "boh " : "" - ); + ahci_print_info(host, scc_s); } /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is @@ -3249,6 +1053,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) return -ENODEV; + /* + * For some reason, MCP89 on MacBook 7,1 doesn't work with + * ahci, use ata_generic instead. + */ + if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && + pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA && + pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && + pdev->subsystem_device == 0xcb89) + return -ENODEV; + /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. * At the moment, we can only use the AHCI mode. Let the users know * that for SAS drives they're out of luck. @@ -3308,41 +1122,28 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) pci_intx(pdev, 1); + hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; + /* save initial config */ - ahci_save_initial_config(pdev, hpriv); + ahci_pci_save_initial_config(pdev, hpriv); /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) { pi.flags |= ATA_FLAG_NCQ; - /* Auto-activate optimization is supposed to be supported on - all AHCI controllers indicating NCQ support, but it seems - to be broken at least on some NVIDIA MCP79 chipsets. - Until we get info on which NVIDIA chipsets don't have this - issue, if any, disable AA on all NVIDIA AHCIs. */ - if (pdev->vendor != PCI_VENDOR_ID_NVIDIA) + /* + * Auto-activate optimization is supposed to be + * supported on all AHCI controllers indicating NCQ + * capability, but it seems to be broken on some + * chipsets including NVIDIAs. 
+ */ + if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA)) pi.flags |= ATA_FLAG_FPDMA_AA; } if (hpriv->cap & HOST_CAP_PMP) pi.flags |= ATA_FLAG_PMP; - if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) { - u8 messages; - void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; - u32 em_loc = readl(mmio + HOST_EM_LOC); - u32 em_ctl = readl(mmio + HOST_EM_CTL); - - messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; - - /* we only support LED message type right now */ - if ((messages & 0x01) && (ahci_em_messages == 1)) { - /* store em_loc */ - hpriv->em_loc = ((em_loc >> 16) * 4); - pi.flags |= ATA_FLAG_EM; - if (!(em_ctl & EM_CTL_ALHD)) - pi.flags |= ATA_FLAG_SW_ACTIVITY; - } - } + ahci_set_em_messages(hpriv, &pi); if (ahci_broken_system_poweroff(pdev)) { pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN; @@ -3372,7 +1173,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); if (!host) return -ENOMEM; - host->iomap = pcim_iomap_table(pdev); host->private_data = hpriv; if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) @@ -3395,7 +1195,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* set enclosure management message type */ if (ap->flags & ATA_FLAG_EM) - ap->em_message_type = ahci_em_messages; + ap->em_message_type = hpriv->em_msg_type; /* disabled/not-implemented port */ @@ -3414,12 +1214,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; - ahci_init_controller(host); - ahci_print_info(host); + ahci_pci_init_controller(host); + ahci_pci_print_info(host); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h new file mode 100644 index 00000000000..7113c572447 --- /dev/null +++ b/drivers/ata/ahci.h @@ -0,0 +1,343 @@ +/* + * ahci.h - Common AHCI SATA definitions and declarations + * + * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Please ALWAYS copy linux-ide@vger.kernel.org + * on emails. + * + * Copyright 2004-2005 Red Hat, Inc. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/DocBook/libata.* + * + * AHCI hardware documentation: + * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf + * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf + * + */ + +#ifndef _AHCI_H +#define _AHCI_H + +#include <linux/libata.h> + +/* Enclosure Management Control */ +#define EM_CTRL_MSG_TYPE 0x000f0000 + +/* Enclosure Management LED Message Type */ +#define EM_MSG_LED_HBA_PORT 0x0000000f +#define EM_MSG_LED_PMP_SLOT 0x0000ff00 +#define EM_MSG_LED_VALUE 0xffff0000 +#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000 +#define EM_MSG_LED_VALUE_OFF 0xfff80000 +#define EM_MSG_LED_VALUE_ON 0x00010000 + +enum { + AHCI_MAX_PORTS = 32, + AHCI_MAX_SG = 168, /* hardware max is 64K */ + AHCI_DMA_BOUNDARY = 0xffffffff, + AHCI_MAX_CMDS = 32, + AHCI_CMD_SZ = 32, + AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ, + AHCI_RX_FIS_SZ = 256, + AHCI_CMD_TBL_CDB = 0x40, + AHCI_CMD_TBL_HDR_SZ = 0x80, + AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16), + AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS, + AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + + AHCI_RX_FIS_SZ, + AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ + + AHCI_CMD_TBL_AR_SZ + + (AHCI_RX_FIS_SZ * 16), + AHCI_IRQ_ON_SG = (1 << 31), + AHCI_CMD_ATAPI = (1 << 5), + AHCI_CMD_WRITE = (1 << 6), + AHCI_CMD_PREFETCH = (1 << 7), + AHCI_CMD_RESET = (1 << 8), + AHCI_CMD_CLR_BUSY = (1 << 10), + + RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ + RX_FIS_SDB = 0x58, /* offset of SDB FIS data */ + RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ + + /* global controller registers */ + HOST_CAP = 0x00, /* host capabilities */ + HOST_CTL = 0x04, /* global host control */ + HOST_IRQ_STAT = 0x08, /* interrupt status */ + HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */ + HOST_VERSION = 0x10, /* AHCI spec. 
version compliancy */ + HOST_EM_LOC = 0x1c, /* Enclosure Management location */ + HOST_EM_CTL = 0x20, /* Enclosure Management Control */ + HOST_CAP2 = 0x24, /* host capabilities, extended */ + + /* HOST_CTL bits */ + HOST_RESET = (1 << 0), /* reset controller; self-clear */ + HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ + HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ + + /* HOST_CAP bits */ + HOST_CAP_SXS = (1 << 5), /* Supports External SATA */ + HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ + HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */ + HOST_CAP_PART = (1 << 13), /* Partial state capable */ + HOST_CAP_SSC = (1 << 14), /* Slumber state capable */ + HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */ + HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */ + HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ + HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */ + HOST_CAP_CLO = (1 << 24), /* Command List Override support */ + HOST_CAP_LED = (1 << 25), /* Supports activity LED */ + HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ + HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ + HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */ + HOST_CAP_SNTF = (1 << 29), /* SNotification register */ + HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ + HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ + + /* HOST_CAP2 bits */ + HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */ + HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */ + HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */ + + /* registers for each SATA port */ + PORT_LST_ADDR = 0x00, /* command list DMA addr */ + PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */ + PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */ + PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */ + PORT_IRQ_STAT = 0x10, /* interrupt status */ + PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */ + PORT_CMD = 0x18, /* port command */ + PORT_TFDATA = 0x20, /* taskfile data */ + PORT_SIG = 0x24, /* device TF signature */ + PORT_CMD_ISSUE = 0x38, /* command issue */ + PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */ + PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */ + PORT_SCR_ERR = 0x30, /* SATA phy register: SError */ + PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */ + PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */ + PORT_FBS = 0x40, /* FIS-based Switching */ + + /* PORT_IRQ_{STAT,MASK} bits */ + PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ + PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ + PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ + PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ + PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ + PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ + PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ + PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ + + PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ + PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */ + PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ + PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ + PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ + PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */ + PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ + PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ + PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ + + PORT_IRQ_FREEZE = 
PORT_IRQ_HBUS_ERR | + PORT_IRQ_IF_ERR | + PORT_IRQ_CONNECT | + PORT_IRQ_PHYRDY | + PORT_IRQ_UNK_FIS | + PORT_IRQ_BAD_PMP, + PORT_IRQ_ERROR = PORT_IRQ_FREEZE | + PORT_IRQ_TF_ERR | + PORT_IRQ_HBUS_DATA_ERR, + DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | + PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | + PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, + + /* PORT_CMD bits */ + PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ + PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ + PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ + PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */ + PORT_CMD_PMP = (1 << 17), /* PMP attached */ + PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ + PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ + PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ + PORT_CMD_CLO = (1 << 3), /* Command list override */ + PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ + PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ + PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ + + PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ + PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ + PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ + PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ + + PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */ + PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */ + PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */ + PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */ + PORT_FBS_SDE = (1 << 2), /* FBS single device error */ + PORT_FBS_DEC = (1 << 1), /* FBS device error clear */ + PORT_FBS_EN = (1 << 0), /* Enable FBS */ + + /* hpriv->flags bits */ + AHCI_HFLAG_NO_NCQ = (1 << 0), + AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ + AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ + AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ + AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ + AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ + AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ + AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ + AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ + AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ + AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as + link offline */ + AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ + AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ + + /* ap->flags bits */ + + AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | + ATA_FLAG_ACPI_SATA | ATA_FLAG_AN | + ATA_FLAG_IPM, + + ICH_MAP = 0x90, /* ICH MAP register */ + + /* em constants */ + EM_MAX_SLOTS = 8, + EM_MAX_RETRY = 5, + + /* em_ctl bits */ + EM_CTL_RST = (1 << 9), /* Reset */ + EM_CTL_TM = (1 << 8), /* Transmit Message */ + EM_CTL_MR = (1 << 0), /* Message Recieved */ + EM_CTL_ALHD = (1 << 26), /* Activity LED */ + EM_CTL_XMT = (1 << 25), /* Transmit Only */ + EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ + + /* em message type */ + EM_MSG_TYPE_LED = (1 << 0), /* LED */ + EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */ + EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */ + EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */ +}; + +struct ahci_cmd_hdr { + __le32 opts; + __le32 status; + __le32 tbl_addr; + __le32 tbl_addr_hi; + __le32 reserved[4]; +}; + +struct ahci_sg { + __le32 addr; + __le32 addr_hi; + __le32 reserved; + __le32 flags_size; +}; 
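The sizing constants near the top of this header, together with the ahci_cmd_hdr and ahci_sg layouts just above, fix the geometry of each port's private DMA area: 32 command headers, one receive-FIS area, and one command table (H2D FIS + ATAPI CDB + up to 168 S/G entries) per command slot. A minimal standalone sketch of that arithmetic follows; it is not part of the patch, and the ordering of the three regions inside the area is just one consistent choice made for illustration.

/* Userspace illustration only: how the AHCI_* constants above size one
 * port's private DMA area.  Compile with any C compiler and run. */
#include <assert.h>
#include <stdio.h>

#define AHCI_MAX_CMDS        32u
#define AHCI_CMD_SZ          32u                    /* one ahci_cmd_hdr */
#define AHCI_CMD_SLOT_SZ     (AHCI_MAX_CMDS * AHCI_CMD_SZ)
#define AHCI_RX_FIS_SZ       256u
#define AHCI_MAX_SG          168u
#define AHCI_CMD_TBL_HDR_SZ  0x80u                  /* CFIS + ACMD area */
#define AHCI_CMD_TBL_SZ      (AHCI_CMD_TBL_HDR_SZ + AHCI_MAX_SG * 16u)
#define AHCI_CMD_TBL_AR_SZ   (AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS)

int main(void)
{
        /* command list, then receive-FIS area, then one table per tag */
        unsigned int cmd_slot_off = 0;
        unsigned int rx_fis_off   = cmd_slot_off + AHCI_CMD_SLOT_SZ;
        unsigned int cmd_tbl_off  = rx_fis_off + AHCI_RX_FIS_SZ;
        unsigned int total        = cmd_tbl_off + AHCI_CMD_TBL_AR_SZ;
        unsigned int tag;

        /* an ahci_cmd_hdr is 8 dwords, an ahci_sg entry is 4 dwords */
        assert(AHCI_CMD_SZ == 8 * 4 && AHCI_CMD_TBL_SZ == 0x80 + 168 * 16);

        printf("command list at 0x%04x, %u bytes\n", cmd_slot_off, AHCI_CMD_SLOT_SZ);
        printf("rx FIS area  at 0x%04x, %u bytes\n", rx_fis_off, AHCI_RX_FIS_SZ);
        printf("cmd tables   at 0x%04x, %u bytes (%u bytes in total)\n",
               cmd_tbl_off, AHCI_CMD_TBL_AR_SZ, total);

        for (tag = 0; tag < 3; tag++)
                printf("  tag %u: table at 0x%04x, S/G list at +0x%x\n",
                       tag, cmd_tbl_off + tag * AHCI_CMD_TBL_SZ,
                       AHCI_CMD_TBL_HDR_SZ);
        return 0;
}

The AHCI_PORT_PRIV_FBS_DMA_SZ variant simply grows the receive-FIS area to 16 copies, one per port-multiplier device, which is what FIS-based switching needs.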
+ +struct ahci_em_priv { + enum sw_activity blink_policy; + struct timer_list timer; + unsigned long saved_activity; + unsigned long activity; + unsigned long led_state; +}; + +struct ahci_port_priv { + struct ata_link *active_link; + struct ahci_cmd_hdr *cmd_slot; + dma_addr_t cmd_slot_dma; + void *cmd_tbl; + dma_addr_t cmd_tbl_dma; + void *rx_fis; + dma_addr_t rx_fis_dma; + /* for NCQ spurious interrupt analysis */ + unsigned int ncq_saw_d2h:1; + unsigned int ncq_saw_dmas:1; + unsigned int ncq_saw_sdb:1; + u32 intr_mask; /* interrupts to enable */ + bool fbs_supported; /* set iff FBS is supported */ + bool fbs_enabled; /* set iff FBS is enabled */ + int fbs_last_dev; /* save FBS.DEV of last FIS */ + /* enclosure management info per PM slot */ + struct ahci_em_priv em_priv[EM_MAX_SLOTS]; +}; + +struct ahci_host_priv { + void __iomem * mmio; /* bus-independant mem map */ + unsigned int flags; /* AHCI_HFLAG_* */ + u32 cap; /* cap to use */ + u32 cap2; /* cap2 to use */ + u32 port_map; /* port map to use */ + u32 saved_cap; /* saved initial cap */ + u32 saved_cap2; /* saved initial cap2 */ + u32 saved_port_map; /* saved initial port_map */ + u32 em_loc; /* enclosure management location */ + u32 em_buf_sz; /* EM buffer size in byte */ + u32 em_msg_type; /* EM message type */ +}; + +extern int ahci_ignore_sss; + +extern struct scsi_host_template ahci_sht; +extern struct ata_port_operations ahci_ops; + +void ahci_save_initial_config(struct device *dev, + struct ahci_host_priv *hpriv, + unsigned int force_port_map, + unsigned int mask_port_map); +void ahci_init_controller(struct ata_host *host); +int ahci_reset_controller(struct ata_host *host); + +int ahci_do_softreset(struct ata_link *link, unsigned int *class, + int pmp, unsigned long deadline, + int (*check_ready)(struct ata_link *link)); + +int ahci_stop_engine(struct ata_port *ap); +void ahci_start_engine(struct ata_port *ap); +int ahci_check_ready(struct ata_link *link); +int ahci_kick_engine(struct ata_port *ap); +void ahci_set_em_messages(struct ahci_host_priv *hpriv, + struct ata_port_info *pi); +int ahci_reset_em(struct ata_host *host); +irqreturn_t ahci_interrupt(int irq, void *dev_instance); +void ahci_print_info(struct ata_host *host, const char *scc_s); + +static inline void __iomem *__ahci_port_base(struct ata_host *host, + unsigned int port_no) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + + return mmio + 0x100 + (port_no * 0x80); +} + +static inline void __iomem *ahci_port_base(struct ata_port *ap) +{ + return __ahci_port_base(ap->host, ap->port_no); +} + +static inline int ahci_nr_ports(u32 cap) +{ + return (cap & 0x1f) + 1; +} + +#endif /* _AHCI_H */ diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c new file mode 100644 index 00000000000..5e11b160f24 --- /dev/null +++ b/drivers/ata/ahci_platform.c @@ -0,0 +1,192 @@ +/* + * AHCI SATA platform driver + * + * Copyright 2004-2005 Red Hat, Inc. + * Jeff Garzik <jgarzik@pobox.com> + * Copyright 2010 MontaVista Software, LLC. + * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
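Before the platform glue below, a standalone illustration (not part of the patch) of the MMIO layout that __ahci_port_base() and ahci_nr_ports() at the end of ahci.h encode: global registers occupy the first 0x100 bytes of the ABAR, followed by one 0x80-byte register block per port, and CAP.NP is a zero-based port count. The CAP value used here is made up.

/* Userspace illustration of the AHCI MMIO layout assumed by the
 * helpers above; no hardware access, just the offset arithmetic. */
#include <stdio.h>
#include <stdint.h>

#define AHCI_PORT_REGS_START 0x100
#define AHCI_PORT_REGS_SZ    0x80
#define PORT_SCR_STAT        0x28       /* PxSSTS within a port block */

static unsigned long port_base(unsigned int port_no)
{
        return AHCI_PORT_REGS_START + port_no * AHCI_PORT_REGS_SZ;
}

static int nr_ports(uint32_t cap)
{
        return (cap & 0x1f) + 1;        /* CAP.NP is zero-based */
}

int main(void)
{
        uint32_t cap = 0x40141f05;      /* hypothetical: CAP.NP = 5 -> 6 ports */
        int i;

        printf("CAP.NP reports %d ports\n", nr_ports(cap));
        for (i = 0; i < nr_ports(cap); i++)
                printf("port %d regs at 0x%03lx, PxSSTS at 0x%03lx\n",
                       i, port_base(i), port_base(i) + PORT_SCR_STAT);
        return 0;
}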
+ */ + +#include <linux/kernel.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/libata.h> +#include <linux/ahci_platform.h> +#include "ahci.h" + +static int __init ahci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_platform_data *pdata = dev->platform_data; + struct ata_port_info pi = { + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }; + const struct ata_port_info *ppi[] = { &pi, NULL }; + struct ahci_host_priv *hpriv; + struct ata_host *host; + struct resource *mem; + int irq; + int n_ports; + int i; + int rc; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(dev, "no mmio space\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "no irq\n"); + return -EINVAL; + } + + if (pdata && pdata->init) { + rc = pdata->init(dev); + if (rc) + return rc; + } + + if (pdata && pdata->ata_port_info) + pi = *pdata->ata_port_info; + + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) { + rc = -ENOMEM; + goto err0; + } + + hpriv->flags |= (unsigned long)pi.private_data; + + hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem)); + if (!hpriv->mmio) { + dev_err(dev, "can't map %pR\n", mem); + rc = -ENOMEM; + goto err0; + } + + ahci_save_initial_config(dev, hpriv, + pdata ? pdata->force_port_map : 0, + pdata ? pdata->mask_port_map : 0); + + /* prepare host */ + if (hpriv->cap & HOST_CAP_NCQ) + pi.flags |= ATA_FLAG_NCQ; + + if (hpriv->cap & HOST_CAP_PMP) + pi.flags |= ATA_FLAG_PMP; + + ahci_set_em_messages(hpriv, &pi); + + /* CAP.NP sometimes indicate the index of the last enabled + * port, at other times, that of the last possible port, so + * determining the maximum port number requires looking at + * both CAP.NP and port_map. 
+ */ + n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); + + host = ata_host_alloc_pinfo(dev, ppi, n_ports); + if (!host) { + rc = -ENOMEM; + goto err0; + } + + host->private_data = hpriv; + + if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) + host->flags |= ATA_HOST_PARALLEL_SCAN; + else + printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); + + if (pi.flags & ATA_FLAG_EM) + ahci_reset_em(host); + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + ata_port_desc(ap, "mmio %pR", mem); + ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); + + /* set initial link pm policy */ + ap->pm_policy = NOT_AVAILABLE; + + /* set enclosure management message type */ + if (ap->flags & ATA_FLAG_EM) + ap->em_message_type = hpriv->em_msg_type; + + /* disabled/not-implemented port */ + if (!(hpriv->port_map & (1 << i))) + ap->ops = &ata_dummy_port_ops; + } + + rc = ahci_reset_controller(host); + if (rc) + goto err0; + + ahci_init_controller(host); + ahci_print_info(host, "platform"); + + rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, + &ahci_sht); + if (rc) + goto err0; + + return 0; +err0: + if (pdata && pdata->exit) + pdata->exit(dev); + return rc; +} + +static int __devexit ahci_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_platform_data *pdata = dev->platform_data; + struct ata_host *host = dev_get_drvdata(dev); + + ata_host_detach(host); + + if (pdata && pdata->exit) + pdata->exit(dev); + + return 0; +} + +static struct platform_driver ahci_driver = { + .probe = ahci_probe, + .remove = __devexit_p(ahci_remove), + .driver = { + .name = "ahci", + .owner = THIS_MODULE, + }, +}; + +static int __init ahci_init(void) +{ + return platform_driver_probe(&ahci_driver, ahci_probe); +} +module_init(ahci_init); + +static void __exit ahci_exit(void) +{ + platform_driver_unregister(&ahci_driver); +} +module_exit(ahci_exit); + +MODULE_DESCRIPTION("AHCI SATA platform driver"); +MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ahci"); diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index 33fb614f978..7107a6929de 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c @@ -32,6 +32,11 @@ * A generic parallel ATA driver using libata */ +enum { + ATA_GEN_CLASS_MATCH = (1 << 0), + ATA_GEN_FORCE_DMA = (1 << 1), +}; + /** * generic_set_mode - mode setting * @link: link to set up @@ -46,13 +51,17 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused) { struct ata_port *ap = link->ap; + const struct pci_device_id *id = ap->host->private_data; int dma_enabled = 0; struct ata_device *dev; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - /* Bits 5 and 6 indicate if DMA is active on master/slave */ - if (ap->ioaddr.bmdma_addr) + if (id->driver_data & ATA_GEN_FORCE_DMA) { + dma_enabled = 0xff; + } else if (ap->ioaddr.bmdma_addr) { + /* Bits 5 and 6 indicate if DMA is active on master/slave */ dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + } if (pdev->vendor == PCI_VENDOR_ID_CENATEK) dma_enabled = 0xFF; @@ -126,7 +135,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id const struct ata_port_info *ppi[] = { &info, NULL }; /* Don't use the generic entry unless instructed to do so */ - if (id->driver_data == 1 && all_generic_ide == 0) + if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0) return -ENODEV; /* Devices 
that need care */ @@ -155,7 +164,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id return rc; pcim_pin_device(dev); } - return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0); } static struct pci_device_id ata_generic[] = { @@ -167,7 +176,15 @@ static struct pci_device_id ata_generic[] = { { PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), }, { PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), }, - { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), }, + { PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), + .driver_data = ATA_GEN_FORCE_DMA }, + /* + * For some reason, MCP89 on MacBook 7,1 doesn't work with + * ahci, use ata_generic instead. + */ + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA, + PCI_VENDOR_ID_APPLE, 0xcb89, + .driver_data = ATA_GEN_FORCE_DMA }, #if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE) { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, @@ -175,7 +192,8 @@ static struct pci_device_id ata_generic[] = { { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, #endif /* Must come last. If you add entries adjust this table appropriately */ - { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1}, + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL), + .driver_data = ATA_GEN_CLASS_MATCH }, { 0, }, }; diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 83bc49fac9b..7409f98d2ae 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -43,7 +43,7 @@ * driver the list of errata that are relevant is below, going back to * PIIX4. Older device documentation is now a bit tricky to find. * - * The chipsets all follow very much the same design. The orginal Triton + * The chipsets all follow very much the same design. The original Triton * series chipsets do _not_ support independant device timings, but this * is fixed in Triton II. With the odd mobile exception the chips then * change little except in gaining more modes until SATA arrives. This @@ -1589,7 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, hpriv->map = piix_init_sata_map(pdev, port_info, piix_map_db_table[ent->driver_data]); - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; host->private_data = hpriv; @@ -1626,7 +1626,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, host->flags |= ATA_HOST_PARALLEL_SCAN; pci_set_master(pdev); - return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht); + return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht); } static void piix_remove_one(struct pci_dev *pdev) diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c new file mode 100644 index 00000000000..81e772a94d5 --- /dev/null +++ b/drivers/ata/libahci.c @@ -0,0 +1,2202 @@ +/* + * libahci.c - Common AHCI SATA low-level routines + * + * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Please ALWAYS copy linux-ide@vger.kernel.org + * on emails. + * + * Copyright 2004-2005 Red Hat, Inc. 
+ * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/DocBook/libata.* + * + * AHCI hardware documentation: + * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf + * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf + * + */ + +#include <linux/kernel.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/device.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> +#include <linux/libata.h> +#include "ahci.h" + +static int ahci_skip_host_reset; +int ahci_ignore_sss; +EXPORT_SYMBOL_GPL(ahci_ignore_sss); + +module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); +MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); + +module_param_named(ignore_sss, ahci_ignore_sss, int, 0444); +MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)"); + +static int ahci_enable_alpm(struct ata_port *ap, + enum link_pm policy); +static void ahci_disable_alpm(struct ata_port *ap); +static ssize_t ahci_led_show(struct ata_port *ap, char *buf); +static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, + size_t size); +static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, + ssize_t size); + + + +static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); +static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); +static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); +static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); +static int ahci_port_start(struct ata_port *ap); +static void ahci_port_stop(struct ata_port *ap); +static void ahci_qc_prep(struct ata_queued_cmd *qc); +static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); +static void ahci_freeze(struct ata_port *ap); +static void ahci_thaw(struct ata_port *ap); +static void ahci_enable_fbs(struct ata_port *ap); +static void ahci_disable_fbs(struct ata_port *ap); +static void ahci_pmp_attach(struct ata_port *ap); +static void ahci_pmp_detach(struct ata_port *ap); +static int ahci_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static int ahci_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +static void ahci_postreset(struct ata_link *link, unsigned int *class); +static void ahci_error_handler(struct ata_port *ap); +static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); +static int ahci_port_resume(struct ata_port *ap); +static void ahci_dev_config(struct ata_device *dev); +static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, + u32 opts); +#ifdef CONFIG_PM +static int ahci_port_suspend(struct 
ata_port *ap, pm_message_t mesg); +#endif +static ssize_t ahci_activity_show(struct ata_device *dev, char *buf); +static ssize_t ahci_activity_store(struct ata_device *dev, + enum sw_activity val); +static void ahci_init_sw_activity(struct ata_link *link); + +static ssize_t ahci_show_host_caps(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_show_host_cap2(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_show_host_version(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_show_port_cmd(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_read_em_buffer(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t ahci_store_em_buffer(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size); + +static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); +static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); +static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL); +static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); +static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, + ahci_read_em_buffer, ahci_store_em_buffer); + +static struct device_attribute *ahci_shost_attrs[] = { + &dev_attr_link_power_management_policy, + &dev_attr_em_message_type, + &dev_attr_em_message, + &dev_attr_ahci_host_caps, + &dev_attr_ahci_host_cap2, + &dev_attr_ahci_host_version, + &dev_attr_ahci_port_cmd, + &dev_attr_em_buffer, + NULL +}; + +static struct device_attribute *ahci_sdev_attrs[] = { + &dev_attr_sw_activity, + &dev_attr_unload_heads, + NULL +}; + +struct scsi_host_template ahci_sht = { + ATA_NCQ_SHT("ahci"), + .can_queue = AHCI_MAX_CMDS - 1, + .sg_tablesize = AHCI_MAX_SG, + .dma_boundary = AHCI_DMA_BOUNDARY, + .shost_attrs = ahci_shost_attrs, + .sdev_attrs = ahci_sdev_attrs, +}; +EXPORT_SYMBOL_GPL(ahci_sht); + +struct ata_port_operations ahci_ops = { + .inherits = &sata_pmp_port_ops, + + .qc_defer = ahci_pmp_qc_defer, + .qc_prep = ahci_qc_prep, + .qc_issue = ahci_qc_issue, + .qc_fill_rtf = ahci_qc_fill_rtf, + + .freeze = ahci_freeze, + .thaw = ahci_thaw, + .softreset = ahci_softreset, + .hardreset = ahci_hardreset, + .postreset = ahci_postreset, + .pmp_softreset = ahci_softreset, + .error_handler = ahci_error_handler, + .post_internal_cmd = ahci_post_internal_cmd, + .dev_config = ahci_dev_config, + + .scr_read = ahci_scr_read, + .scr_write = ahci_scr_write, + .pmp_attach = ahci_pmp_attach, + .pmp_detach = ahci_pmp_detach, + + .enable_pm = ahci_enable_alpm, + .disable_pm = ahci_disable_alpm, + .em_show = ahci_led_show, + .em_store = ahci_led_store, + .sw_activity_show = ahci_activity_show, + .sw_activity_store = ahci_activity_store, +#ifdef CONFIG_PM + .port_suspend = ahci_port_suspend, + .port_resume = ahci_port_resume, +#endif + .port_start = ahci_port_start, + .port_stop = ahci_port_stop, +}; +EXPORT_SYMBOL_GPL(ahci_ops); + +int ahci_em_messages = 1; +EXPORT_SYMBOL_GPL(ahci_em_messages); +module_param(ahci_em_messages, int, 0444); +/* add other LED protocol types when they become supported */ +MODULE_PARM_DESC(ahci_em_messages, + "AHCI Enclosure Management Message control (0 = off, 1 = on)"); + +static void ahci_enable_ahci(void __iomem *mmio) +{ + int i; + u32 tmp; + + /* turn on AHCI_EN */ + tmp = readl(mmio + HOST_CTL); + if (tmp & HOST_AHCI_EN) + return; + + /* Some controllers need AHCI_EN to be written multiple times. 
+ * Try a few times before giving up. + */ + for (i = 0; i < 5; i++) { + tmp |= HOST_AHCI_EN; + writel(tmp, mmio + HOST_CTL); + tmp = readl(mmio + HOST_CTL); /* flush && sanity check */ + if (tmp & HOST_AHCI_EN) + return; + msleep(10); + } + + WARN_ON(1); +} + +static ssize_t ahci_show_host_caps(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + + return sprintf(buf, "%x\n", hpriv->cap); +} + +static ssize_t ahci_show_host_cap2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + + return sprintf(buf, "%x\n", hpriv->cap2); +} + +static ssize_t ahci_show_host_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + + return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION)); +} + +static ssize_t ahci_show_port_cmd(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + void __iomem *port_mmio = ahci_port_base(ap); + + return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD)); +} + +static ssize_t ahci_read_em_buffer(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + void __iomem *em_mmio = mmio + hpriv->em_loc; + u32 em_ctl, msg; + unsigned long flags; + size_t count; + int i; + + spin_lock_irqsave(ap->lock, flags); + + em_ctl = readl(mmio + HOST_EM_CTL); + if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT || + !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) { + spin_unlock_irqrestore(ap->lock, flags); + return -EINVAL; + } + + if (!(em_ctl & EM_CTL_MR)) { + spin_unlock_irqrestore(ap->lock, flags); + return -EAGAIN; + } + + if (!(em_ctl & EM_CTL_SMB)) + em_mmio += hpriv->em_buf_sz; + + count = hpriv->em_buf_sz; + + /* the count should not be larger than PAGE_SIZE */ + if (count > PAGE_SIZE) { + if (printk_ratelimit()) + ata_port_printk(ap, KERN_WARNING, + "EM read buffer size too large: " + "buffer size %u, page size %lu\n", + hpriv->em_buf_sz, PAGE_SIZE); + count = PAGE_SIZE; + } + + for (i = 0; i < count; i += 4) { + msg = readl(em_mmio + i); + buf[i] = msg & 0xff; + buf[i + 1] = (msg >> 8) & 0xff; + buf[i + 2] = (msg >> 16) & 0xff; + buf[i + 3] = (msg >> 24) & 0xff; + } + + spin_unlock_irqrestore(ap->lock, flags); + + return i; +} + +static ssize_t ahci_store_em_buffer(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + void __iomem *em_mmio = mmio + hpriv->em_loc; + const unsigned char *msg_buf = buf; + u32 em_ctl, msg; + unsigned long flags; + int i; + + /* check size validity */ + if (!(ap->flags & ATA_FLAG_EM) || + !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) || + size % 4 || size > hpriv->em_buf_sz) + 
return -EINVAL; + + spin_lock_irqsave(ap->lock, flags); + + em_ctl = readl(mmio + HOST_EM_CTL); + if (em_ctl & EM_CTL_TM) { + spin_unlock_irqrestore(ap->lock, flags); + return -EBUSY; + } + + for (i = 0; i < size; i += 4) { + msg = msg_buf[i] | msg_buf[i + 1] << 8 | + msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24; + writel(msg, em_mmio + i); + } + + writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); + + spin_unlock_irqrestore(ap->lock, flags); + + return size; +} + +/** + * ahci_save_initial_config - Save and fixup initial config values + * @dev: target AHCI device + * @hpriv: host private area to store config values + * @force_port_map: force port map to a specified value + * @mask_port_map: mask out particular bits from port map + * + * Some registers containing configuration info might be setup by + * BIOS and might be cleared on reset. This function saves the + * initial values of those registers into @hpriv such that they + * can be restored after controller reset. + * + * If inconsistent, config values are fixed up by this function. + * + * LOCKING: + * None. + */ +void ahci_save_initial_config(struct device *dev, + struct ahci_host_priv *hpriv, + unsigned int force_port_map, + unsigned int mask_port_map) +{ + void __iomem *mmio = hpriv->mmio; + u32 cap, cap2, vers, port_map; + int i; + + /* make sure AHCI mode is enabled before accessing CAP */ + ahci_enable_ahci(mmio); + + /* Values prefixed with saved_ are written back to host after + * reset. Values without are used for driver operation. + */ + hpriv->saved_cap = cap = readl(mmio + HOST_CAP); + hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); + + /* CAP2 register is only defined for AHCI 1.2 and later */ + vers = readl(mmio + HOST_VERSION); + if ((vers >> 16) > 1 || + ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200)) + hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2); + else + hpriv->saved_cap2 = cap2 = 0; + + /* some chips have errata preventing 64bit use */ + if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) { + dev_printk(KERN_INFO, dev, + "controller can't do 64bit DMA, forcing 32bit\n"); + cap &= ~HOST_CAP_64; + } + + if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) { + dev_printk(KERN_INFO, dev, + "controller can't do NCQ, turning off CAP_NCQ\n"); + cap &= ~HOST_CAP_NCQ; + } + + if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) { + dev_printk(KERN_INFO, dev, + "controller can do NCQ, turning on CAP_NCQ\n"); + cap |= HOST_CAP_NCQ; + } + + if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { + dev_printk(KERN_INFO, dev, + "controller can't do PMP, turning off CAP_PMP\n"); + cap &= ~HOST_CAP_PMP; + } + + if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) { + dev_printk(KERN_INFO, dev, + "controller can't do SNTF, turning off CAP_SNTF\n"); + cap &= ~HOST_CAP_SNTF; + } + + if (force_port_map && port_map != force_port_map) { + dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n", + port_map, force_port_map); + port_map = force_port_map; + } + + if (mask_port_map) { + dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n", + port_map, + port_map & mask_port_map); + port_map &= mask_port_map; + } + + /* cross check port_map and cap.n_ports */ + if (port_map) { + int map_ports = 0; + + for (i = 0; i < AHCI_MAX_PORTS; i++) + if (port_map & (1 << i)) + map_ports++; + + /* If PI has more ports than n_ports, whine, clear + * port_map and let it be generated from n_ports. 
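A simplified standalone sketch of the cross-check described above (the force/mask handling is omitted): count the bits the BIOS left in PORTS_IMPL, and if they exceed what CAP.NP allows, fall back to a dense map generated from the port count. The register values below are invented.

/* Userspace illustration of the port_map sanity check; not driver code. */
#include <stdio.h>

static int nr_ports(unsigned int cap)
{
        return (cap & 0x1f) + 1;
}

static unsigned int sanitize_port_map(unsigned int cap, unsigned int port_map)
{
        int i, map_ports = 0;

        for (i = 0; i < 32; i++)
                if (port_map & (1u << i))
                        map_ports++;

        if (!port_map || map_ports > nr_ports(cap)) {
                /* fabricate a dense map from CAP.NP, as the driver does */
                port_map = (1u << nr_ports(cap)) - 1;
                printf("forcing PORTS_IMPL to 0x%x\n", port_map);
        }
        return port_map;
}

int main(void)
{
        unsigned int cap = 0x00000003;  /* hypothetical: CAP.NP = 3 -> 4 ports */

        printf("consistent map : 0x%x\n", sanitize_port_map(cap, 0x0000000d));
        printf("broken BIOS map: 0x%x\n", sanitize_port_map(cap, 0x000000ff));
        return 0;
}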
+ */ + if (map_ports > ahci_nr_ports(cap)) { + dev_printk(KERN_WARNING, dev, + "implemented port map (0x%x) contains more " + "ports than nr_ports (%u), using nr_ports\n", + port_map, ahci_nr_ports(cap)); + port_map = 0; + } + } + + /* fabricate port_map from cap.nr_ports */ + if (!port_map) { + port_map = (1 << ahci_nr_ports(cap)) - 1; + dev_printk(KERN_WARNING, dev, + "forcing PORTS_IMPL to 0x%x\n", port_map); + + /* write the fixed up value to the PI register */ + hpriv->saved_port_map = port_map; + } + + /* record values to use during operation */ + hpriv->cap = cap; + hpriv->cap2 = cap2; + hpriv->port_map = port_map; +} +EXPORT_SYMBOL_GPL(ahci_save_initial_config); + +/** + * ahci_restore_initial_config - Restore initial config + * @host: target ATA host + * + * Restore initial config stored by ahci_save_initial_config(). + * + * LOCKING: + * None. + */ +static void ahci_restore_initial_config(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + + writel(hpriv->saved_cap, mmio + HOST_CAP); + if (hpriv->saved_cap2) + writel(hpriv->saved_cap2, mmio + HOST_CAP2); + writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL); + (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ +} + +static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg) +{ + static const int offset[] = { + [SCR_STATUS] = PORT_SCR_STAT, + [SCR_CONTROL] = PORT_SCR_CTL, + [SCR_ERROR] = PORT_SCR_ERR, + [SCR_ACTIVE] = PORT_SCR_ACT, + [SCR_NOTIFICATION] = PORT_SCR_NTF, + }; + struct ahci_host_priv *hpriv = ap->host->private_data; + + if (sc_reg < ARRAY_SIZE(offset) && + (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF))) + return offset[sc_reg]; + return 0; +} + +static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) +{ + void __iomem *port_mmio = ahci_port_base(link->ap); + int offset = ahci_scr_offset(link->ap, sc_reg); + + if (offset) { + *val = readl(port_mmio + offset); + return 0; + } + return -EINVAL; +} + +static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) +{ + void __iomem *port_mmio = ahci_port_base(link->ap); + int offset = ahci_scr_offset(link->ap, sc_reg); + + if (offset) { + writel(val, port_mmio + offset); + return 0; + } + return -EINVAL; +} + +void ahci_start_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + + /* start DMA */ + tmp = readl(port_mmio + PORT_CMD); + tmp |= PORT_CMD_START; + writel(tmp, port_mmio + PORT_CMD); + readl(port_mmio + PORT_CMD); /* flush */ +} +EXPORT_SYMBOL_GPL(ahci_start_engine); + +int ahci_stop_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + + tmp = readl(port_mmio + PORT_CMD); + + /* check if the HBA is idle */ + if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) + return 0; + + /* setting HBA to idle */ + tmp &= ~PORT_CMD_START; + writel(tmp, port_mmio + PORT_CMD); + + /* wait for engine to stop. 
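The stop path in ahci_stop_engine() here is a classic AHCI handshake: clear PxCMD.ST and then poll until the HBA drops PxCMD.CR (PORT_CMD_LIST_ON), giving it the 500 ms the spec allows. A toy standalone model of that loop follows; the "hardware" is simulated, and in the driver the polling is delegated to ata_wait_register().

/* Userspace toy model of the ST/CR stop handshake; illustration only. */
#include <stdio.h>
#include <stdint.h>

#define PORT_CMD_START   (1u << 0)      /* PxCMD.ST */
#define PORT_CMD_LIST_ON (1u << 15)     /* PxCMD.CR */

/* pretend hardware: CR follows ST after a few polls */
static uint32_t fake_port_cmd = PORT_CMD_START | PORT_CMD_LIST_ON;
static int polls_left = 3;

static uint32_t read_port_cmd(void)
{
        if (!(fake_port_cmd & PORT_CMD_START) && polls_left-- == 0)
                fake_port_cmd &= ~PORT_CMD_LIST_ON;
        return fake_port_cmd;
}

static int stop_engine(void)
{
        int tries;

        fake_port_cmd &= ~PORT_CMD_START;       /* clear ST */

        /* poll while CR is still set; the spec allows up to 500 ms */
        for (tries = 0; tries < 500; tries++)
                if (!(read_port_cmd() & PORT_CMD_LIST_ON))
                        return 0;
        return -1;                              /* -EIO in the driver */
}

int main(void)
{
        printf("stop_engine() -> %d\n", stop_engine());
        return 0;
}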
This could be as long as 500 msec */ + tmp = ata_wait_register(port_mmio + PORT_CMD, + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); + if (tmp & PORT_CMD_LIST_ON) + return -EIO; + + return 0; +} +EXPORT_SYMBOL_GPL(ahci_stop_engine); + +static void ahci_start_fis_rx(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + u32 tmp; + + /* set FIS registers */ + if (hpriv->cap & HOST_CAP_64) + writel((pp->cmd_slot_dma >> 16) >> 16, + port_mmio + PORT_LST_ADDR_HI); + writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR); + + if (hpriv->cap & HOST_CAP_64) + writel((pp->rx_fis_dma >> 16) >> 16, + port_mmio + PORT_FIS_ADDR_HI); + writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR); + + /* enable FIS reception */ + tmp = readl(port_mmio + PORT_CMD); + tmp |= PORT_CMD_FIS_RX; + writel(tmp, port_mmio + PORT_CMD); + + /* flush */ + readl(port_mmio + PORT_CMD); +} + +static int ahci_stop_fis_rx(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + + /* disable FIS reception */ + tmp = readl(port_mmio + PORT_CMD); + tmp &= ~PORT_CMD_FIS_RX; + writel(tmp, port_mmio + PORT_CMD); + + /* wait for completion, spec says 500ms, give it 1000 */ + tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON, + PORT_CMD_FIS_ON, 10, 1000); + if (tmp & PORT_CMD_FIS_ON) + return -EBUSY; + + return 0; +} + +static void ahci_power_up(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 cmd; + + cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; + + /* spin up device */ + if (hpriv->cap & HOST_CAP_SSS) { + cmd |= PORT_CMD_SPIN_UP; + writel(cmd, port_mmio + PORT_CMD); + } + + /* wake up link */ + writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); +} + +static void ahci_disable_alpm(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 cmd; + struct ahci_port_priv *pp = ap->private_data; + + /* IPM bits should be disabled by libata-core */ + /* get the existing command bits */ + cmd = readl(port_mmio + PORT_CMD); + + /* disable ALPM and ASP */ + cmd &= ~PORT_CMD_ASP; + cmd &= ~PORT_CMD_ALPE; + + /* force the interface back to active */ + cmd |= PORT_CMD_ICC_ACTIVE; + + /* write out new cmd value */ + writel(cmd, port_mmio + PORT_CMD); + cmd = readl(port_mmio + PORT_CMD); + + /* wait 10ms to be sure we've come out of any low power state */ + msleep(10); + + /* clear out any PhyRdy stuff from interrupt status */ + writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT); + + /* go ahead and clean out PhyRdy Change from Serror too */ + ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); + + /* + * Clear flag to indicate that we should ignore all PhyRdy + * state changes + */ + hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG; + + /* + * Enable interrupts on Phy Ready. 
+ */ + pp->intr_mask |= PORT_IRQ_PHYRDY; + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + + /* + * don't change the link pm policy - we can be called + * just to turn of link pm temporarily + */ +} + +static int ahci_enable_alpm(struct ata_port *ap, + enum link_pm policy) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 cmd; + struct ahci_port_priv *pp = ap->private_data; + u32 asp; + + /* Make sure the host is capable of link power management */ + if (!(hpriv->cap & HOST_CAP_ALPM)) + return -EINVAL; + + switch (policy) { + case MAX_PERFORMANCE: + case NOT_AVAILABLE: + /* + * if we came here with NOT_AVAILABLE, + * it just means this is the first time we + * have tried to enable - default to max performance, + * and let the user go to lower power modes on request. + */ + ahci_disable_alpm(ap); + return 0; + case MIN_POWER: + /* configure HBA to enter SLUMBER */ + asp = PORT_CMD_ASP; + break; + case MEDIUM_POWER: + /* configure HBA to enter PARTIAL */ + asp = 0; + break; + default: + return -EINVAL; + } + + /* + * Disable interrupts on Phy Ready. This keeps us from + * getting woken up due to spurious phy ready interrupts + * TBD - Hot plug should be done via polling now, is + * that even supported? + */ + pp->intr_mask &= ~PORT_IRQ_PHYRDY; + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + + /* + * Set a flag to indicate that we should ignore all PhyRdy + * state changes since these can happen now whenever we + * change link state + */ + hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG; + + /* get the existing command bits */ + cmd = readl(port_mmio + PORT_CMD); + + /* + * Set ASP based on Policy + */ + cmd |= asp; + + /* + * Setting this bit will instruct the HBA to aggressively + * enter a lower power link state when it's appropriate and + * based on the value set above for ASP + */ + cmd |= PORT_CMD_ALPE; + + /* write out new cmd value */ + writel(cmd, port_mmio + PORT_CMD); + cmd = readl(port_mmio + PORT_CMD); + + /* IPM bits should be set by libata-core */ + return 0; +} + +#ifdef CONFIG_PM +static void ahci_power_down(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 cmd, scontrol; + + if (!(hpriv->cap & HOST_CAP_SSS)) + return; + + /* put device into listen mode, first set PxSCTL.DET to 0 */ + scontrol = readl(port_mmio + PORT_SCR_CTL); + scontrol &= ~0xf; + writel(scontrol, port_mmio + PORT_SCR_CTL); + + /* then set PxCMD.SUD to 0 */ + cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; + cmd &= ~PORT_CMD_SPIN_UP; + writel(cmd, port_mmio + PORT_CMD); +} +#endif + +static void ahci_start_port(struct ata_port *ap) +{ + struct ahci_port_priv *pp = ap->private_data; + struct ata_link *link; + struct ahci_em_priv *emp; + ssize_t rc; + int i; + + /* enable FIS reception */ + ahci_start_fis_rx(ap); + + /* enable DMA */ + ahci_start_engine(ap); + + /* turn on LEDs */ + if (ap->flags & ATA_FLAG_EM) { + ata_for_each_link(link, ap, EDGE) { + emp = &pp->em_priv[link->pmp]; + + /* EM Transmit bit maybe busy during init */ + for (i = 0; i < EM_MAX_RETRY; i++) { + rc = ahci_transmit_led_message(ap, + emp->led_state, + 4); + if (rc == -EBUSY) + msleep(1); + else + break; + } + } + } + + if (ap->flags & ATA_FLAG_SW_ACTIVITY) + ata_for_each_link(link, ap, EDGE) + ahci_init_sw_activity(link); + +} + +static int ahci_deinit_port(struct ata_port *ap, const char **emsg) +{ + int rc; + + /* disable DMA */ + rc = ahci_stop_engine(ap); + if (rc) { + *emsg = 
"failed to stop engine"; + return rc; + } + + /* disable FIS reception */ + rc = ahci_stop_fis_rx(ap); + if (rc) { + *emsg = "failed stop FIS RX"; + return rc; + } + + return 0; +} + +int ahci_reset_controller(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 tmp; + + /* we must be in AHCI mode, before using anything + * AHCI-specific, such as HOST_RESET. + */ + ahci_enable_ahci(mmio); + + /* global controller reset */ + if (!ahci_skip_host_reset) { + tmp = readl(mmio + HOST_CTL); + if ((tmp & HOST_RESET) == 0) { + writel(tmp | HOST_RESET, mmio + HOST_CTL); + readl(mmio + HOST_CTL); /* flush */ + } + + /* + * to perform host reset, OS should set HOST_RESET + * and poll until this bit is read to be "0". + * reset must complete within 1 second, or + * the hardware should be considered fried. + */ + tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET, + HOST_RESET, 10, 1000); + + if (tmp & HOST_RESET) { + dev_printk(KERN_ERR, host->dev, + "controller reset failed (0x%x)\n", tmp); + return -EIO; + } + + /* turn on AHCI mode */ + ahci_enable_ahci(mmio); + + /* Some registers might be cleared on reset. Restore + * initial values. + */ + ahci_restore_initial_config(host); + } else + dev_printk(KERN_INFO, host->dev, + "skipping global host reset\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(ahci_reset_controller); + +static void ahci_sw_activity(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + + if (!(link->flags & ATA_LFLAG_SW_ACTIVITY)) + return; + + emp->activity++; + if (!timer_pending(&emp->timer)) + mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10)); +} + +static void ahci_sw_activity_blink(unsigned long arg) +{ + struct ata_link *link = (struct ata_link *)arg; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + unsigned long led_message = emp->led_state; + u32 activity_led_state; + unsigned long flags; + + led_message &= EM_MSG_LED_VALUE; + led_message |= ap->port_no | (link->pmp << 8); + + /* check to see if we've had activity. If so, + * toggle state of LED and reset timer. If not, + * turn LED to desired idle state. 
+ */ + spin_lock_irqsave(ap->lock, flags); + if (emp->saved_activity != emp->activity) { + emp->saved_activity = emp->activity; + /* get the current LED state */ + activity_led_state = led_message & EM_MSG_LED_VALUE_ON; + + if (activity_led_state) + activity_led_state = 0; + else + activity_led_state = 1; + + /* clear old state */ + led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; + + /* toggle state */ + led_message |= (activity_led_state << 16); + mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100)); + } else { + /* switch to idle */ + led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; + if (emp->blink_policy == BLINK_OFF) + led_message |= (1 << 16); + } + spin_unlock_irqrestore(ap->lock, flags); + ahci_transmit_led_message(ap, led_message, 4); +} + +static void ahci_init_sw_activity(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + + /* init activity stats, setup timer */ + emp->saved_activity = emp->activity = 0; + setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link); + + /* check our blink policy and set flag for link if it's enabled */ + if (emp->blink_policy) + link->flags |= ATA_LFLAG_SW_ACTIVITY; +} + +int ahci_reset_em(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 em_ctl; + + em_ctl = readl(mmio + HOST_EM_CTL); + if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST)) + return -EINVAL; + + writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL); + return 0; +} +EXPORT_SYMBOL_GPL(ahci_reset_em); + +static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, + ssize_t size) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + void __iomem *mmio = hpriv->mmio; + u32 em_ctl; + u32 message[] = {0, 0}; + unsigned long flags; + int pmp; + struct ahci_em_priv *emp; + + /* get the slot number from the message */ + pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; + if (pmp < EM_MAX_SLOTS) + emp = &pp->em_priv[pmp]; + else + return -EINVAL; + + spin_lock_irqsave(ap->lock, flags); + + /* + * if we are still busy transmitting a previous message, + * do not allow + */ + em_ctl = readl(mmio + HOST_EM_CTL); + if (em_ctl & EM_CTL_TM) { + spin_unlock_irqrestore(ap->lock, flags); + return -EBUSY; + } + + if (hpriv->em_msg_type & EM_MSG_TYPE_LED) { + /* + * create message header - this is all zero except for + * the message size, which is 4 bytes. 
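A standalone sketch (not part of the patch) of the two-dword LED message assembled just below for the LED message type: dword 0 carries the message size in its second byte, and dword 1 carries the HBA port in bits 0-3, the PMP slot in bits 8-15 and the LED value in the upper half, matching the EM_MSG_LED_* masks in ahci.h. The port, slot and value used are arbitrary.

/* Userspace illustration of the enclosure-management LED message. */
#include <stdio.h>
#include <stdint.h>

#define EM_MSG_LED_HBA_PORT 0x0000000f
#define EM_MSG_LED_PMP_SLOT 0x0000ff00
#define EM_MSG_LED_VALUE    0xffff0000

static void pack_led_msg(uint32_t msg[2], unsigned int port,
                         unsigned int pmp, uint16_t led_value)
{
        msg[0] = 4 << 8;                        /* message size: 4 bytes */
        msg[1] = (port & 0xf) |                 /* EM_MSG_LED_HBA_PORT */
                 ((pmp & 0xff) << 8) |          /* EM_MSG_LED_PMP_SLOT */
                 ((uint32_t)led_value << 16);   /* EM_MSG_LED_VALUE */
}

int main(void)
{
        uint32_t msg[2];

        /* port 2, PMP slot 1, LED value 0x0001 (LED on) */
        pack_led_msg(msg, 2, 1, 0x0001);
        printf("header 0x%08x payload 0x%08x\n", msg[0], msg[1]);

        /* the driver then writes msg[0] to HOST_EM_LOC and msg[1] to
         * HOST_EM_LOC + 4, and sets EM_CTL_TM to start transmission */
        return 0;
}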
+ */ + message[0] |= (4 << 8); + + /* ignore 0:4 of byte zero, fill in port info yourself */ + message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no); + + /* write message to EM_LOC */ + writel(message[0], mmio + hpriv->em_loc); + writel(message[1], mmio + hpriv->em_loc+4); + + /* + * tell hardware to transmit the message + */ + writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); + } + + /* save off new led state for port/slot */ + emp->led_state = state; + + spin_unlock_irqrestore(ap->lock, flags); + return size; +} + +static ssize_t ahci_led_show(struct ata_port *ap, char *buf) +{ + struct ahci_port_priv *pp = ap->private_data; + struct ata_link *link; + struct ahci_em_priv *emp; + int rc = 0; + + ata_for_each_link(link, ap, EDGE) { + emp = &pp->em_priv[link->pmp]; + rc += sprintf(buf, "%lx\n", emp->led_state); + } + return rc; +} + +static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, + size_t size) +{ + int state; + int pmp; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp; + + state = simple_strtoul(buf, NULL, 0); + + /* get the slot number from the message */ + pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; + if (pmp < EM_MAX_SLOTS) + emp = &pp->em_priv[pmp]; + else + return -EINVAL; + + /* mask off the activity bits if we are in sw_activity + * mode, user should turn off sw_activity before setting + * activity led through em_message + */ + if (emp->blink_policy) + state &= ~EM_MSG_LED_VALUE_ACTIVITY; + + return ahci_transmit_led_message(ap, state, size); +} + +static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val) +{ + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + u32 port_led_state = emp->led_state; + + /* save the desired Activity LED behavior */ + if (val == OFF) { + /* clear LFLAG */ + link->flags &= ~(ATA_LFLAG_SW_ACTIVITY); + + /* set the LED to OFF */ + port_led_state &= EM_MSG_LED_VALUE_OFF; + port_led_state |= (ap->port_no | (link->pmp << 8)); + ahci_transmit_led_message(ap, port_led_state, 4); + } else { + link->flags |= ATA_LFLAG_SW_ACTIVITY; + if (val == BLINK_OFF) { + /* set LED to ON for idle */ + port_led_state &= EM_MSG_LED_VALUE_OFF; + port_led_state |= (ap->port_no | (link->pmp << 8)); + port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */ + ahci_transmit_led_message(ap, port_led_state, 4); + } + } + emp->blink_policy = val; + return 0; +} + +static ssize_t ahci_activity_show(struct ata_device *dev, char *buf) +{ + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + + /* display the saved value of activity behavior for this + * disk. 
+ */ + return sprintf(buf, "%d\n", emp->blink_policy); +} + +static void ahci_port_init(struct device *dev, struct ata_port *ap, + int port_no, void __iomem *mmio, + void __iomem *port_mmio) +{ + const char *emsg = NULL; + int rc; + u32 tmp; + + /* make sure port is not active */ + rc = ahci_deinit_port(ap, &emsg); + if (rc) + dev_warn(dev, "%s (%d)\n", emsg, rc); + + /* clear SError */ + tmp = readl(port_mmio + PORT_SCR_ERR); + VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); + writel(tmp, port_mmio + PORT_SCR_ERR); + + /* clear port IRQ */ + tmp = readl(port_mmio + PORT_IRQ_STAT); + VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); + if (tmp) + writel(tmp, port_mmio + PORT_IRQ_STAT); + + writel(1 << port_no, mmio + HOST_IRQ_STAT); +} + +void ahci_init_controller(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + int i; + void __iomem *port_mmio; + u32 tmp; + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; + + port_mmio = ahci_port_base(ap); + if (ata_port_is_dummy(ap)) + continue; + + ahci_port_init(host->dev, ap, i, mmio, port_mmio); + } + + tmp = readl(mmio + HOST_CTL); + VPRINTK("HOST_CTL 0x%x\n", tmp); + writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); + tmp = readl(mmio + HOST_CTL); + VPRINTK("HOST_CTL 0x%x\n", tmp); +} +EXPORT_SYMBOL_GPL(ahci_init_controller); + +static void ahci_dev_config(struct ata_device *dev) +{ + struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; + + if (hpriv->flags & AHCI_HFLAG_SECT255) { + dev->max_sectors = 255; + ata_dev_printk(dev, KERN_INFO, + "SB600 AHCI: limiting to 255 sectors per cmd\n"); + } +} + +static unsigned int ahci_dev_classify(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ata_taskfile tf; + u32 tmp; + + tmp = readl(port_mmio + PORT_SIG); + tf.lbah = (tmp >> 24) & 0xff; + tf.lbam = (tmp >> 16) & 0xff; + tf.lbal = (tmp >> 8) & 0xff; + tf.nsect = (tmp) & 0xff; + + return ata_dev_classify(&tf); +} + +static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, + u32 opts) +{ + dma_addr_t cmd_tbl_dma; + + cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ; + + pp->cmd_slot[tag].opts = cpu_to_le32(opts); + pp->cmd_slot[tag].status = 0; + pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff); + pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); +} + +int ahci_kick_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_host_priv *hpriv = ap->host->private_data; + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; + u32 tmp; + int busy, rc; + + /* stop engine */ + rc = ahci_stop_engine(ap); + if (rc) + goto out_restart; + + /* need to do CLO? 
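Earlier in this hunk, ahci_dev_classify() recovers the device signature from PxSIG and lets ata_dev_classify() decide what is attached. A standalone sketch of that decode using the conventional SATA signature values, listed here for illustration only; the driver itself defers the classification to libata.

/* Userspace illustration of decoding a PxSIG signature dword. */
#include <stdio.h>
#include <stdint.h>

static const char *classify_sig(uint32_t sig)
{
        uint8_t lbam = (sig >> 16) & 0xff;
        uint8_t lbah = (sig >> 24) & 0xff;

        if (lbam == 0x00 && lbah == 0x00)
                return "ATA disk";
        if (lbam == 0x14 && lbah == 0xeb)
                return "ATAPI device";
        if (lbam == 0x69 && lbah == 0x96)
                return "port multiplier";
        if (lbam == 0x3c && lbah == 0xc3)
                return "SEMB device";
        return "unknown";
}

int main(void)
{
        const uint32_t sigs[] = { 0x00000101, 0xeb140101, 0x96690101 };
        unsigned int i;

        for (i = 0; i < sizeof(sigs) / sizeof(sigs[0]); i++)
                printf("PxSIG 0x%08x -> %s\n", sigs[i], classify_sig(sigs[i]));
        return 0;
}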
+ * always do CLO if PMP is attached (AHCI-1.3 9.2) + */ + busy = status & (ATA_BUSY | ATA_DRQ); + if (!busy && !sata_pmp_attached(ap)) { + rc = 0; + goto out_restart; + } + + if (!(hpriv->cap & HOST_CAP_CLO)) { + rc = -EOPNOTSUPP; + goto out_restart; + } + + /* perform CLO */ + tmp = readl(port_mmio + PORT_CMD); + tmp |= PORT_CMD_CLO; + writel(tmp, port_mmio + PORT_CMD); + + rc = 0; + tmp = ata_wait_register(port_mmio + PORT_CMD, + PORT_CMD_CLO, PORT_CMD_CLO, 1, 500); + if (tmp & PORT_CMD_CLO) + rc = -EIO; + + /* restart engine */ + out_restart: + ahci_start_engine(ap); + return rc; +} +EXPORT_SYMBOL_GPL(ahci_kick_engine); + +static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, + struct ata_taskfile *tf, int is_cmd, u16 flags, + unsigned long timeout_msec) +{ + const u32 cmd_fis_len = 5; /* five dwords */ + struct ahci_port_priv *pp = ap->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u8 *fis = pp->cmd_tbl; + u32 tmp; + + /* prep the command */ + ata_tf_to_fis(tf, pmp, is_cmd, fis); + ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12)); + + /* issue & wait */ + writel(1, port_mmio + PORT_CMD_ISSUE); + + if (timeout_msec) { + tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, + 1, timeout_msec); + if (tmp & 0x1) { + ahci_kick_engine(ap); + return -EBUSY; + } + } else + readl(port_mmio + PORT_CMD_ISSUE); /* flush */ + + return 0; +} + +int ahci_do_softreset(struct ata_link *link, unsigned int *class, + int pmp, unsigned long deadline, + int (*check_ready)(struct ata_link *link)) +{ + struct ata_port *ap = link->ap; + struct ahci_host_priv *hpriv = ap->host->private_data; + const char *reason = NULL; + unsigned long now, msecs; + struct ata_taskfile tf; + int rc; + + DPRINTK("ENTER\n"); + + /* prepare for SRST (AHCI-1.1 10.4.1) */ + rc = ahci_kick_engine(ap); + if (rc && rc != -EOPNOTSUPP) + ata_link_printk(link, KERN_WARNING, + "failed to reset engine (errno=%d)\n", rc); + + ata_tf_init(link->device, &tf); + + /* issue the first D2H Register FIS */ + msecs = 0; + now = jiffies; + if (time_after(now, deadline)) + msecs = jiffies_to_msecs(deadline - now); + + tf.ctl |= ATA_SRST; + if (ahci_exec_polled_cmd(ap, pmp, &tf, 0, + AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) { + rc = -EIO; + reason = "1st FIS failed"; + goto fail; + } + + /* spec says at least 5us, but be generous and sleep for 1ms */ + msleep(1); + + /* issue the second D2H Register FIS */ + tf.ctl &= ~ATA_SRST; + ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); + + /* wait for link to become ready */ + rc = ata_wait_after_reset(link, deadline, check_ready); + if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) { + /* + * Workaround for cases where link online status can't + * be trusted. Treat device readiness timeout as link + * offline. 
+ */ + ata_link_printk(link, KERN_INFO, + "device not ready, treating as offline\n"); + *class = ATA_DEV_NONE; + } else if (rc) { + /* link occupied, -ENODEV too is an error */ + reason = "device not ready"; + goto fail; + } else + *class = ahci_dev_classify(ap); + + DPRINTK("EXIT, class=%u\n", *class); + return 0; + + fail: + ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason); + return rc; +} + +int ahci_check_ready(struct ata_link *link) +{ + void __iomem *port_mmio = ahci_port_base(link->ap); + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; + + return ata_check_ready(status); +} +EXPORT_SYMBOL_GPL(ahci_check_ready); + +static int ahci_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int pmp = sata_srst_pmp(link); + + DPRINTK("ENTER\n"); + + return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready); +} +EXPORT_SYMBOL_GPL(ahci_do_softreset); + +static int ahci_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + struct ata_taskfile tf; + bool online; + int rc; + + DPRINTK("ENTER\n"); + + ahci_stop_engine(ap); + + /* clear D2H reception area to properly wait for D2H FIS */ + ata_tf_init(link->device, &tf); + tf.command = 0x80; + ata_tf_to_fis(&tf, 0, 0, d2h_fis); + + rc = sata_link_hardreset(link, timing, deadline, &online, + ahci_check_ready); + + ahci_start_engine(ap); + + if (online) + *class = ahci_dev_classify(ap); + + DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); + return rc; +} + +static void ahci_postreset(struct ata_link *link, unsigned int *class) +{ + struct ata_port *ap = link->ap; + void __iomem *port_mmio = ahci_port_base(ap); + u32 new_tmp, tmp; + + ata_std_postreset(link, class); + + /* Make sure port's ATAPI bit is set appropriately */ + new_tmp = tmp = readl(port_mmio + PORT_CMD); + if (*class == ATA_DEV_ATAPI) + new_tmp |= PORT_CMD_ATAPI; + else + new_tmp &= ~PORT_CMD_ATAPI; + if (new_tmp != tmp) { + writel(new_tmp, port_mmio + PORT_CMD); + readl(port_mmio + PORT_CMD); /* flush */ + } +} + +static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) +{ + struct scatterlist *sg; + struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; + unsigned int si; + + VPRINTK("ENTER\n"); + + /* + * Next, the S/G list. + */ + for_each_sg(qc->sg, sg, qc->n_elem, si) { + dma_addr_t addr = sg_dma_address(sg); + u32 sg_len = sg_dma_len(sg); + + ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff); + ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16); + ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1); + } + + return si; +} + +static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ahci_port_priv *pp = ap->private_data; + + if (!sata_pmp_attached(ap) || pp->fbs_enabled) + return ata_std_qc_defer(qc); + else + return sata_pmp_qc_defer_cmd_switch(qc); +} + +static void ahci_qc_prep(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ahci_port_priv *pp = ap->private_data; + int is_atapi = ata_is_atapi(qc->tf.protocol); + void *cmd_tbl; + u32 opts; + const u32 cmd_fis_len = 5; /* five dwords */ + unsigned int n_elem; + + /* + * Fill in command table information. First, the header, + * a SATA Register - Host to Device command FIS. 
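A standalone sketch of the per-command bookkeeping that ahci_qc_prep() sets up here: within each command table the H2D FIS sits at offset 0, an ATAPI CDB at AHCI_CMD_TBL_CDB (0x40) and the S/G list at AHCI_CMD_TBL_HDR_SZ (0x80), while the command header's opts dword packs the FIS length in dwords, the PMP port, the write/ATAPI flags and the S/G entry count. The example values are made up.

/* Userspace illustration of the command-header opts word and the
 * command-table offsets used above; not driver code. */
#include <stdio.h>
#include <stdint.h>

#define AHCI_CMD_TBL_CDB    0x40u
#define AHCI_CMD_TBL_HDR_SZ 0x80u
#define AHCI_CMD_ATAPI      (1u << 5)
#define AHCI_CMD_WRITE      (1u << 6)
#define AHCI_CMD_PREFETCH   (1u << 7)

static uint32_t build_opts(unsigned int n_elem, unsigned int pmp,
                           int is_write, int is_atapi)
{
        const uint32_t cmd_fis_len = 5;         /* H2D register FIS, 5 dwords */
        uint32_t opts = cmd_fis_len | (n_elem << 16) | (pmp << 12);

        if (is_write)
                opts |= AHCI_CMD_WRITE;
        if (is_atapi)
                opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
        return opts;
}

int main(void)
{
        /* e.g. a write with 4 S/G entries behind PMP port 2 */
        printf("opts = 0x%08x\n", build_opts(4, 2, 1, 0));
        printf("FIS at +0x00, CDB at +0x%02x, S/G list at +0x%02x\n",
               AHCI_CMD_TBL_CDB, AHCI_CMD_TBL_HDR_SZ);
        return 0;
}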
+ */ + cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; + + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); + if (is_atapi) { + memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); + memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); + } + + n_elem = 0; + if (qc->flags & ATA_QCFLAG_DMAMAP) + n_elem = ahci_fill_sg(qc, cmd_tbl); + + /* + * Fill in command slot information. + */ + opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12); + if (qc->tf.flags & ATA_TFLAG_WRITE) + opts |= AHCI_CMD_WRITE; + if (is_atapi) + opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; + + ahci_fill_cmd_slot(pp, qc->tag, opts); +} + +static void ahci_fbs_dec_intr(struct ata_port *ap) +{ + struct ahci_port_priv *pp = ap->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 fbs = readl(port_mmio + PORT_FBS); + int retries = 3; + + DPRINTK("ENTER\n"); + BUG_ON(!pp->fbs_enabled); + + /* time to wait for DEC is not specified by AHCI spec, + * add a retry loop for safety. + */ + writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS); + fbs = readl(port_mmio + PORT_FBS); + while ((fbs & PORT_FBS_DEC) && retries--) { + udelay(1); + fbs = readl(port_mmio + PORT_FBS); + } + + if (fbs & PORT_FBS_DEC) + dev_printk(KERN_ERR, ap->host->dev, + "failed to clear device error\n"); +} + +static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + struct ata_eh_info *host_ehi = &ap->link.eh_info; + struct ata_link *link = NULL; + struct ata_queued_cmd *active_qc; + struct ata_eh_info *active_ehi; + bool fbs_need_dec = false; + u32 serror; + + /* determine active link with error */ + if (pp->fbs_enabled) { + void __iomem *port_mmio = ahci_port_base(ap); + u32 fbs = readl(port_mmio + PORT_FBS); + int pmp = fbs >> PORT_FBS_DWE_OFFSET; + + if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) && + ata_link_online(&ap->pmp_link[pmp])) { + link = &ap->pmp_link[pmp]; + fbs_need_dec = true; + } + + } else + ata_for_each_link(link, ap, EDGE) + if (ata_link_active(link)) + break; + + if (!link) + link = &ap->link; + + active_qc = ata_qc_from_tag(ap, link->active_tag); + active_ehi = &link->eh_info; + + /* record irq stat */ + ata_ehi_clear_desc(host_ehi); + ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); + + /* AHCI needs SError cleared; otherwise, it might lock up */ + ahci_scr_read(&ap->link, SCR_ERROR, &serror); + ahci_scr_write(&ap->link, SCR_ERROR, serror); + host_ehi->serror |= serror; + + /* some controllers set IRQ_IF_ERR on device errors, ignore it */ + if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR) + irq_stat &= ~PORT_IRQ_IF_ERR; + + if (irq_stat & PORT_IRQ_TF_ERR) { + /* If qc is active, charge it; otherwise, the active + * link. There's no active qc on NCQ errors. It will + * be determined by EH by reading log page 10h. 
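When FIS-based switching is active, the error path above consults PxFBS to find out which device behind the port multiplier faulted. A standalone sketch of that decode and of the DEC handshake performed afterwards by ahci_fbs_dec_intr(); the register value is invented.

/* Userspace illustration of the PxFBS single-device-error decode. */
#include <stdio.h>

#define PORT_FBS_DWE_OFFSET 16          /* device with error */
#define PORT_FBS_DEV_OFFSET 8           /* device to issue to */
#define PORT_FBS_SDE        (1u << 2)
#define PORT_FBS_DEC        (1u << 1)
#define PORT_FBS_EN         (1u << 0)

int main(void)
{
        unsigned int fbs = (3u << PORT_FBS_DWE_OFFSET) | PORT_FBS_SDE | PORT_FBS_EN;

        if ((fbs & PORT_FBS_EN) && (fbs & PORT_FBS_SDE))
                printf("single device error on PMP link %u\n",
                       fbs >> PORT_FBS_DWE_OFFSET);

        /* recovery then sets DEC and waits for the HBA to clear it,
         * which is what ahci_fbs_dec_intr() above does */
        printf("write back 0x%08x to start the DEC handshake\n",
               fbs | PORT_FBS_DEC);
        return 0;
}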
+ */ + if (active_qc) + active_qc->err_mask |= AC_ERR_DEV; + else + active_ehi->err_mask |= AC_ERR_DEV; + + if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL) + host_ehi->serror &= ~SERR_INTERNAL; + } + + if (irq_stat & PORT_IRQ_UNK_FIS) { + u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); + + active_ehi->err_mask |= AC_ERR_HSM; + active_ehi->action |= ATA_EH_RESET; + ata_ehi_push_desc(active_ehi, + "unknown FIS %08x %08x %08x %08x" , + unk[0], unk[1], unk[2], unk[3]); + } + + if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) { + active_ehi->err_mask |= AC_ERR_HSM; + active_ehi->action |= ATA_EH_RESET; + ata_ehi_push_desc(active_ehi, "incorrect PMP"); + } + + if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { + host_ehi->err_mask |= AC_ERR_HOST_BUS; + host_ehi->action |= ATA_EH_RESET; + ata_ehi_push_desc(host_ehi, "host bus error"); + } + + if (irq_stat & PORT_IRQ_IF_ERR) { + if (fbs_need_dec) + active_ehi->err_mask |= AC_ERR_DEV; + else { + host_ehi->err_mask |= AC_ERR_ATA_BUS; + host_ehi->action |= ATA_EH_RESET; + } + + ata_ehi_push_desc(host_ehi, "interface fatal error"); + } + + if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { + ata_ehi_hotplugged(host_ehi); + ata_ehi_push_desc(host_ehi, "%s", + irq_stat & PORT_IRQ_CONNECT ? + "connection status changed" : "PHY RDY changed"); + } + + /* okay, let's hand over to EH */ + + if (irq_stat & PORT_IRQ_FREEZE) + ata_port_freeze(ap); + else if (fbs_need_dec) { + ata_link_abort(link); + ahci_fbs_dec_intr(ap); + } else + ata_port_abort(ap); +} + +static void ahci_port_intr(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ata_eh_info *ehi = &ap->link.eh_info; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_host_priv *hpriv = ap->host->private_data; + int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); + u32 status, qc_active = 0; + int rc; + + status = readl(port_mmio + PORT_IRQ_STAT); + writel(status, port_mmio + PORT_IRQ_STAT); + + /* ignore BAD_PMP while resetting */ + if (unlikely(resetting)) + status &= ~PORT_IRQ_BAD_PMP; + + /* If we are getting PhyRdy, this is + * just a power state change, we should + * clear out this, plus the PhyRdy/Comm + * Wake bits from Serror + */ + if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) && + (status & PORT_IRQ_PHYRDY)) { + status &= ~PORT_IRQ_PHYRDY; + ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); + } + + if (unlikely(status & PORT_IRQ_ERROR)) { + ahci_error_intr(ap, status); + return; + } + + if (status & PORT_IRQ_SDB_FIS) { + /* If SNotification is available, leave notification + * handling to sata_async_notification(). If not, + * emulate it by snooping SDB FIS RX area. + * + * Snooping FIS RX area is probably cheaper than + * poking SNotification but some constrollers which + * implement SNotification, ICH9 for example, don't + * store AN SDB FIS into receive area. + */ + if (hpriv->cap & HOST_CAP_SNTF) + sata_async_notification(ap); + else { + /* If the 'N' bit in word 0 of the FIS is set, + * we just received asynchronous notification. + * Tell libata about it. + * + * Lack of SNotification should not appear in + * ahci 1.2, so the workaround is unnecessary + * when FBS is enabled. 
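The "(1 << 16) | (1 << 18)" written to SCR_ERROR above squelches the PhyRdy-change and Comm-Wake DIAG bits when hotplug notification is disabled. Spelled out with the SError bit names from <linux/ata.h> (a readability sketch only; the driver keeps the raw constants):

/* clear the N (PhyRdy changed) and W (Comm Wake) DIAG bits */
static void ahci_clear_phyrdy_serror(struct ata_link *link)
{
        ahci_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG | SERR_COMM_WAKE);
}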
+ */ + if (pp->fbs_enabled) + WARN_ON_ONCE(1); + else { + const __le32 *f = pp->rx_fis + RX_FIS_SDB; + u32 f0 = le32_to_cpu(f[0]); + if (f0 & (1 << 15)) + sata_async_notification(ap); + } + } + } + + /* pp->active_link is not reliable once FBS is enabled, both + * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because + * NCQ and non-NCQ commands may be in flight at the same time. + */ + if (pp->fbs_enabled) { + if (ap->qc_active) { + qc_active = readl(port_mmio + PORT_SCR_ACT); + qc_active |= readl(port_mmio + PORT_CMD_ISSUE); + } + } else { + /* pp->active_link is valid iff any command is in flight */ + if (ap->qc_active && pp->active_link->sactive) + qc_active = readl(port_mmio + PORT_SCR_ACT); + else + qc_active = readl(port_mmio + PORT_CMD_ISSUE); + } + + + rc = ata_qc_complete_multiple(ap, qc_active); + + /* while resetting, invalid completions are expected */ + if (unlikely(rc < 0 && !resetting)) { + ehi->err_mask |= AC_ERR_HSM; + ehi->action |= ATA_EH_RESET; + ata_port_freeze(ap); + } +} + +irqreturn_t ahci_interrupt(int irq, void *dev_instance) +{ + struct ata_host *host = dev_instance; + struct ahci_host_priv *hpriv; + unsigned int i, handled = 0; + void __iomem *mmio; + u32 irq_stat, irq_masked; + + VPRINTK("ENTER\n"); + + hpriv = host->private_data; + mmio = hpriv->mmio; + + /* sigh. 0xffffffff is a valid return from h/w */ + irq_stat = readl(mmio + HOST_IRQ_STAT); + if (!irq_stat) + return IRQ_NONE; + + irq_masked = irq_stat & hpriv->port_map; + + spin_lock(&host->lock); + + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap; + + if (!(irq_masked & (1 << i))) + continue; + + ap = host->ports[i]; + if (ap) { + ahci_port_intr(ap); + VPRINTK("port %u\n", i); + } else { + VPRINTK("port %u (no irq)\n", i); + if (ata_ratelimit()) + dev_printk(KERN_WARNING, host->dev, + "interrupt on disabled port %u\n", i); + } + + handled = 1; + } + + /* HOST_IRQ_STAT behaves as level triggered latch meaning that + * it should be cleared after all the port events are cleared; + * otherwise, it will raise a spurious interrupt after each + * valid one. Please read section 10.6.2 of ahci 1.1 for more + * information. + * + * Also, use the unmasked value to clear interrupt as spurious + * pending event on a dummy port might cause screaming IRQ. + */ + writel(irq_stat, mmio + HOST_IRQ_STAT); + + spin_unlock(&host->lock); + + VPRINTK("EXIT\n"); + + return IRQ_RETVAL(handled); +} +EXPORT_SYMBOL_GPL(ahci_interrupt); + +static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_port_priv *pp = ap->private_data; + + /* Keep track of the currently active link. It will be used + * in completion path to determine whether NCQ phase is in + * progress. 
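With ahci_interrupt() now exported, a driver built on this library no longer needs its own top-level handler and can hand the exported routine straight to ata_host_activate(). A sketch of the probe-side wiring; the host template is left to the calling driver and the helper name is hypothetical:

#include <linux/interrupt.h>
#include <linux/libata.h>
#include "ahci.h"

/* hypothetical probe helper: reuse the library's shared IRQ handler */
static int my_ahci_activate(struct ata_host *host, int irq,
                            struct scsi_host_template *sht)
{
        return ata_host_activate(host, irq, ahci_interrupt,
                                 IRQF_SHARED, sht);
}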
+ */ + pp->active_link = qc->dev->link; + + if (qc->tf.protocol == ATA_PROT_NCQ) + writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); + + if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) { + u32 fbs = readl(port_mmio + PORT_FBS); + fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC); + fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET; + writel(fbs, port_mmio + PORT_FBS); + pp->fbs_last_dev = qc->dev->link->pmp; + } + + writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); + + ahci_sw_activity(qc->dev->link); + + return 0; +} + +static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) +{ + struct ahci_port_priv *pp = qc->ap->private_data; + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + + if (pp->fbs_enabled) + d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; + + ata_tf_from_fis(d2h_fis, &qc->result_tf); + return true; +} + +static void ahci_freeze(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + + /* turn IRQ off */ + writel(0, port_mmio + PORT_IRQ_MASK); +} + +static void ahci_thaw(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *mmio = hpriv->mmio; + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp; + struct ahci_port_priv *pp = ap->private_data; + + /* clear IRQ */ + tmp = readl(port_mmio + PORT_IRQ_STAT); + writel(tmp, port_mmio + PORT_IRQ_STAT); + writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); + + /* turn IRQ back on */ + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); +} + +static void ahci_error_handler(struct ata_port *ap) +{ + if (!(ap->pflags & ATA_PFLAG_FROZEN)) { + /* restart engine */ + ahci_stop_engine(ap); + ahci_start_engine(ap); + } + + sata_pmp_error_handler(ap); + + if (!ata_dev_enabled(ap->link.device)) + ahci_stop_engine(ap); +} + +static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + /* make DMA engine forget about the failed command */ + if (qc->flags & ATA_QCFLAG_FAILED) + ahci_kick_engine(ap); +} + +static void ahci_enable_fbs(struct ata_port *ap) +{ + struct ahci_port_priv *pp = ap->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 fbs; + int rc; + + if (!pp->fbs_supported) + return; + + fbs = readl(port_mmio + PORT_FBS); + if (fbs & PORT_FBS_EN) { + pp->fbs_enabled = true; + pp->fbs_last_dev = -1; /* initialization */ + return; + } + + rc = ahci_stop_engine(ap); + if (rc) + return; + + writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS); + fbs = readl(port_mmio + PORT_FBS); + if (fbs & PORT_FBS_EN) { + dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n"); + pp->fbs_enabled = true; + pp->fbs_last_dev = -1; /* initialization */ + } else + dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n"); + + ahci_start_engine(ap); +} + +static void ahci_disable_fbs(struct ata_port *ap) +{ + struct ahci_port_priv *pp = ap->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + u32 fbs; + int rc; + + if (!pp->fbs_supported) + return; + + fbs = readl(port_mmio + PORT_FBS); + if ((fbs & PORT_FBS_EN) == 0) { + pp->fbs_enabled = false; + return; + } + + rc = ahci_stop_engine(ap); + if (rc) + return; + + writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS); + fbs = readl(port_mmio + PORT_FBS); + if (fbs & PORT_FBS_EN) + dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n"); + else { + dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n"); + pp->fbs_enabled = false; + } + + ahci_start_engine(ap); +} + +static void ahci_pmp_attach(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct 
ahci_port_priv *pp = ap->private_data; + u32 cmd; + + cmd = readl(port_mmio + PORT_CMD); + cmd |= PORT_CMD_PMP; + writel(cmd, port_mmio + PORT_CMD); + + ahci_enable_fbs(ap); + + pp->intr_mask |= PORT_IRQ_BAD_PMP; + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); +} + +static void ahci_pmp_detach(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_port_priv *pp = ap->private_data; + u32 cmd; + + ahci_disable_fbs(ap); + + cmd = readl(port_mmio + PORT_CMD); + cmd &= ~PORT_CMD_PMP; + writel(cmd, port_mmio + PORT_CMD); + + pp->intr_mask &= ~PORT_IRQ_BAD_PMP; + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); +} + +static int ahci_port_resume(struct ata_port *ap) +{ + ahci_power_up(ap); + ahci_start_port(ap); + + if (sata_pmp_attached(ap)) + ahci_pmp_attach(ap); + else + ahci_pmp_detach(ap); + + return 0; +} + +#ifdef CONFIG_PM +static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) +{ + const char *emsg = NULL; + int rc; + + rc = ahci_deinit_port(ap, &emsg); + if (rc == 0) + ahci_power_down(ap); + else { + ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc); + ahci_start_port(ap); + } + + return rc; +} +#endif + +static int ahci_port_start(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct device *dev = ap->host->dev; + struct ahci_port_priv *pp; + void *mem; + dma_addr_t mem_dma; + size_t dma_sz, rx_fis_sz; + + pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); + if (!pp) + return -ENOMEM; + + /* check FBS capability */ + if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) { + void __iomem *port_mmio = ahci_port_base(ap); + u32 cmd = readl(port_mmio + PORT_CMD); + if (cmd & PORT_CMD_FBSCP) + pp->fbs_supported = true; + else + dev_printk(KERN_WARNING, dev, + "The port is not capable of FBS\n"); + } + + if (pp->fbs_supported) { + dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ; + rx_fis_sz = AHCI_RX_FIS_SZ * 16; + } else { + dma_sz = AHCI_PORT_PRIV_DMA_SZ; + rx_fis_sz = AHCI_RX_FIS_SZ; + } + + mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL); + if (!mem) + return -ENOMEM; + memset(mem, 0, dma_sz); + + /* + * First item in chunk of DMA memory: 32-slot command table, + * 32 bytes each in size + */ + pp->cmd_slot = mem; + pp->cmd_slot_dma = mem_dma; + + mem += AHCI_CMD_SLOT_SZ; + mem_dma += AHCI_CMD_SLOT_SZ; + + /* + * Second item: Received-FIS area + */ + pp->rx_fis = mem; + pp->rx_fis_dma = mem_dma; + + mem += rx_fis_sz; + mem_dma += rx_fis_sz; + + /* + * Third item: data area for storing a single command + * and its scatter-gather table + */ + pp->cmd_tbl = mem; + pp->cmd_tbl_dma = mem_dma; + + /* + * Save off initial list of interrupts to be enabled. 
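When FBS is in use, the receive area allocated above holds sixteen AHCI_RX_FIS_SZ blocks, one per possible PMP port, which is why ahci_qc_fill_rtf() earlier offsets the D2H FIS by the link's PMP number. A small illustrative helper for that addressing (field names mirror the driver's, the helper itself is not part of the patch):

static void *ahci_pmp_rx_fis(struct ahci_port_priv *pp, int pmp,
                             unsigned int fis_offset)
{
        void *rx_fis = pp->rx_fis;

        /* one AHCI_RX_FIS_SZ block per PMP device when FBS is enabled */
        if (pp->fbs_enabled)
                rx_fis += pmp * AHCI_RX_FIS_SZ;

        return rx_fis + fis_offset;     /* e.g. RX_FIS_D2H_REG */
}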
+ * This could be changed later + */ + pp->intr_mask = DEF_PORT_IRQ; + + ap->private_data = pp; + + /* engage engines, captain */ + return ahci_port_resume(ap); +} + +static void ahci_port_stop(struct ata_port *ap) +{ + const char *emsg = NULL; + int rc; + + /* de-initialize port */ + rc = ahci_deinit_port(ap, &emsg); + if (rc) + ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc); +} + +void ahci_print_info(struct ata_host *host, const char *scc_s) +{ + struct ahci_host_priv *hpriv = host->private_data; + void __iomem *mmio = hpriv->mmio; + u32 vers, cap, cap2, impl, speed; + const char *speed_s; + + vers = readl(mmio + HOST_VERSION); + cap = hpriv->cap; + cap2 = hpriv->cap2; + impl = hpriv->port_map; + + speed = (cap >> 20) & 0xf; + if (speed == 1) + speed_s = "1.5"; + else if (speed == 2) + speed_s = "3"; + else if (speed == 3) + speed_s = "6"; + else + speed_s = "?"; + + dev_info(host->dev, + "AHCI %02x%02x.%02x%02x " + "%u slots %u ports %s Gbps 0x%x impl %s mode\n" + , + + (vers >> 24) & 0xff, + (vers >> 16) & 0xff, + (vers >> 8) & 0xff, + vers & 0xff, + + ((cap >> 8) & 0x1f) + 1, + (cap & 0x1f) + 1, + speed_s, + impl, + scc_s); + + dev_info(host->dev, + "flags: " + "%s%s%s%s%s%s%s" + "%s%s%s%s%s%s%s" + "%s%s%s%s%s%s\n" + , + + cap & HOST_CAP_64 ? "64bit " : "", + cap & HOST_CAP_NCQ ? "ncq " : "", + cap & HOST_CAP_SNTF ? "sntf " : "", + cap & HOST_CAP_MPS ? "ilck " : "", + cap & HOST_CAP_SSS ? "stag " : "", + cap & HOST_CAP_ALPM ? "pm " : "", + cap & HOST_CAP_LED ? "led " : "", + cap & HOST_CAP_CLO ? "clo " : "", + cap & HOST_CAP_ONLY ? "only " : "", + cap & HOST_CAP_PMP ? "pmp " : "", + cap & HOST_CAP_FBS ? "fbs " : "", + cap & HOST_CAP_PIO_MULTI ? "pio " : "", + cap & HOST_CAP_SSC ? "slum " : "", + cap & HOST_CAP_PART ? "part " : "", + cap & HOST_CAP_CCC ? "ccc " : "", + cap & HOST_CAP_EMS ? "ems " : "", + cap & HOST_CAP_SXS ? "sxs " : "", + cap2 & HOST_CAP2_APST ? "apst " : "", + cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "", + cap2 & HOST_CAP2_BOH ? 
"boh " : "" + ); +} +EXPORT_SYMBOL_GPL(ahci_print_info); + +void ahci_set_em_messages(struct ahci_host_priv *hpriv, + struct ata_port_info *pi) +{ + u8 messages; + void __iomem *mmio = hpriv->mmio; + u32 em_loc = readl(mmio + HOST_EM_LOC); + u32 em_ctl = readl(mmio + HOST_EM_CTL); + + if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS)) + return; + + messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; + + if (messages) { + /* store em_loc */ + hpriv->em_loc = ((em_loc >> 16) * 4); + hpriv->em_buf_sz = ((em_loc & 0xff) * 4); + hpriv->em_msg_type = messages; + pi->flags |= ATA_FLAG_EM; + if (!(em_ctl & EM_CTL_ALHD)) + pi->flags |= ATA_FLAG_SW_ACTIVITY; + } +} +EXPORT_SYMBOL_GPL(ahci_set_em_messages); + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("Common AHCI SATA low-level routines"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 49cffb6094a..ddf8e486278 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -65,6 +65,7 @@ #include <linux/libata.h> #include <asm/byteorder.h> #include <linux/cdrom.h> +#include <linux/ratelimit.h> #include "libata.h" @@ -96,7 +97,6 @@ static void ata_dev_xfermask(struct ata_device *dev); static unsigned long ata_dev_blacklisted(const struct ata_device *dev); unsigned int ata_print_id = 1; -static struct workqueue_struct *ata_wq; struct workqueue_struct *ata_aux_wq; @@ -160,6 +160,10 @@ int libata_allow_tpm = 0; module_param_named(allow_tpm, libata_allow_tpm, int, 0444); MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); +static int atapi_an; +module_param(atapi_an, int, 0444); +MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)"); + MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("Library module for ATA devices"); MODULE_LICENSE("GPL"); @@ -1685,52 +1689,6 @@ unsigned long ata_id_xfermask(const u16 *id) return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); } -/** - * ata_pio_queue_task - Queue port_task - * @ap: The ata_port to queue port_task for - * @data: data for @fn to use - * @delay: delay time in msecs for workqueue function - * - * Schedule @fn(@data) for execution after @delay jiffies using - * port_task. There is one port_task per port and it's the - * user(low level driver)'s responsibility to make sure that only - * one task is active at any given time. - * - * libata core layer takes care of synchronization between - * port_task and EH. ata_pio_queue_task() may be ignored for EH - * synchronization. - * - * LOCKING: - * Inherited from caller. - */ -void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay) -{ - ap->port_task_data = data; - - /* may fail if ata_port_flush_task() in progress */ - queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay)); -} - -/** - * ata_port_flush_task - Flush port_task - * @ap: The ata_port to flush port_task for - * - * After this function completes, port_task is guranteed not to - * be running or scheduled. 
- * - * LOCKING: - * Kernel thread context (may sleep) - */ -void ata_port_flush_task(struct ata_port *ap) -{ - DPRINTK("ENTER\n"); - - cancel_rearming_delayed_work(&ap->port_task); - - if (ata_msg_ctl(ap)) - ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__); -} - static void ata_qc_complete_internal(struct ata_queued_cmd *qc) { struct completion *waiting = qc->private_data; @@ -1852,7 +1810,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); - ata_port_flush_task(ap); + ata_sff_flush_pio_task(ap); if (!rc) { spin_lock_irqsave(ap->lock, flags); @@ -1906,22 +1864,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, ap->qc_active = preempted_qc_active; ap->nr_active_links = preempted_nr_active_links; - /* XXX - Some LLDDs (sata_mv) disable port on command failure. - * Until those drivers are fixed, we detect the condition - * here, fail the command with AC_ERR_SYSTEM and reenable the - * port. - * - * Note that this doesn't change any behavior as internal - * command failure results in disabling the device in the - * higher layer for LLDDs without new reset/EH callbacks. - * - * Kill the following code as soon as those drivers are fixed. - */ - if (ap->flags & ATA_FLAG_DISABLED) { - err_mask |= AC_ERR_SYSTEM; - ata_port_probe(ap); - } - spin_unlock_irqrestore(ap->lock, flags); if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) @@ -2184,6 +2126,14 @@ retry: goto err_out; } + if (dev->horkage & ATA_HORKAGE_DUMP_ID) { + ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, " + "class=%d may_fallback=%d tried_spinup=%d\n", + class, may_fallback, tried_spinup); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, + 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); + } + /* Falling back doesn't make sense if ID data was read * successfully at least once. */ @@ -2572,7 +2522,8 @@ int ata_dev_configure(struct ata_device *dev) * to enable ATAPI AN to discern between PHY status * changed notifications and ATAPI ANs. */ - if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && + if (atapi_an && + (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && (!sata_pmp_attached(ap) || sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { unsigned int err_mask; @@ -2767,8 +2718,6 @@ int ata_bus_probe(struct ata_port *ap) int rc; struct ata_device *dev; - ata_port_probe(ap); - ata_for_each_dev(dev, &ap->link, ALL) tries[dev->devno] = ATA_PROBE_MAX_TRIES; @@ -2796,8 +2745,7 @@ int ata_bus_probe(struct ata_port *ap) ap->ops->phy_reset(ap); ata_for_each_dev(dev, &ap->link, ALL) { - if (!(ap->flags & ATA_FLAG_DISABLED) && - dev->class != ATA_DEV_UNKNOWN) + if (dev->class != ATA_DEV_UNKNOWN) classes[dev->devno] = dev->class; else classes[dev->devno] = ATA_DEV_NONE; @@ -2805,8 +2753,6 @@ int ata_bus_probe(struct ata_port *ap) dev->class = ATA_DEV_UNKNOWN; } - ata_port_probe(ap); - /* read IDENTIFY page and configure devices. We have to do the identify specific sequence bass-ackwards so that PDIAG- is released by the slave device */ @@ -2856,8 +2802,6 @@ int ata_bus_probe(struct ata_port *ap) ata_for_each_dev(dev, &ap->link, ENABLED) return 0; - /* no device present, disable port */ - ata_port_disable(ap); return -ENODEV; fail: @@ -2889,22 +2833,6 @@ int ata_bus_probe(struct ata_port *ap) } /** - * ata_port_probe - Mark port as enabled - * @ap: Port for which we indicate enablement - * - * Modify @ap data structure such that the system - * thinks that the entire port is enabled. 
- * - * LOCKING: host lock, or some other form of - * serialization. - */ - -void ata_port_probe(struct ata_port *ap) -{ - ap->flags &= ~ATA_FLAG_DISABLED; -} - -/** * sata_print_link_status - Print SATA link status * @link: SATA link to printk link status about * @@ -2951,26 +2879,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev) } /** - * ata_port_disable - Disable port. - * @ap: Port to be disabled. - * - * Modify @ap data structure such that the system - * thinks that the entire port is disabled, and should - * never attempt to probe or communicate with devices - * on this port. - * - * LOCKING: host lock, or some other form of - * serialization. - */ - -void ata_port_disable(struct ata_port *ap) -{ - ap->link.device[0].class = ATA_DEV_NONE; - ap->link.device[1].class = ATA_DEV_NONE; - ap->flags |= ATA_FLAG_DISABLED; -} - -/** * sata_down_spd_limit - adjust SATA spd limit downward * @link: Link to adjust SATA spd limit for * @spd_limit: Additional limit @@ -3631,9 +3539,15 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)) { unsigned long start = jiffies; - unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); + unsigned long nodev_deadline; int warned = 0; + /* choose which 0xff timeout to use, read comment in libata.h */ + if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) + nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); + else + nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); + /* Slave readiness can't be tested separately from master. On * M/S emulation configuration, this function should be called * only on the master and it will handle both master and slave. @@ -3651,12 +3565,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, if (ready > 0) return 0; - /* -ENODEV could be transient. Ignore -ENODEV if link + /* + * -ENODEV could be transient. Ignore -ENODEV if link * is online. Also, some SATA devices take a long - * time to clear 0xff after reset. For example, - * HHD424020F7SV00 iVDR needs >= 800ms while Quantum - * GoVault needs even more than that. Wait for - * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline. + * time to clear 0xff after reset. Wait for + * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't + * offline. * * Note that some PATA controllers (pata_ali) explode * if status register is read more than once when @@ -4205,9 +4119,8 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { ata_dev_printk(dev, KERN_WARNING, "new n_sectors matches native, probably " - "late HPA unlock, continuing\n"); - /* keep using the old n_sectors */ - dev->n_sectors = n_sectors; + "late HPA unlock, n_sectors updated\n"); + /* use the larger n_sectors */ return 0; } @@ -5558,30 +5471,6 @@ void ata_host_resume(struct ata_host *host) #endif /** - * ata_port_start - Set port up for dma. - * @ap: Port to initialize - * - * Called just after data structures for each port are - * initialized. Allocates space for PRD table. - * - * May be used as the port_start() entry in ata_port_operations. - * - * LOCKING: - * Inherited from caller. 
- */ -int ata_port_start(struct ata_port *ap) -{ - struct device *dev = ap->dev; - - ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, - GFP_KERNEL); - if (!ap->prd) - return -ENOMEM; - - return 0; -} - -/** * ata_dev_init - Initialize an ata_device structure * @dev: Device structure to initialize * @@ -5709,12 +5598,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap->pflags |= ATA_PFLAG_INITIALIZING; ap->lock = &host->lock; - ap->flags = ATA_FLAG_DISABLED; ap->print_id = -1; - ap->ctl = ATA_DEVCTL_OBS; ap->host = host; ap->dev = host->dev; - ap->last_ctl = 0xFF; #if defined(ATA_VERBOSE_DEBUG) /* turn on all debugging levels */ @@ -5725,11 +5611,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; #endif -#ifdef CONFIG_ATA_SFF - INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); -#else - INIT_DELAYED_WORK(&ap->port_task, NULL); -#endif INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); INIT_LIST_HEAD(&ap->eh_done_q); @@ -5747,6 +5628,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap->stats.unhandled_irq = 1; ap->stats.idle_irq = 1; #endif + ata_sff_port_init(ap); + return ap; } @@ -6138,8 +6021,6 @@ static void async_port_probe(void *data, async_cookie_t cookie) struct ata_eh_info *ehi = &ap->link.eh_info; unsigned long flags; - ata_port_probe(ap); - /* kick EH for boot probing */ spin_lock_irqsave(ap->lock, flags); @@ -6503,6 +6384,7 @@ static int __init ata_parse_force_one(char **cur, { "3.0Gbps", .spd_limit = 2 }, { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, + { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, @@ -6663,62 +6545,43 @@ static void __init ata_parse_force_param(void) static int __init ata_init(void) { - ata_parse_force_param(); + int rc = -ENOMEM; - /* - * FIXME: In UP case, there is only one workqueue thread and if you - * have more than one PIO device, latency is bloody awful, with - * occasional multi-second "hiccups" as one PIO device waits for - * another. It's an ugly wart that users DO occasionally complain - * about; luckily most users have at most one PIO polled device. 
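The ata_init()/ata_ratelimit() rework in the next hunk drops the hand-rolled jiffies-plus-spinlock throttle in favour of <linux/ratelimit.h>. The same pattern, for reference, in a hypothetical driver that wants at most one warning per HZ/5:

#include <linux/libata.h>
#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(my_rl, HZ / 5, 1);

static void my_warn_spurious_irq(struct ata_port *ap)
{
        /* __ratelimit() returns non-zero when printing is allowed */
        if (__ratelimit(&my_rl))
                ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
}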
- */ - ata_wq = create_workqueue("ata"); - if (!ata_wq) - goto free_force_tbl; + ata_parse_force_param(); ata_aux_wq = create_singlethread_workqueue("ata_aux"); if (!ata_aux_wq) - goto free_wq; + goto fail; + + rc = ata_sff_init(); + if (rc) + goto fail; printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); return 0; -free_wq: - destroy_workqueue(ata_wq); -free_force_tbl: +fail: kfree(ata_force_tbl); - return -ENOMEM; + if (ata_aux_wq) + destroy_workqueue(ata_aux_wq); + return rc; } static void __exit ata_exit(void) { + ata_sff_exit(); kfree(ata_force_tbl); - destroy_workqueue(ata_wq); destroy_workqueue(ata_aux_wq); } subsys_initcall(ata_init); module_exit(ata_exit); -static unsigned long ratelimit_time; -static DEFINE_SPINLOCK(ata_ratelimit_lock); +static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); int ata_ratelimit(void) { - int rc; - unsigned long flags; - - spin_lock_irqsave(&ata_ratelimit_lock, flags); - - if (time_after(jiffies, ratelimit_time)) { - rc = 1; - ratelimit_time = jiffies + (HZ/5); - } else - rc = 0; - - spin_unlock_irqrestore(&ata_ratelimit_lock, flags); - - return rc; + return __ratelimit(&ratelimit); } /** @@ -6805,6 +6668,7 @@ EXPORT_SYMBOL_GPL(ata_dummy_port_info); EXPORT_SYMBOL_GPL(ata_link_next); EXPORT_SYMBOL_GPL(ata_dev_next); EXPORT_SYMBOL_GPL(ata_std_bios_param); +EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); EXPORT_SYMBOL_GPL(ata_host_init); EXPORT_SYMBOL_GPL(ata_host_alloc); EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); @@ -6826,11 +6690,9 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); EXPORT_SYMBOL_GPL(ata_mode_string); EXPORT_SYMBOL_GPL(ata_id_xfermask); -EXPORT_SYMBOL_GPL(ata_port_start); EXPORT_SYMBOL_GPL(ata_do_set_mode); EXPORT_SYMBOL_GPL(ata_std_qc_defer); EXPORT_SYMBOL_GPL(ata_noop_qc_prep); -EXPORT_SYMBOL_GPL(ata_port_probe); EXPORT_SYMBOL_GPL(ata_dev_disable); EXPORT_SYMBOL_GPL(sata_set_spd); EXPORT_SYMBOL_GPL(ata_wait_after_reset); @@ -6842,7 +6704,6 @@ EXPORT_SYMBOL_GPL(sata_std_hardreset); EXPORT_SYMBOL_GPL(ata_std_postreset); EXPORT_SYMBOL_GPL(ata_dev_classify); EXPORT_SYMBOL_GPL(ata_dev_pair); -EXPORT_SYMBOL_GPL(ata_port_disable); EXPORT_SYMBOL_GPL(ata_ratelimit); EXPORT_SYMBOL_GPL(ata_wait_register); EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); @@ -6864,7 +6725,6 @@ EXPORT_SYMBOL_GPL(ata_id_c_string); EXPORT_SYMBOL_GPL(ata_do_dev_read_id); EXPORT_SYMBOL_GPL(ata_scsi_simulate); -EXPORT_SYMBOL_GPL(ata_pio_queue_task); EXPORT_SYMBOL_GPL(ata_pio_need_iordy); EXPORT_SYMBOL_GPL(ata_timing_find_mode); EXPORT_SYMBOL_GPL(ata_timing_compute); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 228740f356c..f77a67303f8 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -550,8 +550,8 @@ void ata_scsi_error(struct Scsi_Host *host) DPRINTK("ENTER\n"); - /* synchronize with port task */ - ata_port_flush_task(ap); + /* make sure sff pio task is not running */ + ata_sff_flush_pio_task(ap); /* synchronize with host lock and sort out timeouts */ @@ -3684,7 +3684,7 @@ void ata_std_error_handler(struct ata_port *ap) ata_reset_fn_t hardreset = ops->hardreset; /* ignore built-in hardreset if SCR access is not available */ - if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) + if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) hardreset = NULL; ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 00305f41ed8..224faabd7b7 100644 --- a/drivers/ata/libata-pmp.c +++ 
b/drivers/ata/libata-pmp.c @@ -231,10 +231,14 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr) return "<unknown>"; } +#define PMP_GSCR_SII_POL 129 + static int sata_pmp_configure(struct ata_device *dev, int print_info) { struct ata_port *ap = dev->link->ap; u32 *gscr = dev->gscr; + u16 vendor = sata_pmp_gscr_vendor(gscr); + u16 devid = sata_pmp_gscr_devid(gscr); unsigned int err_mask = 0; const char *reason; int nr_ports, rc; @@ -260,12 +264,34 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info) goto fail; } + /* Disable sending Early R_OK. + * With "cached read" HDD testing and multiple ports busy on a SATA + * host controller, 3726 PMP will very rarely drop a deferred + * R_OK that was intended for the host. Symptom will be all + * 5 drives under test will timeout, get reset, and recover. + */ + if (vendor == 0x1095 && devid == 0x3726) { + u32 reg; + + err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, ®); + if (err_mask) { + rc = -EIO; + reason = "failed to read Sil3726 Private Register"; + goto fail; + } + reg &= ~0x1; + err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); + if (err_mask) { + rc = -EIO; + reason = "failed to write Sil3726 Private Register"; + goto fail; + } + } + if (print_info) { ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, " "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n", - sata_pmp_spec_rev_str(gscr), - sata_pmp_gscr_vendor(gscr), - sata_pmp_gscr_devid(gscr), + sata_pmp_spec_rev_str(gscr), vendor, devid, sata_pmp_gscr_rev(gscr), nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN], gscr[SATA_PMP_GSCR_FEAT]); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 0088cdeb0b1..a54273d2c3c 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -415,6 +415,35 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, } /** + * ata_scsi_unlock_native_capacity - unlock native capacity + * @sdev: SCSI device to adjust device capacity for + * + * This function is called if a partition on @sdev extends beyond + * the end of the device. It requests EH to unlock HPA. + * + * LOCKING: + * Defined by the SCSI layer. Might sleep. 
+ */ +void ata_scsi_unlock_native_capacity(struct scsi_device *sdev) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *dev; + unsigned long flags; + + spin_lock_irqsave(ap->lock, flags); + + dev = ata_scsi_find_dev(ap, sdev); + if (dev && dev->n_sectors < dev->n_native_sectors) { + dev->flags |= ATA_DFLAG_UNLOCK_HPA; + dev->link->eh_info.action |= ATA_EH_RESET; + ata_port_schedule_eh(ap); + } + + spin_unlock_irqrestore(ap->lock, flags); + ata_port_wait_eh(ap); +} + +/** * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl * @ap: target port * @sdev: SCSI device to get identify data for @@ -3345,9 +3374,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) struct ata_link *link; struct ata_device *dev; - if (ap->flags & ATA_FLAG_DISABLED) - return; - repeat: ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, link, ENABLED) { diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index e3877b6843c..efa4a18cfb9 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -40,10 +40,12 @@ #include "libata.h" +static struct workqueue_struct *ata_sff_wq; + const struct ata_port_operations ata_sff_port_ops = { .inherits = &ata_base_port_ops, - .qc_prep = ata_sff_qc_prep, + .qc_prep = ata_noop_qc_prep, .qc_issue = ata_sff_qc_issue, .qc_fill_rtf = ata_sff_qc_fill_rtf, @@ -53,9 +55,7 @@ const struct ata_port_operations ata_sff_port_ops = { .softreset = ata_sff_softreset, .hardreset = sata_sff_hardreset, .postreset = ata_sff_postreset, - .drain_fifo = ata_sff_drain_fifo, .error_handler = ata_sff_error_handler, - .post_internal_cmd = ata_sff_post_internal_cmd, .sff_dev_select = ata_sff_dev_select, .sff_check_status = ata_sff_check_status, @@ -63,178 +63,12 @@ const struct ata_port_operations ata_sff_port_ops = { .sff_tf_read = ata_sff_tf_read, .sff_exec_command = ata_sff_exec_command, .sff_data_xfer = ata_sff_data_xfer, - .sff_irq_on = ata_sff_irq_on, - .sff_irq_clear = ata_sff_irq_clear, + .sff_drain_fifo = ata_sff_drain_fifo, .lost_interrupt = ata_sff_lost_interrupt, - - .port_start = ata_sff_port_start, }; EXPORT_SYMBOL_GPL(ata_sff_port_ops); -const struct ata_port_operations ata_bmdma_port_ops = { - .inherits = &ata_sff_port_ops, - - .mode_filter = ata_bmdma_mode_filter, - - .bmdma_setup = ata_bmdma_setup, - .bmdma_start = ata_bmdma_start, - .bmdma_stop = ata_bmdma_stop, - .bmdma_status = ata_bmdma_status, -}; -EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); - -const struct ata_port_operations ata_bmdma32_port_ops = { - .inherits = &ata_bmdma_port_ops, - - .sff_data_xfer = ata_sff_data_xfer32, - .port_start = ata_sff_port_start32, -}; -EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); - -/** - * ata_fill_sg - Fill PCI IDE PRD table - * @qc: Metadata associated with taskfile to be transferred - * - * Fill PCI IDE PRD (scatter-gather) table with segments - * associated with the current disk command. - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - */ -static void ata_fill_sg(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - struct scatterlist *sg; - unsigned int si, pi; - - pi = 0; - for_each_sg(qc->sg, sg, qc->n_elem, si) { - u32 addr, offset; - u32 sg_len, len; - - /* determine if physical DMA addr spans 64K boundary. - * Note h/w doesn't support 64-bit, so we unconditionally - * truncate dma_addr_t to u32. 
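ata_scsi_unlock_native_capacity() above is exported so it can back the SCSI host template's ->unlock_native_capacity() hook, letting the SCSI disk driver ask libata to unlock HPA when a partition runs past the reported size. How a host template could carry it is sketched below; whether a given driver sets it explicitly or inherits it from the shared libata template is not visible in this hunk, so treat the initializer as an assumption:

#include <scsi/scsi_host.h>
#include <linux/libata.h>

static struct scsi_host_template my_sht = {
        ATA_BASE_SHT("my_driver"),      /* usual libata defaults */
        .unlock_native_capacity = ata_scsi_unlock_native_capacity,
};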
- */ - addr = (u32) sg_dma_address(sg); - sg_len = sg_dma_len(sg); - - while (sg_len) { - offset = addr & 0xffff; - len = sg_len; - if ((offset + sg_len) > 0x10000) - len = 0x10000 - offset; - - ap->prd[pi].addr = cpu_to_le32(addr); - ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); - - pi++; - sg_len -= len; - addr += len; - } - } - - ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); -} - -/** - * ata_fill_sg_dumb - Fill PCI IDE PRD table - * @qc: Metadata associated with taskfile to be transferred - * - * Fill PCI IDE PRD (scatter-gather) table with segments - * associated with the current disk command. Perform the fill - * so that we avoid writing any length 64K records for - * controllers that don't follow the spec. - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - */ -static void ata_fill_sg_dumb(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - struct scatterlist *sg; - unsigned int si, pi; - - pi = 0; - for_each_sg(qc->sg, sg, qc->n_elem, si) { - u32 addr, offset; - u32 sg_len, len, blen; - - /* determine if physical DMA addr spans 64K boundary. - * Note h/w doesn't support 64-bit, so we unconditionally - * truncate dma_addr_t to u32. - */ - addr = (u32) sg_dma_address(sg); - sg_len = sg_dma_len(sg); - - while (sg_len) { - offset = addr & 0xffff; - len = sg_len; - if ((offset + sg_len) > 0x10000) - len = 0x10000 - offset; - - blen = len & 0xffff; - ap->prd[pi].addr = cpu_to_le32(addr); - if (blen == 0) { - /* Some PATA chipsets like the CS5530 can't - cope with 0x0000 meaning 64K as the spec - says */ - ap->prd[pi].flags_len = cpu_to_le32(0x8000); - blen = 0x8000; - ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000); - } - ap->prd[pi].flags_len = cpu_to_le32(blen); - VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); - - pi++; - sg_len -= len; - addr += len; - } - } - - ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); -} - -/** - * ata_sff_qc_prep - Prepare taskfile for submission - * @qc: Metadata associated with taskfile to be prepared - * - * Prepare ATA taskfile for submission. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_sff_qc_prep(struct ata_queued_cmd *qc) -{ - if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; - - ata_fill_sg(qc); -} -EXPORT_SYMBOL_GPL(ata_sff_qc_prep); - -/** - * ata_sff_dumb_qc_prep - Prepare taskfile for submission - * @qc: Metadata associated with taskfile to be prepared - * - * Prepare ATA taskfile for submission. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc) -{ - if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; - - ata_fill_sg_dumb(qc); -} -EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep); - /** * ata_sff_check_status - Read device status reg & clear interrupt * @ap: port where the device is @@ -446,6 +280,27 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline) EXPORT_SYMBOL_GPL(ata_sff_wait_ready); /** + * ata_sff_set_devctl - Write device control reg + * @ap: port where the device is + * @ctl: value to write + * + * Writes ATA taskfile device control register. + * + * Note: may NOT be used as the sff_set_devctl() entry in + * ata_port_operations. + * + * LOCKING: + * Inherited from caller. 
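The ata_sff_set_devctl() helper documented above (its body follows in the next hunk) gives drivers whose device-control register is not a plain byte at ioaddr.ctl_addr a way to override the write, with iowrite8() as the fallback. A hypothetical override, purely illustrative:

#include <linux/libata.h>

static void my_set_devctl(struct ata_port *ap, u8 ctl)
{
        /* e.g. a controller that needs a 32-bit access for the ctl register */
        writel(ctl, ap->ioaddr.ctl_addr);
}

static struct ata_port_operations my_port_ops = {
        .inherits       = &ata_sff_port_ops,
        .sff_set_devctl = my_set_devctl,
};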
+ */ +static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl) +{ + if (ap->ops->sff_set_devctl) + ap->ops->sff_set_devctl(ap, ctl); + else + iowrite8(ctl, ap->ioaddr.ctl_addr); +} + +/** * ata_sff_dev_select - Select device 0/1 on ATA bus * @ap: ATA channel to manipulate * @device: ATA device (numbered from zero) to select @@ -491,7 +346,7 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select); * LOCKING: * caller. */ -void ata_dev_select(struct ata_port *ap, unsigned int device, +static void ata_dev_select(struct ata_port *ap, unsigned int device, unsigned int wait, unsigned int can_sleep) { if (ata_msg_probe(ap)) @@ -517,50 +372,34 @@ void ata_dev_select(struct ata_port *ap, unsigned int device, * Enable interrupts on a legacy IDE device using MMIO or PIO, * wait for idle, clear any pending interrupts. * + * Note: may NOT be used as the sff_irq_on() entry in + * ata_port_operations. + * * LOCKING: * Inherited from caller. */ -u8 ata_sff_irq_on(struct ata_port *ap) +void ata_sff_irq_on(struct ata_port *ap) { struct ata_ioports *ioaddr = &ap->ioaddr; - u8 tmp; + + if (ap->ops->sff_irq_on) { + ap->ops->sff_irq_on(ap); + return; + } ap->ctl &= ~ATA_NIEN; ap->last_ctl = ap->ctl; - if (ioaddr->ctl_addr) - iowrite8(ap->ctl, ioaddr->ctl_addr); - tmp = ata_wait_idle(ap); - - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_set_devctl || ioaddr->ctl_addr) + ata_sff_set_devctl(ap, ap->ctl); + ata_wait_idle(ap); - return tmp; + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } EXPORT_SYMBOL_GPL(ata_sff_irq_on); /** - * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt. - * @ap: Port associated with this ATA transaction. - * - * Clear interrupt and error flags in DMA status register. - * - * May be used as the irq_clear() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_sff_irq_clear(struct ata_port *ap) -{ - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - if (!mmio) - return; - - iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); -} -EXPORT_SYMBOL_GPL(ata_sff_irq_clear); - -/** * ata_sff_tf_load - send taskfile registers to host controller * @ap: Port to which output is sent * @tf: ATA taskfile register set @@ -579,7 +418,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) if (ioaddr->ctl_addr) iowrite8(tf->ctl, ioaddr->ctl_addr); ap->last_ctl = tf->ctl; - ata_wait_idle(ap); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { @@ -615,8 +453,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->device, ioaddr->device_addr); VPRINTK("device 0x%X\n", tf->device); } - - ata_wait_idle(ap); } EXPORT_SYMBOL_GPL(ata_sff_tf_load); @@ -894,7 +730,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); } - if (!do_write) + if (!do_write && !PageSlab(page)) flush_dcache_page(page); qc->curbytes += qc->sect_size; @@ -962,11 +798,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) case ATAPI_PROT_NODATA: ap->hsm_task_state = HSM_ST_LAST; break; +#ifdef CONFIG_ATA_BMDMA case ATAPI_PROT_DMA: ap->hsm_task_state = HSM_ST_LAST; /* initiate bmdma */ ap->ops->bmdma_start(qc); break; +#endif /* CONFIG_ATA_BMDMA */ + default: + BUG(); } } @@ -1165,7 +1005,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) qc = ata_qc_from_tag(ap, qc->tag); if (qc) { if (likely(!(qc->err_mask & AC_ERR_HSM))) { - ap->ops->sff_irq_on(ap); + ata_sff_irq_on(ap); ata_qc_complete(qc); } else ata_port_freeze(ap); @@ -1181,7 +1021,7 @@ static void 
ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) } else { if (in_wq) { spin_lock_irqsave(ap->lock, flags); - ap->ops->sff_irq_on(ap); + ata_sff_irq_on(ap); ata_qc_complete(qc); spin_unlock_irqrestore(ap->lock, flags); } else @@ -1293,7 +1133,7 @@ fsm_start: if (in_wq) spin_unlock_irqrestore(ap->lock, flags); - /* if polling, ata_pio_task() handles the rest. + /* if polling, ata_sff_pio_task() handles the rest. * otherwise, interrupt handler takes over from here. */ break; @@ -1458,14 +1298,38 @@ fsm_start: } EXPORT_SYMBOL_GPL(ata_sff_hsm_move); -void ata_pio_task(struct work_struct *work) +void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) +{ + /* may fail if ata_sff_flush_pio_task() in progress */ + queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, + msecs_to_jiffies(delay)); +} +EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task); + +void ata_sff_flush_pio_task(struct ata_port *ap) +{ + DPRINTK("ENTER\n"); + + cancel_rearming_delayed_work(&ap->sff_pio_task); + ap->hsm_task_state = HSM_ST_IDLE; + + if (ata_msg_ctl(ap)) + ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__); +} + +static void ata_sff_pio_task(struct work_struct *work) { struct ata_port *ap = - container_of(work, struct ata_port, port_task.work); - struct ata_queued_cmd *qc = ap->port_task_data; + container_of(work, struct ata_port, sff_pio_task.work); + struct ata_queued_cmd *qc; u8 status; int poll_next; + /* qc can be NULL if timeout occurred */ + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (!qc) + return; + fsm_start: WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); @@ -1481,7 +1345,7 @@ fsm_start: msleep(2); status = ata_sff_busy_wait(ap, ATA_BUSY, 10); if (status & ATA_BUSY) { - ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE); + ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); return; } } @@ -1497,15 +1361,11 @@ fsm_start: } /** - * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner + * ata_sff_qc_issue - issue taskfile to a SFF controller * @qc: command to issue to device * - * Using various libata functions and hooks, this function - * starts an ATA command. ATA commands are grouped into - * classes called "protocols", and issuing each type of protocol - * is slightly different. - * - * May be used as the qc_issue() entry in ata_port_operations. + * This function issues a PIO or NODATA command to a SFF + * controller. * * LOCKING: * spin_lock_irqsave(host lock) @@ -1520,23 +1380,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) /* Use polling pio if the LLD doesn't handle * interrupt driven pio and atapi CDB interrupt. 
*/ - if (ap->flags & ATA_FLAG_PIO_POLLING) { - switch (qc->tf.protocol) { - case ATA_PROT_PIO: - case ATA_PROT_NODATA: - case ATAPI_PROT_PIO: - case ATAPI_PROT_NODATA: - qc->tf.flags |= ATA_TFLAG_POLLING; - break; - case ATAPI_PROT_DMA: - if (qc->dev->flags & ATA_DFLAG_CDB_INTR) - /* see ata_dma_blacklisted() */ - BUG(); - break; - default: - break; - } - } + if (ap->flags & ATA_FLAG_PIO_POLLING) + qc->tf.flags |= ATA_TFLAG_POLLING; /* select the device */ ata_dev_select(ap, qc->dev->devno, 1, 0); @@ -1551,17 +1396,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) ap->hsm_task_state = HSM_ST_LAST; if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_pio_queue_task(ap, qc, 0); - - break; + ata_sff_queue_pio_task(ap, 0); - case ATA_PROT_DMA: - WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); - - ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ - ap->ops->bmdma_setup(qc); /* set up bmdma */ - ap->ops->bmdma_start(qc); /* initiate bmdma */ - ap->hsm_task_state = HSM_ST_LAST; break; case ATA_PROT_PIO: @@ -1573,20 +1409,21 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) if (qc->tf.flags & ATA_TFLAG_WRITE) { /* PIO data out protocol */ ap->hsm_task_state = HSM_ST_FIRST; - ata_pio_queue_task(ap, qc, 0); + ata_sff_queue_pio_task(ap, 0); - /* always send first data block using - * the ata_pio_task() codepath. + /* always send first data block using the + * ata_sff_pio_task() codepath. */ } else { /* PIO data in protocol */ ap->hsm_task_state = HSM_ST; if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_pio_queue_task(ap, qc, 0); + ata_sff_queue_pio_task(ap, 0); - /* if polling, ata_pio_task() handles the rest. - * otherwise, interrupt handler takes over from here. + /* if polling, ata_sff_pio_task() handles the + * rest. otherwise, interrupt handler takes + * over from here. */ } @@ -1604,19 +1441,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) /* send cdb by polling if no cdb interrupt */ if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || (qc->tf.flags & ATA_TFLAG_POLLING)) - ata_pio_queue_task(ap, qc, 0); - break; - - case ATAPI_PROT_DMA: - WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); - - ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ - ap->ops->bmdma_setup(qc); /* set up bmdma */ - ap->hsm_task_state = HSM_ST_FIRST; - - /* send cdb by polling if no cdb interrupt */ - if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - ata_pio_queue_task(ap, qc, 0); + ata_sff_queue_pio_task(ap, 0); break; default: @@ -1648,27 +1473,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) } EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); -/** - * ata_sff_host_intr - Handle host interrupt for given (port, task) - * @ap: Port on which interrupt arrived (possibly...) - * @qc: Taskfile currently active in engine - * - * Handle host interrupt for given queued command. Currently, - * only DMA interrupts are handled. All other commands are - * handled via polling with interrupts disabled (nIEN bit). - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * One if interrupt was handled, zero if not (shared irq). 
- */ -unsigned int ata_sff_host_intr(struct ata_port *ap, - struct ata_queued_cmd *qc) +static unsigned int ata_sff_idle_irq(struct ata_port *ap) { - struct ata_eh_info *ehi = &ap->link.eh_info; - u8 status, host_stat = 0; - bool bmdma_stopped = false; + ap->stats.idle_irq++; + +#ifdef ATA_IRQ_TRAP + if ((ap->stats.idle_irq % 1000) == 0) { + ap->ops->sff_check_status(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + ata_port_printk(ap, KERN_WARNING, "irq trap\n"); + return 1; + } +#endif + return 0; /* irq not handled */ +} + +static unsigned int __ata_sff_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc, + bool hsmv_on_idle) +{ + u8 status; VPRINTK("ata%u: protocol %d task_state %d\n", ap->print_id, qc->tf.protocol, ap->hsm_task_state); @@ -1685,90 +1510,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap, * need to check ata_is_atapi(qc->tf.protocol) again. */ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - goto idle_irq; - break; - case HSM_ST_LAST: - if (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA) { - /* check status of DMA engine */ - host_stat = ap->ops->bmdma_status(ap); - VPRINTK("ata%u: host_stat 0x%X\n", - ap->print_id, host_stat); - - /* if it's not our irq... */ - if (!(host_stat & ATA_DMA_INTR)) - goto idle_irq; - - /* before we do anything else, clear DMA-Start bit */ - ap->ops->bmdma_stop(qc); - bmdma_stopped = true; - - if (unlikely(host_stat & ATA_DMA_ERR)) { - /* error when transfering data to/from memory */ - qc->err_mask |= AC_ERR_HOST_BUS; - ap->hsm_task_state = HSM_ST_ERR; - } - } + return ata_sff_idle_irq(ap); break; case HSM_ST: + case HSM_ST_LAST: break; default: - goto idle_irq; + return ata_sff_idle_irq(ap); } - /* check main status, clearing INTRQ if needed */ status = ata_sff_irq_status(ap); if (status & ATA_BUSY) { - if (bmdma_stopped) { + if (hsmv_on_idle) { /* BMDMA engine is already stopped, we're screwed */ qc->err_mask |= AC_ERR_HSM; ap->hsm_task_state = HSM_ST_ERR; } else - goto idle_irq; + return ata_sff_idle_irq(ap); } - /* ack bmdma irq events */ - ap->ops->sff_irq_clear(ap); + /* clear irq events */ + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); ata_sff_hsm_move(ap, qc, status, 0); - if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA)) - ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); - return 1; /* irq handled */ - -idle_irq: - ap->stats.idle_irq++; - -#ifdef ATA_IRQ_TRAP - if ((ap->stats.idle_irq % 1000) == 0) { - ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); - ata_port_printk(ap, KERN_WARNING, "irq trap\n"); - return 1; - } -#endif - return 0; /* irq not handled */ } -EXPORT_SYMBOL_GPL(ata_sff_host_intr); /** - * ata_sff_interrupt - Default ATA host interrupt handler - * @irq: irq line (unused) - * @dev_instance: pointer to our ata_host information structure + * ata_sff_port_intr - Handle SFF port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine * - * Default interrupt handler for PCI IDE devices. Calls - * ata_sff_host_intr() for each port that is not disabled. + * Handle port interrupt for given queued command. * * LOCKING: - * Obtains host lock during operation. + * spin_lock_irqsave(host lock) * * RETURNS: - * IRQ_NONE or IRQ_HANDLED. + * One if interrupt was handled, zero if not (shared irq). 
*/ -irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) +unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + return __ata_sff_port_intr(ap, qc, false); +} +EXPORT_SYMBOL_GPL(ata_sff_port_intr); + +static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance, + unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *)) { struct ata_host *host = dev_instance; bool retried = false; @@ -1785,13 +1576,10 @@ retry: struct ata_port *ap = host->ports[i]; struct ata_queued_cmd *qc; - if (unlikely(ap->flags & ATA_FLAG_DISABLED)) - continue; - qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc) { if (!(qc->tf.flags & ATA_TFLAG_POLLING)) - handled |= ata_sff_host_intr(ap, qc); + handled |= port_intr(ap, qc); else polling |= 1 << i; } else @@ -1818,7 +1606,8 @@ retry: if (idle & (1 << i)) { ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } else { /* clear INTRQ and check if BUSY cleared */ if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) @@ -1840,6 +1629,25 @@ retry: return IRQ_RETVAL(handled); } + +/** + * ata_sff_interrupt - Default SFF ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure + * + * Default interrupt handler for PCI IDE devices. Calls + * ata_sff_port_intr() for each port that is not disabled. + * + * LOCKING: + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. + */ +irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) +{ + return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr); +} EXPORT_SYMBOL_GPL(ata_sff_interrupt); /** @@ -1862,11 +1670,8 @@ void ata_sff_lost_interrupt(struct ata_port *ap) /* Only one outstanding command per SFF channel */ qc = ata_qc_from_tag(ap, ap->link.active_tag); - /* Check we have a live one.. */ - if (qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE)) - return; - /* We cannot lose an interrupt on a polled command */ - if (qc->tf.flags & ATA_TFLAG_POLLING) + /* We cannot lose an interrupt on a non-existent or polled command */ + if (!qc || qc->tf.flags & ATA_TFLAG_POLLING) return; /* See if the controller thinks it is still busy - if so the command isn't a lost IRQ but is still in progress */ @@ -1880,7 +1685,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap) status); /* Run the host interrupt logic as if the interrupt had not been lost */ - ata_sff_host_intr(ap, qc); + ata_sff_port_intr(ap, qc); } EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); @@ -1888,20 +1693,18 @@ EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); * ata_sff_freeze - Freeze SFF controller port * @ap: port to freeze * - * Freeze BMDMA controller port. + * Freeze SFF controller port. * * LOCKING: * Inherited from caller. */ void ata_sff_freeze(struct ata_port *ap) { - struct ata_ioports *ioaddr = &ap->ioaddr; - ap->ctl |= ATA_NIEN; ap->last_ctl = ap->ctl; - if (ioaddr->ctl_addr) - iowrite8(ap->ctl, ioaddr->ctl_addr); + if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) + ata_sff_set_devctl(ap, ap->ctl); /* Under certain circumstances, some controllers raise IRQ on * ATA_NIEN manipulation. 
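__ata_sff_interrupt() above takes the per-port service routine as a callback, so the file can build more than one top-level handler around the same dispatch and spurious-IRQ logic; the default ata_sff_interrupt() simply passes ata_sff_port_intr(). A sketch of such an in-file variant, with the extra per-port work purely hypothetical:

static unsigned int my_port_intr(struct ata_port *ap,
                                 struct ata_queued_cmd *qc)
{
        /* controller-specific pre/post processing would go here */
        return ata_sff_port_intr(ap, qc);
}

static irqreturn_t my_interrupt(int irq, void *dev_instance)
{
        return __ata_sff_interrupt(irq, dev_instance, my_port_intr);
}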
Also, many controllers fail to mask @@ -1909,7 +1712,8 @@ void ata_sff_freeze(struct ata_port *ap) */ ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } EXPORT_SYMBOL_GPL(ata_sff_freeze); @@ -1926,8 +1730,9 @@ void ata_sff_thaw(struct ata_port *ap) { /* clear & re-enable interrupts */ ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); - ap->ops->sff_irq_on(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + ata_sff_irq_on(ap); } EXPORT_SYMBOL_GPL(ata_sff_thaw); @@ -2301,8 +2106,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes) } /* set up device control */ - if (ap->ioaddr.ctl_addr) { - iowrite8(ap->ctl, ap->ioaddr.ctl_addr); + if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) { + ata_sff_set_devctl(ap, ap->ctl); ap->last_ctl = ap->ctl; } } @@ -2342,7 +2147,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc) EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); /** - * ata_sff_error_handler - Stock error handler for BMDMA controller + * ata_sff_error_handler - Stock error handler for SFF controller * @ap: port to handle error for * * Stock error handler for SFF controller. It can handle both @@ -2359,62 +2164,32 @@ void ata_sff_error_handler(struct ata_port *ap) ata_reset_fn_t hardreset = ap->ops->hardreset; struct ata_queued_cmd *qc; unsigned long flags; - int thaw = 0; qc = __ata_qc_from_tag(ap, ap->link.active_tag); if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) qc = NULL; - /* reset PIO HSM and stop DMA engine */ spin_lock_irqsave(ap->lock, flags); - ap->hsm_task_state = HSM_ST_IDLE; - - if (ap->ioaddr.bmdma_addr && - qc && (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA)) { - u8 host_stat; - - host_stat = ap->ops->bmdma_status(ap); - - /* BMDMA controllers indicate host bus error by - * setting DMA_ERR bit and timing out. As it wasn't - * really a timeout event, adjust error mask and - * cancel frozen state. - */ - if (qc->err_mask == AC_ERR_TIMEOUT - && (host_stat & ATA_DMA_ERR)) { - qc->err_mask = AC_ERR_HOST_BUS; - thaw = 1; - } - - ap->ops->bmdma_stop(qc); - } - - ata_sff_sync(ap); /* FIXME: We don't need this */ - ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); - /* We *MUST* do FIFO draining before we issue a reset as several - * devices helpfully clear their internal state and will lock solid - * if we touch the data port post reset. Pass qc in case anyone wants - * to do different PIO/DMA recovery or has per command fixups + /* + * We *MUST* do FIFO draining before we issue a reset as + * several devices helpfully clear their internal state and + * will lock solid if we touch the data port post reset. Pass + * qc in case anyone wants to do different PIO/DMA recovery or + * has per command fixups */ - if (ap->ops->drain_fifo) - ap->ops->drain_fifo(qc); + if (ap->ops->sff_drain_fifo) + ap->ops->sff_drain_fifo(qc); spin_unlock_irqrestore(ap->lock, flags); - if (thaw) - ata_eh_thaw_port(ap); - - /* PIO and DMA engines have been stopped, perform recovery */ - - /* Ignore ata_sff_softreset if ctl isn't accessible and - * built-in hardresets if SCR access isn't available. 
- */ + /* ignore ata_sff_softreset if ctl isn't accessible */ if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) softreset = NULL; - if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) + + /* ignore built-in hardresets if SCR access is not available */ + if ((hardreset == sata_std_hardreset || + hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) hardreset = NULL; ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, @@ -2423,73 +2198,6 @@ void ata_sff_error_handler(struct ata_port *ap) EXPORT_SYMBOL_GPL(ata_sff_error_handler); /** - * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller - * @qc: internal command to clean up - * - * LOCKING: - * Kernel thread context (may sleep) - */ -void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - unsigned long flags; - - spin_lock_irqsave(ap->lock, flags); - - ap->hsm_task_state = HSM_ST_IDLE; - - if (ap->ioaddr.bmdma_addr) - ap->ops->bmdma_stop(qc); - - spin_unlock_irqrestore(ap->lock, flags); -} -EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd); - -/** - * ata_sff_port_start - Set port up for dma. - * @ap: Port to initialize - * - * Called just after data structures for each port are - * initialized. Allocates space for PRD table if the device - * is DMA capable SFF. - * - * May be used as the port_start() entry in ata_port_operations. - * - * LOCKING: - * Inherited from caller. - */ -int ata_sff_port_start(struct ata_port *ap) -{ - if (ap->ioaddr.bmdma_addr) - return ata_port_start(ap); - return 0; -} -EXPORT_SYMBOL_GPL(ata_sff_port_start); - -/** - * ata_sff_port_start32 - Set port up for dma. - * @ap: Port to initialize - * - * Called just after data structures for each port are - * initialized. Allocates space for PRD table if the device - * is DMA capable SFF. - * - * May be used as the port_start() entry in ata_port_operations for - * devices that are capable of 32bit PIO. - * - * LOCKING: - * Inherited from caller. - */ -int ata_sff_port_start32(struct ata_port *ap) -{ - ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; - if (ap->ioaddr.bmdma_addr) - return ata_port_start(ap); - return 0; -} -EXPORT_SYMBOL_GPL(ata_sff_port_start32); - -/** * ata_sff_std_ports - initialize ioaddr with standard port offsets. * @ioaddr: IO address structure to be initialized * @@ -2515,302 +2223,8 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr) } EXPORT_SYMBOL_GPL(ata_sff_std_ports); -unsigned long ata_bmdma_mode_filter(struct ata_device *adev, - unsigned long xfer_mask) -{ - /* Filter out DMA modes if the device has been configured by - the BIOS as PIO only */ - - if (adev->link->ap->ioaddr.bmdma_addr == NULL) - xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); - return xfer_mask; -} -EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter); - -/** - * ata_bmdma_setup - Set up PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_bmdma_setup(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); - u8 dmactl; - - /* load PRD table addr. 
*/ - mb(); /* make sure PRD table writes are visible to controller */ - iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); - - /* specify data direction, triple-check start bit is clear */ - dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); - if (!rw) - dmactl |= ATA_DMA_WR; - iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - - /* issue r/w command */ - ap->ops->sff_exec_command(ap, &qc->tf); -} -EXPORT_SYMBOL_GPL(ata_bmdma_setup); - -/** - * ata_bmdma_start - Start a PCI IDE BMDMA transaction - * @qc: Info associated with this ATA transaction. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_bmdma_start(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - u8 dmactl; - - /* start host DMA transaction */ - dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); - - /* Strictly, one may wish to issue an ioread8() here, to - * flush the mmio write. However, control also passes - * to the hardware at this point, and it will interrupt - * us when we are to resume control. So, in effect, - * we don't care when the mmio write flushes. - * Further, a read of the DMA status register _immediately_ - * following the write may not be what certain flaky hardware - * is expected, so I think it is best to not add a readb() - * without first all the MMIO ATA cards/mobos. - * Or maybe I'm just being paranoid. - * - * FIXME: The posting of this write means I/O starts are - * unneccessarily delayed for MMIO - */ -} -EXPORT_SYMBOL_GPL(ata_bmdma_start); - -/** - * ata_bmdma_stop - Stop PCI IDE BMDMA transfer - * @qc: Command we are ending DMA for - * - * Clears the ATA_DMA_START flag in the dma control register - * - * May be used as the bmdma_stop() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_bmdma_stop(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - /* clear start/stop bit */ - iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, - mmio + ATA_DMA_CMD); - - /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ - ata_sff_dma_pause(ap); -} -EXPORT_SYMBOL_GPL(ata_bmdma_stop); - -/** - * ata_bmdma_status - Read PCI IDE BMDMA status - * @ap: Port associated with this ATA transaction. - * - * Read and return BMDMA status register. - * - * May be used as the bmdma_status() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -u8 ata_bmdma_status(struct ata_port *ap) -{ - return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); -} -EXPORT_SYMBOL_GPL(ata_bmdma_status); - -/** - * ata_bus_reset - reset host port and associated ATA channel - * @ap: port to reset - * - * This is typically the first time we actually start issuing - * commands to the ATA channel. We wait for BSY to clear, then - * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its - * result. Determine what devices, if any, are on the channel - * by looking at the device 0/1 error register. Look at the signature - * stored in each device's taskfile registers, to determine if - * the device is ATA or ATAPI. - * - * LOCKING: - * PCI/etc. bus probe sem. - * Obtains host lock. - * - * SIDE EFFECTS: - * Sets ATA_FLAG_DISABLED if bus reset fails. - * - * DEPRECATED: - * This function is only for drivers which still use old EH and - * will be removed soon. 
- */ -void ata_bus_reset(struct ata_port *ap) -{ - struct ata_device *device = ap->link.device; - struct ata_ioports *ioaddr = &ap->ioaddr; - unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; - u8 err; - unsigned int dev0, dev1 = 0, devmask = 0; - int rc; - - DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no); - - /* determine if device 0/1 are present */ - if (ap->flags & ATA_FLAG_SATA_RESET) - dev0 = 1; - else { - dev0 = ata_devchk(ap, 0); - if (slave_possible) - dev1 = ata_devchk(ap, 1); - } - - if (dev0) - devmask |= (1 << 0); - if (dev1) - devmask |= (1 << 1); - - /* select device 0 again */ - ap->ops->sff_dev_select(ap, 0); - - /* issue bus reset */ - if (ap->flags & ATA_FLAG_SRST) { - rc = ata_bus_softreset(ap, devmask, - ata_deadline(jiffies, 40000)); - if (rc && rc != -ENODEV) - goto err_out; - } - - /* - * determine by signature whether we have ATA or ATAPI devices - */ - device[0].class = ata_sff_dev_classify(&device[0], dev0, &err); - if ((slave_possible) && (err != 0x81)) - device[1].class = ata_sff_dev_classify(&device[1], dev1, &err); - - /* is double-select really necessary? */ - if (device[1].class != ATA_DEV_NONE) - ap->ops->sff_dev_select(ap, 1); - if (device[0].class != ATA_DEV_NONE) - ap->ops->sff_dev_select(ap, 0); - - /* if no devices were detected, disable this port */ - if ((device[0].class == ATA_DEV_NONE) && - (device[1].class == ATA_DEV_NONE)) - goto err_out; - - if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { - /* set up device control for ATA_FLAG_SATA_RESET */ - iowrite8(ap->ctl, ioaddr->ctl_addr); - ap->last_ctl = ap->ctl; - } - - DPRINTK("EXIT\n"); - return; - -err_out: - ata_port_printk(ap, KERN_ERR, "disabling port\n"); - ata_port_disable(ap); - - DPRINTK("EXIT\n"); -} -EXPORT_SYMBOL_GPL(ata_bus_reset); - #ifdef CONFIG_PCI -/** - * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex - * @pdev: PCI device - * - * Some PCI ATA devices report simplex mode but in fact can be told to - * enter non simplex mode. This implements the necessary logic to - * perform the task on such devices. Calling it on other devices will - * have -undefined- behaviour. - */ -int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) -{ - unsigned long bmdma = pci_resource_start(pdev, 4); - u8 simplex; - - if (bmdma == 0) - return -ENOENT; - - simplex = inb(bmdma + 0x02); - outb(simplex & 0x60, bmdma + 0x02); - simplex = inb(bmdma + 0x02); - if (simplex & 0x80) - return -EOPNOTSUPP; - return 0; -} -EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); - -/** - * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host - * @host: target ATA host - * - * Acquire PCI BMDMA resources and initialize @host accordingly. - * - * LOCKING: - * Inherited from calling layer (may sleep). - * - * RETURNS: - * 0 on success, -errno otherwise. 
- */ -int ata_pci_bmdma_init(struct ata_host *host) -{ - struct device *gdev = host->dev; - struct pci_dev *pdev = to_pci_dev(gdev); - int i, rc; - - /* No BAR4 allocation: No DMA */ - if (pci_resource_start(pdev, 4) == 0) - return 0; - - /* TODO: If we get no DMA mask we should fall back to PIO */ - rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - return rc; - rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); - if (rc) - return rc; - - /* request and iomap DMA region */ - rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); - if (rc) { - dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n"); - return -ENOMEM; - } - host->iomap = pcim_iomap_table(pdev); - - for (i = 0; i < 2; i++) { - struct ata_port *ap = host->ports[i]; - void __iomem *bmdma = host->iomap[4] + 8 * i; - - if (ata_port_is_dummy(ap)) - continue; - - ap->ioaddr.bmdma_addr = bmdma; - if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && - (ioread8(bmdma + 2) & 0x80)) - host->flags |= ATA_HOST_SIMPLEX; - - ata_port_desc(ap, "bmdma 0x%llx", - (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); - } - - return 0; -} -EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); - static int ata_resources_present(struct pci_dev *pdev, int port) { int i; @@ -2905,13 +2319,13 @@ int ata_pci_sff_init_host(struct ata_host *host) EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); /** - * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host + * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host * @pdev: target PCI device * @ppi: array of port_info, must be enough for two ports * @r_host: out argument for the initialized ATA host * - * Helper to allocate ATA host for @pdev, acquire all native PCI - * resources and initialize it accordingly in one go. + * Helper to allocate PIO-only SFF ATA host for @pdev, acquire + * all PCI resources and initialize it accordingly in one go. * * LOCKING: * Inherited from calling layer (may sleep). @@ -2941,22 +2355,10 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev, if (rc) goto err_out; - /* init DMA related stuff */ - rc = ata_pci_bmdma_init(host); - if (rc) - goto err_bmdma; - devres_remove_group(&pdev->dev, NULL); *r_host = host; return 0; -err_bmdma: - /* This is necessary because PCI and iomap resources are - * merged and releasing the top group won't release the - * acquired resources if some of those have been acquired - * before entering this function. - */ - pcim_iounmap_regions(pdev, 0xf); err_out: devres_release_group(&pdev->dev, NULL); return rc; @@ -3057,8 +2459,21 @@ out: } EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); +static const struct ata_port_info *ata_sff_find_valid_pi( + const struct ata_port_info * const *ppi) +{ + int i; + + /* look up the first valid port_info */ + for (i = 0; i < 2 && ppi[i]; i++) + if (ppi[i]->port_ops != &ata_dummy_port_ops) + return ppi[i]; + + return NULL; +} + /** - * ata_pci_sff_init_one - Initialize/register PCI IDE host controller + * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller * @pdev: Controller to be initialized * @ppi: array of port_info, must be enough for two ports * @sht: scsi_host_template to use when registering the host @@ -3067,11 +2482,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); * * This is a helper function which can be called from a driver's * xxx_init_one() probe function if the hardware uses traditional - * IDE taskfile registers. 
- * - * This function calls pci_enable_device(), reserves its register - * regions, sets the dma mask, enables bus master mode, and calls - * ata_device_add() + * IDE taskfile registers and is PIO only. * * ASSUMPTION: * Nobody makes a single channel controller that appears solely as @@ -3088,20 +2499,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, struct scsi_host_template *sht, void *host_priv, int hflag) { struct device *dev = &pdev->dev; - const struct ata_port_info *pi = NULL; + const struct ata_port_info *pi; struct ata_host *host = NULL; - int i, rc; + int rc; DPRINTK("ENTER\n"); - /* look up the first valid port_info */ - for (i = 0; i < 2 && ppi[i]; i++) { - if (ppi[i]->port_ops != &ata_dummy_port_ops) { - pi = ppi[i]; - break; - } - } - + pi = ata_sff_find_valid_pi(ppi); if (!pi) { dev_printk(KERN_ERR, &pdev->dev, "no valid port_info specified\n"); @@ -3122,7 +2526,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, host->private_data = host_priv; host->flags |= hflag; - pci_set_master(pdev); rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); out: if (rc == 0) @@ -3135,3 +2538,801 @@ out: EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); #endif /* CONFIG_PCI */ + +/* + * BMDMA support + */ + +#ifdef CONFIG_ATA_BMDMA + +const struct ata_port_operations ata_bmdma_port_ops = { + .inherits = &ata_sff_port_ops, + + .error_handler = ata_bmdma_error_handler, + .post_internal_cmd = ata_bmdma_post_internal_cmd, + + .qc_prep = ata_bmdma_qc_prep, + .qc_issue = ata_bmdma_qc_issue, + + .sff_irq_clear = ata_bmdma_irq_clear, + .bmdma_setup = ata_bmdma_setup, + .bmdma_start = ata_bmdma_start, + .bmdma_stop = ata_bmdma_stop, + .bmdma_status = ata_bmdma_status, + + .port_start = ata_bmdma_port_start, +}; +EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); + +const struct ata_port_operations ata_bmdma32_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .sff_data_xfer = ata_sff_data_xfer32, + .port_start = ata_bmdma_port_start32, +}; +EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); + +/** + * ata_bmdma_fill_sg - Fill PCI IDE PRD table + * @qc: Metadata associated with taskfile to be transferred + * + * Fill PCI IDE PRD (scatter-gather) table with segments + * associated with the current disk command. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + */ +static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_bmdma_prd *prd = ap->bmdma_prd; + struct scatterlist *sg; + unsigned int si, pi; + + pi = 0; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, offset; + u32 sg_len, len; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. + */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + offset = addr & 0xffff; + len = sg_len; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + prd[pi].addr = cpu_to_le32(addr); + prd[pi].flags_len = cpu_to_le32(len & 0xffff); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); + + pi++; + sg_len -= len; + addr += len; + } + } + + prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} + +/** + * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table + * @qc: Metadata associated with taskfile to be transferred + * + * Fill PCI IDE PRD (scatter-gather) table with segments + * associated with the current disk command. Perform the fill + * so that we avoid writing any length 64K records for + * controllers that don't follow the spec. 
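As a standalone illustration of the splitting rule described above (plain user-space C, with invented example values): a scatter/gather segment that crosses a 64K boundary is cut at the boundary, and the "dumb" variant additionally avoids emitting a 0x0000 length field, which the spec defines as 64K but which some chips mishandle, by writing two 32K entries instead.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr = 0x1ff00;	/* segment start (example value) */
	uint32_t sg_len = 0x300;	/* 0x1ff00..0x201ff crosses the 64K line */

	while (sg_len) {
		uint32_t offset = addr & 0xffff;
		uint32_t len = sg_len;

		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;	/* cut at the boundary */

		printf("PRD entry: addr=0x%05x len=0x%03x\n", addr, len);
		addr += len;
		sg_len -= len;
	}
	return 0;	/* prints (0x1ff00, 0x100) then (0x20000, 0x200) */
}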
+ * + * LOCKING: + * spin_lock_irqsave(host lock) + * + */ +static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_bmdma_prd *prd = ap->bmdma_prd; + struct scatterlist *sg; + unsigned int si, pi; + + pi = 0; + for_each_sg(qc->sg, sg, qc->n_elem, si) { + u32 addr, offset; + u32 sg_len, len, blen; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. + */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + offset = addr & 0xffff; + len = sg_len; + if ((offset + sg_len) > 0x10000) + len = 0x10000 - offset; + + blen = len & 0xffff; + prd[pi].addr = cpu_to_le32(addr); + if (blen == 0) { + /* Some PATA chipsets like the CS5530 can't + cope with 0x0000 meaning 64K as the spec + says */ + prd[pi].flags_len = cpu_to_le32(0x8000); + blen = 0x8000; + prd[++pi].addr = cpu_to_le32(addr + 0x8000); + } + prd[pi].flags_len = cpu_to_le32(blen); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); + + pi++; + sg_len -= len; + addr += len; + } + } + + prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} + +/** + * ata_bmdma_qc_prep - Prepare taskfile for submission + * @qc: Metadata associated with taskfile to be prepared + * + * Prepare ATA taskfile for submission. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) +{ + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + ata_bmdma_fill_sg(qc); +} +EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); + +/** + * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission + * @qc: Metadata associated with taskfile to be prepared + * + * Prepare ATA taskfile for submission. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) +{ + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + ata_bmdma_fill_sg_dumb(qc); +} +EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); + +/** + * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller + * @qc: command to issue to device + * + * This function issues a PIO, NODATA or DMA command to a + * SFF/BMDMA controller. PIO and NODATA are handled by + * ata_sff_qc_issue(). 
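A driver that must do per-command setup can wrap this entry point rather than reimplement it; the cs5530 and hpt3x2n conversions later in this patch follow that pattern. A hedged sketch, where foo_reprogram_timings() is an invented stand-in for the chip-specific work:

/* stand-in for chip-specific timing setup; hardware access omitted */
static void foo_reprogram_timings(struct ata_port *ap, struct ata_device *adev)
{
}

static unsigned int foo_qc_issue(struct ata_queued_cmd *qc)
{
	foo_reprogram_timings(qc->ap, qc->dev);

	/* DMA goes through the BMDMA path, PIO/NODATA fall back to SFF */
	return ata_bmdma_qc_issue(qc);
}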
+ * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * Zero on success, AC_ERR_* mask on failure + */ +unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + /* see ata_dma_blacklisted() */ + BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) && + qc->tf.protocol == ATAPI_PROT_DMA); + + /* defer PIO handling to sff_qc_issue */ + if (!ata_is_dma(qc->tf.protocol)) + return ata_sff_qc_issue(qc); + + /* select the device */ + ata_dev_select(ap, qc->dev->devno, 1, 0); + + /* start the command */ + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + + ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->bmdma_setup(qc); /* set up bmdma */ + ap->ops->bmdma_start(qc); /* initiate bmdma */ + ap->hsm_task_state = HSM_ST_LAST; + break; + + case ATAPI_PROT_DMA: + WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); + + ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->bmdma_setup(qc); /* set up bmdma */ + ap->hsm_task_state = HSM_ST_FIRST; + + /* send cdb by polling if no cdb interrupt */ + if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + ata_sff_queue_pio_task(ap, 0); + break; + + default: + WARN_ON(1); + return AC_ERR_SYSTEM; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); + +/** + * ata_bmdma_port_intr - Handle BMDMA port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine + * + * Handle port interrupt for given queued command. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * One if interrupt was handled, zero if not (shared irq). + */ +unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + u8 host_stat = 0; + bool bmdma_stopped = false; + unsigned int handled; + + if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { + /* check status of DMA engine */ + host_stat = ap->ops->bmdma_status(ap); + VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); + + /* if it's not our irq... */ + if (!(host_stat & ATA_DMA_INTR)) + return ata_sff_idle_irq(ap); + + /* before we do anything else, clear DMA-Start bit */ + ap->ops->bmdma_stop(qc); + bmdma_stopped = true; + + if (unlikely(host_stat & ATA_DMA_ERR)) { + /* error when transfering data to/from memory */ + qc->err_mask |= AC_ERR_HOST_BUS; + ap->hsm_task_state = HSM_ST_ERR; + } + } + + handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); + + if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) + ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); + + return handled; +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); + +/** + * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure + * + * Default interrupt handler for PCI IDE devices. Calls + * ata_bmdma_port_intr() for each port that is not disabled. + * + * LOCKING: + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. + */ +irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) +{ + return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); +} +EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); + +/** + * ata_bmdma_error_handler - Stock error handler for BMDMA controller + * @ap: port to handle error for + * + * Stock error handler for BMDMA controller. It can handle both + * PATA and SATA controllers. 
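A driver that needs extra recovery steps would normally layer them on top of this handler and wire the wrapper in as .error_handler in its port ops, rather than replace the stock logic. A sketch with an invented foo_quiesce_chip() helper:

/* invented helper: put controller-private state back into a sane shape */
static void foo_quiesce_chip(struct ata_port *ap)
{
}

static void foo_error_handler(struct ata_port *ap)
{
	foo_quiesce_chip(ap);
	ata_bmdma_error_handler(ap);	/* stock PIO HSM + BMDMA recovery */
}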
Most BMDMA controllers should be + * able to use this EH as-is or with some added handling before + * and after. + * + * LOCKING: + * Kernel thread context (may sleep) + */ +void ata_bmdma_error_handler(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + unsigned long flags; + bool thaw = false; + + qc = __ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) + qc = NULL; + + /* reset PIO HSM and stop DMA engine */ + spin_lock_irqsave(ap->lock, flags); + + if (qc && ata_is_dma(qc->tf.protocol)) { + u8 host_stat; + + host_stat = ap->ops->bmdma_status(ap); + + /* BMDMA controllers indicate host bus error by + * setting DMA_ERR bit and timing out. As it wasn't + * really a timeout event, adjust error mask and + * cancel frozen state. + */ + if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { + qc->err_mask = AC_ERR_HOST_BUS; + thaw = true; + } + + ap->ops->bmdma_stop(qc); + + /* if we're gonna thaw, make sure IRQ is clear */ + if (thaw) { + ap->ops->sff_check_status(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + } + } + + spin_unlock_irqrestore(ap->lock, flags); + + if (thaw) + ata_eh_thaw_port(ap); + + ata_sff_error_handler(ap); +} +EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); + +/** + * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA + * @qc: internal command to clean up + * + * LOCKING: + * Kernel thread context (may sleep) + */ +void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned long flags; + + if (ata_is_dma(qc->tf.protocol)) { + spin_lock_irqsave(ap->lock, flags); + ap->ops->bmdma_stop(qc); + spin_unlock_irqrestore(ap->lock, flags); + } +} +EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); + +/** + * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. + * @ap: Port associated with this ATA transaction. + * + * Clear interrupt and error flags in DMA status register. + * + * May be used as the irq_clear() entry in ata_port_operations. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_irq_clear(struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + if (!mmio) + return; + + iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); +} +EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); + +/** + * ata_bmdma_setup - Set up PCI IDE BMDMA transaction + * @qc: Info associated with this ATA transaction. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); + u8 dmactl; + + /* load PRD table addr. */ + mb(); /* make sure PRD table writes are visible to controller */ + iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); + + /* specify data direction, triple-check start bit is clear */ + dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); + if (!rw) + dmactl |= ATA_DMA_WR; + iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + + /* issue r/w command */ + ap->ops->sff_exec_command(ap, &qc->tf); +} +EXPORT_SYMBOL_GPL(ata_bmdma_setup); + +/** + * ata_bmdma_start - Start a PCI IDE BMDMA transaction + * @qc: Info associated with this ATA transaction. 
+ * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + u8 dmactl; + + /* start host DMA transaction */ + dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + + /* Strictly, one may wish to issue an ioread8() here, to + * flush the mmio write. However, control also passes + * to the hardware at this point, and it will interrupt + * us when we are to resume control. So, in effect, + * we don't care when the mmio write flushes. + * Further, a read of the DMA status register _immediately_ + * following the write may not be what certain flaky hardware + * is expected, so I think it is best to not add a readb() + * without first all the MMIO ATA cards/mobos. + * Or maybe I'm just being paranoid. + * + * FIXME: The posting of this write means I/O starts are + * unneccessarily delayed for MMIO + */ +} +EXPORT_SYMBOL_GPL(ata_bmdma_start); + +/** + * ata_bmdma_stop - Stop PCI IDE BMDMA transfer + * @qc: Command we are ending DMA for + * + * Clears the ATA_DMA_START flag in the dma control register + * + * May be used as the bmdma_stop() entry in ata_port_operations. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + /* clear start/stop bit */ + iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, + mmio + ATA_DMA_CMD); + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_sff_dma_pause(ap); +} +EXPORT_SYMBOL_GPL(ata_bmdma_stop); + +/** + * ata_bmdma_status - Read PCI IDE BMDMA status + * @ap: Port associated with this ATA transaction. + * + * Read and return BMDMA status register. + * + * May be used as the bmdma_status() entry in ata_port_operations. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +u8 ata_bmdma_status(struct ata_port *ap) +{ + return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); +} +EXPORT_SYMBOL_GPL(ata_bmdma_status); + + +/** + * ata_bmdma_port_start - Set port up for bmdma. + * @ap: Port to initialize + * + * Called just after data structures for each port are + * initialized. Allocates space for PRD table. + * + * May be used as the port_start() entry in ata_port_operations. + * + * LOCKING: + * Inherited from caller. + */ +int ata_bmdma_port_start(struct ata_port *ap) +{ + if (ap->mwdma_mask || ap->udma_mask) { + ap->bmdma_prd = + dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ, + &ap->bmdma_prd_dma, GFP_KERNEL); + if (!ap->bmdma_prd) + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_start); + +/** + * ata_bmdma_port_start32 - Set port up for dma. + * @ap: Port to initialize + * + * Called just after data structures for each port are + * initialized. Enables 32bit PIO and allocates space for PRD + * table. + * + * May be used as the port_start() entry in ata_port_operations for + * devices that are capable of 32bit PIO. + * + * LOCKING: + * Inherited from caller. + */ +int ata_bmdma_port_start32(struct ata_port *ap) +{ + ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; + return ata_bmdma_port_start(ap); +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_start32); + +#ifdef CONFIG_PCI + +/** + * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex + * @pdev: PCI device + * + * Some PCI ATA devices report simplex mode but in fact can be told to + * enter non simplex mode. 
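A sketch of how a probe path might use the simplex helper (illustrative only; libata copes with simplex hosts regardless, DMA is simply serialized between the two channels):

static void foo_try_clear_simplex(struct pci_dev *pdev)
{
	int rc = ata_pci_bmdma_clear_simplex(pdev);

	if (rc == -EOPNOTSUPP)
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller stays in simplex mode, DMA will be serialized\n");
}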
This implements the necessary logic to + * perform the task on such devices. Calling it on other devices will + * have -undefined- behaviour. + */ +int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) +{ + unsigned long bmdma = pci_resource_start(pdev, 4); + u8 simplex; + + if (bmdma == 0) + return -ENOENT; + + simplex = inb(bmdma + 0x02); + outb(simplex & 0x60, bmdma + 0x02); + simplex = inb(bmdma + 0x02); + if (simplex & 0x80) + return -EOPNOTSUPP; + return 0; +} +EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); + +static void ata_bmdma_nodma(struct ata_host *host, const char *reason) +{ + int i; + + dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n", + reason); + + for (i = 0; i < 2; i++) { + host->ports[i]->mwdma_mask = 0; + host->ports[i]->udma_mask = 0; + } +} + +/** + * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host + * @host: target ATA host + * + * Acquire PCI BMDMA resources and initialize @host accordingly. + * + * LOCKING: + * Inherited from calling layer (may sleep). + */ +void ata_pci_bmdma_init(struct ata_host *host) +{ + struct device *gdev = host->dev; + struct pci_dev *pdev = to_pci_dev(gdev); + int i, rc; + + /* No BAR4 allocation: No DMA */ + if (pci_resource_start(pdev, 4) == 0) { + ata_bmdma_nodma(host, "BAR4 is zero"); + return; + } + + /* + * Some controllers require BMDMA region to be initialized + * even if DMA is not in use to clear IRQ status via + * ->sff_irq_clear method. Try to initialize bmdma_addr + * regardless of dma masks. + */ + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + ata_bmdma_nodma(host, "failed to set dma mask"); + if (!rc) { + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + ata_bmdma_nodma(host, + "failed to set consistent dma mask"); + } + + /* request and iomap DMA region */ + rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); + if (rc) { + ata_bmdma_nodma(host, "failed to request/iomap BAR4"); + return; + } + host->iomap = pcim_iomap_table(pdev); + + for (i = 0; i < 2; i++) { + struct ata_port *ap = host->ports[i]; + void __iomem *bmdma = host->iomap[4] + 8 * i; + + if (ata_port_is_dummy(ap)) + continue; + + ap->ioaddr.bmdma_addr = bmdma; + if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && + (ioread8(bmdma + 2) & 0x80)) + host->flags |= ATA_HOST_SIMPLEX; + + ata_port_desc(ap, "bmdma 0x%llx", + (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); + } +} +EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); + +/** + * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host + * @pdev: target PCI device + * @ppi: array of port_info, must be enough for two ports + * @r_host: out argument for the initialized ATA host + * + * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI + * resources and initialize it accordingly in one go. + * + * LOCKING: + * Inherited from calling layer (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. 
+ */ +int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct ata_host **r_host) +{ + int rc; + + rc = ata_pci_sff_prepare_host(pdev, ppi, r_host); + if (rc) + return rc; + + ata_pci_bmdma_init(*r_host); + return 0; +} +EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host); + +/** + * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller + * @pdev: Controller to be initialized + * @ppi: array of port_info, must be enough for two ports + * @sht: scsi_host_template to use when registering the host + * @host_priv: host private_data + * @hflags: host flags + * + * This function is similar to ata_pci_sff_init_one() but also + * takes care of BMDMA initialization. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, negative on errno-based value on error. + */ +int ata_pci_bmdma_init_one(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct scsi_host_template *sht, void *host_priv, + int hflags) +{ + struct device *dev = &pdev->dev; + const struct ata_port_info *pi; + struct ata_host *host = NULL; + int rc; + + DPRINTK("ENTER\n"); + + pi = ata_sff_find_valid_pi(ppi); + if (!pi) { + dev_printk(KERN_ERR, &pdev->dev, + "no valid port_info specified\n"); + return -EINVAL; + } + + if (!devres_open_group(dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + rc = pcim_enable_device(pdev); + if (rc) + goto out; + + /* prepare and activate BMDMA host */ + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); + if (rc) + goto out; + host->private_data = host_priv; + host->flags |= hflags; + + pci_set_master(pdev); + rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); + out: + if (rc == 0) + devres_remove_group(&pdev->dev, NULL); + else + devres_release_group(&pdev->dev, NULL); + + return rc; +} +EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); + +#endif /* CONFIG_PCI */ +#endif /* CONFIG_ATA_BMDMA */ + +/** + * ata_sff_port_init - Initialize SFF/BMDMA ATA port + * @ap: Port to initialize + * + * Called on port allocation to initialize SFF/BMDMA specific + * fields. + * + * LOCKING: + * None. + */ +void ata_sff_port_init(struct ata_port *ap) +{ + INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task); + ap->ctl = ATA_DEVCTL_OBS; + ap->last_ctl = 0xFF; +} + +int __init ata_sff_init(void) +{ + /* + * FIXME: In UP case, there is only one workqueue thread and if you + * have more than one PIO device, latency is bloody awful, with + * occasional multi-second "hiccups" as one PIO device waits for + * another. It's an ugly wart that users DO occasionally complain + * about; luckily most users have at most one PIO polled device. 
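The driver conversions that follow in this patch all reduce the probe routine to a single call into the helpers above. A minimal sketch of what such a probe looks like, with invented foo_* names and made-up transfer-mode masks:

#include <linux/pci.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>

#define DRV_NAME "pata_foo"

static struct scsi_host_template foo_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations foo_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_40wire,
};

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &foo_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* enables the device, maps BARs 0-4, sets the DMA masks, registers
	 * ata_bmdma_interrupt and scans both ports in one call */
	return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
}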
+ */ + ata_sff_wq = create_workqueue("ata_sff"); + if (!ata_sff_wq) + return -ENOMEM; + + return 0; +} + +void __exit ata_sff_exit(void) +{ + destroy_workqueue(ata_sff_wq); +} diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 823e6309636..4b84ed60324 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -38,17 +38,6 @@ struct ata_scsi_args { void (*done)(struct scsi_cmnd *); }; -static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset) -{ - if (reset == sata_std_hardreset) - return 1; -#ifdef CONFIG_ATA_SFF - if (reset == sata_sff_hardreset) - return 1; -#endif - return 0; -} - /* libata-core.c */ enum { /* flags for ata_dev_read_id() */ @@ -79,7 +68,6 @@ extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, u64 block, u32 n_block, unsigned int tf_flags, unsigned int tag); extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev); -extern void ata_port_flush_task(struct ata_port *ap); extern unsigned ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf, const u8 *cdb, int dma_dir, void *buf, unsigned int buflen, @@ -202,10 +190,19 @@ static inline int sata_pmp_attach(struct ata_device *dev) /* libata-sff.c */ #ifdef CONFIG_ATA_SFF -extern void ata_dev_select(struct ata_port *ap, unsigned int device, - unsigned int wait, unsigned int can_sleep); -extern u8 ata_irq_on(struct ata_port *ap); -extern void ata_pio_task(struct work_struct *work); +extern void ata_sff_flush_pio_task(struct ata_port *ap); +extern void ata_sff_port_init(struct ata_port *ap); +extern int ata_sff_init(void); +extern void ata_sff_exit(void); +#else /* CONFIG_ATA_SFF */ +static inline void ata_sff_flush_pio_task(struct ata_port *ap) +{ } +static inline void ata_sff_port_init(struct ata_port *ap) +{ } +static inline int ata_sff_init(void) +{ return 0; } +static inline void ata_sff_exit(void) +{ } #endif /* CONFIG_ATA_SFF */ #endif /* __LIBATA_H__ */ diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index 1ea2be0f4b9..c8d47034d5e 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c @@ -101,7 +101,7 @@ static unsigned long pacpi_discover_modes(struct ata_port *ap, struct ata_device static unsigned long pacpi_mode_filter(struct ata_device *adev, unsigned long mask) { struct pata_acpi *acpi = adev->link->ap->private_data; - return ata_bmdma_mode_filter(adev, mask & acpi->mask[adev->devno]); + return mask & acpi->mask[adev->devno]; } /** @@ -172,7 +172,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc) struct pata_acpi *acpi = ap->private_data; if (acpi->gtm.flags & 0x10) - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); if (adev != acpi->last) { pacpi_set_piomode(ap, adev); @@ -180,7 +180,7 @@ static unsigned int pacpi_qc_issue(struct ata_queued_cmd *qc) pacpi_set_dmamode(ap, adev); acpi->last = adev; } - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } /** @@ -205,7 +205,7 @@ static int pacpi_port_start(struct ata_port *ap) return -ENOMEM; acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]); acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]); - ret = ata_sff_port_start(ap); + ret = ata_bmdma_port_start(ap); if (ret < 0) return ret; @@ -260,7 +260,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) return rc; pcim_pin_device(pdev); } - return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0); } static const struct pci_device_id pacpi_pci_tbl[] = { 
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index dc61b72f751..794ec6e3275 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -124,7 +124,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask) ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); if (strstr(model_num, "WDC")) return mask &= ~ATA_MASK_UDMA; - return ata_bmdma_mode_filter(adev, mask); + return mask; } /** @@ -583,7 +583,10 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ppi[0] = &info_20_udma; } - return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); + if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask) + return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); + else + return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index d95eca9c547..620a07cabe3 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } /* And fire it up */ - return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index 4d066d6c30f..ba43f0f8c88 100644 --- a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c @@ -421,7 +421,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) BUG_ON(ppi[0] == NULL); - return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0); } static const struct pci_device_id artop_pci_tbl[] = { diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c index c6a946aa252..0da0dcc7dd0 100644 --- a/drivers/ata/pata_at91.c +++ b/drivers/ata/pata_at91.c @@ -202,7 +202,6 @@ static struct ata_port_operations pata_at91_port_ops = { .sff_data_xfer = pata_at91_data_xfer_noirq, .set_piomode = pata_at91_set_piomode, .cable_detect = ata_cable_40wire, - .port_start = ATA_OP_NULL, }; static int __devinit pata_at91_probe(struct platform_device *pdev) diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index cbaf2eddac6..43755616dc5 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -217,7 +217,7 @@ static struct scsi_host_template atiixp_sht = { static struct ata_port_operations atiixp_port_ops = { .inherits = &ata_bmdma_port_ops, - .qc_prep = ata_sff_dumb_qc_prep, + .qc_prep = ata_bmdma_dumb_qc_prep, .bmdma_start = atiixp_bmdma_start, .bmdma_stop = atiixp_bmdma_stop, @@ -246,8 +246,8 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i])) ppi[i] = &ata_dummy_port_info; - return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL, - ATA_HOST_PARALLEL_SCAN); + return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL, + ATA_HOST_PARALLEL_SCAN); } static const struct pci_device_id atiixp[] = { diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c index bb6e0746e07..95295935dd9 100644 --- a/drivers/ata/pata_atp867x.c +++ b/drivers/ata/pata_atp867x.c @@ -525,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev, pci_set_master(pdev); - rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt, + rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &atp867x_sht); if (rc) dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n"); 
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 02c81f12c70..9cae65de750 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c @@ -821,6 +821,18 @@ static void bfin_dev_select(struct ata_port *ap, unsigned int device) } /** + * bfin_set_devctl - Write device control reg + * @ap: port where the device is + * @ctl: value to write + */ + +static u8 bfin_set_devctl(struct ata_port *ap, u8 ctl) +{ + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + write_atapi_register(base, ATA_REG_CTRL, ctl); +} + +/** * bfin_bmdma_setup - Set up IDE DMA transaction * @qc: Info associated with this ATA transaction. * @@ -1202,7 +1214,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf, * bfin_irq_clear - Clear ATAPI interrupt. * @ap: Port associated with this ATA transaction. * - * Note: Original code is ata_sff_irq_clear(). + * Note: Original code is ata_bmdma_irq_clear(). */ static void bfin_irq_clear(struct ata_port *ap) @@ -1216,56 +1228,6 @@ static void bfin_irq_clear(struct ata_port *ap) } /** - * bfin_irq_on - Enable interrupts on a port. - * @ap: Port on which interrupts are enabled. - * - * Note: Original code is ata_sff_irq_on(). - */ - -static unsigned char bfin_irq_on(struct ata_port *ap) -{ - void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; - u8 tmp; - - dev_dbg(ap->dev, "in atapi irq on\n"); - ap->ctl &= ~ATA_NIEN; - ap->last_ctl = ap->ctl; - - write_atapi_register(base, ATA_REG_CTRL, ap->ctl); - tmp = ata_wait_idle(ap); - - bfin_irq_clear(ap); - - return tmp; -} - -/** - * bfin_freeze - Freeze DMA controller port - * @ap: port to freeze - * - * Note: Original code is ata_sff_freeze(). - */ - -static void bfin_freeze(struct ata_port *ap) -{ - void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; - - dev_dbg(ap->dev, "in atapi dma freeze\n"); - ap->ctl |= ATA_NIEN; - ap->last_ctl = ap->ctl; - - write_atapi_register(base, ATA_REG_CTRL, ap->ctl); - - /* Under certain circumstances, some controllers raise IRQ on - * ATA_NIEN manipulation. Also, many controllers fail to mask - * previously pending IRQ on ATA_NIEN assertion. Clear it. - */ - ap->ops->sff_check_status(ap); - - bfin_irq_clear(ap); -} - -/** * bfin_thaw - Thaw DMA controller port * @ap: port to thaw * @@ -1276,7 +1238,7 @@ void bfin_thaw(struct ata_port *ap) { dev_dbg(ap->dev, "in atapi dma thaw\n"); bfin_check_status(ap); - bfin_irq_on(ap); + ata_sff_irq_on(ap); } /** @@ -1293,7 +1255,7 @@ static void bfin_postreset(struct ata_link *link, unsigned int *classes) void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; /* re-enable interrupts */ - bfin_irq_on(ap); + ata_sff_irq_on(ap); /* is double-select really necessary? 
*/ if (classes[0] != ATA_DEV_NONE) @@ -1438,18 +1400,12 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance) spin_lock_irqsave(&host->lock, flags); for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; + struct ata_port *ap = host->ports[i]; + struct ata_queued_cmd *qc; - ap = host->ports[i]; - if (ap && - !(ap->flags & ATA_FLAG_DISABLED)) { - struct ata_queued_cmd *qc; - - qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && - (qc->flags & ATA_QCFLAG_ACTIVE)) - handled |= bfin_ata_host_intr(ap, qc); - } + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) + handled |= bfin_ata_host_intr(ap, qc); } spin_unlock_irqrestore(&host->lock, flags); @@ -1465,7 +1421,7 @@ static struct scsi_host_template bfin_sht = { }; static struct ata_port_operations bfin_pata_ops = { - .inherits = &ata_sff_port_ops, + .inherits = &ata_bmdma_port_ops, .set_piomode = bfin_set_piomode, .set_dmamode = bfin_set_dmamode, @@ -1476,6 +1432,7 @@ static struct ata_port_operations bfin_pata_ops = { .sff_check_status = bfin_check_status, .sff_check_altstatus = bfin_check_altstatus, .sff_dev_select = bfin_dev_select, + .sff_set_devctl = bfin_set_devctl, .bmdma_setup = bfin_bmdma_setup, .bmdma_start = bfin_bmdma_start, @@ -1485,13 +1442,11 @@ static struct ata_port_operations bfin_pata_ops = { .qc_prep = ata_noop_qc_prep, - .freeze = bfin_freeze, .thaw = bfin_thaw, .softreset = bfin_softreset, .postreset = bfin_postreset, .sff_irq_clear = bfin_irq_clear, - .sff_irq_on = bfin_irq_on, .port_start = bfin_port_start, .port_stop = bfin_port_stop, diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c index 45896b3c653..e5f289f59ca 100644 --- a/drivers/ata/pata_cmd640.c +++ b/drivers/ata/pata_cmd640.c @@ -153,24 +153,20 @@ static int cmd640_port_start(struct ata_port *ap) struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct cmd640_reg *timing; - int ret = ata_sff_port_start(ap); - if (ret < 0) - return ret; - timing = devm_kzalloc(&pdev->dev, sizeof(struct cmd640_reg), GFP_KERNEL); if (timing == NULL) return -ENOMEM; timing->last = -1; /* Force a load */ ap->private_data = timing; - return ret; + return 0; } static struct scsi_host_template cmd640_sht = { - ATA_BMDMA_SHT(DRV_NAME), + ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations cmd640_port_ops = { - .inherits = &ata_bmdma_port_ops, + .inherits = &ata_sff_port_ops, /* In theory xfer_noirq is not needed once we kill the prefetcher */ .sff_data_xfer = ata_sff_data_xfer_noirq, .qc_issue = cmd640_qc_issue, @@ -181,13 +177,10 @@ static struct ata_port_operations cmd640_port_ops = { static void cmd640_hardware_init(struct pci_dev *pdev) { - u8 r; u8 ctrl; /* CMD640 detected, commiserations */ pci_write_config_byte(pdev, 0x5B, 0x00); - /* Get version info */ - pci_read_config_byte(pdev, CFR, &r); /* PIO0 command cycles */ pci_write_config_byte(pdev, CMDTIM, 0); /* 512 byte bursts (sector) */ diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 4c81a71b887..9f5da1c7454 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c @@ -367,7 +367,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_write_config_byte(pdev, UDIDETCR0, 0xF0); #endif - return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 
95ebdac517f..030952f1f97 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -110,7 +110,7 @@ static struct scsi_host_template cs5520_sht = { static struct ata_port_operations cs5520_port_ops = { .inherits = &ata_bmdma_port_ops, - .qc_prep = ata_sff_dumb_qc_prep, + .qc_prep = ata_bmdma_dumb_qc_prep, .cable_detect = ata_cable_40wire, .set_piomode = cs5520_set_piomode, }; @@ -221,7 +221,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi continue; rc = devm_request_irq(&pdev->dev, irq[ap->port_no], - ata_sff_interrupt, 0, DRV_NAME, host); + ata_bmdma_interrupt, 0, DRV_NAME, host); if (rc) return rc; diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index 738ad2e14a9..f792330f0d8 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c @@ -156,7 +156,7 @@ static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc) cs5530_set_dmamode(ap, adev); } - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } static struct scsi_host_template cs5530_sht = { @@ -167,7 +167,7 @@ static struct scsi_host_template cs5530_sht = { static struct ata_port_operations cs5530_port_ops = { .inherits = &ata_bmdma_port_ops, - .qc_prep = ata_sff_dumb_qc_prep, + .qc_prep = ata_bmdma_dumb_qc_prep, .qc_issue = cs5530_qc_issue, .cable_detect = ata_cable_40wire, @@ -324,7 +324,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ppi[1] = &info_palmax_secondary; /* Now kick off ATA set up */ - return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index a02e6459fdc..03a93186aa1 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c @@ -198,7 +198,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id) rdmsr(ATAC_CH0D1_PIO, timings, dummy); if (CS5535_BAD_PIO(timings)) wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); - return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0); } static const struct pci_device_id cs5535[] = { diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c index 914ae3506ff..21ee23f89e8 100644 --- a/drivers/ata/pata_cs5536.c +++ b/drivers/ata/pata_cs5536.c @@ -260,7 +260,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; } - return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0); } static const struct pci_device_id cs5536[] = { diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c index 0fcc096b8da..6d915b063d9 100644 --- a/drivers/ata/pata_cypress.c +++ b/drivers/ata/pata_cypress.c @@ -138,7 +138,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i if (PCI_FUNC(pdev->devfn) != 1) return -ENODEV; - return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); } static const struct pci_device_id cy82c693[] = { diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index 3bac0e07969..a08834758ea 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c @@ -277,8 +277,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, 
&efar_sht, NULL, - ATA_HOST_PARALLEL_SCAN); + return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL, + ATA_HOST_PARALLEL_SCAN); } static const struct pci_device_id efar_pci_tbl[] = { diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index af49bfb5724..7688868557b 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -182,7 +182,7 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) } else if (adev->class == ATA_DEV_ATAPI) mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); - return ata_bmdma_mode_filter(adev, mask); + return mask; } static int hpt36x_cable_detect(struct ata_port *ap) @@ -361,7 +361,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id) break; } /* Now kick off ATA set up */ - return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); + return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 8839307a64c..9ae4c083057 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -282,7 +282,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) mask &= ~(0xE0 << ATA_SHIFT_UDMA); } - return ata_bmdma_mode_filter(adev, mask); + return mask; } /** @@ -298,7 +298,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) mask &= ~(0xE0 << ATA_SHIFT_UDMA); } - return ata_bmdma_mode_filter(adev, mask); + return mask; } /** @@ -987,7 +987,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) } /* Now kick off ATA set up */ - return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data, 0); + return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0); } static const struct pci_device_id hpt37x[] = { diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 01457b266f3..32f3463216b 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -320,7 +320,7 @@ static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc) hpt3x2n_set_clock(ap, dpll ? 
0x21 : 0x23); } - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } static struct scsi_host_template hpt3x2n_sht = { @@ -548,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); /* Now kick off ATA set up */ - return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); + return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); } static const struct pci_device_id hpt3x2n[] = { diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index 727a81ce4c9..b63d5e2d462 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c @@ -248,7 +248,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); } pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &hpt3x3_sht); } diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index fa812e206ee..9f2889fe43b 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -321,7 +321,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes) } static struct ata_port_operations pata_icside_port_ops = { - .inherits = &ata_sff_port_ops, + .inherits = &ata_bmdma_port_ops, /* no need to build any PRD tables for DMA */ .qc_prep = ata_noop_qc_prep, .sff_data_xfer = ata_sff_data_xfer_noirq, @@ -333,7 +333,8 @@ static struct ata_port_operations pata_icside_port_ops = { .cable_detect = ata_cable_40wire, .set_dmamode = pata_icside_set_dmamode, .postreset = pata_icside_postreset, - .post_internal_cmd = pata_icside_bmdma_stop, + + .port_start = ATA_OP_NULL, /* don't need PRD table */ }; static void __devinit @@ -469,7 +470,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info) pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); } - return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0, + return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0, &pata_icside_sht); } diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c index f971f0de88e..4d142a2ab8f 100644 --- a/drivers/ata/pata_it8213.c +++ b/drivers/ata/pata_it8213.c @@ -273,7 +273,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0); } static const struct pci_device_id it8213_pci_tbl[] = { diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 5cb286fd839..bf88f71a21f 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -430,7 +430,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc) case 0xFC: /* Internal 'report rebuild state' */ /* Arguably should just no-op this one */ case ATA_CMD_SET_FEATURES: - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command); return AC_ERR_DEV; @@ -448,7 +448,7 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc) static unsigned int it821x_passthru_qc_issue(struct ata_queued_cmd *qc) { it821x_passthru_dev_select(qc->ap, qc->dev->devno); - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } /** @@ -739,7 +739,7 @@ static int it821x_port_start(struct ata_port *ap) 
struct it821x_dev *itdev; u8 conf; - int ret = ata_sff_port_start(ap); + int ret = ata_bmdma_port_start(ap); if (ret < 0) return ret; @@ -933,7 +933,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) else ppi[0] = &info_smart; } - return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index 565e01e6ac7..cb3babbb703 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c @@ -144,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i }; const struct ata_port_info *ppi[] = { &info, NULL }; - return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0); } static const struct pci_device_id jmicron_pci_tbl[] = { diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 211b6438b3a..75b49d01780 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -720,6 +720,8 @@ static int pata_macio_port_start(struct ata_port *ap) if (priv->dma_table_cpu == NULL) { dev_err(priv->dev, "Unable to allocate DMA command list\n"); ap->ioaddr.bmdma_addr = NULL; + ap->mwdma_mask = 0; + ap->udma_mask = 0; } return 0; } @@ -917,7 +919,7 @@ static struct scsi_host_template pata_macio_sht = { }; static struct ata_port_operations pata_macio_ops = { - .inherits = &ata_sff_port_ops, + .inherits = &ata_bmdma_port_ops, .freeze = pata_macio_freeze, .set_piomode = pata_macio_set_timings, @@ -925,7 +927,6 @@ static struct ata_port_operations pata_macio_ops = { .cable_detect = pata_macio_cable_detect, .sff_dev_select = pata_macio_dev_select, .qc_prep = pata_macio_qc_prep, - .mode_filter = ata_bmdma_mode_filter, .bmdma_setup = pata_macio_bmdma_setup, .bmdma_start = pata_macio_bmdma_start, .bmdma_stop = pata_macio_bmdma_stop, @@ -1109,7 +1110,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv, /* Start it up */ priv->irq = irq; - return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0, + return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0, &pata_macio_sht); } @@ -1139,7 +1140,7 @@ static int __devinit pata_macio_attach(struct macio_dev *mdev, "Failed to allocate private memory\n"); return -ENOMEM; } - priv->node = of_node_get(mdev->ofdev.node); + priv->node = of_node_get(mdev->ofdev.dev.of_node); priv->mdev = mdev; priv->dev = &mdev->ofdev.dev; @@ -1354,8 +1355,11 @@ static struct of_device_id pata_macio_match[] = static struct macio_driver pata_macio_driver = { - .name = "pata-macio", - .match_table = pata_macio_match, + .driver = { + .name = "pata-macio", + .owner = THIS_MODULE, + .of_match_table = pata_macio_match, + }, .probe = pata_macio_attach, .remove = pata_macio_detach, #ifdef CONFIG_PM @@ -1365,9 +1369,6 @@ static struct macio_driver pata_macio_driver = #ifdef CONFIG_PMAC_MEDIABAY .mediabay_event = pata_macio_mb_event, #endif - .driver = { - .owner = THIS_MODULE, - }, }; static const struct pci_device_id pata_macio_pci_match[] = { diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index e8ca02e5a71..dd38083dcbe 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -153,7 +153,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i return -ENODEV; } #endif - return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, 
&marvell_sht, NULL, 0); } static const struct pci_device_id marvell_pci_tbl[] = { diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index 9f5b053611d..f087ab55b1d 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -64,13 +64,13 @@ struct mpc52xx_ata_priv { /* ATAPI-4 PIO specs (in ns) */ -static const int ataspec_t0[5] = {600, 383, 240, 180, 120}; -static const int ataspec_t1[5] = { 70, 50, 30, 30, 25}; -static const int ataspec_t2_8[5] = {290, 290, 290, 80, 70}; -static const int ataspec_t2_16[5] = {165, 125, 100, 80, 70}; -static const int ataspec_t2i[5] = { 0, 0, 0, 70, 25}; -static const int ataspec_t4[5] = { 30, 20, 15, 10, 10}; -static const int ataspec_ta[5] = { 35, 35, 35, 35, 35}; +static const u16 ataspec_t0[5] = {600, 383, 240, 180, 120}; +static const u16 ataspec_t1[5] = { 70, 50, 30, 30, 25}; +static const u16 ataspec_t2_8[5] = {290, 290, 290, 80, 70}; +static const u16 ataspec_t2_16[5] = {165, 125, 100, 80, 70}; +static const u16 ataspec_t2i[5] = { 0, 0, 0, 70, 25}; +static const u16 ataspec_t4[5] = { 30, 20, 15, 10, 10}; +static const u16 ataspec_ta[5] = { 35, 35, 35, 35, 35}; #define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c))) @@ -78,13 +78,13 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35}; /* ATAPI-4 MDMA specs (in clocks) */ struct mdmaspec { - u32 t0M; - u32 td; - u32 th; - u32 tj; - u32 tkw; - u32 tm; - u32 tn; + u8 t0M; + u8 td; + u8 th; + u8 tj; + u8 tkw; + u8 tm; + u8 tn; }; static const struct mdmaspec mdmaspec66[3] = { @@ -101,23 +101,23 @@ static const struct mdmaspec mdmaspec132[3] = { /* ATAPI-4 UDMA specs (in clocks) */ struct udmaspec { - u32 tcyc; - u32 t2cyc; - u32 tds; - u32 tdh; - u32 tdvs; - u32 tdvh; - u32 tfs; - u32 tli; - u32 tmli; - u32 taz; - u32 tzah; - u32 tenv; - u32 tsr; - u32 trfs; - u32 trp; - u32 tack; - u32 tss; + u8 tcyc; + u8 t2cyc; + u8 tds; + u8 tdh; + u8 tdvs; + u8 tdvh; + u8 tfs; + u8 tli; + u8 tmli; + u8 taz; + u8 tzah; + u8 tenv; + u8 tsr; + u8 trfs; + u8 trp; + u8 tack; + u8 tss; }; static const struct udmaspec udmaspec66[6] = { @@ -270,7 +270,7 @@ mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio) { struct mpc52xx_ata_timings *timing = &priv->timings[dev]; unsigned int ipb_period = priv->ipb_period; - unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta; + u32 t0, t1, t2_8, t2_16, t2i, t4, ta; if ((pio < 0) || (pio > 4)) return -EINVAL; @@ -299,8 +299,8 @@ mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev, if (speed < 0 || speed > 2) return -EINVAL; - t->mdma1 = (s->t0M << 24) | (s->td << 16) | (s->tkw << 8) | (s->tm); - t->mdma2 = (s->th << 24) | (s->tj << 16) | (s->tn << 8); + t->mdma1 = ((u32)s->t0M << 24) | ((u32)s->td << 16) | ((u32)s->tkw << 8) | s->tm; + t->mdma2 = ((u32)s->th << 24) | ((u32)s->tj << 16) | ((u32)s->tn << 8); t->using_udma = 0; return 0; @@ -316,11 +316,11 @@ mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev, if (speed < 0 || speed > 2) return -EINVAL; - t->udma1 = (s->t2cyc << 24) | (s->tcyc << 16) | (s->tds << 8) | s->tdh; - t->udma2 = (s->tdvs << 24) | (s->tdvh << 16) | (s->tfs << 8) | s->tli; - t->udma3 = (s->tmli << 24) | (s->taz << 16) | (s->tenv << 8) | s->tsr; - t->udma4 = (s->tss << 24) | (s->trfs << 16) | (s->trp << 8) | s->tack; - t->udma5 = (s->tzah << 24); + t->udma1 = ((u32)s->t2cyc << 24) | ((u32)s->tcyc << 16) | ((u32)s->tds << 8) | s->tdh; + t->udma2 = ((u32)s->tdvs << 24) | ((u32)s->tdvh << 16) | ((u32)s->tfs << 8) | s->tli; + t->udma3 = ((u32)s->tmli << 24) | ((u32)s->taz << 16) 
| ((u32)s->tenv << 8) | s->tsr; + t->udma4 = ((u32)s->tss << 24) | ((u32)s->trfs << 16) | ((u32)s->trp << 8) | s->tack; + t->udma5 = (u32)s->tzah << 24; t->using_udma = 1; return 0; @@ -659,7 +659,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv, ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); /* activate host */ - return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0, + return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0, &mpc52xx_ata_sht); } @@ -694,7 +694,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match) struct bcom_task *dmatsk = NULL; /* Get ipb frequency */ - ipb_freq = mpc5xxx_get_bus_frequency(op->node); + ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); if (!ipb_freq) { dev_err(&op->dev, "could not determine IPB bus frequency\n"); return -ENODEV; @@ -702,7 +702,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match) /* Get device base address from device tree, request the region * and ioremap it. */ - rv = of_address_to_resource(op->node, 0, &res_mem); + rv = of_address_to_resource(op->dev.of_node, 0, &res_mem); if (rv) { dev_err(&op->dev, "could not determine device base address\n"); return rv; @@ -735,14 +735,14 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match) * The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and * UDMA modes 0, 1 and 2. */ - prop = of_get_property(op->node, "mwdma-mode", &proplen); + prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen); if ((prop) && (proplen >= 4)) mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1); - prop = of_get_property(op->node, "udma-mode", &proplen); + prop = of_get_property(op->dev.of_node, "udma-mode", &proplen); if ((prop) && (proplen >= 4)) udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1); - ata_irq = irq_of_parse_and_map(op->node, 0); + ata_irq = irq_of_parse_and_map(op->dev.of_node, 0); if (ata_irq == NO_IRQ) { dev_err(&op->dev, "error mapping irq\n"); return -EINVAL; @@ -884,9 +884,6 @@ static struct of_device_id mpc52xx_ata_of_match[] = { static struct of_platform_driver mpc52xx_ata_of_platform_driver = { - .owner = THIS_MODULE, - .name = DRV_NAME, - .match_table = mpc52xx_ata_of_match, .probe = mpc52xx_ata_probe, .remove = mpc52xx_ata_remove, #ifdef CONFIG_PM @@ -896,6 +893,7 @@ static struct of_platform_driver mpc52xx_ata_of_platform_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, + .of_match_table = mpc52xx_ata_of_match, }, }; diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index 94f979a7f4f..3eb921c746a 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c @@ -82,7 +82,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e ata_pci_bmdma_clear_simplex(pdev); /* And let the library code do the work */ - return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0); } static const struct pci_device_id netcell_pci_tbl[] = { diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index dd53a66b19e..cc50bd09aa2 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c @@ -149,7 +149,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) ninja32_program(base); /* FIXME: Should we disable them at remove ? 
*/ - return ata_host_activate(host, dev->irq, ata_sff_interrupt, + return ata_host_activate(host, dev->irq, ata_bmdma_interrupt, IRQF_SHARED, &ninja32_sht); } diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index 830431f036a..605f198f958 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c @@ -126,7 +126,7 @@ static void ns87415_bmdma_setup(struct ata_queued_cmd *qc) /* load PRD table addr. */ mb(); /* make sure PRD table writes are visible to controller */ - iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); + iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); /* specify data direction, triple-check start bit is clear */ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); @@ -380,7 +380,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e ns87415_fixup(pdev); - return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0); } static const struct pci_device_id ns87415_pci_tbl[] = { diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 005a44483a7..06ddd91ffed 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -489,9 +489,8 @@ static void octeon_cf_exec_command16(struct ata_port *ap, ata_wait_idle(ap); } -static u8 octeon_cf_irq_on(struct ata_port *ap) +static void octeon_cf_irq_on(struct ata_port *ap) { - return 0; } static void octeon_cf_irq_clear(struct ata_port *ap) @@ -655,9 +654,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) ap = host->ports[i]; ocd = ap->dev->platform_data; - if (ap->flags & ATA_FLAG_DISABLED) - continue; - ocd = ap->dev->platform_data; cf_port = ap->private_data; dma_int.u64 = @@ -667,8 +663,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && - (qc->flags & ATA_QCFLAG_ACTIVE)) { + if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) { if (dma_int.s.done && !dma_cfg.s.en) { if (!sg_is_last(qc->cursg)) { qc->cursg = sg_next(qc->cursg); @@ -738,8 +733,7 @@ static void octeon_cf_delayed_finish(struct work_struct *work) goto out; } qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && - (qc->flags & ATA_QCFLAG_ACTIVE)) + if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) octeon_cf_dma_finished(ap, qc); out: spin_unlock_irqrestore(&host->lock, flags); @@ -756,20 +750,6 @@ static void octeon_cf_dev_config(struct ata_device *dev) } /* - * Trap if driver tries to do standard bmdma commands. They are not - * supported. - */ -static void unreachable_qc(struct ata_queued_cmd *qc) -{ - BUG(); -} - -static u8 unreachable_port(struct ata_port *ap) -{ - BUG(); -} - -/* * We don't do ATAPI DMA so return 0. 
*/ static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc) @@ -810,10 +790,6 @@ static struct ata_port_operations octeon_cf_ops = { .sff_dev_select = octeon_cf_dev_select, .sff_irq_on = octeon_cf_irq_on, .sff_irq_clear = octeon_cf_irq_clear, - .bmdma_setup = unreachable_qc, - .bmdma_start = unreachable_qc, - .bmdma_stop = unreachable_qc, - .bmdma_status = unreachable_port, .cable_detect = ata_cable_40wire, .set_piomode = octeon_cf_set_piomode, .set_dmamode = octeon_cf_set_dmamode, diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c index 1f18ad9e4fe..5a1b82c08be 100644 --- a/drivers/ata/pata_of_platform.c +++ b/drivers/ata/pata_of_platform.c @@ -18,7 +18,7 @@ static int __devinit pata_of_platform_probe(struct of_device *ofdev, const struct of_device_id *match) { int ret; - struct device_node *dn = ofdev->node; + struct device_node *dn = ofdev->dev.of_node; struct resource io_res; struct resource ctl_res; struct resource irq_res; @@ -91,8 +91,11 @@ static struct of_device_id pata_of_platform_match[] = { MODULE_DEVICE_TABLE(of, pata_of_platform_match); static struct of_platform_driver pata_of_platform_driver = { - .name = "pata_of_platform", - .match_table = pata_of_platform_match, + .driver = { + .name = "pata_of_platform", + .owner = THIS_MODULE, + .of_match_table = pata_of_platform_match, + }, .probe = pata_of_platform_probe, .remove = __devexit_p(pata_of_platform_remove), }; diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index 5f6aba7eb0d..b811c163620 100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c @@ -200,7 +200,7 @@ static unsigned int oldpiix_qc_issue(struct ata_queued_cmd *qc) if (ata_dma_enabled(adev)) oldpiix_set_dmamode(ap, adev); } - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } @@ -248,7 +248,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); } static const struct pci_device_id oldpiix_pci_tbl[] = { diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index 76b7d12b1e8..0852cd07de0 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c @@ -429,7 +429,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) if (optiplus_with_udma(dev)) ppi[0] = &info_82c700_udma; - return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0); } static const struct pci_device_id optidma[] = { diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index d94b8f0bd74..118c28e8aba 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c @@ -45,16 +45,6 @@ #define DRV_NAME "pata_pcmcia" #define DRV_VERSION "0.3.5" -/* - * Private data structure to glue stuff together - */ - -struct ata_pcmcia_info { - struct pcmcia_device *pdev; - int ndev; - dev_node_t node; -}; - /** * pcmcia_set_mode - PCMCIA specific mode setup * @link: link @@ -175,7 +165,7 @@ static struct ata_port_operations pcmcia_8bit_port_ops = { .sff_data_xfer = ata_data_xfer_8bit, .cable_detect = ata_cable_40wire, .set_mode = pcmcia_set_mode_8bit, - .drain_fifo = pcmcia_8bit_drain_fifo, + .sff_drain_fifo = pcmcia_8bit_drain_fifo, }; @@ -248,7 +238,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) { struct ata_host *host; struct ata_port *ap; - struct 
ata_pcmcia_info *info; struct pcmcia_config_check *stk = NULL; int is_kme = 0, ret = -ENOMEM, p; unsigned long io_base, ctl_base; @@ -256,19 +245,10 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) int n_ports = 1; struct ata_port_operations *ops = &pcmcia_port_ops; - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (info == NULL) - return -ENOMEM; - - /* Glue stuff together. FIXME: We may be able to get rid of info with care */ - info->pdev = pdev; - pdev->priv = info; - /* Set up attributes in order to probe card and get resources */ pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; pdev->io.IOAddrLines = 3; - pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; pdev->conf.Attributes = CONF_ENABLE_IRQ; pdev->conf.IntType = INT_MEMORY_AND_IO; @@ -293,8 +273,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) } io_base = pdev->io.BasePort1; ctl_base = stk->ctl_base; - ret = pcmcia_request_irq(pdev, &pdev->irq); - if (ret) + if (!pdev->irq) goto failed; ret = pcmcia_request_configuration(pdev, &pdev->conf); @@ -344,21 +323,19 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) } /* activate */ - ret = ata_host_activate(host, pdev->irq.AssignedIRQ, ata_sff_interrupt, + ret = ata_host_activate(host, pdev->irq, ata_sff_interrupt, IRQF_SHARED, &pcmcia_sht); if (ret) goto failed; - info->ndev = 1; + pdev->priv = host; kfree(stk); return 0; failed: kfree(stk); - info->ndev = 0; pcmcia_disable_device(pdev); out1: - kfree(info); return ret; } @@ -372,20 +349,12 @@ out1: static void pcmcia_remove_one(struct pcmcia_device *pdev) { - struct ata_pcmcia_info *info = pdev->priv; - struct device *dev = &pdev->dev; - - if (info != NULL) { - /* If we have attached the device to the ATA layer, detach it */ - if (info->ndev) { - struct ata_host *host = dev_get_drvdata(dev); - ata_host_detach(host); - } - info->ndev = 0; - pdev->priv = NULL; - } + struct ata_host *host = pdev->priv; + + if (host) + ata_host_detach(host); + pcmcia_disable_device(pdev); - kfree(info); } static struct pcmcia_device_id pcmcia_devices[] = { diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index ca5cad0fd80..b1835112252 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c @@ -265,7 +265,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long struct ata_device *pair = ata_dev_pair(adev); if (adev->class != ATA_DEV_ATA || adev->devno == 0 || pair == NULL) - return ata_bmdma_mode_filter(adev, mask); + return mask; /* Check for slave of a Maxtor at UDMA6 */ ata_id_c_string(pair->id, model_num, ATA_ID_PROD, @@ -274,7 +274,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long if (strstr(model_num, "Maxtor") == NULL && pair->dma_mode == XFER_UDMA_6) mask &= ~ (1 << (6 + ATA_SHIFT_UDMA)); - return ata_bmdma_mode_filter(adev, mask); + return mask; } /** @@ -754,7 +754,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de return -EIO; pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &pdc2027x_sht); } diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index 9ac0897cf8b..c39f213e1bb 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c @@ -249,7 +249,7 @@ static int pdc2026x_port_start(struct ata_port *ap) u8 burst = ioread8(bmdma + 0x1f); iowrite8(burst | 0x01, bmdma + 0x1f); } - 
return ata_sff_port_start(ap); + return ata_bmdma_port_start(ap); } /** @@ -337,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id return -ENODEV; } } - return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); } static const struct pci_device_id pdc202xx[] = { diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c index 98161541484..cb01bf9496f 100644 --- a/drivers/ata/pata_piccolo.c +++ b/drivers/ata/pata_piccolo.c @@ -95,7 +95,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id }; const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; /* Just one port for the moment */ - return ata_pci_sff_init_one(dev, ppi, &tosh_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0); } static struct pci_device_id ata_tosh[] = { diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index 3f6ebc6c665..50400fa120f 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c @@ -53,7 +53,6 @@ static struct ata_port_operations pata_platform_port_ops = { .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_unknown, .set_mode = pata_platform_set_mode, - .port_start = ATA_OP_NULL, }; static void pata_platform_setup_port(struct ata_ioports *ioaddr, diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c index fc9602229ac..8574b31f177 100644 --- a/drivers/ata/pata_radisys.c +++ b/drivers/ata/pata_radisys.c @@ -179,7 +179,7 @@ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc) radisys_set_piomode(ap, adev); } } - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } @@ -227,7 +227,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0); } static const struct pci_device_id radisys_pci_tbl[] = { diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c index 37092cfd7bc..5fbe9b166c6 100644 --- a/drivers/ata/pata_rdc.c +++ b/drivers/ata/pata_rdc.c @@ -344,7 +344,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev, */ pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg); - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; host->private_data = hpriv; @@ -354,7 +354,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev, host->flags |= ATA_HOST_PARALLEL_SCAN; pci_set_master(pdev); - return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht); + return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht); } static void rdc_remove_one(struct pci_dev *pdev) diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c index dfecc6f964b..e2c18257adf 100644 --- a/drivers/ata/pata_sc1200.c +++ b/drivers/ata/pata_sc1200.c @@ -174,7 +174,7 @@ static unsigned int sc1200_qc_issue(struct ata_queued_cmd *qc) sc1200_set_dmamode(ap, adev); } - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } /** @@ -209,7 +209,7 @@ static struct scsi_host_template sc1200_sht = { static struct ata_port_operations sc1200_port_ops = { .inherits = &ata_bmdma_port_ops, - .qc_prep = ata_sff_dumb_qc_prep, + .qc_prep = ata_bmdma_dumb_qc_prep, .qc_issue = sc1200_qc_issue, .qc_defer = sc1200_qc_defer, .cable_detect = ata_cable_40wire, @@ 
-237,7 +237,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) }; const struct ata_port_info *ppi[] = { &info, NULL }; - return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0); } static const struct pci_device_id sc1200[] = { diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 4257d6b40af..d9db3f8d60e 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c @@ -265,7 +265,7 @@ unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask) printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); mask &= ~(0xE0 << ATA_SHIFT_UDMA); } - return ata_bmdma_mode_filter(adev, mask); + return mask; } /** @@ -416,6 +416,17 @@ static void scc_dev_select (struct ata_port *ap, unsigned int device) } /** + * scc_set_devctl - Write device control reg + * @ap: port where the device is + * @ctl: value to write + */ + +static void scc_set_devctl(struct ata_port *ap, u8 ctl) +{ + out_be32(ap->ioaddr.ctl_addr, ctl); +} + +/** * scc_bmdma_setup - Set up PCI IDE BMDMA transaction * @qc: Info associated with this ATA transaction. * @@ -430,7 +441,7 @@ static void scc_bmdma_setup (struct ata_queued_cmd *qc) void __iomem *mmio = ap->ioaddr.bmdma_addr; /* load PRD table addr */ - out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma); + out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma); /* specify data direction, triple-check start bit is clear */ dmactl = in_be32(mmio + SCC_DMA_CMD); @@ -501,8 +512,8 @@ static unsigned int scc_devchk (struct ata_port *ap, * Note: Original code is ata_sff_wait_after_reset */ -int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, - unsigned long deadline) +static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, + unsigned long deadline) { struct ata_port *ap = link->ap; struct ata_ioports *ioaddr = &ap->ioaddr; @@ -817,54 +828,6 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf, } /** - * scc_irq_on - Enable interrupts on a port. - * @ap: Port on which interrupts are enabled. - * - * Note: Original code is ata_sff_irq_on(). - */ - -static u8 scc_irq_on (struct ata_port *ap) -{ - struct ata_ioports *ioaddr = &ap->ioaddr; - u8 tmp; - - ap->ctl &= ~ATA_NIEN; - ap->last_ctl = ap->ctl; - - out_be32(ioaddr->ctl_addr, ap->ctl); - tmp = ata_wait_idle(ap); - - ap->ops->sff_irq_clear(ap); - - return tmp; -} - -/** - * scc_freeze - Freeze BMDMA controller port - * @ap: port to freeze - * - * Note: Original code is ata_sff_freeze(). - */ - -static void scc_freeze (struct ata_port *ap) -{ - struct ata_ioports *ioaddr = &ap->ioaddr; - - ap->ctl |= ATA_NIEN; - ap->last_ctl = ap->ctl; - - out_be32(ioaddr->ctl_addr, ap->ctl); - - /* Under certain circumstances, some controllers raise IRQ on - * ATA_NIEN manipulation. Also, many controllers fail to mask - * previously pending IRQ on ATA_NIEN assertion. Clear it. - */ - ap->ops->sff_check_status(ap); - - ap->ops->sff_irq_clear(ap); -} - -/** * scc_pata_prereset - prepare for reset * @ap: ATA port to be reset * @deadline: deadline jiffies for the operation @@ -903,8 +866,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes) } /* set up device control */ - if (ap->ioaddr.ctl_addr) - out_be32(ap->ioaddr.ctl_addr, ap->ctl); + out_be32(ap->ioaddr.ctl_addr, ap->ctl); DPRINTK("EXIT\n"); } @@ -913,7 +875,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes) * scc_irq_clear - Clear PCI IDE BMDMA interrupt. 
* @ap: Port associated with this ATA transaction. * - * Note: Original code is ata_sff_irq_clear(). + * Note: Original code is ata_bmdma_irq_clear(). */ static void scc_irq_clear (struct ata_port *ap) @@ -930,7 +892,7 @@ static void scc_irq_clear (struct ata_port *ap) * scc_port_start - Set port up for dma. * @ap: Port to initialize * - * Allocate space for PRD table using ata_port_start(). + * Allocate space for PRD table using ata_bmdma_port_start(). * Set PRD table address for PTERADD. (PRD Transfer End Read) */ @@ -939,11 +901,11 @@ static int scc_port_start (struct ata_port *ap) void __iomem *mmio = ap->ioaddr.bmdma_addr; int rc; - rc = ata_port_start(ap); + rc = ata_bmdma_port_start(ap); if (rc) return rc; - out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma); + out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma); return 0; } @@ -978,6 +940,7 @@ static struct ata_port_operations scc_pata_ops = { .sff_check_status = scc_check_status, .sff_check_altstatus = scc_check_altstatus, .sff_dev_select = scc_dev_select, + .sff_set_devctl = scc_set_devctl, .bmdma_setup = scc_bmdma_setup, .bmdma_start = scc_bmdma_start, @@ -985,14 +948,11 @@ static struct ata_port_operations scc_pata_ops = { .bmdma_status = scc_bmdma_status, .sff_data_xfer = scc_data_xfer, - .freeze = scc_freeze, .prereset = scc_pata_prereset, .softreset = scc_softreset, .postreset = scc_postreset, - .post_internal_cmd = scc_bmdma_stop, .sff_irq_clear = scc_irq_clear, - .sff_irq_on = scc_irq_on, .port_start = scc_port_start, .port_stop = scc_port_stop, @@ -1145,7 +1105,7 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &scc_sht); } diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c index 99cceb458e2..e97b32f03a6 100644 --- a/drivers/ata/pata_sch.c +++ b/drivers/ata/pata_sch.c @@ -174,22 +174,12 @@ static int __devinit sch_init_one(struct pci_dev *pdev, { static int printed_version; const struct ata_port_info *ppi[] = { &sch_port_info, NULL }; - struct ata_host *host; - int rc; if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - /* enable device and prepare host */ - rc = pcim_enable_device(pdev); - if (rc) - return rc; - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); - if (rc) - return rc; - pci_set_master(pdev); - return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht); + return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0); } static int __init sch_init(void) diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 9524d54035f..86dd714e3e1 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -198,7 +198,7 @@ static unsigned long serverworks_osb4_filter(struct ata_device *adev, unsigned l { if (adev->class == ATA_DEV_ATA) mask &= ~ATA_MASK_UDMA; - return ata_bmdma_mode_filter(adev, mask); + return mask; } @@ -218,7 +218,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo /* Disk, UDMA */ if (adev->class != ATA_DEV_ATA) - return ata_bmdma_mode_filter(adev, mask); + return mask; /* Actually do need to check */ ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); @@ -227,7 +227,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo if (!strcmp(p, model_num)) mask &= ~(0xE0 << ATA_SHIFT_UDMA); } - return ata_bmdma_mode_filter(adev, mask); + 
return mask; } /** @@ -460,7 +460,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ata_pci_bmdma_clear_simplex(pdev); - return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index c6c589c23ff..d3190d7ec30 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -190,15 +190,37 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev) pci_write_config_word(pdev, ua, ultra); } +/** + * sil680_sff_exec_command - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA command, with proper synchronization with interrupt + * handler / other threads. Use our MMIO space for PCI posting to avoid + * a hideously slow cycle all the way to the device. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void sil680_sff_exec_command(struct ata_port *ap, + const struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); + iowrite8(tf->command, ap->ioaddr.command_addr); + ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); +} + static struct scsi_host_template sil680_sht = { ATA_BMDMA_SHT(DRV_NAME), }; + static struct ata_port_operations sil680_port_ops = { - .inherits = &ata_bmdma32_port_ops, - .cable_detect = sil680_cable_detect, - .set_piomode = sil680_set_piomode, - .set_dmamode = sil680_set_dmamode, + .inherits = &ata_bmdma32_port_ops, + .sff_exec_command = sil680_sff_exec_command, + .cable_detect = sil680_cable_detect, + .set_piomode = sil680_set_piomode, + .set_dmamode = sil680_set_dmamode, }; /** @@ -352,11 +374,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, ata_sff_std_ports(&host->ports[1]->ioaddr); /* Register & activate */ - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &sil680_sht); use_ioports: - return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index b6708032f32..60cea13cccc 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -826,7 +826,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) sis_fixup(pdev, chipset); - return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index 733b042a746..98548f640c8 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c @@ -316,7 +316,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; pci_write_config_dword(dev, 0x40, val); - return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0); } static const struct pci_device_id sl82c105[] = { diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index 48f50600ed2..0d1f89e571d 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c @@ -201,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) if 
(!printed_version++) dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0); } static const struct pci_device_id triflex[] = { diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 741e7cb69d8..5e659885de1 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -355,7 +355,7 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask) mask &= ~ ATA_MASK_UDMA; } } - return ata_bmdma_mode_filter(dev, mask); + return mask; } /** @@ -417,8 +417,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) tf->lbam, tf->lbah); } - - ata_wait_idle(ap); } static int via_port_start(struct ata_port *ap) @@ -426,7 +424,7 @@ static int via_port_start(struct ata_port *ap) struct via_port *vp; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - int ret = ata_sff_port_start(ap); + int ret = ata_bmdma_port_start(ap); if (ret < 0) return ret; @@ -629,7 +627,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } /* We have established the device type, now fire it up */ - return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index 5904cfdb8db..adbe0426c8f 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c @@ -324,10 +324,8 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) VPRINTK("ENTER\n"); adma_enter_reg_mode(qc->ap); - if (qc->tf.protocol != ATA_PROT_DMA) { - ata_sff_qc_prep(qc); + if (qc->tf.protocol != ATA_PROT_DMA) return; - } buf[i++] = 0; /* Response flags */ buf[i++] = 0; /* reserved */ @@ -442,8 +440,6 @@ static inline unsigned int adma_intr_pkt(struct ata_host *host) continue; handled = 1; adma_enter_reg_mode(ap); - if (ap->flags & ATA_FLAG_DISABLED) - continue; pp = ap->private_data; if (!pp || pp->state != adma_state_pkt) continue; @@ -484,42 +480,38 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host) unsigned int handled = 0, port_no; for (port_no = 0; port_no < host->n_ports; ++port_no) { - struct ata_port *ap; - ap = host->ports[port_no]; - if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) { - struct ata_queued_cmd *qc; - struct adma_port_priv *pp = ap->private_data; - if (!pp || pp->state != adma_state_mmio) + struct ata_port *ap = host->ports[port_no]; + struct adma_port_priv *pp = ap->private_data; + struct ata_queued_cmd *qc; + + if (!pp || pp->state != adma_state_mmio) + continue; + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { + + /* check main status, clearing INTRQ */ + u8 status = ata_sff_check_status(ap); + if ((status & ATA_BUSY)) continue; - qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { - - /* check main status, clearing INTRQ */ - u8 status = ata_sff_check_status(ap); - if ((status & ATA_BUSY)) - continue; - DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", - ap->print_id, qc->tf.protocol, status); - - /* complete taskfile transaction */ - pp->state = adma_state_idle; - qc->err_mask |= ac_err_mask(status); - if (!qc->err_mask) - ata_qc_complete(qc); - else { - struct ata_eh_info *ehi = - &ap->link.eh_info; - ata_ehi_clear_desc(ehi); - ata_ehi_push_desc(ehi, - "status 0x%02X", status); - - if (qc->err_mask == AC_ERR_DEV) - ata_port_abort(ap); - else 
- ata_port_freeze(ap); - } - handled = 1; + DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", + ap->print_id, qc->tf.protocol, status); + + /* complete taskfile transaction */ + pp->state = adma_state_idle; + qc->err_mask |= ac_err_mask(status); + if (!qc->err_mask) + ata_qc_complete(qc); + else { + struct ata_eh_info *ehi = &ap->link.eh_info; + ata_ehi_clear_desc(ehi); + ata_ehi_push_desc(ehi, "status 0x%02X", status); + + if (qc->err_mask == AC_ERR_DEV) + ata_port_abort(ap); + else + ata_port_freeze(ap); } + handled = 1; } } return handled; @@ -562,11 +554,7 @@ static int adma_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct adma_port_priv *pp; - int rc; - rc = ata_port_start(ap); - if (rc) - return rc; adma_enter_reg_mode(ap); pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index a69192b38b4..61c89b54ea2 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1313,7 +1313,7 @@ static int sata_fsl_probe(struct of_device *ofdev, dev_printk(KERN_INFO, &ofdev->dev, "Sata FSL Platform/CSB Driver init\n"); - hcr_base = of_iomap(ofdev->node, 0); + hcr_base = of_iomap(ofdev->dev.of_node, 0); if (!hcr_base) goto error_exit_with_cleanup; @@ -1332,7 +1332,7 @@ static int sata_fsl_probe(struct of_device *ofdev, host_priv->ssr_base = ssr_base; host_priv->csr_base = csr_base; - irq = irq_of_parse_and_map(ofdev->node, 0); + irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); if (irq < 0) { dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n"); goto error_exit_with_cleanup; @@ -1427,8 +1427,11 @@ static struct of_device_id fsl_sata_match[] = { MODULE_DEVICE_TABLE(of, fsl_sata_match); static struct of_platform_driver fsl_sata_driver = { - .name = "fsl-sata", - .match_table = fsl_sata_match, + .driver = { + .name = "fsl-sata", + .owner = THIS_MODULE, + .of_match_table = fsl_sata_match, + }, .probe = sata_fsl_probe, .remove = sata_fsl_remove, #ifdef CONFIG_PM diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 27dc6c86a4c..a36149ebf4a 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -415,22 +415,11 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance) spin_lock(&host->lock); - for (i = 0; i < NR_PORTS; i++) { - struct ata_port *ap = host->ports[i]; - - if (!(host_irq_stat & (HIRQ_PORT0 << i))) - continue; - - if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) { - inic_host_intr(ap); + for (i = 0; i < NR_PORTS; i++) + if (host_irq_stat & (HIRQ_PORT0 << i)) { + inic_host_intr(host->ports[i]); handled++; - } else { - if (ata_ratelimit()) - dev_printk(KERN_ERR, host->dev, "interrupt " - "from disabled port %d (0x%x)\n", - i, host_irq_stat); } - } spin_unlock(&host->lock); @@ -679,8 +668,7 @@ static void init_port(struct ata_port *ap) memset(pp->pkt, 0, sizeof(struct inic_pkt)); memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE); - /* setup PRD and CPB lookup table addresses */ - writel(ap->prd_dma, port_base + PORT_PRD_ADDR); + /* setup CPB lookup table addresses */ writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR); } @@ -694,7 +682,6 @@ static int inic_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct inic_port_priv *pp; - int rc; /* alloc and initialize private data */ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); @@ -703,10 +690,6 @@ static int inic_port_start(struct ata_port *ap) ap->private_data = pp; /* Alloc resources */ - rc = ata_port_start(ap); - if (rc) - return rc; - pp->pkt = 
dmam_alloc_coherent(dev, sizeof(struct inic_pkt), &pp->pkt_dma, GFP_KERNEL); if (!pp->pkt) diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 71cc0d42f9e..a476cd99b95 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -675,8 +675,6 @@ static struct ata_port_operations mv5_ops = { .freeze = mv_eh_freeze, .thaw = mv_eh_thaw, .hardreset = mv_hardreset, - .error_handler = ata_std_error_handler, /* avoid SFF EH */ - .post_internal_cmd = ATA_OP_NULL, .scr_read = mv5_scr_read, .scr_write = mv5_scr_write, @@ -686,16 +684,27 @@ static struct ata_port_operations mv5_ops = { }; static struct ata_port_operations mv6_ops = { - .inherits = &mv5_ops, + .inherits = &ata_bmdma_port_ops, + + .lost_interrupt = ATA_OP_NULL, + + .qc_defer = mv_qc_defer, + .qc_prep = mv_qc_prep, + .qc_issue = mv_qc_issue, + .dev_config = mv6_dev_config, - .scr_read = mv_scr_read, - .scr_write = mv_scr_write, + .freeze = mv_eh_freeze, + .thaw = mv_eh_thaw, + .hardreset = mv_hardreset, + .softreset = mv_softreset, .pmp_hardreset = mv_pmp_hardreset, .pmp_softreset = mv_softreset, - .softreset = mv_softreset, .error_handler = mv_pmp_error_handler, + .scr_read = mv_scr_read, + .scr_write = mv_scr_write, + .sff_check_status = mv_sff_check_status, .sff_irq_clear = mv_sff_irq_clear, .check_atapi_dma = mv_check_atapi_dma, @@ -703,6 +712,9 @@ static struct ata_port_operations mv6_ops = { .bmdma_start = mv_bmdma_start, .bmdma_stop = mv_bmdma_stop, .bmdma_status = mv_bmdma_status, + + .port_start = mv_port_start, + .port_stop = mv_port_stop, }; static struct ata_port_operations mv_iie_ops = { @@ -2248,7 +2260,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) } if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_pio_queue_task(ap, qc, 0); + ata_sff_queue_pio_task(ap, 0); return 0; } @@ -2344,7 +2356,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) if (IS_GEN_II(hpriv)) return mv_qc_issue_fis(qc); } - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) @@ -2355,13 +2367,9 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) return NULL; qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (qc) { - if (qc->tf.flags & ATA_TFLAG_POLLING) - qc = NULL; - else if (!(qc->flags & ATA_QCFLAG_ACTIVE)) - qc = NULL; - } - return qc; + if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) + return qc; + return NULL; } static void mv_pmp_error_handler(struct ata_port *ap) @@ -2546,9 +2554,7 @@ static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) char *when = "idle"; ata_ehi_clear_desc(ehi); - if (ap->flags & ATA_FLAG_DISABLED) { - when = "disabled"; - } else if (edma_was_enabled) { + if (edma_was_enabled) { when = "EDMA enabled"; } else { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); @@ -2782,10 +2788,6 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause) struct mv_port_priv *pp; int edma_was_enabled; - if (ap->flags & ATA_FLAG_DISABLED) { - mv_unexpected_intr(ap, 0); - return; - } /* * Grab a snapshot of the EDMA_EN flag setting, * so that we have a consistent view for this port, @@ -2809,7 +2811,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause) } else if (!edma_was_enabled) { struct ata_queued_cmd *qc = mv_get_active_qc(ap); if (qc) - ata_sff_host_intr(ap, qc); + ata_bmdma_port_intr(ap, qc); else mv_unexpected_intr(ap, edma_was_enabled); } @@ -3656,9 +3658,6 @@ static void mv_port_init(struct 
ata_ioports *port, void __iomem *port_mmio) /* special case: control/altstatus doesn't have ATA_REG_ address */ port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; - /* unused: */ - port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL; - /* Clear any currently outstanding port interrupt conditions */ serr = port_mmio + mv_scr_offset(SCR_ERROR); writelfl(readl(serr), serr); diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 2a98b09ab73..21161136cad 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -272,7 +272,7 @@ enum ncq_saw_flag_list { }; struct nv_swncq_port_priv { - struct ata_prd *prd; /* our SG list */ + struct ata_bmdma_prd *prd; /* our SG list */ dma_addr_t prd_dma; /* and its DMA mapping */ void __iomem *sactive_block; void __iomem *irq_block; @@ -920,7 +920,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat) } /* handle interrupt */ - return ata_sff_host_intr(ap, qc); + return ata_bmdma_port_intr(ap, qc); } static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) @@ -933,107 +933,108 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; + struct nv_adma_port_priv *pp = ap->private_data; + void __iomem *mmio = pp->ctl_block; + u16 status; + u32 gen_ctl; + u32 notifier, notifier_error; + notifier_clears[i] = 0; - if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { - struct nv_adma_port_priv *pp = ap->private_data; - void __iomem *mmio = pp->ctl_block; - u16 status; - u32 gen_ctl; - u32 notifier, notifier_error; - - /* if ADMA is disabled, use standard ata interrupt handler */ - if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { - u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) - >> (NV_INT_PORT_SHIFT * i); - handled += nv_host_intr(ap, irq_stat); - continue; - } + /* if ADMA is disabled, use standard ata interrupt handler */ + if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { + u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) + >> (NV_INT_PORT_SHIFT * i); + handled += nv_host_intr(ap, irq_stat); + continue; + } - /* if in ATA register mode, check for standard interrupts */ - if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { - u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) - >> (NV_INT_PORT_SHIFT * i); - if (ata_tag_valid(ap->link.active_tag)) - /** NV_INT_DEV indication seems unreliable at times - at least in ADMA mode. Force it on always when a - command is active, to prevent losing interrupts. */ - irq_stat |= NV_INT_DEV; - handled += nv_host_intr(ap, irq_stat); - } + /* if in ATA register mode, check for standard interrupts */ + if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { + u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) + >> (NV_INT_PORT_SHIFT * i); + if (ata_tag_valid(ap->link.active_tag)) + /** NV_INT_DEV indication seems unreliable + at times at least in ADMA mode. Force it + on always when a command is active, to + prevent losing interrupts. */ + irq_stat |= NV_INT_DEV; + handled += nv_host_intr(ap, irq_stat); + } + + notifier = readl(mmio + NV_ADMA_NOTIFIER); + notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); + notifier_clears[i] = notifier | notifier_error; + + gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); + + if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && + !notifier_error) + /* Nothing to do */ + continue; + + status = readw(mmio + NV_ADMA_STAT); + + /* + * Clear status. 
Ensure the controller sees the + * clearing before we start looking at any of the CPB + * statuses, so that any CPB completions after this + * point in the handler will raise another interrupt. + */ + writew(status, mmio + NV_ADMA_STAT); + readw(mmio + NV_ADMA_STAT); /* flush posted write */ + rmb(); + + handled++; /* irq handled if we got here */ - notifier = readl(mmio + NV_ADMA_NOTIFIER); - notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); - notifier_clears[i] = notifier | notifier_error; - - gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); - - if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && - !notifier_error) - /* Nothing to do */ - continue; - - status = readw(mmio + NV_ADMA_STAT); - - /* Clear status. Ensure the controller sees the clearing before we start - looking at any of the CPB statuses, so that any CPB completions after - this point in the handler will raise another interrupt. */ - writew(status, mmio + NV_ADMA_STAT); - readw(mmio + NV_ADMA_STAT); /* flush posted write */ - rmb(); - - handled++; /* irq handled if we got here */ - - /* freeze if hotplugged or controller error */ - if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | - NV_ADMA_STAT_HOTUNPLUG | - NV_ADMA_STAT_TIMEOUT | - NV_ADMA_STAT_SERROR))) { - struct ata_eh_info *ehi = &ap->link.eh_info; - - ata_ehi_clear_desc(ehi); - __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); - if (status & NV_ADMA_STAT_TIMEOUT) { - ehi->err_mask |= AC_ERR_SYSTEM; - ata_ehi_push_desc(ehi, "timeout"); - } else if (status & NV_ADMA_STAT_HOTPLUG) { - ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, "hotplug"); - } else if (status & NV_ADMA_STAT_HOTUNPLUG) { - ata_ehi_hotplugged(ehi); - ata_ehi_push_desc(ehi, "hot unplug"); - } else if (status & NV_ADMA_STAT_SERROR) { - /* let libata analyze SError and figure out the cause */ - ata_ehi_push_desc(ehi, "SError"); - } else - ata_ehi_push_desc(ehi, "unknown"); - ata_port_freeze(ap); - continue; + /* freeze if hotplugged or controller error */ + if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | + NV_ADMA_STAT_HOTUNPLUG | + NV_ADMA_STAT_TIMEOUT | + NV_ADMA_STAT_SERROR))) { + struct ata_eh_info *ehi = &ap->link.eh_info; + + ata_ehi_clear_desc(ehi); + __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); + if (status & NV_ADMA_STAT_TIMEOUT) { + ehi->err_mask |= AC_ERR_SYSTEM; + ata_ehi_push_desc(ehi, "timeout"); + } else if (status & NV_ADMA_STAT_HOTPLUG) { + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "hotplug"); + } else if (status & NV_ADMA_STAT_HOTUNPLUG) { + ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "hot unplug"); + } else if (status & NV_ADMA_STAT_SERROR) { + /* let EH analyze SError and figure out cause */ + ata_ehi_push_desc(ehi, "SError"); + } else + ata_ehi_push_desc(ehi, "unknown"); + ata_port_freeze(ap); + continue; + } + + if (status & (NV_ADMA_STAT_DONE | + NV_ADMA_STAT_CPBERR | + NV_ADMA_STAT_CMD_COMPLETE)) { + u32 check_commands = notifier_clears[i]; + int pos, error = 0; + + if (status & NV_ADMA_STAT_CPBERR) { + /* check all active commands */ + if (ata_tag_valid(ap->link.active_tag)) + check_commands = 1 << + ap->link.active_tag; + else + check_commands = ap->link.sactive; } - if (status & (NV_ADMA_STAT_DONE | - NV_ADMA_STAT_CPBERR | - NV_ADMA_STAT_CMD_COMPLETE)) { - u32 check_commands = notifier_clears[i]; - int pos, error = 0; - - if (status & NV_ADMA_STAT_CPBERR) { - /* Check all active commands */ - if (ata_tag_valid(ap->link.active_tag)) - check_commands = 1 << - ap->link.active_tag; - else - check_commands = ap-> - link.sactive; - } - - 
/** Check CPBs for completed commands */ - while ((pos = ffs(check_commands)) && !error) { - pos--; - error = nv_adma_check_cpb(ap, pos, + /* check CPBs for completed commands */ + while ((pos = ffs(check_commands)) && !error) { + pos--; + error = nv_adma_check_cpb(ap, pos, notifier_error & (1 << pos)); - check_commands &= ~(1 << pos); - } + check_commands &= ~(1 << pos); } } } @@ -1099,7 +1100,7 @@ static void nv_adma_irq_clear(struct ata_port *ap) u32 notifier_clears[2]; if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); return; } @@ -1130,7 +1131,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) struct nv_adma_port_priv *pp = qc->ap->private_data; if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) - ata_sff_post_internal_cmd(qc); + ata_bmdma_post_internal_cmd(qc); } static int nv_adma_port_start(struct ata_port *ap) @@ -1155,7 +1156,8 @@ static int nv_adma_port_start(struct ata_port *ap) if (rc) return rc; - rc = ata_port_start(ap); + /* we might fallback to bmdma, allocate bmdma resources */ + rc = ata_bmdma_port_start(ap); if (rc) return rc; @@ -1407,7 +1409,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); - ata_sff_qc_prep(qc); + ata_bmdma_qc_prep(qc); return; } @@ -1466,7 +1468,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); - return ata_sff_qc_issue(qc); + return ata_bmdma_qc_issue(qc); } else nv_adma_mode(qc->ap); @@ -1498,22 +1500,19 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) spin_lock_irqsave(&host->lock, flags); for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap; - - ap = host->ports[i]; - if (ap && - !(ap->flags & ATA_FLAG_DISABLED)) { - struct ata_queued_cmd *qc; + struct ata_port *ap = host->ports[i]; + struct ata_queued_cmd *qc; - qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) - handled += ata_sff_host_intr(ap, qc); - else - // No request pending? Clear interrupt status - // anyway, in case there's one pending. - ap->ops->sff_check_status(ap); + qc = ata_qc_from_tag(ap, ap->link.active_tag); + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { + handled += ata_bmdma_port_intr(ap, qc); + } else { + /* + * No request pending? Clear interrupt status + * anyway, in case there's one pending. 
@@ -1498,22 +1500,19 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
 	spin_lock_irqsave(&host->lock, flags);
 
 	for (i = 0; i < host->n_ports; i++) {
-		struct ata_port *ap;
-
-		ap = host->ports[i];
-		if (ap &&
-		    !(ap->flags & ATA_FLAG_DISABLED)) {
-			struct ata_queued_cmd *qc;
+		struct ata_port *ap = host->ports[i];
+		struct ata_queued_cmd *qc;
 
-			qc = ata_qc_from_tag(ap, ap->link.active_tag);
-			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
-				handled += ata_sff_host_intr(ap, qc);
-			else
-				// No request pending?  Clear interrupt status
-				// anyway, in case there's one pending.
-				ap->ops->sff_check_status(ap);
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
+		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+			handled += ata_bmdma_port_intr(ap, qc);
+		} else {
+			/*
+			 * No request pending?  Clear interrupt status
+			 * anyway, in case there's one pending.
+			 */
+			ap->ops->sff_check_status(ap);
 		}
-		}
 	}
 
 	spin_unlock_irqrestore(&host->lock, flags);
@@ -1526,11 +1525,7 @@ static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
 	int i, handled = 0;
 
 	for (i = 0; i < host->n_ports; i++) {
-		struct ata_port *ap = host->ports[i];
-
-		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
-			handled += nv_host_intr(ap, irq_stat);
-
+		handled += nv_host_intr(host->ports[i], irq_stat);
 		irq_stat >>= NV_INT_PORT_SHIFT;
 	}
 
@@ -1674,7 +1669,6 @@ static void nv_mcp55_freeze(struct ata_port *ap)
 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
 	mask &= ~(NV_INT_ALL_MCP55 << shift);
 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
-	ata_sff_freeze(ap);
 }
 
 static void nv_mcp55_thaw(struct ata_port *ap)
@@ -1688,7 +1682,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
 	mask |= (NV_INT_MASK_MCP55 << shift);
 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
-	ata_sff_thaw(ap);
 }
 
 static void nv_adma_error_handler(struct ata_port *ap)
@@ -1744,7 +1737,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
 	}
 
-	ata_sff_error_handler(ap);
+	ata_bmdma_error_handler(ap);
 }
 
 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -1870,7 +1863,7 @@ static void nv_swncq_error_handler(struct ata_port *ap)
 		ehc->i.action |= ATA_EH_RESET;
 	}
 
-	ata_sff_error_handler(ap);
+	ata_bmdma_error_handler(ap);
 }
 
 #ifdef CONFIG_PM
@@ -1991,7 +1984,8 @@ static int nv_swncq_port_start(struct ata_port *ap)
 	struct nv_swncq_port_priv *pp;
 	int rc;
 
-	rc = ata_port_start(ap);
+	/* we might fallback to bmdma, allocate bmdma resources */
+	rc = ata_bmdma_port_start(ap);
 	if (rc)
 		return rc;
 
@@ -2016,7 +2010,7 @@ static int nv_swncq_port_start(struct ata_port *ap)
 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (qc->tf.protocol != ATA_PROT_NCQ) {
-		ata_sff_qc_prep(qc);
+		ata_bmdma_qc_prep(qc);
 		return;
 	}
 
@@ -2031,7 +2025,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct scatterlist *sg;
 	struct nv_swncq_port_priv *pp = ap->private_data;
-	struct ata_prd *prd;
+	struct ata_bmdma_prd *prd;
 	unsigned int si, idx;
 
 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
@@ -2092,7 +2086,7 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
 	struct nv_swncq_port_priv *pp = ap->private_data;
 
 	if (qc->tf.protocol != ATA_PROT_NCQ)
-		return ata_sff_qc_issue(qc);
+		return ata_bmdma_qc_issue(qc);
 
 	DPRINTK("Enter\n");
@@ -2380,16 +2374,14 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
 
-		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-			if (ap->link.sactive) {
-				nv_swncq_host_interrupt(ap, (u16)irq_stat);
-				handled = 1;
-			} else {
-				if (irq_stat)	/* reserve Hotplug */
-					nv_swncq_irq_clear(ap, 0xfff0);
+		if (ap->link.sactive) {
+			nv_swncq_host_interrupt(ap, (u16)irq_stat);
+			handled = 1;
+		} else {
+			if (irq_stat)	/* reserve Hotplug */
+				nv_swncq_irq_clear(ap, 0xfff0);
 
-				handled += nv_host_intr(ap, (u8)irq_stat);
-			}
+			handled += nv_host_intr(ap, (u8)irq_stat);
 		}
 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
 	}
@@ -2436,7 +2428,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ppi[0] = &nv_port_info[type];
 	ipriv = ppi[0]->private_data;
-	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 
@@ -2479,8 +2471,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	pci_set_master(pdev);
-	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
-				 IRQF_SHARED, ipriv->sht);
+	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 5356ec00d2b..f03ad48273f 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -333,7 +333,8 @@ static int pdc_common_port_start(struct ata_port *ap)
 	struct pdc_port_priv *pp;
 	int rc;
 
-	rc = ata_port_start(ap);
+	/* we use the same prd table as bmdma, allocate it */
+	rc = ata_bmdma_port_start(ap);
 	if (rc)
 		return rc;
 
@@ -499,7 +500,7 @@ static int pdc_sata_scr_write(struct ata_link *link,
 static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	dma_addr_t sg_table = ap->prd_dma;
+	dma_addr_t sg_table = ap->bmdma_prd_dma;
 	unsigned int cdb_len = qc->dev->cdb_len;
 	u8 *cdb = qc->cdb;
 	struct pdc_port_priv *pp = ap->private_data;
@@ -587,6 +588,7 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
 static void pdc_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	struct ata_bmdma_prd *prd = ap->bmdma_prd;
 	struct scatterlist *sg;
 	const u32 SG_COUNT_ASIC_BUG = 41*4;
 	unsigned int si, idx;
@@ -613,8 +615,8 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
 			if ((offset + sg_len) > 0x10000)
 				len = 0x10000 - offset;
 
-			ap->prd[idx].addr = cpu_to_le32(addr);
-			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+			prd[idx].addr = cpu_to_le32(addr);
+			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
 
 			idx++;
@@ -623,27 +625,27 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
 		}
 	}
 
-	len = le32_to_cpu(ap->prd[idx - 1].flags_len);
+	len = le32_to_cpu(prd[idx - 1].flags_len);
 	if (len > SG_COUNT_ASIC_BUG) {
 		u32 addr;
 
 		VPRINTK("Splitting last PRD.\n");
 
-		addr = le32_to_cpu(ap->prd[idx - 1].addr);
-		ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
+		addr = le32_to_cpu(prd[idx - 1].addr);
+		prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
 		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
 
 		addr = addr + len - SG_COUNT_ASIC_BUG;
 		len = SG_COUNT_ASIC_BUG;
-		ap->prd[idx].addr = cpu_to_le32(addr);
-		ap->prd[idx].flags_len = cpu_to_le32(len);
+		prd[idx].addr = cpu_to_le32(addr);
+		prd[idx].flags_len = cpu_to_le32(len);
 		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
 
 		idx++;
 	}
 
-	ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
 static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -658,7 +660,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 		pdc_fill_sg(qc);
 		/*FALLTHROUGH*/
 	case ATA_PROT_NODATA:
-		i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma,
+		i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
 				   qc->dev->devno, pp->pkt);
 		if (qc->tf.flags & ATA_TFLAG_LBA48)
 			i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
@@ -838,7 +840,7 @@ static void pdc_error_handler(struct ata_port *ap)
 	if (!(ap->pflags & ATA_PFLAG_FROZEN))
 		pdc_reset_port(ap);
 
-	ata_std_error_handler(ap);
+	ata_sff_error_handler(ap);
 }
 
 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
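The pdc_fill_sg() hunks above also carry a hardware workaround: if the last PRD entry is longer than SG_COUNT_ASIC_BUG (41*4 bytes), it is split into two entries before the EOT flag is set. A minimal userspace sketch of that split arithmetic follows; struct prd_entry and split_last_prd() are illustrative stand-ins, and the little-endian conversions and EOT flag handling of the real code are omitted.

#include <stdint.h>
#include <stdio.h>

/* Illustrative PRD entry: bus address plus byte count (flags omitted). */
struct prd_entry {
	uint32_t addr;
	uint32_t len;
};

#define SG_COUNT_ASIC_BUG	(41 * 4)

/*
 * Mirror of the last-PRD split in pdc_fill_sg() above: if the final entry
 * is longer than the quirk threshold, carve its last SG_COUNT_ASIC_BUG
 * bytes into an extra entry.  Returns the new entry count.
 */
static unsigned int split_last_prd(struct prd_entry *prd, unsigned int idx)
{
	uint32_t len = prd[idx - 1].len;
	uint32_t addr = prd[idx - 1].addr;

	if (len <= SG_COUNT_ASIC_BUG)
		return idx;

	prd[idx - 1].len = len - SG_COUNT_ASIC_BUG;
	prd[idx].addr = addr + len - SG_COUNT_ASIC_BUG;
	prd[idx].len = SG_COUNT_ASIC_BUG;
	return idx + 1;
}

int main(void)
{
	struct prd_entry prd[2] = { { 0x10000000u, 0x1000u } };
	unsigned int i, n = split_last_prd(prd, 1);

	for (i = 0; i < n; i++)
		printf("PRD[%u] = (0x%X, 0x%X)\n", i,
		       (unsigned int)prd[i].addr, (unsigned int)prd[i].len);
	return 0;
}

Feeding it a single 0x1000-byte entry yields a 0xF5C-byte entry followed by a 0xA4-byte one, the same result the driver's arithmetic produces.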
@@ -984,8 +986,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
 		/* check for a plug or unplug event */
 		ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
 		tmp = hotplug_status & (0x11 << ata_no);
-		if (tmp && ap &&
-		    !(ap->flags & ATA_FLAG_DISABLED)) {
+		if (tmp) {
 			struct ata_eh_info *ehi = &ap->link.eh_info;
 			ata_ehi_clear_desc(ehi);
 			ata_ehi_hotplugged(ehi);
@@ -997,8 +998,7 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
 
 		/* check for a packet interrupt */
 		tmp = mask & (1 << (i + 1));
-		if (tmp && ap &&
-		    !(ap->flags & ATA_FLAG_DISABLED)) {
+		if (tmp) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 92ba45e6689..daeebf19a6a 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -120,8 +120,6 @@ static void qs_host_stop(struct ata_host *host);
 static void qs_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
 static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
-static void qs_bmdma_stop(struct ata_queued_cmd *qc);
-static u8 qs_bmdma_status(struct ata_port *ap);
 static void qs_freeze(struct ata_port *ap);
 static void qs_thaw(struct ata_port *ap);
 static int qs_prereset(struct ata_link *link, unsigned long deadline);
@@ -137,8 +135,6 @@ static struct ata_port_operations qs_ata_ops = {
 	.inherits		= &ata_sff_port_ops,
 
 	.check_atapi_dma	= qs_check_atapi_dma,
-	.bmdma_stop		= qs_bmdma_stop,
-	.bmdma_status		= qs_bmdma_status,
 	.qc_prep		= qs_qc_prep,
 	.qc_issue		= qs_qc_issue,
 
@@ -147,7 +143,6 @@ static struct ata_port_operations qs_ata_ops = {
 	.prereset		= qs_prereset,
 	.softreset		= ATA_OP_NULL,
 	.error_handler		= qs_error_handler,
-	.post_internal_cmd	= ATA_OP_NULL,
 	.lost_interrupt		= ATA_OP_NULL,
 
 	.scr_read		= qs_scr_read,
@@ -191,16 +186,6 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
 	return 1;	/* ATAPI DMA not supported */
 }
 
-static void qs_bmdma_stop(struct ata_queued_cmd *qc)
-{
-	/* nothing */
-}
-
-static u8 qs_bmdma_status(struct ata_port *ap)
-{
-	return 0;
-}
-
 static inline void qs_enter_reg_mode(struct ata_port *ap)
 {
 	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
@@ -255,7 +240,7 @@ static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
 static void qs_error_handler(struct ata_port *ap)
 {
 	qs_enter_reg_mode(ap);
-	ata_std_error_handler(ap);
+	ata_sff_error_handler(ap);
 }
 
 static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
@@ -304,10 +289,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 	VPRINTK("ENTER\n");
 
 	qs_enter_reg_mode(qc->ap);
-	if (qc->tf.protocol != ATA_PROT_DMA) {
-		ata_sff_qc_prep(qc);
+	if (qc->tf.protocol != ATA_PROT_DMA)
 		return;
-	}
 
 	nelem = qs_fill_sg(qc);
@@ -404,26 +387,24 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
 			u8 sHST = sff1 & 0x3f;	/* host status */
 			unsigned int port_no = (sff1 >> 8) & 0x03;
 			struct ata_port *ap = host->ports[port_no];
+			struct qs_port_priv *pp = ap->private_data;
+			struct ata_queued_cmd *qc;
 
 			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
 					sff1, sff0, port_no, sHST, sDST);
 			handled = 1;
-			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-				struct ata_queued_cmd *qc;
-				struct qs_port_priv *pp = ap->private_data;
-				if (!pp || pp->state != qs_state_pkt)
-					continue;
-				qc = ata_qc_from_tag(ap, ap->link.active_tag);
-				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
-					switch (sHST) {
-					case 0: /* successful CPB */
-					case 3: /* device error */
-						qs_enter_reg_mode(qc->ap);
-						qs_do_or_die(qc, sDST);
-						break;
-					default:
-						break;
-					}
+			if (!pp || pp->state != qs_state_pkt)
+				continue;
+			qc = ata_qc_from_tag(ap, ap->link.active_tag);
+			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
+				switch (sHST) {
+				case 0: /* successful CPB */
+				case 3: /* device error */
+					qs_enter_reg_mode(qc->ap);
+					qs_do_or_die(qc, sDST);
+					break;
+				default:
+					break;
 				}
 			}
 		}
@@ -436,33 +417,30 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
 	unsigned int handled = 0, port_no;
 
 	for (port_no = 0; port_no < host->n_ports; ++port_no) {
-		struct ata_port *ap;
-		ap = host->ports[port_no];
-		if (ap &&
-		    !(ap->flags & ATA_FLAG_DISABLED)) {
-			struct ata_queued_cmd *qc;
-			struct qs_port_priv *pp;
-			qc = ata_qc_from_tag(ap, ap->link.active_tag);
-			if (!qc || !(qc->flags & ATA_QCFLAG_ACTIVE)) {
-				/*
-				 * The qstor hardware generates spurious
-				 * interrupts from time to time when switching
-				 * in and out of packet mode.
-				 * There's no obvious way to know if we're
-				 * here now due to that, so just ack the irq
-				 * and pretend we knew it was ours.. (ugh).
-				 * This does not affect packet mode.
-				 */
-				ata_sff_check_status(ap);
-				handled = 1;
-				continue;
-			}
-			pp = ap->private_data;
-			if (!pp || pp->state != qs_state_mmio)
-				continue;
-			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
-				handled |= ata_sff_host_intr(ap, qc);
+		struct ata_port *ap = host->ports[port_no];
+		struct qs_port_priv *pp = ap->private_data;
+		struct ata_queued_cmd *qc;
+
+		qc = ata_qc_from_tag(ap, ap->link.active_tag);
+		if (!qc) {
+			/*
+			 * The qstor hardware generates spurious
+			 * interrupts from time to time when switching
+			 * in and out of packet mode.  There's no
+			 * obvious way to know if we're here now due
+			 * to that, so just ack the irq and pretend we
+			 * knew it was ours.. (ugh).  This does not
+			 * affect packet mode.
+			 */
+			ata_sff_check_status(ap);
+			handled = 1;
+			continue;
 		}
+
+		if (!pp || pp->state != qs_state_mmio)
+			continue;
+		if (!(qc->tf.flags & ATA_TFLAG_POLLING))
+			handled |= ata_sff_port_intr(ap, qc);
 	}
 	return handled;
 }
@@ -509,11 +487,7 @@ static int qs_port_start(struct ata_port *ap)
 	void __iomem *mmio_base = qs_mmio_base(ap->host);
 	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
 	u64 addr;
-	int rc;
 
-	rc = ata_port_start(ap);
-	if (rc)
-		return rc;
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3cb69d5fb81..3a4f8421971 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -284,7 +284,7 @@ static void sil_bmdma_setup(struct ata_queued_cmd *qc)
 	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
 
 	/* load PRD table addr. */
-	iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS);
+	iowrite32(ap->bmdma_prd_dma, bmdma + ATA_DMA_TABLE_OFS);
 
 	/* issue r/w command */
 	ap->ops->sff_exec_command(ap, &qc->tf);
@@ -311,10 +311,10 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
 {
 	struct scatterlist *sg;
 	struct ata_port *ap = qc->ap;
-	struct ata_prd *prd, *last_prd = NULL;
+	struct ata_bmdma_prd *prd, *last_prd = NULL;
 	unsigned int si;
 
-	prd = &ap->prd[0];
+	prd = &ap->bmdma_prd[0];
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		/* Note h/w doesn't support 64-bit, so we unconditionally
 		 * truncate dma_addr_t to u32.
@@ -503,7 +503,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
 		goto err_hsm;
 
 	/* ack bmdma irq events */
-	ata_sff_irq_clear(ap);
+	ata_bmdma_irq_clear(ap);
 
 	/* kick HSM in the ass */
 	ata_sff_hsm_move(ap, qc, status, 0);
@@ -532,9 +532,6 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
 		struct ata_port *ap = host->ports[i];
 		u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
 
-		if (unlikely(ap->flags & ATA_FLAG_DISABLED))
-			continue;
-
 		/* turn off SATA_IRQ if not supported */
 		if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
 			bmdma2 &= ~SIL_DMA_SATA_IRQ;
@@ -587,7 +584,7 @@ static void sil_thaw(struct ata_port *ap)
 
 	/* clear IRQ */
 	ap->ops->sff_check_status(ap);
-	ata_sff_irq_clear(ap);
+	ata_bmdma_irq_clear(ap);
 
 	/* turn on SATA IRQ if supported */
 	if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 433b6b89c79..be7726d7686 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -539,12 +539,12 @@ static void sil24_config_port(struct ata_port *ap)
 		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
 
 	/* zero error counters. */
-	writel(0x8000, port + PORT_DECODE_ERR_THRESH);
-	writel(0x8000, port + PORT_CRC_ERR_THRESH);
-	writel(0x8000, port + PORT_HSHK_ERR_THRESH);
-	writel(0x0000, port + PORT_DECODE_ERR_CNT);
-	writel(0x0000, port + PORT_CRC_ERR_CNT);
-	writel(0x0000, port + PORT_HSHK_ERR_CNT);
+	writew(0x8000, port + PORT_DECODE_ERR_THRESH);
+	writew(0x8000, port + PORT_CRC_ERR_THRESH);
+	writew(0x8000, port + PORT_HSHK_ERR_THRESH);
+	writew(0x0000, port + PORT_DECODE_ERR_CNT);
+	writew(0x0000, port + PORT_CRC_ERR_CNT);
+	writew(0x0000, port + PORT_HSHK_ERR_CNT);
 
 	/* always use 64bit activation */
 	writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
@@ -622,6 +622,11 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
 	irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
 	writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
 
+	/*
+	 * The barrier is required to ensure that writes to cmd_block reach
+	 * the memory before the write to PORT_CMD_ACTIVATE.
+	 */
+	wmb();
 	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
 	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
 
@@ -865,7 +870,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
 	} else {
 		prb = &cb->atapi.prb;
 		sge = cb->atapi.sge;
-		memset(cb->atapi.cdb, 0, 32);
+		memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
 		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);
 
 		if (ata_is_data(qc->tf.protocol)) {
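Both sil24 hunks that touch PORT_CMD_ACTIVATE (the polled-command path above and the qc_issue path that follows) add a wmb() so the command block is globally visible before the controller is told to fetch it. The sketch below shows the same fill-descriptor, barrier, ring-doorbell ordering; it is a userspace illustration only, with a C11 release fence standing in for the kernel's wmb() and all structure and register names invented for the example.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative command descriptor and stand-in "doorbell" registers. */
struct cmd_block {
	uint64_t sge_addr;
	uint32_t opcode;
};

static struct cmd_block cb;
static volatile uint32_t doorbell_lo, doorbell_hi;

static void issue_command(uint64_t paddr, uint32_t opcode)
{
	/* Fill the in-memory descriptor the device would fetch via DMA. */
	cb.sge_addr = paddr;
	cb.opcode = opcode;

	/*
	 * Make the descriptor writes visible before the device is told to
	 * fetch it.  The sil24 hunks use the kernel's wmb(); a C11 release
	 * fence plays that ordering role in this sketch.
	 */
	atomic_thread_fence(memory_order_release);

	/* Ring the doorbell, low word first, as the driver does. */
	doorbell_lo = (uint32_t)paddr;
	doorbell_hi = (uint32_t)(paddr >> 32);
}

int main(void)
{
	issue_command(0x123456789aull, 0x27);
	printf("doorbell = %08x:%08x\n",
	       (unsigned int)doorbell_hi, (unsigned int)doorbell_lo);
	return 0;
}

Without the barrier, a weakly ordered CPU could let the doorbell write overtake the descriptor writes, which is exactly the race the added wmb() closes.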
@@ -895,6 +900,11 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
 	paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
 	activate = port + PORT_CMD_ACTIVATE + tag * 8;
 
+	/*
+	 * The barrier is required to ensure that writes to cmd_block reach
+	 * the memory before the write to PORT_CMD_ACTIVATE.
+	 */
+	wmb();
 	writel((u32)paddr, activate);
 	writel((u64)paddr >> 32, activate + 4);
 
@@ -1160,13 +1170,8 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
 
 	for (i = 0; i < host->n_ports; i++)
 		if (status & (1 << i)) {
-			struct ata_port *ap = host->ports[i];
-			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-				sil24_host_intr(ap);
-				handled++;
-			} else
-				printk(KERN_ERR DRV_NAME
-				       ": interrupt from disabled port %d\n", i);
+			sil24_host_intr(host->ports[i]);
+			handled++;
 		}
 
 	spin_unlock(&host->lock);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index f8a91bfd66a..2bfe3ae0397 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -279,7 +279,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		break;
 	}
 
-	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 
@@ -308,7 +308,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_master(pdev);
 	pci_intx(pdev, 1);
-	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
 				 IRQF_SHARED, &sis_sht);
 }
 
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 7257f2d5c52..7d9db4aaf07 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -224,7 +224,7 @@ static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
 
 	/* load PRD table addr. */
 	mb();	/* make sure PRD table writes are visible to controller */
-	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+	writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS);
 
 	/* specify data direction, triple-check start bit is clear */
 	dmactl = readb(mmio + ATA_DMA_CMD);
@@ -502,7 +502,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
 	writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
 
 	pci_set_master(pdev);
-	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
 				 IRQF_SHARED, &k2_sata_sht);
 }
 
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 232468f2ea9..bedd5188e5b 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -302,11 +302,6 @@ static int pdc_port_start(struct ata_port *ap)
 {
 	struct device *dev = ap->host->dev;
 	struct pdc_port_priv *pp;
-	int rc;
-
-	rc = ata_port_start(ap);
-	if (rc)
-		return rc;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
@@ -840,8 +835,7 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
 		ap = host->ports[port_no];
 		tmp = mask & (1 << i);
 		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
-		if (tmp && ap &&
-		    !(ap->flags & ATA_FLAG_DISABLED)) {
+		if (tmp && ap) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -927,7 +921,7 @@ static void pdc_error_handler(struct ata_port *ap)
 	if (!(ap->pflags & ATA_PFLAG_FROZEN))
 		pdc_reset_port(ap);
 
-	ata_std_error_handler(ap);
+	ata_sff_error_handler(ap);
 }
 
 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 011e098590d..b8578c32d34 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -181,9 +181,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
-	rc = ata_pci_bmdma_init(host);
-	if (rc)
-		return rc;
+	ata_pci_bmdma_init(host);
 
 	iomap = host->iomap;
 
@@ -244,7 +242,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);
 	pci_intx(pdev, 1);
-	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
 				 IRQF_SHARED, &uli_sht);
 }
 
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 08f65492cc8..4730c42a5ee 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -308,7 +308,7 @@ static void svia_noop_freeze(struct ata_port *ap)
 	 * certain way.  Leave it alone and just clear pending IRQ.
 	 */
 	ap->ops->sff_check_status(ap);
-	ata_sff_irq_clear(ap);
+	ata_bmdma_irq_clear(ap);
 }
 
 /**
@@ -463,7 +463,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 	struct ata_host *host;
 	int rc;
 
-	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 	*r_host = host;
@@ -520,7 +520,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
 	struct ata_host *host;
 	int i, rc;
 
-	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
 	if (rc)
 		return rc;
 	*r_host = host;
@@ -575,6 +575,33 @@ static void svia_configure(struct pci_dev *pdev)
 		tmp8 |= NATIVE_MODE_ALL;
 		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
 	}
+
+	/*
+	 * vt6421 has problems talking to some drives.  The following
+	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
+	 *
+	 * When host issues HOLD, device may send up to 20DW of data
+	 * before acknowledging it with HOLDA and the host should be
+	 * able to buffer them in FIFO.  Unfortunately, some WD drives
+	 * send upto 40DW before acknowledging HOLD and, in the
+	 * default configuration, this ends up overflowing vt6421's
+	 * FIFO, making the controller abort the transaction with
+	 * R_ERR.
+	 *
+	 * Rx52[2] is the internal 128DW FIFO Flow control watermark
+	 * adjusting mechanism enable bit and the default value 0
+	 * means host will issue HOLD to device when the left FIFO
+	 * size goes below 32DW.  Setting it to 1 makes the watermark
+	 * 64DW.
+	 *
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
+	 * http://article.gmane.org/gmane.linux.ide/46352
+	 */
+	if (pdev->device == 0x3249) {
+		pci_read_config_byte(pdev, 0x52, &tmp8);
+		tmp8 |= 1 << 2;
+		pci_write_config_byte(pdev, 0x52, tmp8);
+	}
 }
 
 static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -628,7 +655,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	svia_configure(pdev);
 
 	pci_set_master(pdev);
-	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
+	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
 				 IRQF_SHARED, &svia_sht);
 }
 
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 8b2a278b254..b777176ff49 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -245,7 +245,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
 
 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
-		handled = ata_sff_host_intr(ap, qc);
+		handled = ata_bmdma_port_intr(ap, qc);
 
 	/* We received an interrupt during a polled command,
 	 * or some other spurious condition.  Interrupt reporting
@@ -284,14 +284,8 @@ static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
 	for (i = 0; i < host->n_ports; i++) {
 		u8 port_status = (status >> (8 * i)) & 0xff;
 		if (port_status) {
-			struct ata_port *ap = host->ports[i];
-
-			if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
-				vsc_port_intr(port_status, ap);
-				handled++;
-			} else
-				dev_printk(KERN_ERR, host->dev,
-					   "interrupt from disabled port %d\n", i);
+			vsc_port_intr(port_status, host->ports[i]);
+			handled++;
 		}
 	}
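The svia_configure() hunk above applies the vt6421 workaround by setting bit 2 of PCI configuration register 0x52 (the FIFO flow-control watermark enable) on device ID 0x3249. To confirm the bit on a running system, the register can be read back through the sysfs PCI config file; a small sketch follows (the device path is a placeholder for whichever slot the controller occupies, and offsets past the first 64 bytes normally require root).

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical BDF of a vt6421; adjust to match your system. */
	const char *cfg = "/sys/bus/pci/devices/0000:02:00.0/config";
	uint8_t rx52;
	int fd = open(cfg, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (pread(fd, &rx52, 1, 0x52) != 1) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("Rx52 = 0x%02x, watermark bit (bit 2) is %s\n",
	       rx52, (rx52 & (1 << 2)) ? "set" : "clear");
	close(fd);
	return 0;
}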