Diffstat (limited to 'drivers/ata')
29 files changed, 1711 insertions, 461 deletions
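Context for the largest change below: the ahci.c hunks add AHCI Enclosure Management (EM) LED support, exposed to user space through a new em_message sysfs attribute parsed by ahci_led_store(). The following is only an illustrative sketch of how that attribute could be driven, based on the message layout used in the patch (bits 0-7 port, filled in by the driver; bits 8-15 PM slot; bit 16 activity LED); the host0 path, slot number, and the assumption that sw_activity is disabled first are examples, not part of the commit.

    /*
     * Sketch only, not part of the patch: build an LED message in the
     * layout ahci_led_store()/ahci_transmit_led_message() expect and
     * write it to the em_message sysfs node added by this commit.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t pmp = 0;              /* PM slot 0 (bits 8-15) */
            uint32_t led_on = 1u << 16;    /* activity LED bit */
            uint32_t state = (pmp << 8) | led_on;   /* port bits left to the driver */

            /* host0 is an assumption; use an AHCI host that reports EM support */
            FILE *f = fopen("/sys/class/scsi_host/host0/em_message", "w");
            if (!f) {
                    perror("em_message");
                    return 1;
            }
            /* ahci_led_store() parses this with simple_strtoul(buf, NULL, 0) */
            fprintf(f, "0x%x\n", state);
            fclose(f);
            return 0;
    }

Note that, per the patch, the activity bit is masked off while sw_activity blinking is enabled, so that policy would need to be turned off before driving the activity LED this way.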
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 9bf2986a278..ae8494944c4 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -651,9 +651,17 @@ config PATA_WINBOND_VLB Support for the Winbond W83759A controller on Vesa Local Bus systems. +config HAVE_PATA_PLATFORM + bool + help + This is an internal configuration node for any machine that + uses pata-platform driver to enable the relevant driver in the + configuration structure without having to submit endless patches + to update the PATA_PLATFORM entry. + config PATA_PLATFORM tristate "Generic platform device PATA support" - depends on EMBEDDED || ARCH_RPC || PPC + depends on EMBEDDED || ARCH_RPC || PPC || HAVE_PATA_PLATFORM help This option enables support for generic directly connected ATA devices commonly found on embedded systems. diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 97f83fb2ee2..ef3e5522e1a 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -56,6 +56,12 @@ MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip) static int ahci_enable_alpm(struct ata_port *ap, enum link_pm policy); static void ahci_disable_alpm(struct ata_port *ap); +static ssize_t ahci_led_show(struct ata_port *ap, char *buf); +static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, + size_t size); +static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, + ssize_t size); +#define MAX_SLOTS 8 enum { AHCI_PCI_BAR = 5, @@ -89,6 +95,8 @@ enum { board_ahci_sb600 = 3, board_ahci_mv = 4, board_ahci_sb700 = 5, + board_ahci_mcp65 = 6, + board_ahci_nopmp = 7, /* global controller registers */ HOST_CAP = 0x00, /* host capabilities */ @@ -96,6 +104,8 @@ enum { HOST_IRQ_STAT = 0x08, /* interrupt status */ HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */ HOST_VERSION = 0x10, /* AHCI spec. 
version compliancy */ + HOST_EM_LOC = 0x1c, /* Enclosure Management location */ + HOST_EM_CTL = 0x20, /* Enclosure Management Control */ /* HOST_CTL bits */ HOST_RESET = (1 << 0), /* reset controller; self-clear */ @@ -103,6 +113,7 @@ enum { HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ /* HOST_CAP bits */ + HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ HOST_CAP_SSC = (1 << 14), /* Slumber capable */ HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ HOST_CAP_CLO = (1 << 24), /* Command List Override support */ @@ -190,6 +201,7 @@ enum { AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ + AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ /* ap->flags bits */ @@ -199,6 +211,11 @@ enum { ATA_FLAG_IPM, ICH_MAP = 0x90, /* ICH MAP register */ + + /* em_ctl bits */ + EM_CTL_RST = (1 << 9), /* Reset */ + EM_CTL_TM = (1 << 8), /* Transmit Message */ + EM_CTL_ALHD = (1 << 26), /* Activity LED */ }; struct ahci_cmd_hdr { @@ -216,12 +233,21 @@ struct ahci_sg { __le32 flags_size; }; +struct ahci_em_priv { + enum sw_activity blink_policy; + struct timer_list timer; + unsigned long saved_activity; + unsigned long activity; + unsigned long led_state; +}; + struct ahci_host_priv { unsigned int flags; /* AHCI_HFLAG_* */ u32 cap; /* cap to use */ u32 port_map; /* port map to use */ u32 saved_cap; /* saved initial cap */ u32 saved_port_map; /* saved initial port_map */ + u32 em_loc; /* enclosure management location */ }; struct ahci_port_priv { @@ -237,6 +263,8 @@ struct ahci_port_priv { unsigned int ncq_saw_dmas:1; unsigned int ncq_saw_sdb:1; u32 intr_mask; /* interrupts to enable */ + struct ahci_em_priv em_priv[MAX_SLOTS];/* enclosure management info + * per PM slot */ }; static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); @@ -253,6 +281,8 @@ static void ahci_pmp_attach(struct ata_port *ap); static void ahci_pmp_detach(struct ata_port *ap); static int ahci_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline); +static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); static int ahci_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, @@ -272,9 +302,20 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); static int ahci_pci_device_resume(struct pci_dev *pdev); #endif +static ssize_t ahci_activity_show(struct ata_device *dev, char *buf); +static ssize_t ahci_activity_store(struct ata_device *dev, + enum sw_activity val); +static void ahci_init_sw_activity(struct ata_link *link); static struct device_attribute *ahci_shost_attrs[] = { &dev_attr_link_power_management_policy, + &dev_attr_em_message_type, + &dev_attr_em_message, + NULL +}; + +static struct device_attribute *ahci_sdev_attrs[] = { + &dev_attr_sw_activity, NULL }; @@ -284,6 +325,7 @@ static struct scsi_host_template ahci_sht = { .sg_tablesize = AHCI_MAX_SG, .dma_boundary = AHCI_DMA_BOUNDARY, .shost_attrs = ahci_shost_attrs, + .sdev_attrs = ahci_sdev_attrs, }; static struct ata_port_operations ahci_ops = { @@ -311,6 +353,10 @@ static struct ata_port_operations ahci_ops = { .enable_pm = ahci_enable_alpm, .disable_pm = ahci_disable_alpm, + .em_show = ahci_led_show, + .em_store = ahci_led_store, + .sw_activity_show = 
ahci_activity_show, + .sw_activity_store = ahci_activity_store, #ifdef CONFIG_PM .port_suspend = ahci_port_suspend, .port_resume = ahci_port_resume, @@ -329,6 +375,12 @@ static struct ata_port_operations ahci_p5wdh_ops = { .hardreset = ahci_p5wdh_hardreset, }; +static struct ata_port_operations ahci_sb600_ops = { + .inherits = &ahci_ops, + .softreset = ahci_sb600_softreset, + .pmp_softreset = ahci_sb600_softreset, +}; + #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) static const struct ata_port_info ahci_port_info[] = { @@ -359,11 +411,11 @@ static const struct ata_port_info ahci_port_info[] = { { AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI | - AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP), + AHCI_HFLAG_SECT255), .flags = AHCI_FLAG_COMMON, .pio_mask = 0x1f, /* pio0-4 */ .udma_mask = ATA_UDMA6, - .port_ops = &ahci_ops, + .port_ops = &ahci_sb600_ops, }, /* board_ahci_mv */ { @@ -377,8 +429,23 @@ static const struct ata_port_info ahci_port_info[] = { }, /* board_ahci_sb700 */ { - AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | - AHCI_HFLAG_NO_PMP), + AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), + .flags = AHCI_FLAG_COMMON, + .pio_mask = 0x1f, /* pio0-4 */ + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_sb600_ops, + }, + /* board_ahci_mcp65 */ + { + AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), + .flags = AHCI_FLAG_COMMON, + .pio_mask = 0x1f, /* pio0-4 */ + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, + /* board_ahci_nopmp */ + { + AHCI_HFLAGS (AHCI_HFLAG_NO_PMP), .flags = AHCI_FLAG_COMMON, .pio_mask = 0x1f, /* pio0-4 */ .udma_mask = ATA_UDMA6, @@ -438,14 +505,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */ /* NVIDIA */ - { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */ - { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */ - { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */ - { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */ - { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */ - { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */ - { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */ - { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */ + { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */ @@ -502,15 +569,15 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */ { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */ { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */ - { PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */ - { PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */ - { PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */ - { PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */ + { PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci }, /* MCP7B */ + { PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci }, /* MCP7B */ + { PCI_VDEVICE(NVIDIA, 0x0bc6), 
board_ahci }, /* MCP7B */ + { PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */ /* SiS */ - { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ - { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */ - { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */ + { PCI_VDEVICE(SI, 0x1184), board_ahci_nopmp }, /* SiS 966 */ + { PCI_VDEVICE(SI, 0x1185), board_ahci_nopmp }, /* SiS 968 */ + { PCI_VDEVICE(SI, 0x0186), board_ahci_nopmp }, /* SiS 968 */ /* Marvell */ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ @@ -535,6 +602,11 @@ static struct pci_driver ahci_pci_driver = { #endif }; +static int ahci_em_messages = 1; +module_param(ahci_em_messages, int, 0444); +/* add other LED protocol types when they become supported */ +MODULE_PARM_DESC(ahci_em_messages, + "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED"); static inline int ahci_nr_ports(u32 cap) { @@ -624,12 +696,26 @@ static void ahci_save_initial_config(struct pci_dev *pdev, cap &= ~HOST_CAP_NCQ; } + if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) { + dev_printk(KERN_INFO, &pdev->dev, + "controller can do NCQ, turning on CAP_NCQ\n"); + cap |= HOST_CAP_NCQ; + } + if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { dev_printk(KERN_INFO, &pdev->dev, "controller can't do PMP, turning off CAP_PMP\n"); cap &= ~HOST_CAP_PMP; } + if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 && + port_map != 1) { + dev_printk(KERN_INFO, &pdev->dev, + "JMB361 has only one port, port_map 0x%x -> 0x%x\n", + port_map, 1); + port_map = 1; + } + /* * Temporary Marvell 6145 hack: PATA port presence * is asserted through the standard AHCI port @@ -991,11 +1077,28 @@ static void ahci_power_down(struct ata_port *ap) static void ahci_start_port(struct ata_port *ap) { + struct ahci_port_priv *pp = ap->private_data; + struct ata_link *link; + struct ahci_em_priv *emp; + /* enable FIS reception */ ahci_start_fis_rx(ap); /* enable DMA */ ahci_start_engine(ap); + + /* turn on LEDs */ + if (ap->flags & ATA_FLAG_EM) { + ata_port_for_each_link(link, ap) { + emp = &pp->em_priv[link->pmp]; + ahci_transmit_led_message(ap, emp->led_state, 4); + } + } + + if (ap->flags & ATA_FLAG_SW_ACTIVITY) + ata_port_for_each_link(link, ap) + ahci_init_sw_activity(link); + } static int ahci_deinit_port(struct ata_port *ap, const char **emsg) @@ -1039,12 +1142,15 @@ static int ahci_reset_controller(struct ata_host *host) readl(mmio + HOST_CTL); /* flush */ } - /* reset must complete within 1 second, or + /* + * to perform host reset, OS should set HOST_RESET + * and poll until this bit is read to be "0". + * reset must complete within 1 second, or * the hardware should be considered fried. 
*/ - ssleep(1); + tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET, + HOST_RESET, 10, 1000); - tmp = readl(mmio + HOST_CTL); if (tmp & HOST_RESET) { dev_printk(KERN_ERR, host->dev, "controller reset failed (0x%x)\n", tmp); @@ -1076,6 +1182,230 @@ static int ahci_reset_controller(struct ata_host *host) return 0; } +static void ahci_sw_activity(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + + if (!(link->flags & ATA_LFLAG_SW_ACTIVITY)) + return; + + emp->activity++; + if (!timer_pending(&emp->timer)) + mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10)); +} + +static void ahci_sw_activity_blink(unsigned long arg) +{ + struct ata_link *link = (struct ata_link *)arg; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + unsigned long led_message = emp->led_state; + u32 activity_led_state; + + led_message &= 0xffff0000; + led_message |= ap->port_no | (link->pmp << 8); + + /* check to see if we've had activity. If so, + * toggle state of LED and reset timer. If not, + * turn LED to desired idle state. + */ + if (emp->saved_activity != emp->activity) { + emp->saved_activity = emp->activity; + /* get the current LED state */ + activity_led_state = led_message & 0x00010000; + + if (activity_led_state) + activity_led_state = 0; + else + activity_led_state = 1; + + /* clear old state */ + led_message &= 0xfff8ffff; + + /* toggle state */ + led_message |= (activity_led_state << 16); + mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100)); + } else { + /* switch to idle */ + led_message &= 0xfff8ffff; + if (emp->blink_policy == BLINK_OFF) + led_message |= (1 << 16); + } + ahci_transmit_led_message(ap, led_message, 4); +} + +static void ahci_init_sw_activity(struct ata_link *link) +{ + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + + /* init activity stats, setup timer */ + emp->saved_activity = emp->activity = 0; + setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link); + + /* check our blink policy and set flag for link if it's enabled */ + if (emp->blink_policy) + link->flags |= ATA_LFLAG_SW_ACTIVITY; +} + +static int ahci_reset_em(struct ata_host *host) +{ + void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; + u32 em_ctl; + + em_ctl = readl(mmio + HOST_EM_CTL); + if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST)) + return -EINVAL; + + writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL); + return 0; +} + +static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, + ssize_t size) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + struct ahci_port_priv *pp = ap->private_data; + void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; + u32 em_ctl; + u32 message[] = {0, 0}; + unsigned long flags; + int pmp; + struct ahci_em_priv *emp; + + /* get the slot number from the message */ + pmp = (state & 0x0000ff00) >> 8; + if (pmp < MAX_SLOTS) + emp = &pp->em_priv[pmp]; + else + return -EINVAL; + + spin_lock_irqsave(ap->lock, flags); + + /* + * if we are still busy transmitting a previous message, + * do not allow + */ + em_ctl = readl(mmio + HOST_EM_CTL); + if (em_ctl & EM_CTL_TM) { + spin_unlock_irqrestore(ap->lock, flags); + return -EINVAL; + } + + /* + * create message header - this is all zero except for + * the message size, which is 4 bytes. 
+ */ + message[0] |= (4 << 8); + + /* ignore 0:4 of byte zero, fill in port info yourself */ + message[1] = ((state & 0xfffffff0) | ap->port_no); + + /* write message to EM_LOC */ + writel(message[0], mmio + hpriv->em_loc); + writel(message[1], mmio + hpriv->em_loc+4); + + /* save off new led state for port/slot */ + emp->led_state = message[1]; + + /* + * tell hardware to transmit the message + */ + writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); + + spin_unlock_irqrestore(ap->lock, flags); + return size; +} + +static ssize_t ahci_led_show(struct ata_port *ap, char *buf) +{ + struct ahci_port_priv *pp = ap->private_data; + struct ata_link *link; + struct ahci_em_priv *emp; + int rc = 0; + + ata_port_for_each_link(link, ap) { + emp = &pp->em_priv[link->pmp]; + rc += sprintf(buf, "%lx\n", emp->led_state); + } + return rc; +} + +static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, + size_t size) +{ + int state; + int pmp; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp; + + state = simple_strtoul(buf, NULL, 0); + + /* get the slot number from the message */ + pmp = (state & 0x0000ff00) >> 8; + if (pmp < MAX_SLOTS) + emp = &pp->em_priv[pmp]; + else + return -EINVAL; + + /* mask off the activity bits if we are in sw_activity + * mode, user should turn off sw_activity before setting + * activity led through em_message + */ + if (emp->blink_policy) + state &= 0xfff8ffff; + + return ahci_transmit_led_message(ap, state, size); +} + +static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val) +{ + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + u32 port_led_state = emp->led_state; + + /* save the desired Activity LED behavior */ + if (val == OFF) { + /* clear LFLAG */ + link->flags &= ~(ATA_LFLAG_SW_ACTIVITY); + + /* set the LED to OFF */ + port_led_state &= 0xfff80000; + port_led_state |= (ap->port_no | (link->pmp << 8)); + ahci_transmit_led_message(ap, port_led_state, 4); + } else { + link->flags |= ATA_LFLAG_SW_ACTIVITY; + if (val == BLINK_OFF) { + /* set LED to ON for idle */ + port_led_state &= 0xfff80000; + port_led_state |= (ap->port_no | (link->pmp << 8)); + port_led_state |= 0x00010000; /* check this */ + ahci_transmit_led_message(ap, port_led_state, 4); + } + } + emp->blink_policy = val; + return 0; +} + +static ssize_t ahci_activity_show(struct ata_device *dev, char *buf) +{ + struct ata_link *link = dev->link; + struct ata_port *ap = link->ap; + struct ahci_port_priv *pp = ap->private_data; + struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + + /* display the saved value of activity behavior for this + * disk. 
+ */ + return sprintf(buf, "%d\n", emp->blink_policy); +} + static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap, int port_no, void __iomem *mmio, void __iomem *port_mmio) @@ -1262,19 +1592,11 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, return 0; } -static int ahci_check_ready(struct ata_link *link) -{ - void __iomem *port_mmio = ahci_port_base(link->ap); - u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; - - return ata_check_ready(status); -} - -static int ahci_softreset(struct ata_link *link, unsigned int *class, - unsigned long deadline) +static int ahci_do_softreset(struct ata_link *link, unsigned int *class, + int pmp, unsigned long deadline, + int (*check_ready)(struct ata_link *link)) { struct ata_port *ap = link->ap; - int pmp = sata_srst_pmp(link); const char *reason = NULL; unsigned long now, msecs; struct ata_taskfile tf; @@ -1312,7 +1634,7 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class, ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); /* wait for link to become ready */ - rc = ata_wait_after_reset(link, deadline, ahci_check_ready); + rc = ata_wait_after_reset(link, deadline, check_ready); /* link occupied, -ENODEV too is an error */ if (rc) { reason = "device not ready"; @@ -1328,6 +1650,72 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class, return rc; } +static int ahci_check_ready(struct ata_link *link) +{ + void __iomem *port_mmio = ahci_port_base(link->ap); + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; + + return ata_check_ready(status); +} + +static int ahci_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int pmp = sata_srst_pmp(link); + + DPRINTK("ENTER\n"); + + return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready); +} + +static int ahci_sb600_check_ready(struct ata_link *link) +{ + void __iomem *port_mmio = ahci_port_base(link->ap); + u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; + u32 irq_status = readl(port_mmio + PORT_IRQ_STAT); + + /* + * There is no need to check TFDATA if BAD PMP is found due to HW bug, + * which can save timeout delay. + */ + if (irq_status & PORT_IRQ_BAD_PMP) + return -EIO; + + return ata_check_ready(status); +} + +static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + void __iomem *port_mmio = ahci_port_base(ap); + int pmp = sata_srst_pmp(link); + int rc; + u32 irq_sts; + + DPRINTK("ENTER\n"); + + rc = ahci_do_softreset(link, class, pmp, deadline, + ahci_sb600_check_ready); + + /* + * Soft reset fails on some ATI chips with IPMS set when PMP + * is enabled but SATA HDD/ODD is connected to SATA port, + * do soft reset again to port 0. + */ + if (rc == -EIO) { + irq_sts = readl(port_mmio + PORT_IRQ_STAT); + if (irq_sts & PORT_IRQ_BAD_PMP) { + ata_link_printk(link, KERN_WARNING, + "failed due to HW bug, retry pmp=0\n"); + rc = ahci_do_softreset(link, class, 0, deadline, + ahci_check_ready); + } + } + + return rc; +} + static int ahci_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { @@ -1679,7 +2067,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance) struct ahci_host_priv *hpriv; unsigned int i, handled = 0; void __iomem *mmio; - u32 irq_stat, irq_ack = 0; + u32 irq_stat, irq_masked; VPRINTK("ENTER\n"); @@ -1688,16 +2076,17 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance) /* sigh. 
0xffffffff is a valid return from h/w */ irq_stat = readl(mmio + HOST_IRQ_STAT); - irq_stat &= hpriv->port_map; if (!irq_stat) return IRQ_NONE; + irq_masked = irq_stat & hpriv->port_map; + spin_lock(&host->lock); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap; - if (!(irq_stat & (1 << i))) + if (!(irq_masked & (1 << i))) continue; ap = host->ports[i]; @@ -1711,14 +2100,20 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance) "interrupt on disabled port %u\n", i); } - irq_ack |= (1 << i); - } - - if (irq_ack) { - writel(irq_ack, mmio + HOST_IRQ_STAT); handled = 1; } + /* HOST_IRQ_STAT behaves as level triggered latch meaning that + * it should be cleared after all the port events are cleared; + * otherwise, it will raise a spurious interrupt after each + * valid one. Please read section 10.6.2 of ahci 1.1 for more + * information. + * + * Also, use the unmasked value to clear interrupt as spurious + * pending event on a dummy port might cause screaming IRQ. + */ + writel(irq_stat, mmio + HOST_IRQ_STAT); + spin_unlock(&host->lock); VPRINTK("EXIT\n"); @@ -1741,7 +2136,8 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) if (qc->tf.protocol == ATA_PROT_NCQ) writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); - readl(port_mmio + PORT_CMD_ISSUE); /* flush */ + + ahci_sw_activity(qc->dev->link); return 0; } @@ -2049,7 +2445,8 @@ static void ahci_print_info(struct ata_host *host) dev_printk(KERN_INFO, &pdev->dev, "flags: " "%s%s%s%s%s%s%s" - "%s%s%s%s%s%s%s\n" + "%s%s%s%s%s%s%s" + "%s\n" , cap & (1 << 31) ? "64bit " : "", @@ -2066,7 +2463,8 @@ static void ahci_print_info(struct ata_host *host) cap & (1 << 17) ? "pmp " : "", cap & (1 << 15) ? "pio " : "", cap & (1 << 14) ? "slum " : "", - cap & (1 << 13) ? "part " : "" + cap & (1 << 13) ? "part " : "", + cap & (1 << 6) ? 
"ems ": "" ); } @@ -2118,7 +2516,8 @@ static void ahci_p5wdh_workaround(struct ata_host *host) static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; - struct ata_port_info pi = ahci_port_info[ent->driver_data]; + unsigned int board_id = ent->driver_data; + struct ata_port_info pi = ahci_port_info[board_id]; const struct ata_port_info *ppi[] = { &pi, NULL }; struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; @@ -2167,6 +2566,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; hpriv->flags |= (unsigned long)pi.private_data; + /* MCP65 revision A1 and A2 can't do MSI */ + if (board_id == board_ahci_mcp65 && + (pdev->revision == 0xa1 || pdev->revision == 0xa2)) + hpriv->flags |= AHCI_HFLAG_NO_MSI; + if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) pci_intx(pdev, 1); @@ -2180,6 +2584,24 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (hpriv->cap & HOST_CAP_PMP) pi.flags |= ATA_FLAG_PMP; + if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) { + u8 messages; + void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; + u32 em_loc = readl(mmio + HOST_EM_LOC); + u32 em_ctl = readl(mmio + HOST_EM_CTL); + + messages = (em_ctl & 0x000f0000) >> 16; + + /* we only support LED message type right now */ + if ((messages & 0x01) && (ahci_em_messages == 1)) { + /* store em_loc */ + hpriv->em_loc = ((em_loc >> 16) * 4); + pi.flags |= ATA_FLAG_EM; + if (!(em_ctl & EM_CTL_ALHD)) + pi.flags |= ATA_FLAG_SW_ACTIVITY; + } + } + /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at @@ -2193,6 +2615,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) host->iomap = pcim_iomap_table(pdev); host->private_data = hpriv; + if (pi.flags & ATA_FLAG_EM) + ahci_reset_em(host); + for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; @@ -2203,6 +2628,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* set initial link pm policy */ ap->pm_policy = NOT_AVAILABLE; + /* set enclosure management message type */ + if (ap->flags & ATA_FLAG_EM) + ap->em_message_type = ahci_em_messages; + + /* disabled/not-implemented port */ if (!(hpriv->port_map & (1 << i))) ap->ops = &ata_dummy_port_ops; diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index a9027b8fbdd..c294121fd69 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -247,10 +247,12 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller 2 IDE (ICH8) */ { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, - /* Mobile SATA Controller IDE (ICH8M) */ - { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* Mobile SATA Controller IDE (ICH8M), Apple */ { 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata }, + { 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata }, + { 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata }, + /* Mobile SATA Controller IDE (ICH8M) */ + { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (ICH9) */ { 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, /* SATA Controller IDE (ICH9) */ @@ -526,7 +528,7 @@ static struct ata_port_info piix_port_info[] = { [ich8m_apple_sata] = { - .flags = PIIX_SATA_FLAGS | 
PIIX_FLAG_SIDPR, + .flags = PIIX_SATA_FLAGS, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = ATA_UDMA6, @@ -573,6 +575,8 @@ static const struct ich_laptop ich_laptop[] = { { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ + { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ + { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ /* end marker */ { 0, } @@ -1040,6 +1044,13 @@ static int piix_broken_suspend(void) }, }, { + .ident = "TECRA M4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"), + }, + }, + { .ident = "TECRA M5", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index dbf6ca781f6..9330b7922f6 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -29,14 +29,16 @@ enum { ATA_ACPI_FILTER_SETXFER = 1 << 0, ATA_ACPI_FILTER_LOCK = 1 << 1, + ATA_ACPI_FILTER_DIPM = 1 << 2, ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER | - ATA_ACPI_FILTER_LOCK, + ATA_ACPI_FILTER_LOCK | + ATA_ACPI_FILTER_DIPM, }; static unsigned int ata_acpi_gtf_filter = ATA_ACPI_FILTER_DEFAULT; module_param_named(acpi_gtf_filter, ata_acpi_gtf_filter, int, 0644); -MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock)"); +MODULE_PARM_DESC(acpi_gtf_filter, "filter mask for ACPI _GTF commands, set to filter out (0x1=set xfermode, 0x2=lock/freeze lock, 0x4=DIPM)"); #define NO_PORT_MULT 0xffff #define SATA_ADR(root, pmp) (((root) << 16) | (pmp)) @@ -118,12 +120,62 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap) ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; } -static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device - *dev, u32 event) +static void ata_acpi_eject_device(acpi_handle handle) +{ + struct acpi_object_list arg_list; + union acpi_object arg; + + arg_list.count = 1; + arg_list.pointer = &arg; + arg.type = ACPI_TYPE_INTEGER; + arg.integer.value = 1; + + if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0", + &arg_list, NULL))) + printk(KERN_ERR "Failed to evaluate _EJ0!\n"); +} + +/* @ap and @dev are the same as ata_acpi_handle_hotplug() */ +static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev) +{ + if (dev) + dev->flags |= ATA_DFLAG_DETACH; + else { + struct ata_link *tlink; + struct ata_device *tdev; + + ata_port_for_each_link(tlink, ap) + ata_link_for_each_dev(tdev, tlink) + tdev->flags |= ATA_DFLAG_DETACH; + } + + ata_port_schedule_eh(ap); +} + +/** + * ata_acpi_handle_hotplug - ACPI event handler backend + * @ap: ATA port ACPI event occurred + * @dev: ATA device ACPI event occurred (can be NULL) + * @event: ACPI event which occurred + * @is_dock_event: boolean indicating whether the event was a dock one + * + * All ACPI bay / device realted events end up in this function. If + * the event is port-wide @dev is NULL. If the event is specific to a + * device, @dev points to it. + * + * Hotplug (as opposed to unplug) notification is always handled as + * port-wide while unplug only kills the target device on device-wide + * event. + * + * LOCKING: + * ACPI notify handler context. May sleep. 
+ */ +static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, + u32 event, int is_dock_event) { char event_string[12]; char *envp[] = { event_string, NULL }; - struct ata_eh_info *ehi; + struct ata_eh_info *ehi = &ap->link.eh_info; struct kobject *kobj = NULL; int wait = 0; unsigned long flags; @@ -131,87 +183,103 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device unsigned long sta; acpi_status status; - if (!ap) - ap = dev->link->ap; - ehi = &ap->link.eh_info; - - spin_lock_irqsave(ap->lock, flags); - - if (dev) + if (dev) { + if (dev->sdev) + kobj = &dev->sdev->sdev_gendev.kobj; handle = dev->acpi_handle; - else + } else { + kobj = &ap->dev->kobj; handle = ap->acpi_handle; + } status = acpi_get_handle(handle, "_EJ0", &tmphandle); - if (ACPI_FAILURE(status)) { - /* This device is not ejectable */ - spin_unlock_irqrestore(ap->lock, flags); + if (ACPI_FAILURE(status)) + /* This device does not support hotplug */ return; - } - status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); - if (ACPI_FAILURE(status)) { - printk ("Unable to determine bay status\n"); - spin_unlock_irqrestore(ap->lock, flags); - return; - } + if (event == ACPI_NOTIFY_BUS_CHECK || + event == ACPI_NOTIFY_DEVICE_CHECK) + status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); + + spin_lock_irqsave(ap->lock, flags); switch (event) { case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_DEVICE_CHECK: ata_ehi_push_desc(ehi, "ACPI event"); - if (!sta) { - /* Device has been unplugged */ - if (dev) - dev->flags |= ATA_DFLAG_DETACH; - else { - struct ata_link *tlink; - struct ata_device *tdev; - - ata_port_for_each_link(tlink, ap) { - ata_link_for_each_dev(tdev, tlink) { - tdev->flags |= - ATA_DFLAG_DETACH; - } - } - } - ata_port_schedule_eh(ap); - wait = 1; - } else { + + if (ACPI_FAILURE(status)) { + ata_port_printk(ap, KERN_ERR, + "acpi: failed to determine bay status (0x%x)\n", + status); + break; + } + + if (sta) { ata_ehi_hotplugged(ehi); ata_port_freeze(ap); + } else { + /* The device has gone - unplug it */ + ata_acpi_detach_device(ap, dev); + wait = 1; } + break; + case ACPI_NOTIFY_EJECT_REQUEST: + ata_ehi_push_desc(ehi, "ACPI event"); + + if (!is_dock_event) + break; + + /* undock event - immediate unplug */ + ata_acpi_detach_device(ap, dev); + wait = 1; + break; } + /* make sure kobj doesn't go away while ap->lock is released */ + kobject_get(kobj); + spin_unlock_irqrestore(ap->lock, flags); - if (wait) + if (wait) { ata_port_wait_eh(ap); + ata_acpi_eject_device(handle); + } - if (dev) { - if (dev->sdev) - kobj = &dev->sdev->sdev_gendev.kobj; - } else - kobj = &ap->dev->kobj; - - if (kobj) { + if (kobj && !is_dock_event) { sprintf(event_string, "BAY_EVENT=%d", event); kobject_uevent_env(kobj, KOBJ_CHANGE, envp); } + + kobject_put(kobj); +} + +static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data) +{ + struct ata_device *dev = data; + + ata_acpi_handle_hotplug(dev->link->ap, dev, event, 1); +} + +static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data) +{ + struct ata_port *ap = data; + + ata_acpi_handle_hotplug(ap, NULL, event, 1); } static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data) { struct ata_device *dev = data; - ata_acpi_handle_hotplug(NULL, dev, event); + ata_acpi_handle_hotplug(dev->link->ap, dev, event, 0); } static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) { struct ata_port *ap = data; - ata_acpi_handle_hotplug(ap, NULL, event); + ata_acpi_handle_hotplug(ap, 
NULL, event, 0); } /** @@ -252,7 +320,7 @@ void ata_acpi_associate(struct ata_host *host) ata_acpi_ap_notify, ap); /* we might be on a docking station */ register_hotplug_dock_device(ap->acpi_handle, - ata_acpi_ap_notify, ap); + ata_acpi_ap_notify_dock, ap); } for (j = 0; j < ata_link_max_devices(&ap->link); j++) { @@ -264,7 +332,7 @@ void ata_acpi_associate(struct ata_host *host) ata_acpi_dev_notify, dev); /* we might be on a docking station */ register_hotplug_dock_device(dev->acpi_handle, - ata_acpi_dev_notify, dev); + ata_acpi_dev_notify_dock, dev); } } } @@ -627,6 +695,14 @@ static int ata_acpi_filter_tf(const struct ata_taskfile *tf, return 1; } + if (ata_acpi_gtf_filter & ATA_ACPI_FILTER_DIPM) { + /* inhibit enabling DIPM */ + if (tf->command == ATA_CMD_SET_FEATURES && + tf->feature == SETFEATURES_SATA_ENABLE && + tf->nsect == SATA_DIPM) + return 1; + } + return 0; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3c89f205c83..5ba96c5052c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -54,7 +54,6 @@ #include <linux/completion.h> #include <linux/suspend.h> #include <linux/workqueue.h> -#include <linux/jiffies.h> #include <linux/scatterlist.h> #include <linux/io.h> #include <scsi/scsi.h> @@ -121,7 +120,7 @@ static char ata_force_param_buf[PAGE_SIZE] __initdata; module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0); MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)"); -int atapi_enabled = 1; +static int atapi_enabled = 1; module_param(atapi_enabled, int, 0444); MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); @@ -145,7 +144,7 @@ static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CF module_param_named(dma, libata_dma_mask, int, 0444); MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)"); -static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ; +static int ata_probe_timeout; module_param(ata_probe_timeout, int, 0444); MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); @@ -1133,6 +1132,8 @@ void ata_id_string(const u16 *id, unsigned char *s, { unsigned int c; + BUG_ON(len & 1); + while (len > 0) { c = id[ofs] >> 8; *s = c; @@ -1166,8 +1167,6 @@ void ata_id_c_string(const u16 *id, unsigned char *s, { unsigned char *p; - WARN_ON(!(len & 1)); - ata_id_string(id, s, ofs, len - 1); p = s + strnlen(s, len - 1); @@ -1533,7 +1532,7 @@ unsigned long ata_id_xfermask(const u16 *id) * @ap: The ata_port to queue port_task for * @fn: workqueue function to be scheduled * @data: data for @fn to use - * @delay: delay time for workqueue function + * @delay: delay time in msecs for workqueue function * * Schedule @fn(@data) for execution after @delay jiffies using * port_task. 
There is one port_task per port and it's the @@ -1552,7 +1551,7 @@ void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay) ap->port_task_data = data; /* may fail if ata_port_flush_task() in progress */ - queue_delayed_work(ata_wq, &ap->port_task, delay); + queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay)); } /** @@ -1612,6 +1611,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, struct ata_link *link = dev->link; struct ata_port *ap = link->ap; u8 command = tf->command; + int auto_timeout = 0; struct ata_queued_cmd *qc; unsigned int tag, preempted_tag; u32 preempted_sactive, preempted_qc_active; @@ -1684,8 +1684,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, spin_unlock_irqrestore(ap->lock, flags); - if (!timeout) - timeout = ata_probe_timeout * 1000 / HZ; + if (!timeout) { + if (ata_probe_timeout) + timeout = ata_probe_timeout * 1000; + else { + timeout = ata_internal_cmd_timeout(dev, command); + auto_timeout = 1; + } + } rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); @@ -1761,6 +1767,9 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, spin_unlock_irqrestore(ap->lock, flags); + if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) + ata_internal_cmd_timed_out(dev, command); + return err_mask; } @@ -1877,6 +1886,23 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) } /** + * ata_do_dev_read_id - default ID read method + * @dev: device + * @tf: proposed taskfile + * @id: data buffer + * + * Issue the identify taskfile and hand back the buffer containing + * identify data. For some RAID controllers and for pre ATA devices + * this function is wrapped or replaced by the driver + */ +unsigned int ata_do_dev_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id) +{ + return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE, + id, sizeof(id[0]) * ATA_ID_WORDS, 0); +} + +/** * ata_dev_read_id - Read ID data from the specified device * @dev: target device * @p_class: pointer to class of the target device (may be changed) @@ -1911,7 +1937,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, if (ata_msg_ctl(ap)) ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); - retry: +retry: ata_tf_init(dev, &tf); switch (class) { @@ -1939,8 +1965,11 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, */ tf.flags |= ATA_TFLAG_POLLING; - err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, - id, sizeof(id[0]) * ATA_ID_WORDS, 0); + if (ap->ops->read_id) + err_mask = ap->ops->read_id(dev, &tf, id); + else + err_mask = ata_do_dev_read_id(dev, &tf, id); + if (err_mask) { if (err_mask & AC_ERR_NODEV_HINT) { ata_dev_printk(dev, KERN_DEBUG, @@ -2133,6 +2162,16 @@ int ata_dev_configure(struct ata_device *dev) return 0; } + if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && + dev->class == ATA_DEV_ATAPI) { + ata_dev_printk(dev, KERN_WARNING, + "WARNING: ATAPI is %s, device ignored.\n", + atapi_enabled ? 
"not supported with this driver" + : "disabled"); + ata_dev_disable(dev); + return 0; + } + /* let ACPI work its magic */ rc = ata_acpi_on_devcfg(dev); if (rc) @@ -3319,7 +3358,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)) { unsigned long start = jiffies; - unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT; + unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); int warned = 0; if (time_after(nodev_deadline, deadline)) @@ -3387,7 +3426,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)) { - msleep(ATA_WAIT_AFTER_RESET_MSECS); + msleep(ATA_WAIT_AFTER_RESET); return ata_wait_ready(link, deadline, check_ready); } @@ -3417,13 +3456,13 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, int sata_link_debounce(struct ata_link *link, const unsigned long *params, unsigned long deadline) { - unsigned long interval_msec = params[0]; - unsigned long duration = msecs_to_jiffies(params[1]); + unsigned long interval = params[0]; + unsigned long duration = params[1]; unsigned long last_jiffies, t; u32 last, cur; int rc; - t = jiffies + msecs_to_jiffies(params[2]); + t = ata_deadline(jiffies, params[2]); if (time_before(t, deadline)) deadline = t; @@ -3435,7 +3474,7 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, last_jiffies = jiffies; while (1) { - msleep(interval_msec); + msleep(interval); if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) return rc; cur &= 0xf; @@ -3444,7 +3483,8 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, if (cur == last) { if (cur == 1 && time_before(jiffies, deadline)) continue; - if (time_after(jiffies, last_jiffies + duration)) + if (time_after(jiffies, + ata_deadline(last_jiffies, duration))) return 0; continue; } @@ -3636,7 +3676,8 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, if (check_ready) { unsigned long pmp_deadline; - pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT; + pmp_deadline = ata_deadline(jiffies, + ATA_TMOUT_PMP_SRST_WAIT); if (time_after(pmp_deadline, deadline)) pmp_deadline = deadline; ata_wait_ready(link, pmp_deadline, check_ready); @@ -4297,7 +4338,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc) } /** - * ata_check_atapi_dma - Check whether ATAPI DMA can be supported + * atapi_check_dma - Check whether ATAPI DMA can be supported * @qc: Metadata associated with taskfile to check * * Allow low-level driver to filter ATA PACKET commands, returning @@ -4310,7 +4351,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc) * RETURNS: 0 when ATAPI DMA can be used * nonzero otherwise */ -int ata_check_atapi_dma(struct ata_queued_cmd *qc) +int atapi_check_dma(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; @@ -5403,7 +5444,7 @@ static void ata_host_stop(struct device *gendev, void *res) */ static void ata_finalize_port_ops(struct ata_port_operations *ops) { - static spinlock_t lock = SPIN_LOCK_UNLOCKED; + static DEFINE_SPINLOCK(lock); const struct ata_port_operations *cur; void **begin = (void **)ops; void **end = (void **)&ops->inherits; @@ -6073,22 +6114,24 @@ static void __init ata_parse_force_param(void) static int __init ata_init(void) { - ata_probe_timeout *= HZ; - ata_parse_force_param(); ata_wq = create_workqueue("ata"); if (!ata_wq) - return -ENOMEM; + goto free_force_tbl; ata_aux_wq = 
create_singlethread_workqueue("ata_aux"); - if (!ata_aux_wq) { - destroy_workqueue(ata_wq); - return -ENOMEM; - } + if (!ata_aux_wq) + goto free_wq; printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); return 0; + +free_wq: + destroy_workqueue(ata_wq); +free_force_tbl: + kfree(ata_force_tbl); + return -ENOMEM; } static void __exit ata_exit(void) @@ -6127,8 +6170,8 @@ int ata_ratelimit(void) * @reg: IO-mapped register * @mask: Mask to apply to read register value * @val: Wait condition - * @interval_msec: polling interval in milliseconds - * @timeout_msec: timeout in milliseconds + * @interval: polling interval in milliseconds + * @timeout: timeout in milliseconds * * Waiting for some bits of register to change is a common * operation for ATA controllers. This function reads 32bit LE @@ -6146,10 +6189,9 @@ int ata_ratelimit(void) * The final register value. */ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, - unsigned long interval_msec, - unsigned long timeout_msec) + unsigned long interval, unsigned long timeout) { - unsigned long timeout; + unsigned long deadline; u32 tmp; tmp = ioread32(reg); @@ -6158,10 +6200,10 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, * preceding writes reach the controller before starting to * eat away the timeout. */ - timeout = jiffies + (timeout_msec * HZ) / 1000; + deadline = ata_deadline(jiffies, timeout); - while ((tmp & mask) == val && time_before(jiffies, timeout)) { - msleep(interval_msec); + while ((tmp & mask) == val && time_before(jiffies, deadline)) { + msleep(interval); tmp = ioread32(reg); } @@ -6261,6 +6303,7 @@ EXPORT_SYMBOL_GPL(ata_host_resume); #endif /* CONFIG_PM */ EXPORT_SYMBOL_GPL(ata_id_string); EXPORT_SYMBOL_GPL(ata_id_c_string); +EXPORT_SYMBOL_GPL(ata_do_dev_read_id); EXPORT_SYMBOL_GPL(ata_scsi_simulate); EXPORT_SYMBOL_GPL(ata_pio_need_iordy); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 7894d83ea1e..58bdc538d22 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -66,15 +66,19 @@ enum { ATA_ECAT_DUBIOUS_TOUT_HSM = 6, ATA_ECAT_DUBIOUS_UNK_DEV = 7, ATA_ECAT_NR = 8, -}; -/* Waiting in ->prereset can never be reliable. It's sometimes nice - * to wait there but it can't be depended upon; otherwise, we wouldn't - * be resetting. Just give it enough time for most drives to spin up. - */ -enum { - ATA_EH_PRERESET_TIMEOUT = 10 * HZ, - ATA_EH_FASTDRAIN_INTERVAL = 3 * HZ, + ATA_EH_CMD_DFL_TIMEOUT = 5000, + + /* always put at least this amount of time between resets */ + ATA_EH_RESET_COOL_DOWN = 5000, + + /* Waiting in ->prereset can never be reliable. It's + * sometimes nice to wait there but it can't be depended upon; + * otherwise, we wouldn't be resetting. Just give it enough + * time for most drives to spin up. + */ + ATA_EH_PRERESET_TIMEOUT = 10000, + ATA_EH_FASTDRAIN_INTERVAL = 3000, }; /* The following table determines how we sequence resets. Each entry @@ -84,12 +88,59 @@ enum { * are mostly for error handling, hotplug and retarded devices. 
*/ static const unsigned long ata_eh_reset_timeouts[] = { - 10 * HZ, /* most drives spin up by 10sec */ - 10 * HZ, /* > 99% working drives spin up before 20sec */ - 35 * HZ, /* give > 30 secs of idleness for retarded devices */ - 5 * HZ, /* and sweet one last chance */ - /* > 1 min has elapsed, give up */ + 10000, /* most drives spin up by 10sec */ + 10000, /* > 99% working drives spin up before 20sec */ + 35000, /* give > 30 secs of idleness for retarded devices */ + 5000, /* and sweet one last chance */ + ULONG_MAX, /* > 1 min has elapsed, give up */ +}; + +static const unsigned long ata_eh_identify_timeouts[] = { + 5000, /* covers > 99% of successes and not too boring on failures */ + 10000, /* combined time till here is enough even for media access */ + 30000, /* for true idiots */ + ULONG_MAX, +}; + +static const unsigned long ata_eh_other_timeouts[] = { + 5000, /* same rationale as identify timeout */ + 10000, /* ditto */ + /* but no merciful 30sec for other commands, it just isn't worth it */ + ULONG_MAX, +}; + +struct ata_eh_cmd_timeout_ent { + const u8 *commands; + const unsigned long *timeouts; +}; + +/* The following table determines timeouts to use for EH internal + * commands. Each table entry is a command class and matches the + * commands the entry applies to and the timeout table to use. + * + * On the retry after a command timed out, the next timeout value from + * the table is used. If the table doesn't contain further entries, + * the last value is used. + * + * ehc->cmd_timeout_idx keeps track of which timeout to use per + * command class, so if SET_FEATURES times out on the first try, the + * next try will use the second timeout value only for that class. + */ +#define CMDS(cmds...) (const u8 []){ cmds, 0 } +static const struct ata_eh_cmd_timeout_ent +ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { + { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), + .timeouts = ata_eh_identify_timeouts, }, + { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), + .timeouts = ata_eh_other_timeouts, }, + { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), + .timeouts = ata_eh_other_timeouts, }, + { .commands = CMDS(ATA_CMD_SET_FEATURES), + .timeouts = ata_eh_other_timeouts, }, + { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), + .timeouts = ata_eh_other_timeouts, }, }; +#undef CMDS static void __ata_port_freeze(struct ata_port *ap); #ifdef CONFIG_PM @@ -236,6 +287,73 @@ void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, #endif /* CONFIG_PCI */ +static int ata_lookup_timeout_table(u8 cmd) +{ + int i; + + for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { + const u8 *cur; + + for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) + if (*cur == cmd) + return i; + } + + return -1; +} + +/** + * ata_internal_cmd_timeout - determine timeout for an internal command + * @dev: target device + * @cmd: internal command to be issued + * + * Determine timeout for internal command @cmd for @dev. + * + * LOCKING: + * EH context. + * + * RETURNS: + * Determined timeout. 
+ */ +unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) +{ + struct ata_eh_context *ehc = &dev->link->eh_context; + int ent = ata_lookup_timeout_table(cmd); + int idx; + + if (ent < 0) + return ATA_EH_CMD_DFL_TIMEOUT; + + idx = ehc->cmd_timeout_idx[dev->devno][ent]; + return ata_eh_cmd_timeout_table[ent].timeouts[idx]; +} + +/** + * ata_internal_cmd_timed_out - notification for internal command timeout + * @dev: target device + * @cmd: internal command which timed out + * + * Notify EH that internal command @cmd for @dev timed out. This + * function should be called only for commands whose timeouts are + * determined using ata_internal_cmd_timeout(). + * + * LOCKING: + * EH context. + */ +void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) +{ + struct ata_eh_context *ehc = &dev->link->eh_context; + int ent = ata_lookup_timeout_table(cmd); + int idx; + + if (ent < 0) + return; + + idx = ehc->cmd_timeout_idx[dev->devno][ent]; + if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) + ehc->cmd_timeout_idx[dev->devno][ent]++; +} + static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, unsigned int err_mask) { @@ -486,6 +604,9 @@ void ata_scsi_error(struct Scsi_Host *host) if (ata_ncq_enabled(dev)) ehc->saved_ncq_enabled |= 1 << devno; } + + /* set last reset timestamp to some time in the past */ + ehc->last_reset = jiffies - 60 * HZ; } ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; @@ -641,7 +762,7 @@ void ata_eh_fastdrain_timerfn(unsigned long arg) /* some qcs have finished, give it another chance */ ap->fastdrain_cnt = cnt; ap->fastdrain_timer.expires = - jiffies + ATA_EH_FASTDRAIN_INTERVAL; + ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); add_timer(&ap->fastdrain_timer); } @@ -681,7 +802,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) /* activate fast drain */ ap->fastdrain_cnt = cnt; - ap->fastdrain_timer.expires = jiffies + ATA_EH_FASTDRAIN_INTERVAL; + ap->fastdrain_timer.expires = + ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); add_timer(&ap->fastdrain_timer); } @@ -1238,6 +1360,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev, * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE * @dev: device to perform REQUEST_SENSE to * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) + * @dfl_sense_key: default sense key to use * * Perform ATAPI REQUEST_SENSE after the device reported CHECK * SENSE. This function is EH helper. 
@@ -1248,13 +1371,13 @@ static int ata_eh_read_log_10h(struct ata_device *dev, * RETURNS: * 0 on success, AC_ERR_* mask on failure */ -static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc) +static unsigned int atapi_eh_request_sense(struct ata_device *dev, + u8 *sense_buf, u8 dfl_sense_key) { - struct ata_device *dev = qc->dev; - unsigned char *sense_buf = qc->scsicmd->sense_buffer; + u8 cdb[ATAPI_CDB_LEN] = + { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; struct ata_port *ap = dev->link->ap; struct ata_taskfile tf; - u8 cdb[ATAPI_CDB_LEN]; DPRINTK("ATAPI request sense\n"); @@ -1265,15 +1388,11 @@ static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc) * for the case where they are -not- overwritten */ sense_buf[0] = 0x70; - sense_buf[2] = qc->result_tf.feature >> 4; + sense_buf[2] = dfl_sense_key; /* some devices time out if garbage left in tf */ ata_tf_init(dev, &tf); - memset(cdb, 0, ATAPI_CDB_LEN); - cdb[0] = REQUEST_SENSE; - cdb[4] = SCSI_SENSE_BUFFERSIZE; - tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.command = ATA_CMD_PACKET; @@ -1445,7 +1564,9 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, case ATA_DEV_ATAPI: if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { - tmp = atapi_eh_request_sense(qc); + tmp = atapi_eh_request_sense(qc->dev, + qc->scsicmd->sense_buffer, + qc->result_tf.feature >> 4); if (!tmp) { /* ATA_QCFLAG_SENSE_VALID is used to * tell atapi_qc_complete() that sense @@ -2071,13 +2192,12 @@ int ata_eh_reset(struct ata_link *link, int classify, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) { - const int max_tries = ARRAY_SIZE(ata_eh_reset_timeouts); struct ata_port *ap = link->ap; struct ata_eh_context *ehc = &link->eh_context; unsigned int *classes = ehc->classes; unsigned int lflags = link->flags; int verbose = !(ehc->i.flags & ATA_EHI_QUIET); - int try = 0; + int max_tries = 0, try = 0; struct ata_device *dev; unsigned long deadline, now; ata_reset_fn_t reset; @@ -2088,11 +2208,20 @@ int ata_eh_reset(struct ata_link *link, int classify, /* * Prepare to reset */ + while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) + max_tries++; + + now = jiffies; + deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN); + if (time_before(now, deadline)) + schedule_timeout_uninterruptible(deadline - now); + spin_lock_irqsave(ap->lock, flags); ap->pflags |= ATA_PFLAG_RESETTING; spin_unlock_irqrestore(ap->lock, flags); ata_eh_about_to_do(link, NULL, ATA_EH_RESET); + ehc->last_reset = jiffies; ata_link_for_each_dev(dev, link) { /* If we issue an SRST then an ATA drive (not ATAPI) @@ -2125,7 +2254,8 @@ int ata_eh_reset(struct ata_link *link, int classify, } if (prereset) { - rc = prereset(link, jiffies + ATA_EH_PRERESET_TIMEOUT); + rc = prereset(link, + ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT)); if (rc) { if (rc == -ENOENT) { ata_link_printk(link, KERN_DEBUG, @@ -2157,10 +2287,11 @@ int ata_eh_reset(struct ata_link *link, int classify, /* * Perform reset */ + ehc->last_reset = jiffies; if (ata_is_host_link(link)) ata_eh_freeze_port(ap); - deadline = jiffies + ata_eh_reset_timeouts[try++]; + deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); if (reset) { if (verbose) @@ -2277,6 +2408,7 @@ int ata_eh_reset(struct ata_link *link, int classify, /* reset successful, schedule revalidation */ ata_eh_done(link, NULL, ATA_EH_RESET); + ehc->last_reset = jiffies; ehc->i.action |= ATA_EH_REVALIDATE; rc = 0; @@ -2303,9 +2435,9 @@ int 
ata_eh_reset(struct ata_link *link, int classify, if (time_before(now, deadline)) { unsigned long delta = deadline - now; - ata_link_printk(link, KERN_WARNING, "reset failed " - "(errno=%d), retrying in %u secs\n", - rc, (jiffies_to_msecs(delta) + 999) / 1000); + ata_link_printk(link, KERN_WARNING, + "reset failed (errno=%d), retrying in %u secs\n", + rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); while (delta) delta = schedule_timeout_uninterruptible(delta); @@ -2583,8 +2715,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) ata_eh_detach_dev(dev); /* schedule probe if necessary */ - if (ata_eh_schedule_probe(dev)) + if (ata_eh_schedule_probe(dev)) { ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; + memset(ehc->cmd_timeout_idx[dev->devno], 0, + sizeof(ehc->cmd_timeout_idx[dev->devno])); + } return 1; } else { @@ -2622,7 +2757,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, { struct ata_link *link; struct ata_device *dev; - int nr_failed_devs, nr_disabled_devs; + int nr_failed_devs; int rc; unsigned long flags; @@ -2665,7 +2800,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, retry: rc = 0; nr_failed_devs = 0; - nr_disabled_devs = 0; /* if UNLOADING, finish immediately */ if (ap->pflags & ATA_PFLAG_UNLOADING) @@ -2732,8 +2866,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, dev_fail: nr_failed_devs++; - if (ata_eh_handle_dev_fail(dev, rc)) - nr_disabled_devs++; + ata_eh_handle_dev_fail(dev, rc); if (ap->pflags & ATA_PFLAG_FROZEN) { /* PMP reset requires working host port. @@ -2745,18 +2878,8 @@ dev_fail: } } - if (nr_failed_devs) { - if (nr_failed_devs != nr_disabled_devs) { - ata_port_printk(ap, KERN_WARNING, "failed to recover " - "some devices, retrying in 5 secs\n"); - ssleep(5); - } else { - /* no device left to recover, repeat fast */ - msleep(500); - } - + if (nr_failed_devs) goto retry; - } out: if (rc && r_failed_link) diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 0f9386d4a5a..b65db309c18 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -322,9 +322,12 @@ static void sata_pmp_quirks(struct ata_port *ap) if (vendor == 0x1095 && devid == 0x3726) { /* sil3726 quirks */ ata_port_for_each_link(link, ap) { - /* class code report is unreliable */ + /* Class code report is unreliable and SRST + * times out under certain configurations. + */ if (link->pmp < 5) - link->flags |= ATA_LFLAG_ASSUME_ATA; + link->flags |= ATA_LFLAG_NO_SRST | + ATA_LFLAG_ASSUME_ATA; /* port 5 is for SEMB device and it doesn't like SRST */ if (link->pmp == 5) @@ -724,19 +727,12 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap, } if (tries) { - int sleep = ehc->i.flags & ATA_EHI_DID_RESET; - /* consecutive revalidation failures? speed down */ if (reval_failed) sata_down_spd_limit(link); else reval_failed = 1; - ata_dev_printk(dev, KERN_WARNING, - "retrying reset%s\n", - sleep ? " in 5 secs" : ""); - if (sleep) - ssleep(5); ehc->i.action |= ATA_EH_RESET; goto retry; } else { @@ -782,7 +778,8 @@ static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap) * SError.N working. 
*/ sata_link_hardreset(link, sata_deb_timing_normal, - jiffies + ATA_TMOUT_INTERNAL_QUICK, NULL, NULL); + ata_deadline(jiffies, ATA_TMOUT_INTERNAL_QUICK), + NULL, NULL); /* unconditionally clear SError.N */ rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); @@ -987,10 +984,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap) goto retry; if (--pmp_tries) { - ata_port_printk(ap, KERN_WARNING, - "failed to recover PMP, retrying in 5 secs\n"); pmp_ehc->i.action |= ATA_EH_RESET; - ssleep(5); goto retry; } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index aeb6e01d82c..b9d3ba423cb 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -190,6 +190,85 @@ static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); } +static ssize_t +ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM)) + return ap->ops->em_store(ap, buf, count); + return -EINVAL; +} + +static ssize_t +ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + + if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM)) + return ap->ops->em_show(ap, buf); + return -EINVAL; +} +DEVICE_ATTR(em_message, S_IRUGO | S_IWUGO, + ata_scsi_em_message_show, ata_scsi_em_message_store); +EXPORT_SYMBOL_GPL(dev_attr_em_message); + +static ssize_t +ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + + return snprintf(buf, 23, "%d\n", ap->em_message_type); +} +DEVICE_ATTR(em_message_type, S_IRUGO, + ata_scsi_em_message_type_show, NULL); +EXPORT_SYMBOL_GPL(dev_attr_em_message_type); + +static ssize_t +ata_scsi_activity_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); + + if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY)) + return ap->ops->sw_activity_show(atadev, buf); + return -EINVAL; +} + +static ssize_t +ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); + enum sw_activity val; + int rc; + + if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) { + val = simple_strtoul(buf, NULL, 0); + switch (val) { + case OFF: case BLINK_ON: case BLINK_OFF: + rc = ap->ops->sw_activity_store(atadev, val); + if (!rc) + return count; + else + return rc; + } + } + return -EINVAL; +} +DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show, + ata_scsi_activity_store); +EXPORT_SYMBOL_GPL(dev_attr_sw_activity); + static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { @@ -885,7 +964,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, /* set the min alignment and padding */ blk_queue_update_dma_alignment(sdev->request_queue, ATA_DMA_PAD_SZ - 1); - 
blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1); + blk_queue_update_dma_pad(sdev->request_queue, + ATA_DMA_PAD_SZ - 1); /* configure draining */ buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); @@ -1637,6 +1717,7 @@ defer: /** * ata_scsi_rbuf_get - Map response buffer. + * @cmd: SCSI command containing buffer to be mapped. * @flags: unsigned long variable to store irq enable status * @copy_in: copy in from user buffer * @@ -1777,7 +1858,9 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf) const u8 pages[] = { 0x00, /* page 0x00, this page */ 0x80, /* page 0x80, unit serial no page */ - 0x83 /* page 0x83, device ident page */ + 0x83, /* page 0x83, device ident page */ + 0x89, /* page 0x89, ata info page */ + 0xb1, /* page 0xb1, block device characteristics page */ }; rbuf[3] = sizeof(pages); /* number of supported VPD pages */ @@ -1898,6 +1981,19 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) return 0; } +static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf) +{ + rbuf[1] = 0xb1; + rbuf[3] = 0x3c; + if (ata_id_major_version(args->id) > 7) { + rbuf[4] = args->id[217] >> 8; + rbuf[5] = args->id[217]; + rbuf[7] = args->id[168] & 0xf; + } + + return 0; +} + /** * ata_scsiop_noop - Command handler that simply returns success. * @args: device IDENTIFY data / SCSI command of interest. @@ -1954,7 +2050,7 @@ static unsigned int ata_msense_ctl_mode(u8 *buf) /** * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page - * @bufp: output buffer + * @buf: output buffer * * Generate a generic MODE SENSE r/w error recovery page. * @@ -2342,8 +2438,8 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; struct ata_device *dev = qc->dev; - int using_pio = (dev->flags & ATA_DFLAG_PIO); int nodata = (scmd->sc_data_direction == DMA_NONE); + int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO); unsigned int nbytes; memset(qc->cdb, 0, dev->cdb_len); @@ -2361,7 +2457,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) ata_qc_set_pc_nbytes(qc); /* check whether ATAPI DMA is safe */ - if (!using_pio && ata_check_atapi_dma(qc)) + if (!nodata && !using_pio && atapi_check_dma(qc)) using_pio = 1; /* Some controller variants snoop this value for Packet @@ -2401,13 +2497,11 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) qc->tf.lbam = (nbytes & 0xFF); qc->tf.lbah = (nbytes >> 8); - if (using_pio || nodata) { - /* no data, or PIO data xfer */ - if (nodata) - qc->tf.protocol = ATAPI_PROT_NODATA; - else - qc->tf.protocol = ATAPI_PROT_PIO; - } else { + if (nodata) + qc->tf.protocol = ATAPI_PROT_NODATA; + else if (using_pio) + qc->tf.protocol = ATAPI_PROT_PIO; + else { /* DMA data xfer */ qc->tf.protocol = ATAPI_PROT_DMA; qc->tf.feature |= ATAPI_PKT_DMA; @@ -2457,36 +2551,6 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, } /** - * ata_scsi_dev_enabled - determine if device is enabled - * @dev: ATA device - * - * Determine if commands should be sent to the specified device. 
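The new ata_scsiop_inq_b1() above fills the SCSI Block Device Characteristics VPD page straight from IDENTIFY words 217 and 168. A hedged consumer-side sketch of what those words are assumed to carry (per a reading of ATA8-ACS; the helper names here are illustrative, not existing libata API):

static bool ata_id_is_non_rotational(const u16 *id)
{
	return id[217] == 0x0001;	/* nominal media rotation rate: non-rotating */
}

static u8 ata_id_form_factor(const u16 *id)
{
	return id[168] & 0xf;		/* 1 = 5.25", 2 = 3.5", 3 = 2.5", 4 = 1.8" */
}

This matches what the page reports: rbuf[4]/rbuf[5] form the big-endian rotation rate and rbuf[7] carries the form factor nibble.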
- * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * 0 if commands are not allowed / 1 if commands are allowed - */ - -static int ata_scsi_dev_enabled(struct ata_device *dev) -{ - if (unlikely(!ata_dev_enabled(dev))) - return 0; - - if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) { - if (unlikely(dev->class == ATA_DEV_ATAPI)) { - ata_dev_printk(dev, KERN_WARNING, - "WARNING: ATAPI is %s, device ignored.\n", - atapi_enabled ? "not supported with this driver" : "disabled"); - return 0; - } - } - - return 1; -} - -/** * ata_scsi_find_dev - lookup ata_device from scsi_cmnd * @ap: ATA port to which the device is attached * @scsidev: SCSI device from which we derive the ATA device @@ -2507,7 +2571,7 @@ ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) { struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); - if (unlikely(!dev || !ata_scsi_dev_enabled(dev))) + if (unlikely(!dev || !ata_dev_enabled(dev))) return NULL; return dev; @@ -2921,6 +2985,9 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, case 0x89: ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); break; + case 0xb1: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1); + break; default: ata_scsi_invalid_field(cmd, done); break; @@ -3525,7 +3592,7 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), ata_scsi_dump_cdb(ap, cmd); - if (likely(ata_scsi_dev_enabled(ap->link.device))) + if (likely(ata_dev_enabled(ap->link.device))) rc = __ata_scsi_queuecmd(cmd, done, ap->link.device); else { cmd->result = (DID_BAD_TARGET << 16); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 3c2d2289f85..304fdc6f1dc 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -247,7 +247,7 @@ u8 ata_sff_check_status(struct ata_port *ap) * LOCKING: * Inherited from caller. */ -u8 ata_sff_altstatus(struct ata_port *ap) +static u8 ata_sff_altstatus(struct ata_port *ap) { if (ap->ops->sff_check_altstatus) return ap->ops->sff_check_altstatus(ap); @@ -256,10 +256,97 @@ u8 ata_sff_altstatus(struct ata_port *ap) } /** + * ata_sff_irq_status - Check if the device is busy + * @ap: port where the device is + * + * Determine if the port is currently busy. Uses altstatus + * if available in order to avoid clearing shared IRQ status + * when finding an IRQ source. Non ctl capable devices don't + * share interrupt lines fortunately for us. + * + * LOCKING: + * Inherited from caller. + */ +static u8 ata_sff_irq_status(struct ata_port *ap) +{ + u8 status; + + if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { + status = ata_sff_altstatus(ap); + /* Not us: We are busy */ + if (status & ATA_BUSY) + return status; + } + /* Clear INTRQ latch */ + status = ap->ops->sff_check_status(ap); + return status; +} + +/** + * ata_sff_sync - Flush writes + * @ap: Port to wait for. + * + * CAUTION: + * If we have an mmio device with no ctl and no altstatus + * method this will fail. No such devices are known to exist. + * + * LOCKING: + * Inherited from caller. + */ + +static void ata_sff_sync(struct ata_port *ap) +{ + if (ap->ops->sff_check_altstatus) + ap->ops->sff_check_altstatus(ap); + else if (ap->ioaddr.altstatus_addr) + ioread8(ap->ioaddr.altstatus_addr); +} + +/** + * ata_sff_pause - Flush writes and wait 400nS + * @ap: Port to pause for. + * + * CAUTION: + * If we have an mmio device with no ctl and no altstatus + * method this will fail. No such devices are known to exist. + * + * LOCKING: + * Inherited from caller. 
+ */ + +void ata_sff_pause(struct ata_port *ap) +{ + ata_sff_sync(ap); + ndelay(400); +} + +/** + * ata_sff_dma_pause - Pause before commencing DMA + * @ap: Port to pause for. + * + * Perform I/O fencing and ensure sufficient cycle delays occur + * for the HDMA1:0 transition + */ + +void ata_sff_dma_pause(struct ata_port *ap) +{ + if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { + /* An altstatus read will cause the needed delay without + messing up the IRQ status */ + ata_sff_altstatus(ap); + return; + } + /* There are no DMA controllers without ctl. BUG here to ensure + we never violate the HDMA1:0 transition timing and risk + corruption. */ + BUG(); +} + +/** * ata_sff_busy_sleep - sleep until BSY clears, or timeout * @ap: port containing status register to be polled - * @tmout_pat: impatience timeout - * @tmout: overall timeout + * @tmout_pat: impatience timeout in msecs + * @tmout: overall timeout in msecs * * Sleep until ATA Status register bit BSY clears, * or a timeout occurs. @@ -278,7 +365,7 @@ int ata_sff_busy_sleep(struct ata_port *ap, status = ata_sff_busy_wait(ap, ATA_BUSY, 300); timer_start = jiffies; - timeout = timer_start + tmout_pat; + timeout = ata_deadline(timer_start, tmout_pat); while (status != 0xff && (status & ATA_BUSY) && time_before(jiffies, timeout)) { msleep(50); @@ -290,7 +377,7 @@ int ata_sff_busy_sleep(struct ata_port *ap, "port is slow to respond, please be patient " "(Status 0x%x)\n", status); - timeout = timer_start + tmout; + timeout = ata_deadline(timer_start, tmout); while (status != 0xff && (status & ATA_BUSY) && time_before(jiffies, timeout)) { msleep(50); @@ -303,7 +390,7 @@ int ata_sff_busy_sleep(struct ata_port *ap, if (status & ATA_BUSY) { ata_port_printk(ap, KERN_ERR, "port failed to respond " "(%lu secs, Status 0x%x)\n", - tmout / HZ, status); + DIV_ROUND_UP(tmout, 1000), status); return -EBUSY; } @@ -742,7 +829,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc) } else ata_pio_sector(qc); - ata_sff_altstatus(qc->ap); /* flush */ + ata_sff_sync(qc->ap); /* flush */ } /** @@ -763,8 +850,9 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) WARN_ON(qc->dev->cdb_len < 12); ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); - ata_sff_altstatus(ap); /* flush */ - + ata_sff_sync(ap); + /* FIXME: If the CDB is for DMA do we need to do the transition delay + or is bmdma_start guaranteed to do it ? */ switch (qc->tf.protocol) { case ATAPI_PROT_PIO: ap->hsm_task_state = HSM_ST; @@ -905,7 +993,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) if (unlikely(__atapi_pio_bytes(qc, bytes))) goto err_out; - ata_sff_altstatus(ap); /* flush */ + ata_sff_sync(ap); /* flush */ return; @@ -1006,6 +1094,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, u8 status, int in_wq) { + struct ata_eh_info *ehi = &ap->link.eh_info; unsigned long flags = 0; int poll_next; @@ -1037,9 +1126,12 @@ fsm_start: if (likely(status & (ATA_ERR | ATA_DF))) /* device stops HSM for abort/error */ qc->err_mask |= AC_ERR_DEV; - else + else { /* HSM violation. Let EH handle this */ + ata_ehi_push_desc(ehi, + "ST_FIRST: !(DRQ|ERR|DF)"); qc->err_mask |= AC_ERR_HSM; + } ap->hsm_task_state = HSM_ST_ERR; goto fsm_start; @@ -1058,9 +1150,9 @@ fsm_start: * the CDB. 
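Stepping back to the flush helpers introduced earlier in this file: ata_sff_altstatus() is now static, so drivers that used it as a posted-write flush or HDMA1:0 delay (pata_icside, pata_scc and pata_rb532 later in this diff) switch to ata_sff_dma_pause()/ata_sff_sync(). A hedged sketch of the same conversion for a hypothetical out-of-tree SFF driver, mirroring the ata_bmdma_stop() hunk below:

static void example_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle wait for the HDMA1:0 transition; ata_sff_dma_pause()
	 * reads the alternate status register, so a pending INTRQ is not
	 * cleared as a side effect (unlike a plain status read). */
	ata_sff_dma_pause(ap);
}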
*/ if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { - ata_port_printk(ap, KERN_WARNING, - "DRQ=1 with device error, " - "dev_stat 0x%X\n", status); + ata_ehi_push_desc(ehi, "ST_FIRST: " + "DRQ=1 with device error, " + "dev_stat 0x%X", status); qc->err_mask |= AC_ERR_HSM; ap->hsm_task_state = HSM_ST_ERR; goto fsm_start; @@ -1117,9 +1209,9 @@ fsm_start: * let the EH abort the command or reset the device. */ if (unlikely(status & (ATA_ERR | ATA_DF))) { - ata_port_printk(ap, KERN_WARNING, "DRQ=1 with " - "device error, dev_stat 0x%X\n", - status); + ata_ehi_push_desc(ehi, "ST-ATAPI: " + "DRQ=1 with device error, " + "dev_stat 0x%X", status); qc->err_mask |= AC_ERR_HSM; ap->hsm_task_state = HSM_ST_ERR; goto fsm_start; @@ -1138,13 +1230,17 @@ fsm_start: if (likely(status & (ATA_ERR | ATA_DF))) /* device stops HSM for abort/error */ qc->err_mask |= AC_ERR_DEV; - else + else { /* HSM violation. Let EH handle this. * Phantom devices also trigger this * condition. Mark hint. */ + ata_ehi_push_desc(ehi, "ST-ATA: " + "DRQ=1 with device error, " + "dev_stat 0x%X", status); qc->err_mask |= AC_ERR_HSM | AC_ERR_NODEV_HINT; + } ap->hsm_task_state = HSM_ST_ERR; goto fsm_start; @@ -1169,8 +1265,12 @@ fsm_start: status = ata_wait_idle(ap); } - if (status & (ATA_BUSY | ATA_DRQ)) + if (status & (ATA_BUSY | ATA_DRQ)) { + ata_ehi_push_desc(ehi, "ST-ATA: " + "BUSY|DRQ persists on ERR|DF, " + "dev_stat 0x%X", status); qc->err_mask |= AC_ERR_HSM; + } /* ata_pio_sectors() might change the * state to HSM_ST_LAST. so, the state @@ -1489,14 +1589,10 @@ inline unsigned int ata_sff_host_intr(struct ata_port *ap, goto idle_irq; } - /* check altstatus */ - status = ata_sff_altstatus(ap); - if (status & ATA_BUSY) - goto idle_irq; - /* check main status, clearing INTRQ */ - status = ap->ops->sff_check_status(ap); - if (unlikely(status & ATA_BUSY)) + /* check main status, clearing INTRQ if needed */ + status = ata_sff_irq_status(ap); + if (status & ATA_BUSY) goto idle_irq; /* ack bmdma irq events */ @@ -1792,7 +1888,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, unsigned int dev1 = devmask & (1 << 1); int rc, ret = 0; - msleep(ATA_WAIT_AFTER_RESET_MSECS); + msleep(ATA_WAIT_AFTER_RESET); /* always check readiness of the master device */ rc = ata_sff_wait_ready(link, deadline); @@ -2030,7 +2126,7 @@ void ata_sff_error_handler(struct ata_port *ap) ap->ops->bmdma_stop(qc); } - ata_sff_altstatus(ap); + ata_sff_sync(ap); /* FIXME: We don't need this */ ap->ops->sff_check_status(ap); ap->ops->sff_irq_clear(ap); @@ -2203,7 +2299,7 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc) mmio + ATA_DMA_CMD); /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ - ata_sff_altstatus(ap); /* dummy read */ + ata_sff_dma_pause(ap); } /** @@ -2275,7 +2371,8 @@ void ata_bus_reset(struct ata_port *ap) /* issue bus reset */ if (ap->flags & ATA_FLAG_SRST) { - rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ); + rc = ata_bus_softreset(ap, devmask, + ata_deadline(jiffies, 40000)); if (rc && rc != -ENODEV) goto err_out; } @@ -2722,7 +2819,8 @@ EXPORT_SYMBOL_GPL(ata_sff_qc_prep); EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep); EXPORT_SYMBOL_GPL(ata_sff_dev_select); EXPORT_SYMBOL_GPL(ata_sff_check_status); -EXPORT_SYMBOL_GPL(ata_sff_altstatus); +EXPORT_SYMBOL_GPL(ata_sff_dma_pause); +EXPORT_SYMBOL_GPL(ata_sff_pause); EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); EXPORT_SYMBOL_GPL(ata_sff_wait_ready); EXPORT_SYMBOL_GPL(ata_sff_tf_load); diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 
4514283937e..ade5c75b614 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -66,7 +66,6 @@ enum { extern unsigned int ata_print_id; extern struct workqueue_struct *ata_aux_wq; -extern int atapi_enabled; extern int atapi_passthru16; extern int libata_fua; extern int libata_noacpi; @@ -106,7 +105,7 @@ extern void ata_sg_clean(struct ata_queued_cmd *qc); extern void ata_qc_free(struct ata_queued_cmd *qc); extern void ata_qc_issue(struct ata_queued_cmd *qc); extern void __ata_qc_complete(struct ata_queued_cmd *qc); -extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); +extern int atapi_check_dma(struct ata_queued_cmd *qc); extern void swap_buf_le16(u16 *buf, unsigned int buf_words); extern void ata_dev_init(struct ata_device *dev); extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp); @@ -151,6 +150,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work); extern int ata_bus_probe(struct ata_port *ap); /* libata-eh.c */ +extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); +extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); extern void ata_scsi_error(struct Scsi_Host *host); extern void ata_port_wait_eh(struct ata_port *ap); diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index 0f3e659db99..5ca70fa1f58 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -550,8 +550,9 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_read_config_byte(isa_bridge, 0x5E, &tmp); if ((tmp & 0x1E) == 0x12) ppi[0] = &info_20_udma; - pci_dev_put(isa_bridge); } + pci_dev_put(isa_bridge); + return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL); } diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 55516103626..d3932901a3b 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c @@ -1011,7 +1011,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; unsigned int dev0 = devmask & (1 << 0); unsigned int dev1 = devmask & (1 << 1); - unsigned long timeout; + unsigned long deadline; /* if device 0 was found in ata_devchk, wait for its * BSY bit to clear @@ -1022,7 +1022,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) /* if device 1 was found in ata_devchk, wait for * register access, then wait for BSY to clear */ - timeout = jiffies + ATA_TMOUT_BOOT; + deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT); while (dev1) { u8 nsect, lbal; @@ -1031,7 +1031,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) lbal = read_atapi_register(base, ATA_REG_LBAL); if ((nsect == 1) && (lbal == 1)) break; - if (time_after(jiffies, timeout)) { + if (time_after(jiffies, deadline)) { dev1 = 0; break; } diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index 17138436423..cf9e9848f8b 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -270,7 +270,7 @@ static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc) disable_dma(state->dma); /* see ata_bmdma_stop */ - ata_sff_altstatus(ap); + ata_sff_dma_pause(ap); } static u8 pata_icside_bmdma_status(struct ata_port *ap) diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index e10816931b2..27843c70eb9 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -80,7 +80,7 @@ #define DRV_NAME "pata_it821x" -#define DRV_VERSION 
"0.3.8" +#define DRV_VERSION "0.4.0" struct it821x_dev { @@ -425,6 +425,8 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc) case ATA_CMD_WRITE_MULTI: case ATA_CMD_WRITE_MULTI_EXT: case ATA_CMD_ID_ATA: + case ATA_CMD_INIT_DEV_PARAMS: + case 0xFC: /* Internal 'report rebuild state' */ /* Arguably should just no-op this one */ case ATA_CMD_SET_FEATURES: return ata_sff_qc_issue(qc); @@ -509,7 +511,7 @@ static void it821x_dev_config(struct ata_device *adev) if (strstr(model_num, "Integrated Technology Express")) { /* RAID mode */ - printk(KERN_INFO "IT821x %sRAID%d volume", + ata_dev_printk(adev, KERN_INFO, "%sRAID%d volume", adev->id[147]?"Bootable ":"", adev->id[129]); if (adev->id[129] != 1) @@ -519,37 +521,51 @@ static void it821x_dev_config(struct ata_device *adev) /* This is a controller firmware triggered funny, don't report the drive faulty! */ adev->horkage &= ~ATA_HORKAGE_DIAGNOSTIC; + /* No HPA in 'smart' mode */ + adev->horkage |= ATA_HORKAGE_BROKEN_HPA; } /** - * it821x_ident_hack - Hack identify data up - * @ap: Port + * it821x_read_id - Hack identify data up + * @adev: device to read + * @tf: proposed taskfile + * @id: buffer for returned ident data * - * Walk the devices on this firmware driven port and slightly + * Query the devices on this firmware driven port and slightly * mash the identify data to stop us and common tools trying to * use features not firmware supported. The firmware itself does * some masking (eg SMART) but not enough. - * - * This is a bit of an abuse of the cable method, but it is the - * only method called at the right time. We could modify the libata - * core specifically for ident hacking but while we have one offender - * it seems better to keep the fallout localised. */ -static int it821x_ident_hack(struct ata_port *ap) +static unsigned int it821x_read_id(struct ata_device *adev, + struct ata_taskfile *tf, u16 *id) { - struct ata_device *adev; - ata_link_for_each_dev(adev, &ap->link) { - if (ata_dev_enabled(adev)) { - adev->id[84] &= ~(1 << 6); /* No FUA */ - adev->id[85] &= ~(1 << 10); /* No HPA */ - adev->id[76] = 0; /* No NCQ/AN etc */ - } + unsigned int err_mask; + unsigned char model_num[ATA_ID_PROD_LEN + 1]; + + err_mask = ata_do_dev_read_id(adev, tf, id); + if (err_mask) + return err_mask; + ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num)); + + id[83] &= ~(1 << 12); /* Cache flush is firmware handled */ + id[83] &= ~(1 << 13); /* Ditto for LBA48 flushes */ + id[84] &= ~(1 << 6); /* No FUA */ + id[85] &= ~(1 << 10); /* No HPA */ + id[76] = 0; /* No NCQ/AN etc */ + + if (strstr(model_num, "Integrated Technology Express")) { + /* Set feature bits the firmware neglects */ + id[49] |= 0x0300; /* LBA, DMA */ + id[82] |= 0x0400; /* LBA48 */ + id[83] &= 0x7FFF; + id[83] |= 0x4000; /* Word 83 is valid */ + id[86] |= 0x0400; /* LBA48 on */ + id[ATA_ID_MAJOR_VER] |= 0x1F; } - return ata_cable_unknown(ap); + return err_mask; } - /** * it821x_check_atapi_dma - ATAPI DMA handler * @qc: Command we are about to issue @@ -577,6 +593,136 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc) return 0; } +/** + * it821x_display_disk - display disk setup + * @n: Device number + * @buf: Buffer block from firmware + * + * Produce a nice informative display of the device setup as provided + * by the firmware. 
+ */ + +static void it821x_display_disk(int n, u8 *buf) +{ + unsigned char id[41]; + int mode = 0; + char *mtype; + char mbuf[8]; + char *cbl = "(40 wire cable)"; + + static const char *types[5] = { + "RAID0", "RAID1" "RAID 0+1", "JBOD", "DISK" + }; + + if (buf[52] > 4) /* No Disk */ + return; + + ata_id_c_string((u16 *)buf, id, 0, 41); + + if (buf[51]) { + mode = ffs(buf[51]); + mtype = "UDMA"; + } else if (buf[49]) { + mode = ffs(buf[49]); + mtype = "MWDMA"; + } + + if (buf[76]) + cbl = ""; + + if (mode) + snprintf(mbuf, 8, "%5s%d", mtype, mode - 1); + else + strcpy(mbuf, "PIO"); + if (buf[52] == 4) + printk(KERN_INFO "%d: %-6s %-8s %s %s\n", + n, mbuf, types[buf[52]], id, cbl); + else + printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n", + n, mbuf, types[buf[52]], buf[53], id, cbl); + if (buf[125] < 100) + printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]); +} + +/** + * it821x_firmware_command - issue firmware command + * @ap: IT821x port to interrogate + * @cmd: command + * @len: length + * + * Issue firmware commands expecting data back from the controller. We + * use this to issue commands that do not go via the normal paths. Other + * commands such as 0xFC can be issued normally. + */ + +static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len) +{ + u8 status; + int n = 0; + u16 *buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) { + printk(KERN_ERR "it821x_firmware_command: Out of memory\n"); + return NULL; + } + /* This isn't quite a normal ATA command as we are talking to the + firmware not the drives */ + ap->ctl |= ATA_NIEN; + iowrite8(ap->ctl, ap->ioaddr.ctl_addr); + ata_wait_idle(ap); + iowrite8(ATA_DEVICE_OBS, ap->ioaddr.device_addr); + iowrite8(cmd, ap->ioaddr.command_addr); + udelay(1); + /* This should be almost immediate but a little paranoia goes a long + way. */ + while(n++ < 10) { + status = ioread8(ap->ioaddr.status_addr); + if (status & ATA_ERR) { + kfree(buf); + printk(KERN_ERR "it821x_firmware_command: rejected\n"); + return NULL; + } + if (status & ATA_DRQ) { + ioread16_rep(ap->ioaddr.data_addr, buf, len/2); + return (u8 *)buf; + } + mdelay(1); + } + kfree(buf); + printk(KERN_ERR "it821x_firmware_command: timeout\n"); + return NULL; +} + +/** + * it821x_probe_firmware - firmware reporting/setup + * @ap: IT821x port being probed + * + * Probe the firmware of the controller by issuing firmware command + * 0xFA and analysing the returned data. + */ + +static void it821x_probe_firmware(struct ata_port *ap) +{ + u8 *buf; + int i; + + /* This is a bit ugly as we can't just issue a task file to a device + as this is controller magic */ + + buf = it821x_firmware_command(ap, 0xFA, 512); + + if (buf != NULL) { + printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n", + buf[505], + buf[506], + buf[507], + buf[508]); + for (i = 0; i < 4; i++) + it821x_display_disk(i, buf + 128 * i); + kfree(buf); + } +} + + /** * it821x_port_start - port setup @@ -610,6 +756,8 @@ static int it821x_port_start(struct ata_port *ap) /* Long I/O's although allowed in LBA48 space cause the onboard firmware to enter the twighlight zone */ /* No ATAPI DMA in this mode either */ + if (ap->port_no == 0) + it821x_probe_firmware(ap); } /* Pull the current clocks from 0x50 */ if (conf & (1 << (1 + ap->port_no))) @@ -631,6 +779,25 @@ static int it821x_port_start(struct ata_port *ap) return 0; } +/** + * it821x_rdc_cable - Cable detect for RDC1010 + * @ap: port we are checking + * + * Return the RDC1010 cable type. 
Unlike the IT821x we know how to do + * this and can do host side cable detect + */ + +static int it821x_rdc_cable(struct ata_port *ap) +{ + u16 r40; + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + + pci_read_config_word(pdev, 0x40, &r40); + if (r40 & (1 << (2 + ap->port_no))) + return ATA_CBL_PATA40; + return ATA_CBL_PATA80; +} + static struct scsi_host_template it821x_sht = { ATA_BMDMA_SHT(DRV_NAME), }; @@ -641,9 +808,10 @@ static struct ata_port_operations it821x_smart_port_ops = { .check_atapi_dma= it821x_check_atapi_dma, .qc_issue = it821x_smart_qc_issue, - .cable_detect = it821x_ident_hack, + .cable_detect = ata_cable_80wire, .set_mode = it821x_smart_set_mode, .dev_config = it821x_dev_config, + .read_id = it821x_read_id, .port_start = it821x_port_start, }; @@ -664,8 +832,29 @@ static struct ata_port_operations it821x_passthru_port_ops = { .port_start = it821x_port_start, }; +static struct ata_port_operations it821x_rdc_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .check_atapi_dma= it821x_check_atapi_dma, + .sff_dev_select = it821x_passthru_dev_select, + .bmdma_start = it821x_passthru_bmdma_start, + .bmdma_stop = it821x_passthru_bmdma_stop, + .qc_issue = it821x_passthru_qc_issue, + + .cable_detect = it821x_rdc_cable, + .set_piomode = it821x_passthru_set_piomode, + .set_dmamode = it821x_passthru_set_dmamode, + + .port_start = it821x_port_start, +}; + static void it821x_disable_raid(struct pci_dev *pdev) { + /* Neither the RDC nor the IT8211 */ + if (pdev->vendor != PCI_VENDOR_ID_ITE || + pdev->device != PCI_DEVICE_ID_ITE_8212) + return; + /* Reset local CPU, and set BIOS not ready */ pci_write_config_byte(pdev, 0x5E, 0x01); @@ -690,6 +879,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, + .udma_mask = ATA_UDMA6, .port_ops = &it821x_smart_port_ops }; static const struct ata_port_info info_passthru = { @@ -699,6 +889,13 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) .udma_mask = ATA_UDMA6, .port_ops = &it821x_passthru_port_ops }; + static const struct ata_port_info info_rdc = { + .flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = 0x1f, + .mwdma_mask = 0x07, + /* No UDMA */ + .port_ops = &it821x_rdc_port_ops + }; const struct ata_port_info *ppi[] = { NULL, NULL }; static char *mode[2] = { "pass through", "smart" }; @@ -707,21 +904,25 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) rc = pcim_enable_device(pdev); if (rc) return rc; + + if (pdev->vendor == PCI_VENDOR_ID_RDC) { + ppi[0] = &info_rdc; + } else { + /* Force the card into bypass mode if so requested */ + if (it8212_noraid) { + printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n"); + it821x_disable_raid(pdev); + } + pci_read_config_byte(pdev, 0x50, &conf); + conf &= 1; - /* Force the card into bypass mode if so requested */ - if (it8212_noraid) { - printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n"); - it821x_disable_raid(pdev); + printk(KERN_INFO DRV_NAME": controller in %s mode.\n", + mode[conf]); + if (conf == 0) + ppi[0] = &info_passthru; + else + ppi[0] = &info_smart; } - pci_read_config_byte(pdev, 0x50, &conf); - conf &= 1; - - printk(KERN_INFO DRV_NAME ": controller in %s mode.\n", mode[conf]); - if (conf == 0) - ppi[0] = &info_passthru; - else - ppi[0] = &info_smart; - return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL); } @@ -745,6 +946,7 @@ static int it821x_reinit_one(struct pci_dev *pdev) static const struct pci_device_id it821x[] 
= { { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), }, { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), }, + { PCI_VDEVICE(RDC, 0x1010), }, { }, }; diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index de8d186f5ab..2014253f6c8 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c @@ -169,7 +169,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq) - set_irq_type(irq, IRQT_RISING); + set_irq_type(irq, IRQ_TYPE_EDGE_RISING); /* Setup expansion bus chip selects */ *data->cs0_cfg = data->cs0_bits; diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index fe7cc8ed4ea..bc037ffce20 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -305,7 +305,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev, iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); if (unlikely(slop)) { - u32 pad; + __le32 pad; if (rw == READ) { pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); memcpy(buf + buflen - slop, &pad, slop); @@ -746,14 +746,12 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf, ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); if (unlikely(slop)) { - u32 pad; + __le32 pad; if (rw == WRITE) { memcpy(&pad, buf + buflen - slop, slop); - pad = le32_to_cpu(pad); - iowrite32(pad, ap->ioaddr.data_addr); + iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr); } else { - pad = ioread32(ap->ioaddr.data_addr); - pad = cpu_to_le32(pad); + pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); memcpy(buf + buflen - slop, &pad, slop); } } diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index bc79df6e7cb..a9e827356d0 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -16,10 +16,10 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/libata.h> +#include <linux/of_platform.h> #include <asm/types.h> #include <asm/prom.h> -#include <asm/of_platform.h> #include <asm/mpc52xx.h> diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index 3d39f9dfec5..41b4361bbf6 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c @@ -414,6 +414,7 @@ static struct pcmcia_device_id pcmcia_devices[] = { PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b), + PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee), PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c), PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591), @@ -424,6 +425,7 @@ static struct pcmcia_device_id pcmcia_devices[] = { PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6), PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003), PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443), + PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2), PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8), diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c index 97e5b090d7c..63b7a1c165a 100644 --- a/drivers/ata/pata_qdi.c +++ b/drivers/ata/pata_qdi.c @@ -137,7 +137,7 @@ static unsigned int qdi_data_xfer(struct ata_device *dev, 
unsigned char *buf, iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); if (unlikely(slop)) { - u32 pad; + __le32 pad; if (rw == READ) { pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); memcpy(buf + buflen - slop, &pad, slop); diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index a108d259f19..f8b3ffc8ae9 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c @@ -57,7 +57,9 @@ static inline void rb532_pata_finish_io(struct ata_port *ap) struct ata_host *ah = ap->host; struct rb532_cf_info *info = ah->private_data; - ata_sff_altstatus(ap); + /* FIXME: Keep previous delay. If this is merely a fence then + ata_sff_sync might be sufficient. */ + ata_sff_dma_pause(ap); ndelay(RB500_CF_IO_DELAY); set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index e965b251ca2..16673d16857 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c @@ -696,7 +696,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc) if (reg & INTSTS_BMSINT) { unsigned int classes; - unsigned long deadline = jiffies + ATA_TMOUT_BOOT; + unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT); printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); /* TBD: SW reset */ @@ -726,7 +726,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc) in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ - ata_sff_altstatus(ap); /* dummy read */ + ata_sff_dma_pause(ap); /* dummy read */ } /** @@ -747,7 +747,8 @@ static u8 scc_bmdma_status (struct ata_port *ap) return host_stat; /* errata A252,A308 workaround: Step4 */ - if ((ata_sff_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ)) + if ((scc_check_altstatus(ap) & ATA_ERR) + && (int_status & INTSTS_INTRQ)) return (host_stat | ATA_DMA_INTR); /* errata A308 workaround Step5 */ diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index e82c66e8d31..26345d7b531 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -56,6 +56,7 @@ static const struct sis_laptop sis_laptop[] = { { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */ { 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */ { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */ + { 0x5513, 0x1039, 0x5513 }, /* Targa Visionary 1000 */ /* end marker */ { 0, } }; diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 708ed144ede..57d951b11f2 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -98,7 +98,8 @@ static const struct via_isa_bridge { u8 rev_max; u16 flags; } via_isa_bridges[] = { - { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, + { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, VIA_UDMA_133 | + VIA_BAD_AST | VIA_SATA_PATA }, { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA }, @@ -322,6 +323,65 @@ static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev) via_do_set_mode(ap, adev, adev->dma_mode, tclock[mode], set_ast, udma[mode]); } +/** + * via_ata_sff_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller. 
+ * + * Note: This is to fix the internal bug of via chipsets, which + * will reset the device register after changing the IEN bit on + * ctl register + */ +static void via_ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + iowrite8(tf->ctl, ioaddr->ctl_addr); + iowrite8(tf->device, ioaddr->device_addr); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + iowrite8(tf->hob_feature, ioaddr->feature_addr); + iowrite8(tf->hob_nsect, ioaddr->nsect_addr); + iowrite8(tf->hob_lbal, ioaddr->lbal_addr); + iowrite8(tf->hob_lbam, ioaddr->lbam_addr); + iowrite8(tf->hob_lbah, ioaddr->lbah_addr); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + iowrite8(tf->feature, ioaddr->feature_addr); + iowrite8(tf->nsect, ioaddr->nsect_addr); + iowrite8(tf->lbal, ioaddr->lbal_addr); + iowrite8(tf->lbam, ioaddr->lbam_addr); + iowrite8(tf->lbah, ioaddr->lbah_addr); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + iowrite8(tf->device, ioaddr->device_addr); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + static struct scsi_host_template via_sht = { ATA_BMDMA_SHT(DRV_NAME), }; @@ -332,11 +392,13 @@ static struct ata_port_operations via_port_ops = { .set_piomode = via_set_piomode, .set_dmamode = via_set_dmamode, .prereset = via_pre_reset, + .sff_tf_load = via_ata_tf_load, }; static struct ata_port_operations via_port_ops_noirq = { .inherits = &via_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, + .sff_tf_load = via_ata_tf_load, }; /** diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c index 474528f8fe3..a7606b044a6 100644 --- a/drivers/ata/pata_winbond.c +++ b/drivers/ata/pata_winbond.c @@ -105,7 +105,7 @@ static unsigned int winbond_data_xfer(struct ata_device *dev, iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); if (unlikely(slop)) { - u32 pad; + __le32 pad; if (rw == READ) { pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); memcpy(buf + buflen - slop, &pad, slop); diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 853559e3231..3924e7209a4 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -34,7 +34,7 @@ enum { SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | - ATA_FLAG_NCQ), + ATA_FLAG_PMP | ATA_FLAG_NCQ), SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */ @@ -395,7 +395,7 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) cd = (struct command_desc *)pp->cmdentry + tag; cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE; - ata_tf_to_fis(&qc->tf, 0, 1, (u8 *) &cd->cfis); + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis); VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n", cd->cfis[0], cd->cfis[1], cd->cfis[2]); @@ -438,6 +438,8 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) ioread32(CA + hcr_base), ioread32(CE + hcr_base), ioread32(CC + hcr_base)); + iowrite32(qc->dev->link->pmp, CQPMP + hcr_base); + /* Simply queue command to the controller/device */ iowrite32(1 << tag, CQ + hcr_base); @@ -558,11 +560,36 @@ static void sata_fsl_thaw(struct ata_port *ap) ioread32(hcr_base + 
HCONTROL), ioread32(hcr_base + HSTATUS)); } +static void sata_fsl_pmp_attach(struct ata_port *ap) +{ + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 temp; + + temp = ioread32(hcr_base + HCONTROL); + iowrite32((temp | HCONTROL_PMP_ATTACHED), hcr_base + HCONTROL); +} + +static void sata_fsl_pmp_detach(struct ata_port *ap) +{ + struct sata_fsl_host_priv *host_priv = ap->host->private_data; + void __iomem *hcr_base = host_priv->hcr_base; + u32 temp; + + temp = ioread32(hcr_base + HCONTROL); + temp &= ~HCONTROL_PMP_ATTACHED; + iowrite32(temp, hcr_base + HCONTROL); + + /* enable interrupts on the controller/port */ + temp = ioread32(hcr_base + HCONTROL); + iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL); + +} + static int sata_fsl_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct sata_fsl_port_priv *pp; - int retval; void *mem; dma_addr_t mem_dma; struct sata_fsl_host_priv *host_priv = ap->host->private_data; @@ -688,12 +715,13 @@ static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline) } static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, - unsigned long deadline) + unsigned long deadline) { struct ata_port *ap = link->ap; struct sata_fsl_port_priv *pp = ap->private_data; struct sata_fsl_host_priv *host_priv = ap->host->private_data; void __iomem *hcr_base = host_priv->hcr_base; + int pmp = sata_srst_pmp(link); u32 temp; struct ata_taskfile tf; u8 *cfis; @@ -703,6 +731,9 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, DPRINTK("in xx_softreset\n"); + if (pmp != SATA_PMP_CTRL_PORT) + goto issue_srst; + try_offline_again: /* * Force host controller to go off-line, aborting current operations @@ -746,6 +777,7 @@ try_offline_again: temp = ioread32(hcr_base + HCONTROL); temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE); + temp |= HCONTROL_PMP_ATTACHED; iowrite32(temp, hcr_base + HCONTROL); temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, 0, 1, 500); @@ -771,7 +803,8 @@ try_offline_again: ata_port_printk(ap, KERN_WARNING, "No Device OR PHYRDY change,Hstatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); - goto err; + *class = ATA_DEV_NONE; + goto out; } /* @@ -783,7 +816,8 @@ try_offline_again: if ((temp & 0xFF) != 0x18) { ata_port_printk(ap, KERN_WARNING, "No Signature Update\n"); - goto err; + *class = ATA_DEV_NONE; + goto out; } else { ata_port_printk(ap, KERN_INFO, "Signature Update detected @ %d msecs\n", @@ -798,6 +832,7 @@ try_offline_again: * reached here, we can send a command to the target device */ +issue_srst: DPRINTK("Sending SRST/device reset\n"); ata_tf_init(link->device, &tf); @@ -808,7 +843,7 @@ try_offline_again: SRST_CMD | CMD_DESC_SNOOP_ENABLE, 0, 0, 5); tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */ - ata_tf_to_fis(&tf, 0, 0, cfis); + ata_tf_to_fis(&tf, pmp, 0, cfis); DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n", cfis[0], cfis[1], cfis[2], cfis[3]); @@ -854,8 +889,10 @@ try_offline_again: sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_SNOOP_ENABLE, 0, 0, 5); tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */ - ata_tf_to_fis(&tf, 0, 0, cfis); + ata_tf_to_fis(&tf, pmp, 0, cfis); + if (pmp != SATA_PMP_CTRL_PORT) + iowrite32(pmp, CQPMP + hcr_base); iowrite32(1, CQ + hcr_base); msleep(150); /* ?? 
*/ @@ -886,12 +923,21 @@ try_offline_again: VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE)); } +out: return 0; err: return -EIO; } +static void sata_fsl_error_handler(struct ata_port *ap) +{ + + DPRINTK("in xx_error_handler\n"); + sata_pmp_error_handler(ap); + +} + static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) { if (qc->flags & ATA_QCFLAG_FAILED) @@ -905,18 +951,21 @@ static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) static void sata_fsl_error_intr(struct ata_port *ap) { - struct ata_link *link = &ap->link; - struct ata_eh_info *ehi = &link->eh_info; struct sata_fsl_host_priv *host_priv = ap->host->private_data; void __iomem *hcr_base = host_priv->hcr_base; - u32 hstatus, dereg, cereg = 0, SError = 0; + u32 hstatus, dereg=0, cereg = 0, SError = 0; unsigned int err_mask = 0, action = 0; - struct ata_queued_cmd *qc; - int freeze = 0; + int freeze = 0, abort=0; + struct ata_link *link = NULL; + struct ata_queued_cmd *qc = NULL; + struct ata_eh_info *ehi; hstatus = ioread32(hcr_base + HSTATUS); cereg = ioread32(hcr_base + CE); + /* first, analyze and record host port events */ + link = &ap->link; + ehi = &link->eh_info; ata_ehi_clear_desc(ehi); /* @@ -926,42 +975,28 @@ static void sata_fsl_error_intr(struct ata_port *ap) sata_fsl_scr_read(ap, SCR_ERROR, &SError); if (unlikely(SError & 0xFFFF0000)) { sata_fsl_scr_write(ap, SCR_ERROR, SError); - err_mask |= AC_ERR_ATA_BUS; } DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n", hstatus, cereg, ioread32(hcr_base + DE), SError); - /* handle single device errors */ - if (cereg) { - /* - * clear the command error, also clears queue to the device - * in error, and we can (re)issue commands to this device. - * When a device is in error all commands queued into the - * host controller and at the device are considered aborted - * and the queue for that device is stopped. Now, after - * clearing the device error, we can issue commands to the - * device to interrogate it to find the source of the error. - */ - dereg = ioread32(hcr_base + DE); - iowrite32(dereg, hcr_base + DE); - iowrite32(cereg, hcr_base + CE); + /* handle fatal errors */ + if (hstatus & FATAL_ERROR_DECODE) { + ehi->err_mask |= AC_ERR_ATA_BUS; + ehi->action |= ATA_EH_SOFTRESET; - DPRINTK("single device error, CE=0x%x, DE=0x%x\n", - ioread32(hcr_base + CE), ioread32(hcr_base + DE)); /* - * We should consider this as non fatal error, and TF must - * be updated as done below. + * Ignore serror in case of fatal errors as we always want + * to do a soft-reset of the FSL SATA controller. Analyzing + * serror may cause libata to schedule a hard-reset action, + * and hard-reset currently does not do controller + * offline/online, causing command timeouts and leads to an + * un-recoverable state, hence make libATA ignore + * autopsy in case of fatal errors. */ - err_mask |= AC_ERR_DEV; - } + ehi->flags |= ATA_EHI_NO_AUTOPSY; - /* handle fatal errors */ - if (hstatus & FATAL_ERROR_DECODE) { - err_mask |= AC_ERR_ATA_BUS; - action |= ATA_EH_RESET; - /* how will fatal error interrupts be completed ?? 
*/ freeze = 1; } @@ -971,30 +1006,83 @@ static void sata_fsl_error_intr(struct ata_port *ap) /* Setup a soft-reset EH action */ ata_ehi_hotplugged(ehi); + ata_ehi_push_desc(ehi, "%s", "PHY RDY changed"); freeze = 1; } - /* record error info */ - qc = ata_qc_from_tag(ap, link->active_tag); + /* handle single device errors */ + if (cereg) { + /* + * clear the command error, also clears queue to the device + * in error, and we can (re)issue commands to this device. + * When a device is in error all commands queued into the + * host controller and at the device are considered aborted + * and the queue for that device is stopped. Now, after + * clearing the device error, we can issue commands to the + * device to interrogate it to find the source of the error. + */ + abort = 1; + + DPRINTK("single device error, CE=0x%x, DE=0x%x\n", + ioread32(hcr_base + CE), ioread32(hcr_base + DE)); - if (qc) + /* find out the offending link and qc */ + if (ap->nr_pmp_links) { + dereg = ioread32(hcr_base + DE); + iowrite32(dereg, hcr_base + DE); + iowrite32(cereg, hcr_base + CE); + + if (dereg < ap->nr_pmp_links) { + link = &ap->pmp_link[dereg]; + ehi = &link->eh_info; + qc = ata_qc_from_tag(ap, link->active_tag); + /* + * We should consider this as non fatal error, + * and TF must be updated as done below. + */ + + err_mask |= AC_ERR_DEV; + + } else { + err_mask |= AC_ERR_HSM; + action |= ATA_EH_HARDRESET; + freeze = 1; + } + } else { + dereg = ioread32(hcr_base + DE); + iowrite32(dereg, hcr_base + DE); + iowrite32(cereg, hcr_base + CE); + + qc = ata_qc_from_tag(ap, link->active_tag); + /* + * We should consider this as non fatal error, + * and TF must be updated as done below. + */ + err_mask |= AC_ERR_DEV; + } + } + + /* record error info */ + if (qc) { qc->err_mask |= err_mask; - else + } else ehi->err_mask |= err_mask; ehi->action |= action; - ehi->serror |= SError; /* freeze or abort */ if (freeze) ata_port_freeze(ap); - else - ata_port_abort(ap); + else if (abort) { + if (qc) + ata_link_abort(qc->dev->link); + else + ata_port_abort(ap); + } } static void sata_fsl_host_intr(struct ata_port *ap) { - struct ata_link *link = &ap->link; struct sata_fsl_host_priv *host_priv = ap->host->private_data; void __iomem *hcr_base = host_priv->hcr_base; u32 hstatus, qc_active = 0; @@ -1017,10 +1105,19 @@ static void sata_fsl_host_intr(struct ata_port *ap) return; } - if (link->sactive) { /* only true for NCQ commands */ + /* Read command completed register */ + qc_active = ioread32(hcr_base + CC); + + VPRINTK("Status of all queues :\n"); + VPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n", + qc_active, + ioread32(hcr_base + CA), + ioread32(hcr_base + CE), + ioread32(hcr_base + CQ), + ap->qc_active); + + if (qc_active & ap->qc_active) { int i; - /* Read command completed register */ - qc_active = ioread32(hcr_base + CC); /* clear CC bit, this will also complete the interrupt */ iowrite32(qc_active, hcr_base + CC); @@ -1032,8 +1129,9 @@ static void sata_fsl_host_intr(struct ata_port *ap) for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { if (qc_active & (1 << i)) { qc = ata_qc_from_tag(ap, i); - if (qc) + if (qc) { ata_qc_complete(qc); + } DPRINTK ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", i, ioread32(hcr_base + CC), @@ -1042,19 +1140,21 @@ static void sata_fsl_host_intr(struct ata_port *ap) } return; - } else if (ap->qc_active) { + } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) { iowrite32(1, hcr_base + CC); - qc = ata_qc_from_tag(ap, link->active_tag); + qc = ata_qc_from_tag(ap, 
ATA_TAG_INTERNAL); - DPRINTK("completing non-ncq cmd, tag=%d,CC=0x%x\n", - link->active_tag, ioread32(hcr_base + CC)); + DPRINTK("completing non-ncq cmd, CC=0x%x\n", + ioread32(hcr_base + CC)); - if (qc) + if (qc) { ata_qc_complete(qc); + } } else { /* Spurious Interrupt!! */ DPRINTK("spurious interrupt!!, CC = 0x%x\n", ioread32(hcr_base + CC)); + iowrite32(qc_active, hcr_base + CC); return; } } @@ -1130,9 +1230,6 @@ static int sata_fsl_init_controller(struct ata_host *host) iowrite32(0x00000FFFF, hcr_base + CE); iowrite32(0x00000FFFF, hcr_base + DE); - /* initially assuming no Port multiplier, set CQPMP to 0 */ - iowrite32(0x0, hcr_base + CQPMP); - /* * host controller will be brought on-line, during xx_port_start() * callback, that should also initiate the OOB, COMINIT sequence @@ -1154,8 +1251,8 @@ static struct scsi_host_template sata_fsl_sht = { .dma_boundary = ATA_DMA_BOUNDARY, }; -static const struct ata_port_operations sata_fsl_ops = { - .inherits = &sata_port_ops, +static struct ata_port_operations sata_fsl_ops = { + .inherits = &sata_pmp_port_ops, .qc_prep = sata_fsl_qc_prep, .qc_issue = sata_fsl_qc_issue, @@ -1168,10 +1265,15 @@ static const struct ata_port_operations sata_fsl_ops = { .thaw = sata_fsl_thaw, .prereset = sata_fsl_prereset, .softreset = sata_fsl_softreset, + .pmp_softreset = sata_fsl_softreset, + .error_handler = sata_fsl_error_handler, .post_internal_cmd = sata_fsl_post_internal_cmd, .port_start = sata_fsl_port_start, .port_stop = sata_fsl_port_stop, + + .pmp_attach = sata_fsl_pmp_attach, + .pmp_detach = sata_fsl_pmp_detach, }; static const struct ata_port_info sata_fsl_port_info[] = { diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index fb81f0c7a8c..ad169ffbc4c 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -72,7 +72,7 @@ #include <linux/libata.h> #define DRV_NAME "sata_mv" -#define DRV_VERSION "1.21" +#define DRV_VERSION "1.24" enum { /* BAR's are enumerated in terms of pci_resource_start() terms */ @@ -122,8 +122,6 @@ enum { /* Host Flags */ MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ - /* SoC integrated controllers, no PCI interface */ - MV_FLAG_SOC = (1 << 28), MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | @@ -226,6 +224,11 @@ enum { PHY_MODE3 = 0x310, PHY_MODE4 = 0x314, + PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ + PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ + PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ + PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ + PHY_MODE2 = 0x330, SATA_IFCTL_OFS = 0x344, SATA_TESTCTL_OFS = 0x348, @@ -356,12 +359,12 @@ enum { MV_HP_ERRATA_50XXB2 = (1 << 2), MV_HP_ERRATA_60X1B2 = (1 << 3), MV_HP_ERRATA_60X1C0 = (1 << 4), - MV_HP_ERRATA_XX42A0 = (1 << 5), MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ + MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? 
*/ @@ -374,7 +377,7 @@ enum { #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) -#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) +#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) #define WINDOW_BASE(i) (0x20034 + ((i) << 4)) @@ -652,7 +655,7 @@ static const struct ata_port_info mv_port_info[] = { .port_ops = &mv_iie_ops, }, { /* chip_soc */ - .flags = MV_GENIIE_FLAGS | MV_FLAG_SOC, + .flags = MV_GENIIE_FLAGS, .pio_mask = 0x1f, /* pio0-4 */ .udma_mask = ATA_UDMA6, .port_ops = &mv_iie_ops, @@ -812,12 +815,7 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio, writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); - - if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) - writelfl((pp->crqb_dma & 0xffffffff) | index, - port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); - else - writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); + writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); /* * initialize response queue @@ -827,13 +825,7 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio, WARN_ON(pp->crpb_dma & 0xff); writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); - - if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) - writelfl((pp->crpb_dma & 0xffffffff) | index, - port_mmio + EDMA_RSP_Q_IN_PTR_OFS); - else - writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); - + writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); } @@ -1254,7 +1246,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq) cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ cfg |= (1 << 22); /* enab 4-entry host queue cache */ - if (HAS_PCI(ap->host)) + if (!IS_SOC(hpriv)) cfg |= (1 << 18); /* enab early completion */ if (hpriv->hp_flags & MV_HP_CUT_THROUGH) cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ @@ -1330,6 +1322,9 @@ static int mv_port_start(struct ata_port *ap) goto out_port_free_dma_mem; memset(pp->crpb, 0, MV_CRPB_Q_SZ); + /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ + if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) + ap->flags |= ATA_FLAG_AN; /* * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. * For later hardware, we need one unique sg_tbl per NCQ tag. @@ -1600,6 +1595,24 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) if ((qc->tf.protocol != ATA_PROT_DMA) && (qc->tf.protocol != ATA_PROT_NCQ)) { + static int limit_warnings = 10; + /* + * Errata SATA#16, SATA#24: warn if multiple DRQs expected. + * + * Someday, we might implement special polling workarounds + * for these, but it all seems rather unnecessary since we + * normally use only DMA for commands which transfer more + * than a single block of data. + * + * Much of the time, this could just work regardless. + * So for now, just log the incident, and allow the attempt. + */ + if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { + --limit_warnings; + ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME + ": attempting PIO w/multiple DRQ: " + "this may fail due to h/w errata\n"); + } /* * We're about to send a non-EDMA capable command to the * port. 
Turn off EDMA so there won't be problems accessing @@ -2225,7 +2238,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) * a bogus register value which can indicate HW removal or PCI fault. */ if (pending_irqs && main_irq_cause != 0xffffffffU) { - if (unlikely((pending_irqs & PCI_ERR) && HAS_PCI(host))) + if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) handled = mv_pci_error(host, hpriv->base); else handled = mv_host_intr(host, pending_irqs); @@ -2547,7 +2560,7 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); int fix_phy_mode4 = hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); - u32 m2, tmp; + u32 m2, m3; if (fix_phy_mode2) { m2 = readl(port_mmio + PHY_MODE2); @@ -2564,28 +2577,36 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, udelay(200); } - /* who knows what this magic does */ - tmp = readl(port_mmio + PHY_MODE3); - tmp &= ~0x7F800000; - tmp |= 0x2A800000; - writel(tmp, port_mmio + PHY_MODE3); - - if (fix_phy_mode4) { - u32 m4; - - m4 = readl(port_mmio + PHY_MODE4); - - if (hp_flags & MV_HP_ERRATA_60X1B2) - tmp = readl(port_mmio + PHY_MODE3); + /* + * Gen-II/IIe PHY_MODE3 errata RM#2: + * Achieves better receiver noise performance than the h/w default: + */ + m3 = readl(port_mmio + PHY_MODE3); + m3 = (m3 & 0x1f) | (0x5555601 << 5); - /* workaround for errata FEr SATA#10 (part 1) */ - m4 = (m4 & ~(1 << 1)) | (1 << 0); + /* Guideline 88F5182 (GL# SATA-S11) */ + if (IS_SOC(hpriv)) + m3 &= ~0x1c; + if (fix_phy_mode4) { + u32 m4 = readl(port_mmio + PHY_MODE4); + /* + * Enforce reserved-bit restrictions on GenIIe devices only. + * For earlier chipsets, force only the internal config field + * (workaround for errata FEr SATA#10 part 1). + */ + if (IS_GEN_IIE(hpriv)) + m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; + else + m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; writel(m4, port_mmio + PHY_MODE4); - - if (hp_flags & MV_HP_ERRATA_60X1B2) - writel(tmp, port_mmio + PHY_MODE3); } + /* + * Workaround for 60x1-B2 errata SATA#13: + * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, + * so we must always rewrite PHY_MODE3 after PHY_MODE4. 
+ */ + writel(m3, port_mmio + PHY_MODE3); /* Revert values of pre-emphasis and signal amps to the saved ones */ m2 = readl(port_mmio + PHY_MODE2); @@ -2876,7 +2897,7 @@ static unsigned int mv_in_pcix_mode(struct ata_host *host) void __iomem *mmio = hpriv->base; u32 reg; - if (!HAS_PCI(host) || !IS_PCIE(hpriv)) + if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) return 0; /* not PCI-X capable */ reg = readl(mmio + MV_PCI_MODE_OFS); if ((reg & MV_PCI_MODE_MASK) == 0) @@ -3003,10 +3024,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) hp_flags |= MV_HP_CUT_THROUGH; switch (pdev->revision) { - case 0x0: - hp_flags |= MV_HP_ERRATA_XX42A0; - break; - case 0x1: + case 0x2: /* Rev.B0: the first/only public release */ hp_flags |= MV_HP_ERRATA_60X1C0; break; default: @@ -3018,7 +3036,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) break; case chip_soc: hpriv->ops = &mv_soc_ops; - hp_flags |= MV_HP_ERRATA_60X1C0; + hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0; break; default: @@ -3062,12 +3080,12 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) if (rc) goto done; - if (HAS_PCI(host)) { - hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; - hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; - } else { + if (IS_SOC(hpriv)) { hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; + } else { + hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; + hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; } /* global interrupt mask: 0 == mask everything */ @@ -3093,7 +3111,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) mv_port_init(&ap->ioaddr, port_mmio); #ifdef CONFIG_PCI - if (HAS_PCI(host)) { + if (!IS_SOC(hpriv)) { unsigned int offset = port_mmio - mmio; ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); @@ -3113,7 +3131,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); } - if (HAS_PCI(host)) { + if (!IS_SOC(hpriv)) { /* Clear any currently outstanding host interrupt conditions */ writelfl(0, mmio + hpriv->irq_cause_ofs); diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 8ee6b5b4ede..84ffcc26a74 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -370,6 +370,7 @@ static const struct pci_device_id sil24_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 }, { PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 }, { PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 }, + { PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 }, { PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 }, { PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 }, diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 16aa6839aa5..fb13b82aacb 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -253,21 +253,29 @@ static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc) /* start host DMA transaction */ dmactl = readb(mmio + ATA_DMA_CMD); writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); - /* There is a race condition in certain SATA controllers that can - be seen when the r/w command is given to the controller before the - host DMA is started. On a Read command, the controller would initiate - the command to the drive even before it sees the DMA start. 
When there - are very fast drives connected to the controller, or when the data request - hits in the drive cache, there is the possibility that the drive returns a part - or all of the requested data to the controller before the DMA start is issued. - In this case, the controller would become confused as to what to do with the data. - In the worst case when all the data is returned back to the controller, the - controller could hang. In other cases it could return partial data returning - in data corruption. This problem has been seen in PPC systems and can also appear - on an system with very fast disks, where the SATA controller is sitting behind a - number of bridges, and hence there is significant latency between the r/w command - and the start command. */ - /* issue r/w command if the access is to ATA*/ + /* This works around possible data corruption. + + On certain SATA controllers, this can be seen when the r/w + command is given to the controller before the host DMA is + started. + + On a Read command, the controller would initiate the + command to the drive even before it sees the DMA + start. When there are very fast drives connected to the + controller, or when the data request hits in the drive + cache, there is the possibility that the drive returns a + part or all of the requested data to the controller before + the DMA start is issued. In this case, the controller + would become confused as to what to do with the data. In + the worst case when all the data is returned back to the + controller, the controller could hang. In other cases it + could return partial data, resulting in data + corruption. This problem has been seen in PPC systems and + can also appear on a system with very fast disks, where + the SATA controller is sitting behind a number of bridges, + and hence there is significant latency between the r/w + command and the start command. */ + /* issue r/w command if the access is to ATA */ if (qc->tf.protocol == ATA_PROT_DMA) ap->ops->sff_exec_command(ap, &qc->tf); } diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index f277cea904c..db529b84994 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c @@ -83,6 +83,7 @@ static struct ata_port_operations uli_ops = { .inherits = &ata_bmdma_port_ops, .scr_read = uli_scr_read, .scr_write = uli_scr_write, + .hardreset = ATA_OP_NULL, }; static const struct ata_port_info uli_port_info = {
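
The mv_qc_issue() hunk in sata_mv.c above rate-limits its "PIO w/multiple DRQ" warning with a static countdown instead of printing on every affected command. The following standalone sketch only illustrates that pattern; warn_multi_drq() is a hypothetical helper, and plain stderr output stands in for the driver's ata_link_printk() call.

#include <stdio.h>

/* Hypothetical helper (not from the patch): emit at most ten warnings
 * about PIO transfers that would need multiple DRQ phases, mirroring
 * the static limit_warnings countdown used in mv_qc_issue(). */
static void warn_multi_drq(unsigned int nbytes, unsigned int sect_size)
{
	static int limit_warnings = 10;

	if (limit_warnings > 0 && sect_size > 0 && (nbytes / sect_size) > 1) {
		--limit_warnings;
		fprintf(stderr, "sata_mv: attempting PIO w/multiple DRQ: "
				"this may fail due to h/w errata\n");
	}
}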
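
The mv6_phy_errata() hunk above distinguishes Gen-IIe parts, whose PHY_MODE4 reserved bits must be held at documented values, from earlier chips, where only the two-bit internal config field is forced (errata FEr SATA#10 part 1). A minimal sketch of that bit manipulation, reusing the constants introduced by the patch but with a hypothetical fixup_phy_mode4() helper:

#include <stdbool.h>
#include <stdint.h>

#define PHY_MODE4_CFG_MASK	0x00000003	/* phy internal config field */
#define PHY_MODE4_CFG_VALUE	0x00000001	/* phy internal config field */
#define PHY_MODE4_RSVD_ZEROS	0x5de3fffa	/* Gen2e always write zeros */
#define PHY_MODE4_RSVD_ONES	0x00000005	/* Gen2e always write ones */

/* Hypothetical helper: compute the PHY_MODE4 value to write back. */
static uint32_t fixup_phy_mode4(uint32_t m4, bool is_gen_iie)
{
	if (is_gen_iie)		/* enforce reserved-bit values on Gen-IIe */
		return (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
	/* older chips: force only the internal config field */
	return (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
}

Whatever value ends up being written, the patch notes that any PHY_MODE4 write may corrupt PHY_MODE3 on 60x1-B2 parts (errata SATA#13), which is why PHY_MODE3 is always rewritten afterwards in the hunk above.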
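
The sata_mv changes also retire the old per-port MV_FLAG_SOC bit (and the HAS_PCI() test built on it) in favour of a host-private flag, so the SoC-versus-PCI decision is made once from hpriv->hp_flags, e.g. when selecting the main IRQ cause/mask registers in mv_init_host(). A simplified sketch of that test, with the driver structure reduced to a placeholder holding only the flags field:

#include <stdbool.h>
#include <stdint.h>

#define MV_HP_FLAG_SOC	(1 << 11)	/* SystemOnChip, no PCI */

/* Placeholder for the driver's mv_host_priv; only hp_flags is shown. */
struct mv_host_priv_sketch {
	uint32_t hp_flags;
};

/* Mirrors the IS_SOC() macro: true for SoC-integrated controllers,
 * which have no PCI interface and use the SOC_HC_* IRQ registers. */
static bool is_soc(const struct mv_host_priv_sketch *hpriv)
{
	return hpriv->hp_flags & MV_HP_FLAG_SOC;
}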