author | Jeff Garzik <jeff@garzik.org> | 2006-03-11 19:21:17 -0500
committer | Jeff Garzik <jeff@garzik.org> | 2006-03-11 19:21:17 -0500
commit | ce1e7a2ac721eb9d825b63f74752d0c9e0c635c2
tree | a7c0d18d6486734dffb9498e30db5b226b6e23bf /drivers/scsi
parent | c2956a3b0d1c17b38da369811a6ce93eb7a01a04
parent | 75deb6fa985bd3162b9472f1fc394e23294da816
Merge branch 'upstream'
Diffstat (limited to 'drivers/scsi')
-rw-r--r-- | drivers/scsi/ahci.c | 14
-rw-r--r-- | drivers/scsi/ata_piix.c | 315
-rw-r--r-- | drivers/scsi/libata-core.c | 1003
-rw-r--r-- | drivers/scsi/libata-scsi.c | 2
-rw-r--r-- | drivers/scsi/libata.h | 1
-rw-r--r-- | drivers/scsi/sata_promise.c | 2
-rw-r--r-- | drivers/scsi/sata_sil.c | 99
-rw-r--r-- | drivers/scsi/sata_sil24.c | 1
-rw-r--r-- | drivers/scsi/sr_ioctl.c | 2
9 files changed, 881 insertions, 558 deletions
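Most of the churn in the libata-core.c hunks below comes from replacing the old per-class mode-mask helpers (ata_get_mode_mask(), fgb(), ata_choose_xfer_mode()) with a single packed xfer_mask plus a small lookup table (ata_pack_xfermask(), ata_xfer_tbl, ata_mode_string()). As a reading aid only, here is a minimal standalone sketch of that packing scheme; the shift and width values are assumptions inferred from the 16-entry mode-string table in the patch (PIO at bit 0, MWDMA at bit 5, UDMA at bit 8), and the sk_* names are invented for the sketch, not kernel identifiers.

```c
/* Standalone sketch of the packed xfer_mask scheme used in the patch.
 * Shift/width values below are assumptions inferred from the patch's
 * 16-entry mode-string table, not copied from the kernel headers. */
#include <stdio.h>

enum {
	SK_SHIFT_PIO   = 0, SK_BITS_PIO   = 5,	/* PIO0..PIO4     */
	SK_SHIFT_MWDMA = 5, SK_BITS_MWDMA = 3,	/* MWDMA0..MWDMA2 */
	SK_SHIFT_UDMA  = 8, SK_BITS_UDMA  = 8,	/* UDMA/16..UDMA7 */
};

/* Pack the three per-class masks into one bitmap, as ata_pack_xfermask()
 * does in the patch. */
static unsigned int sk_pack_xfermask(unsigned int pio, unsigned int mwdma,
				     unsigned int udma)
{
	return (pio   << SK_SHIFT_PIO)   |
	       (mwdma << SK_SHIFT_MWDMA) |
	       (udma  << SK_SHIFT_UDMA);
}

/* Highest set bit -> human-readable mode name, mirroring ata_mode_string(). */
static const char *sk_mode_string(unsigned int xfer_mask)
{
	static const char *const names[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4",
		"MWDMA0", "MWDMA1", "MWDMA2",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = -1;

	while (xfer_mask) {		/* poor man's fls() - 1 */
		highbit++;
		xfer_mask >>= 1;
	}
	if (highbit >= 0 && highbit < (int)(sizeof(names) / sizeof(names[0])))
		return names[highbit];
	return "<n/a>";
}

int main(void)
{
	/* pio0-4, mwdma0-2, udma0-5: the limits most entries below advertise */
	unsigned int mask = sk_pack_xfermask(0x1f, 0x07, 0x3f);

	printf("mask=0x%04x best=%s\n", mask, sk_mode_string(mask));
	return 0;
}
```

The real helpers additionally clamp each class mask with the corresponding ATA_MASK_* constant when packing; that detail is omitted here for brevity.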
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index 1c2ab3dede7..00dfdefe296 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c @@ -778,23 +778,17 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs * struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->active_tag); if (!ahci_host_intr(ap, qc)) - if (ata_ratelimit()) { - struct pci_dev *pdev = - to_pci_dev(ap->host_set->dev); - dev_printk(KERN_WARNING, &pdev->dev, + if (ata_ratelimit()) + dev_printk(KERN_WARNING, host_set->dev, "unhandled interrupt on port %u\n", i); - } VPRINTK("port %u\n", i); } else { VPRINTK("port %u (no irq)\n", i); - if (ata_ratelimit()) { - struct pci_dev *pdev = - to_pci_dev(ap->host_set->dev); - dev_printk(KERN_WARNING, &pdev->dev, + if (ata_ratelimit()) + dev_printk(KERN_WARNING, host_set->dev, "interrupt on disabled port %u\n", i); - } } irq_ack |= (1 << i); diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index c662bf53151..9327b62f97d 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c @@ -101,6 +101,8 @@ enum { ICH5_PCS = 0x92, /* port control and status */ PIIX_SCC = 0x0A, /* sub-class code register */ + PIIX_FLAG_IGNORE_PCS = (1 << 25), /* ignore PCS present bits */ + PIIX_FLAG_SCR = (1 << 26), /* SCR available */ PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */ PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */ @@ -110,24 +112,38 @@ enum { /* combined mode. if set, PATA is channel 0. * if clear, PATA is channel 1. */ - PIIX_COMB_PATA_P0 = (1 << 1), - PIIX_COMB = (1 << 2), /* combined mode enabled? */ - PIIX_PORT_ENABLED = (1 << 0), PIIX_PORT_PRESENT = (1 << 4), PIIX_80C_PRI = (1 << 5) | (1 << 4), PIIX_80C_SEC = (1 << 7) | (1 << 6), - ich5_pata = 0, - ich5_sata = 1, - piix4_pata = 2, - ich6_sata = 3, - ich6_sata_ahci = 4, + /* controller IDs */ + piix4_pata = 0, + ich5_pata = 1, + ich5_sata = 2, + esb_sata = 3, + ich6_sata = 4, + ich6_sata_ahci = 5, + ich6m_sata_ahci = 6, + + /* constants for mapping table */ + P0 = 0, /* port 0 */ + P1 = 1, /* port 1 */ + P2 = 2, /* port 2 */ + P3 = 3, /* port 3 */ + IDE = -1, /* IDE */ + NA = -2, /* not avaliable */ + RV = -3, /* reserved */ PIIX_AHCI_DEVICE = 6, }; +struct piix_map_db { + const u32 mask; + const int map[][4]; +}; + static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); @@ -149,19 +165,32 @@ static const struct pci_device_id piix_pci_tbl[] = { * list in drivers/pci/quirks.c. 
*/ + /* 82801EB (ICH5) */ { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + /* 82801EB (ICH5) */ { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, - { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, - { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + /* 6300ESB (ICH5 variant with broken PCS present bits) */ + { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata }, + /* 6300ESB pretending RAID */ + { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, esb_sata }, + /* 82801FB/FW (ICH6/ICH6W) */ { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, + /* 82801FR/FRW (ICH6R/ICH6RW) */ { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, - { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, + /* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented) */ + { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, + /* 82801GB/GR/GH (ICH7, identical to ICH6) */ { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, - { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, + /* 2801GBM/GHM (ICH7M, identical to ICH6M) */ + { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, + /* Enterprise Southbridge 2 (where's the datasheet?) */ { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, + /* SATA Controller 1 IDE (ICH8, no datasheet yet) */ { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, + /* SATA Controller 2 IDE (ICH8, ditto) */ { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, - { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci }, + /* Mobile SATA Controller IDE (ICH8M, ditto) */ + { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci }, { } /* terminate list */ }; @@ -254,7 +283,58 @@ static const struct ata_port_operations piix_sata_ops = { .host_stop = ata_host_stop, }; +static struct piix_map_db ich5_map_db = { + .mask = 0x7, + .map = { + /* PM PS SM SS MAP */ + { P0, NA, P1, NA }, /* 000b */ + { P1, NA, P0, NA }, /* 001b */ + { RV, RV, RV, RV }, + { RV, RV, RV, RV }, + { P0, P1, IDE, IDE }, /* 100b */ + { P1, P0, IDE, IDE }, /* 101b */ + { IDE, IDE, P0, P1 }, /* 110b */ + { IDE, IDE, P1, P0 }, /* 111b */ + }, +}; + +static struct piix_map_db ich6_map_db = { + .mask = 0x3, + .map = { + /* PM PS SM SS MAP */ + { P0, P1, P2, P3 }, /* 00b */ + { IDE, IDE, P1, P3 }, /* 01b */ + { P0, P2, IDE, IDE }, /* 10b */ + { RV, RV, RV, RV }, + }, +}; + +static struct piix_map_db ich6m_map_db = { + .mask = 0x3, + .map = { + /* PM PS SM SS MAP */ + { P0, P1, P2, P3 }, /* 00b */ + { RV, RV, RV, RV }, + { P0, P2, IDE, IDE }, /* 10b */ + { RV, RV, RV, RV }, + }, +}; + static struct ata_port_info piix_port_info[] = { + /* piix4_pata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SLAVE_POSS, + .pio_mask = 0x1f, /* pio0-4 */ +#if 0 + .mwdma_mask = 0x06, /* mwdma1-2 */ +#else + .mwdma_mask = 0x00, /* mwdma broken */ +#endif + .udma_mask = ATA_UDMA_MASK_40C, + .port_ops = &piix_pata_ops, + }, + /* ich5_pata */ { .sht = &piix_sht, @@ -278,43 +358,57 @@ static struct ata_port_info piix_port_info[] = { .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &piix_sata_ops, + .private_data = &ich5_map_db, }, - /* piix4_pata */ + /* i6300esb_sata */ { .sht = &piix_sht, - .host_flags = ATA_FLAG_SLAVE_POSS, + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED | + PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, .pio_mask = 0x1f, /* pio0-4 */ -#if 0 - .mwdma_mask = 0x06, /* mwdma1-2 */ -#else - .mwdma_mask = 
0x00, /* mwdma broken */ -#endif - .udma_mask = ATA_UDMA_MASK_40C, - .port_ops = &piix_pata_ops, + .mwdma_mask = 0x07, /* mwdma0-2 */ + .udma_mask = 0x7f, /* udma0-6 */ + .port_ops = &piix_sata_ops, + .private_data = &ich5_map_db, }, /* ich6_sata */ { .sht = &piix_sht, .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | - PIIX_FLAG_CHECKINTR | ATA_FLAG_SLAVE_POSS, + PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &piix_sata_ops, + .private_data = &ich6_map_db, }, /* ich6_sata_ahci */ { .sht = &piix_sht, .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | - PIIX_FLAG_CHECKINTR | ATA_FLAG_SLAVE_POSS | + PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | PIIX_FLAG_AHCI, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &piix_sata_ops, + .private_data = &ich6_map_db, + }, + + /* ich6m_sata_ahci */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 | + PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | + PIIX_FLAG_AHCI, + .pio_mask = 0x1f, /* pio0-4 */ + .mwdma_mask = 0x07, /* mwdma0-2 */ + .udma_mask = 0x7f, /* udma0-6 */ + .port_ops = &piix_sata_ops, + .private_data = &ich6m_map_db, }, }; @@ -405,44 +499,59 @@ static int piix_pata_probe_reset(struct ata_port *ap, unsigned int *classes) * piix_sata_probe - Probe PCI device for present SATA devices * @ap: Port associated with the PCI device we wish to probe * - * Reads SATA PCI device's PCI config register Port Configuration - * and Status (PCS) to determine port and device availability. + * Reads and configures SATA PCI device's PCI config register + * Port Configuration and Status (PCS) to determine port and + * device availability. * * LOCKING: * None (inherited from caller). * * RETURNS: - * Non-zero if port is enabled, it may or may not have a device - * attached in that case (PRESENT bit would only be set if BIOS probe - * was done). Zero is returned if port is disabled. + * Mask of avaliable devices on the port. */ -static int piix_sata_probe (struct ata_port *ap) +static unsigned int piix_sata_probe (struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); - int combined = (ap->flags & ATA_FLAG_SLAVE_POSS); - int orig_mask, mask, i; + const unsigned int *map = ap->host_set->private_data; + int base = 2 * ap->hard_port_no; + unsigned int present_mask = 0; + int port, i; u8 pcs; pci_read_config_byte(pdev, ICH5_PCS, &pcs); - orig_mask = (int) pcs & 0xff; - - /* TODO: this is vaguely wrong for ICH6 combined mode, - * where only two of the four SATA ports are mapped - * onto a single ATA channel. It is also vaguely inaccurate - * for ICH5, which has only two ports. However, this is ok, - * as further device presence detection code will handle - * any false positives produced here. 
- */ + DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base); - for (i = 0; i < 4; i++) { - mask = (PIIX_PORT_ENABLED << i); + /* enable all ports on this ap and wait for them to settle */ + for (i = 0; i < 2; i++) { + port = map[base + i]; + if (port >= 0) + pcs |= 1 << port; + } - if ((orig_mask & mask) == mask) - if (combined || (i == ap->hard_port_no)) - return 1; + pci_write_config_byte(pdev, ICH5_PCS, pcs); + msleep(100); + + /* let's see which devices are present */ + pci_read_config_byte(pdev, ICH5_PCS, &pcs); + + for (i = 0; i < 2; i++) { + port = map[base + i]; + if (port < 0) + continue; + if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port)) + present_mask |= 1 << i; + else + pcs &= ~(1 << port); } - return 0; + /* disable offline ports on non-AHCI controllers */ + if (!(ap->flags & PIIX_FLAG_AHCI)) + pci_write_config_byte(pdev, ICH5_PCS, pcs); + + DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n", + ap->id, pcs, present_mask); + + return present_mask; } /** @@ -666,6 +775,54 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev) return no_piix_dma; } +static void __devinit piix_init_sata_map(struct pci_dev *pdev, + struct ata_port_info *pinfo) +{ + struct piix_map_db *map_db = pinfo[0].private_data; + const unsigned int *map; + int i, invalid_map = 0; + u8 map_value; + + pci_read_config_byte(pdev, ICH5_PMR, &map_value); + + map = map_db->map[map_value & map_db->mask]; + + dev_printk(KERN_INFO, &pdev->dev, "MAP ["); + for (i = 0; i < 4; i++) { + switch (map[i]) { + case RV: + invalid_map = 1; + printk(" XX"); + break; + + case NA: + printk(" --"); + break; + + case IDE: + WARN_ON((i & 1) || map[i + 1] != IDE); + pinfo[i / 2] = piix_port_info[ich5_pata]; + i++; + printk(" IDE IDE"); + break; + + default: + printk(" P%d", map[i]); + if (i & 1) + pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS; + break; + } + } + printk(" ]\n"); + + if (invalid_map) + dev_printk(KERN_ERR, &pdev->dev, + "invalid MAP value %u\n", map_value); + + pinfo[0].private_data = (void *)map; + pinfo[1].private_data = (void *)map; +} + /** * piix_init_one - Register PIIX ATA PCI device with kernel services * @pdev: PCI device to register @@ -684,9 +841,8 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev) static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; - struct ata_port_info *port_info[2]; - unsigned int combined = 0; - unsigned int pata_chan = 0, sata_chan = 0; + struct ata_port_info port_info[2]; + struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; unsigned long host_flags; if (!printed_version++) @@ -697,10 +853,10 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (!in_module_init) return -ENODEV; - port_info[0] = &piix_port_info[ent->driver_data]; - port_info[1] = &piix_port_info[ent->driver_data]; + port_info[0] = piix_port_info[ent->driver_data]; + port_info[1] = piix_port_info[ent->driver_data]; - host_flags = port_info[0]->host_flags; + host_flags = port_info[0].host_flags; if (host_flags & PIIX_FLAG_AHCI) { u8 tmp; @@ -712,37 +868,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) } } - if (host_flags & PIIX_FLAG_COMBINED) { - u8 tmp; - pci_read_config_byte(pdev, ICH5_PMR, &tmp); - - if (host_flags & PIIX_FLAG_COMBINED_ICH6) { - switch (tmp & 0x3) { - case 0: - break; - case 1: - combined = 1; - sata_chan = 1; - break; - case 2: - combined = 1; - pata_chan = 1; - break; - case 3: - 
dev_printk(KERN_WARNING, &pdev->dev, - "invalid MAP value %u\n", tmp); - break; - } - } else { - if (tmp & PIIX_COMB) { - combined = 1; - if (tmp & PIIX_COMB_PATA_P0) - sata_chan = 1; - else - pata_chan = 1; - } - } - } + /* Initialize SATA map */ + if (host_flags & ATA_FLAG_SATA) + piix_init_sata_map(pdev, port_info); /* On ICH5, some BIOSen disable the interrupt using the * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. @@ -753,25 +881,16 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (host_flags & PIIX_FLAG_CHECKINTR) pci_intx(pdev, 1); - if (combined) { - port_info[sata_chan] = &piix_port_info[ent->driver_data]; - port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS; - port_info[pata_chan] = &piix_port_info[ich5_pata]; - - dev_printk(KERN_WARNING, &pdev->dev, - "combined mode detected (p=%u, s=%u)\n", - pata_chan, sata_chan); - } if (piix_check_450nx_errata(pdev)) { /* This writes into the master table but it does not really matter for this errata as we will apply it to all the PIIX devices on the board */ - port_info[0]->mwdma_mask = 0; - port_info[0]->udma_mask = 0; - port_info[1]->mwdma_mask = 0; - port_info[1]->udma_mask = 0; + port_info[0].mwdma_mask = 0; + port_info[0].udma_mask = 0; + port_info[1].mwdma_mask = 0; + port_info[1].udma_mask = 0; } - return ata_pci_init_one(pdev, port_info, 2); + return ata_pci_init_one(pdev, ppinfo, 2); } static int __init piix_init(void) diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 9132698d29b..5060a1a1ad2 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -65,12 +65,9 @@ static unsigned int ata_dev_init_params(struct ata_port *ap, struct ata_device *dev); static void ata_set_mode(struct ata_port *ap); static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); -static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); -static int fgb(u32 bitmap); -static int ata_choose_xfer_mode(const struct ata_port *ap, - u8 *xfer_mode_out, - unsigned int *xfer_shift_out); static void ata_pio_error(struct ata_port *ap); +static unsigned int ata_dev_xfermask(struct ata_port *ap, + struct ata_device *dev); static unsigned int ata_unique_id = 1; static struct workqueue_struct *ata_wq; @@ -232,58 +229,148 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc) return -1; } -static const char * const xfer_mode_str[] = { - "UDMA/16", - "UDMA/25", - "UDMA/33", - "UDMA/44", - "UDMA/66", - "UDMA/100", - "UDMA/133", - "UDMA7", - "MWDMA0", - "MWDMA1", - "MWDMA2", - "PIO0", - "PIO1", - "PIO2", - "PIO3", - "PIO4", +/** + * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask + * @pio_mask: pio_mask + * @mwdma_mask: mwdma_mask + * @udma_mask: udma_mask + * + * Pack @pio_mask, @mwdma_mask and @udma_mask into a single + * unsigned int xfer_mask. + * + * LOCKING: + * None. + * + * RETURNS: + * Packed xfer_mask. 
+ */ +static unsigned int ata_pack_xfermask(unsigned int pio_mask, + unsigned int mwdma_mask, + unsigned int udma_mask) +{ + return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | + ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | + ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); +} + +static const struct ata_xfer_ent { + unsigned int shift, bits; + u8 base; +} ata_xfer_tbl[] = { + { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 }, + { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 }, + { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 }, + { -1, }, }; /** - * ata_udma_string - convert UDMA bit offset to string - * @mask: mask of bits supported; only highest bit counts. + * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask + * @xfer_mask: xfer_mask of interest * - * Determine string which represents the highest speed - * (highest bit in @udma_mask). + * Return matching XFER_* value for @xfer_mask. Only the highest + * bit of @xfer_mask is considered. * * LOCKING: * None. * * RETURNS: - * Constant C string representing highest speed listed in - * @udma_mask, or the constant C string "<n/a>". + * Matching XFER_* value, 0 if no match found. */ +static u8 ata_xfer_mask2mode(unsigned int xfer_mask) +{ + int highbit = fls(xfer_mask) - 1; + const struct ata_xfer_ent *ent; -static const char *ata_mode_string(unsigned int mask) + for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) + if (highbit >= ent->shift && highbit < ent->shift + ent->bits) + return ent->base + highbit - ent->shift; + return 0; +} + +/** + * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* + * @xfer_mode: XFER_* of interest + * + * Return matching xfer_mask for @xfer_mode. + * + * LOCKING: + * None. + * + * RETURNS: + * Matching xfer_mask, 0 if no match found. + */ +static unsigned int ata_xfer_mode2mask(u8 xfer_mode) { - int i; + const struct ata_xfer_ent *ent; - for (i = 7; i >= 0; i--) - if (mask & (1 << i)) - goto out; - for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--) - if (mask & (1 << i)) - goto out; - for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--) - if (mask & (1 << i)) - goto out; + for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) + if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) + return 1 << (ent->shift + xfer_mode - ent->base); + return 0; +} - return "<n/a>"; +/** + * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* + * @xfer_mode: XFER_* of interest + * + * Return matching xfer_shift for @xfer_mode. + * + * LOCKING: + * None. + * + * RETURNS: + * Matching xfer_shift, -1 if no match found. + */ +static int ata_xfer_mode2shift(unsigned int xfer_mode) +{ + const struct ata_xfer_ent *ent; -out: - return xfer_mode_str[i]; + for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) + if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) + return ent->shift; + return -1; +} + +/** + * ata_mode_string - convert xfer_mask to string + * @xfer_mask: mask of bits supported; only highest bit counts. + * + * Determine string which represents the highest speed + * (highest bit in @modemask). + * + * LOCKING: + * None. + * + * RETURNS: + * Constant C string representing highest speed listed in + * @mode_mask, or the constant C string "<n/a>". 
+ */ +static const char *ata_mode_string(unsigned int xfer_mask) +{ + static const char * const xfer_mode_str[] = { + "PIO0", + "PIO1", + "PIO2", + "PIO3", + "PIO4", + "MWDMA0", + "MWDMA1", + "MWDMA2", + "UDMA/16", + "UDMA/25", + "UDMA/33", + "UDMA/44", + "UDMA/66", + "UDMA/100", + "UDMA/133", + "UDMA7", + }; + int highbit; + + highbit = fls(xfer_mask) - 1; + if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str)) + return xfer_mode_str[highbit]; + return "<n/a>"; } /** @@ -693,69 +780,104 @@ static inline void ata_dump_id(const u16 *id) id[93]); } -/* - * Compute the PIO modes available for this device. This is not as - * trivial as it seems if we must consider early devices correctly. +/** + * ata_id_xfermask - Compute xfermask from the given IDENTIFY data + * @id: IDENTIFY data to compute xfer mask from * - * FIXME: pre IDE drive timing (do we care ?). + * Compute the xfermask for this device. This is not as trivial + * as it seems if we must consider early devices correctly. + * + * FIXME: pre IDE drive timing (do we care ?). + * + * LOCKING: + * None. + * + * RETURNS: + * Computed xfermask */ - -static unsigned int ata_pio_modes(const struct ata_device *adev) +static unsigned int ata_id_xfermask(const u16 *id) { - u16 modes; + unsigned int pio_mask, mwdma_mask, udma_mask; /* Usual case. Word 53 indicates word 64 is valid */ - if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) { - modes = adev->id[ATA_ID_PIO_MODES] & 0x03; - modes <<= 3; - modes |= 0x7; - return modes; + if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { + pio_mask = id[ATA_ID_PIO_MODES] & 0x03; + pio_mask <<= 3; + pio_mask |= 0x7; + } else { + /* If word 64 isn't valid then Word 51 high byte holds + * the PIO timing number for the maximum. Turn it into + * a mask. + */ + pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ; + + /* But wait.. there's more. Design your standards by + * committee and you too can get a free iordy field to + * process. However its the speeds not the modes that + * are supported... Note drivers using the timing API + * will get this right anyway + */ } - /* If word 64 isn't valid then Word 51 high byte holds the PIO timing - number for the maximum. Turn it into a mask and return it */ - modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ; - return modes; - /* But wait.. there's more. Design your standards by committee and - you too can get a free iordy field to process. However its the - speeds not the modes that are supported... Note drivers using the - timing API will get this right anyway */ -} + mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07; + udma_mask = id[ATA_ID_UDMA_MODES] & 0xff; -static inline void -ata_queue_pio_task(struct ata_port *ap) -{ - if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK)) - queue_work(ata_wq, &ap->pio_task); + return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); } -static inline void -ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay) +/** + * ata_port_queue_task - Queue port_task + * @ap: The ata_port to queue port_task for + * + * Schedule @fn(@data) for execution after @delay jiffies using + * port_task. There is one port_task per port and it's the + * user(low level driver)'s responsibility to make sure that only + * one task is active at any given time. + * + * libata core layer takes care of synchronization between + * port_task and EH. ata_port_queue_task() may be ignored for EH + * synchronization. + * + * LOCKING: + * Inherited from caller. 
+ */ +void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, + unsigned long delay) { - if (!(ap->flags & ATA_FLAG_FLUSH_PIO_TASK)) - queue_delayed_work(ata_wq, &ap->pio_task, delay); + int rc; + + if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK) + return; + + PREPARE_WORK(&ap->port_task, fn, data); + + if (!delay) + rc = queue_work(ata_wq, &ap->port_task); + else + rc = queue_delayed_work(ata_wq, &ap->port_task, delay); + + /* rc == 0 means that another user is using port task */ + WARN_ON(rc == 0); } /** - * ata_flush_pio_tasks - Flush pio_task - * @ap: the target ata_port + * ata_port_flush_task - Flush port_task + * @ap: The ata_port to flush port_task for * - * After this function completes, pio_task is - * guranteed not to be running or scheduled. + * After this function completes, port_task is guranteed not to + * be running or scheduled. * * LOCKING: * Kernel thread context (may sleep) */ - -static void ata_flush_pio_tasks(struct ata_port *ap) +void ata_port_flush_task(struct ata_port *ap) { - int tmp = 0; unsigned long flags; DPRINTK("ENTER\n"); spin_lock_irqsave(&ap->host_set->lock, flags); - ap->flags |= ATA_FLAG_FLUSH_PIO_TASK; + ap->flags |= ATA_FLAG_FLUSH_PORT_TASK; spin_unlock_irqrestore(&ap->host_set->lock, flags); DPRINTK("flush #1\n"); @@ -766,14 +888,13 @@ static void ata_flush_pio_tasks(struct ata_port *ap) * the FLUSH flag; thus, it will never queue pio tasks again. * Cancel and flush. */ - tmp |= cancel_delayed_work(&ap->pio_task); - if (!tmp) { + if (!cancel_delayed_work(&ap->port_task)) { DPRINTK("flush #2\n"); flush_workqueue(ata_wq); } spin_lock_irqsave(&ap->host_set->lock, flags); - ap->flags &= ~ATA_FLAG_FLUSH_PIO_TASK; + ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK; spin_unlock_irqrestore(&ap->host_set->lock, flags); DPRINTK("EXIT\n"); @@ -904,7 +1025,7 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) * @dev: target device * @p_class: pointer to class of the target device (may be changed) * @post_reset: is this read ID post-reset? - * @id: buffer to fill IDENTIFY page into + * @p_id: read IDENTIFY page (newly allocated) * * Read ID data from the specified device. ATA_CMD_ID_ATA is * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI @@ -919,12 +1040,13 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) * 0 on success, -errno otherwise. 
*/ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, - unsigned int *p_class, int post_reset, u16 *id) + unsigned int *p_class, int post_reset, u16 **p_id) { unsigned int class = *p_class; unsigned int using_edd; struct ata_taskfile tf; unsigned int err_mask = 0; + u16 *id; const char *reason; int rc; @@ -938,6 +1060,13 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ + id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL); + if (id == NULL) { + rc = -ENOMEM; + reason = "out of memory"; + goto err_out; + } + retry: ata_tf_init(ap, &tf, dev->devno); @@ -1028,53 +1157,59 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, } *p_class = class; + *p_id = id; return 0; err_out: printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n", ap->id, dev->devno, reason); + kfree(id); return rc; } +static inline u8 ata_dev_knobble(const struct ata_port *ap, + struct ata_device *dev) +{ + return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); +} + /** - * ata_dev_identify - obtain IDENTIFY x DEVICE page - * @ap: port on which device we wish to probe resides - * @device: device bus address, starting at zero - * - * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE - * command, and read back the 512-byte device information page. - * The device information page is fed to us via the standard - * PIO-IN protocol, but we hand-code it here. (TODO: investigate - * using standard PIO-IN paths) - * - * After reading the device information page, we use several - * bits of information from it to initialize data structures - * that will be used during the lifetime of the ata_device. - * Other data from the info page is used to disqualify certain - * older ATA devices we do not wish to support. + * ata_dev_configure - Configure the specified ATA/ATAPI device + * @ap: Port on which target device resides + * @dev: Target device to configure + * @print_info: Enable device info printout + * + * Configure @dev according to @dev->id. Generic and low-level + * driver specific fixups are also applied. * * LOCKING: - * Inherited from caller. Some functions called by this function - * obtain the host_set lock. 
+ * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, -errno otherwise */ - -static void ata_dev_identify(struct ata_port *ap, unsigned int device) +static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, + int print_info) { - struct ata_device *dev = &ap->device[device]; - unsigned long xfer_modes; + unsigned int xfer_mask; int i, rc; if (!ata_dev_present(dev)) { DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", - ap->id, device); - return; + ap->id, dev->devno); + return 0; } - DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); + DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno); - rc = ata_dev_read_id(ap, dev, &dev->class, 1, dev->id); - if (rc) - goto err_out; + /* initialize to-be-configured parameters */ + dev->flags = 0; + dev->max_sectors = 0; + dev->cdb_len = 0; + dev->n_sectors = 0; + dev->cylinders = 0; + dev->heads = 0; + dev->sectors = 0; /* * common ATA, ATAPI feature tests @@ -1083,15 +1218,12 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device) /* we require DMA support (bits 8 of word 49) */ if (!ata_id_has_dma(dev->id)) { printk(KERN_DEBUG "ata%u: no dma\n", ap->id); + rc = -EINVAL; goto err_out_nosup; } - /* quick-n-dirty find max transfer mode; for printk only */ - xfer_modes = dev->id[ATA_ID_UDMA_MODES]; - if (!xfer_modes) - xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA; - if (!xfer_modes) - xfer_modes = ata_pio_modes(dev); + /* find max transfer mode; for printk only */ + xfer_mask = ata_id_xfermask(dev->id); ata_dump_id(dev->id); @@ -1100,19 +1232,25 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device) dev->n_sectors = ata_id_n_sectors(dev->id); if (ata_id_has_lba(dev->id)) { - dev->flags |= ATA_DFLAG_LBA; + const char *lba_desc; - if (ata_id_has_lba48(dev->id)) + lba_desc = "LBA"; + dev->flags |= ATA_DFLAG_LBA; + if (ata_id_has_lba48(dev->id)) { dev->flags |= ATA_DFLAG_LBA48; + lba_desc = "LBA48"; + } /* print device info to dmesg */ - printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", - ap->id, device, - ata_id_major_version(dev->id), - ata_mode_string(xfer_modes), - (unsigned long long)dev->n_sectors, - dev->flags & ATA_DFLAG_LBA48 ? 
" LBA48" : " LBA"); - } else { + if (print_info) + printk(KERN_INFO "ata%u: dev %u ATA-%d, " + "max %s, %Lu sectors: %s\n", + ap->id, dev->devno, + ata_id_major_version(dev->id), + ata_mode_string(xfer_mask), + (unsigned long long)dev->n_sectors, + lba_desc); + } else { /* CHS */ /* Default translation */ @@ -1128,13 +1266,14 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device) } /* print device info to dmesg */ - printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n", - ap->id, device, - ata_id_major_version(dev->id), - ata_mode_string(xfer_modes), - (unsigned long long)dev->n_sectors, - (int)dev->cylinders, (int)dev->heads, (int)dev->sectors); - + if (print_info) + printk(KERN_INFO "ata%u: dev %u ATA-%d, " + "max %s, %Lu sectors: CHS %u/%u/%u\n", + ap->id, dev->devno, + ata_id_major_version(dev->id), + ata_mode_string(xfer_mask), + (unsigned long long)dev->n_sectors, + dev->cylinders, dev->heads, dev->sectors); } if (dev->id[59] & 0x100) { @@ -1150,6 +1289,7 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device) rc = atapi_cdb_len(dev->id); if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); + rc = -EINVAL; goto err_out_nosup; } dev->cdb_len = (unsigned int) rc; @@ -1158,9 +1298,9 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device) dev->flags |= ATA_DFLAG_CDB_INTR; /* print device info to dmesg */ - printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", - ap->id, device, - ata_mode_string(xfer_modes)); + if (print_info) + printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", + ap->id, dev->devno, ata_mode_string(xfer_mask)); } ap->host->max_cmd_len = 0; @@ -1169,44 +1309,26 @@ static void ata_dev_identify(struct ata_port *ap, unsigned int device) ap->host->max_cmd_len, ap->device[i].cdb_len); - DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); - return; - -err_out_nosup: - printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", - ap->id, device); -err_out: - dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */ - DPRINTK("EXIT, err\n"); -} - - -static inline u8 ata_dev_knobble(const struct ata_port *ap, - struct ata_device *dev) -{ - return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); -} - -/** - * ata_dev_config - Run device specific handlers & check for SATA->PATA bridges - * @ap: Bus - * @i: Device - * - * LOCKING: - */ - -void ata_dev_config(struct ata_port *ap, unsigned int i) -{ /* limit bridge transfers to udma5, 200 sectors */ - if (ata_dev_knobble(ap, &ap->device[i])) { - printk(KERN_INFO "ata%u(%u): applying bridge limits\n", - ap->id, i); + if (ata_dev_knobble(ap, dev)) { + if (print_info) + printk(KERN_INFO "ata%u(%u): applying bridge limits\n", + ap->id, dev->devno); ap->udma_mask &= ATA_UDMA5; - ap->device[i].max_sectors = ATA_MAX_SECTORS; + dev->max_sectors = ATA_MAX_SECTORS; } if (ap->ops->dev_config) - ap->ops->dev_config(ap, &ap->device[i]); + ap->ops->dev_config(ap, dev); + + DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); + return 0; + +err_out_nosup: + printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", + ap->id, dev->devno); + DPRINTK("EXIT, err\n"); + return rc; } /** @@ -1226,41 +1348,61 @@ void ata_dev_config(struct ata_port *ap, unsigned int i) static int ata_bus_probe(struct ata_port *ap) { - unsigned int i, found = 0; + unsigned int classes[ATA_MAX_DEVICES]; + unsigned int i, rc, found = 0; - if (ap->ops->probe_reset) { - unsigned int classes[ATA_MAX_DEVICES]; - int rc; + 
ata_port_probe(ap); - ata_port_probe(ap); + /* reset */ + if (ap->ops->probe_reset) { + for (i = 0; i < ATA_MAX_DEVICES; i++) + classes[i] = ATA_DEV_UNKNOWN; rc = ap->ops->probe_reset(ap, classes); - if (rc == 0) { - for (i = 0; i < ATA_MAX_DEVICES; i++) { - if (classes[i] == ATA_DEV_UNKNOWN) - classes[i] = ATA_DEV_NONE; - ap->device[i].class = classes[i]; - } - } else { - printk(KERN_ERR "ata%u: probe reset failed, " - "disabling port\n", ap->id); - ata_port_disable(ap); + if (rc) { + printk("ata%u: reset failed (errno=%d)\n", ap->id, rc); + return rc; } - } else + + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (classes[i] == ATA_DEV_UNKNOWN) + classes[i] = ATA_DEV_NONE; + } else { ap->ops->phy_reset(ap); - if (ap->flags & ATA_FLAG_PORT_DISABLED) - goto err_out; + for (i = 0; i < ATA_MAX_DEVICES; i++) { + if (!(ap->flags & ATA_FLAG_PORT_DISABLED)) + classes[i] = ap->device[i].class; + else + ap->device[i].class = ATA_DEV_UNKNOWN; + } + ata_port_probe(ap); + } + /* read IDENTIFY page and configure devices */ for (i = 0; i < ATA_MAX_DEVICES; i++) { - ata_dev_identify(ap, i); - if (ata_dev_present(&ap->device[i])) { - found = 1; - ata_dev_config(ap,i); + struct ata_device *dev = &ap->device[i]; + + dev->class = classes[i]; + + if (!ata_dev_present(dev)) + continue; + + WARN_ON(dev->id != NULL); + if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) { + dev->class = ATA_DEV_NONE; + continue; + } + + if (ata_dev_configure(ap, dev, 1)) { + dev->class++; /* disable device */ + continue; } + + found = 1; } - if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + if (!found) goto err_out_disable; ata_set_mode(ap); @@ -1271,7 +1413,6 @@ static int ata_bus_probe(struct ata_port *ap) err_out_disable: ap->ops->port_disable(ap); -err_out: return -1; } @@ -1567,31 +1708,8 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, return 0; } -static const struct { - unsigned int shift; - u8 base; -} xfer_mode_classes[] = { - { ATA_SHIFT_UDMA, XFER_UDMA_0 }, - { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 }, - { ATA_SHIFT_PIO, XFER_PIO_0 }, -}; - -static u8 base_from_shift(unsigned int shift) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) - if (xfer_mode_classes[i].shift == shift) - return xfer_mode_classes[i].base; - - return 0xff; -} - static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) { - int ofs, idx; - u8 base; - if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) return; @@ -1600,65 +1718,58 @@ static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) ata_dev_set_xfermode(ap, dev); - base = base_from_shift(dev->xfer_shift); - ofs = dev->xfer_mode - base; - idx = ofs + dev->xfer_shift; - WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str)); + if (ata_dev_revalidate(ap, dev, 0)) { + printk(KERN_ERR "ata%u: failed to revalidate after set " + "xfermode, disabled\n", ap->id); + ata_port_disable(ap); + } - DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n", - idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs); + DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", + dev->xfer_shift, (int)dev->xfer_mode); printk(KERN_INFO "ata%u: dev %u configured for %s\n", - ap->id, dev->devno, xfer_mode_str[idx]); + ap->id, dev->devno, + ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode))); } static int ata_host_set_pio(struct ata_port *ap) { - unsigned int mask; - int x, i; - u8 base, xfer_mode; - - mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO); - x = fgb(mask); - if (x < 0) { - printk(KERN_WARNING "ata%u: no PIO support\n", ap->id); - 
return -1; - } - - base = base_from_shift(ATA_SHIFT_PIO); - xfer_mode = base + x; - - DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n", - (int)base, (int)xfer_mode, mask, x); + int i; for (i = 0; i < ATA_MAX_DEVICES; i++) { struct ata_device *dev = &ap->device[i]; - if (ata_dev_present(dev)) { - dev->pio_mode = xfer_mode; - dev->xfer_mode = xfer_mode; - dev->xfer_shift = ATA_SHIFT_PIO; - if (ap->ops->set_piomode) - ap->ops->set_piomode(ap, dev); + + if (!ata_dev_present(dev)) + continue; + + if (!dev->pio_mode) { + printk(KERN_WARNING "ata%u: no PIO support\n", ap->id); + return -1; } + + dev->xfer_mode = dev->pio_mode; + dev->xfer_shift = ATA_SHIFT_PIO; + if (ap->ops->set_piomode) + ap->ops->set_piomode(ap, dev); } return 0; } -static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode, - unsigned int xfer_shift) +static void ata_host_set_dma(struct ata_port *ap) { int i; for (i = 0; i < ATA_MAX_DEVICES; i++) { struct ata_device *dev = &ap->device[i]; - if (ata_dev_present(dev)) { - dev->dma_mode = xfer_mode; - dev->xfer_mode = xfer_mode; - dev->xfer_shift = xfer_shift; - if (ap->ops->set_dmamode) - ap->ops->set_dmamode(ap, dev); - } + + if (!ata_dev_present(dev) || !dev->dma_mode) + continue; + + dev->xfer_mode = dev->dma_mode; + dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); + if (ap->ops->set_dmamode) + ap->ops->set_dmamode(ap, dev); } } @@ -1673,28 +1784,34 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode, */ static void ata_set_mode(struct ata_port *ap) { - unsigned int xfer_shift; - u8 xfer_mode; - int rc; + int i, rc; - /* step 1: always set host PIO timings */ - rc = ata_host_set_pio(ap); - if (rc) - goto err_out; + /* step 1: calculate xfer_mask */ + for (i = 0; i < ATA_MAX_DEVICES; i++) { + struct ata_device *dev = &ap->device[i]; + unsigned int xfer_mask; + + if (!ata_dev_present(dev)) + continue; + + xfer_mask = ata_dev_xfermask(ap, dev); - /* step 2: choose the best data xfer mode */ - xfer_mode = xfer_shift = 0; - rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift); + dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO); + dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA | + ATA_MASK_UDMA)); + } + + /* step 2: always set host PIO timings */ + rc = ata_host_set_pio(ap); if (rc) goto err_out; - /* step 3: if that xfer mode isn't PIO, set host DMA timings */ - if (xfer_shift != ATA_SHIFT_PIO) - ata_host_set_dma(ap, xfer_mode, xfer_shift); + /* step 3: set host DMA timings */ + ata_host_set_dma(ap); /* step 4: update devices' xfer mode */ - ata_dev_set_mode(ap, &ap->device[0]); - ata_dev_set_mode(ap, &ap->device[1]); + for (i = 0; i < ATA_MAX_DEVICES; i++) + ata_dev_set_mode(ap, &ap->device[i]); if (ap->flags & ATA_FLAG_PORT_DISABLED) return; @@ -2325,11 +2442,118 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit, return rc; } -static void ata_pr_blacklisted(const struct ata_port *ap, - const struct ata_device *dev) +/** + * ata_dev_same_device - Determine whether new ID matches configured device + * @ap: port on which the device to compare against resides + * @dev: device to compare against + * @new_class: class of the new device + * @new_id: IDENTIFY page of the new device + * + * Compare @new_class and @new_id against @dev and determine + * whether @dev is the device indicated by @new_class and + * @new_id. + * + * LOCKING: + * None. + * + * RETURNS: + * 1 if @dev matches @new_class and @new_id, 0 otherwise. 
+ */ +static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev, + unsigned int new_class, const u16 *new_id) +{ + const u16 *old_id = dev->id; + unsigned char model[2][41], serial[2][21]; + u64 new_n_sectors; + + if (dev->class != new_class) { + printk(KERN_INFO + "ata%u: dev %u class mismatch %d != %d\n", + ap->id, dev->devno, dev->class, new_class); + return 0; + } + + ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0])); + ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1])); + ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0])); + ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1])); + new_n_sectors = ata_id_n_sectors(new_id); + + if (strcmp(model[0], model[1])) { + printk(KERN_INFO + "ata%u: dev %u model number mismatch '%s' != '%s'\n", + ap->id, dev->devno, model[0], model[1]); + return 0; + } + + if (strcmp(serial[0], serial[1])) { + printk(KERN_INFO + "ata%u: dev %u serial number mismatch '%s' != '%s'\n", + ap->id, dev->devno, serial[0], serial[1]); + return 0; + } + + if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) { + printk(KERN_INFO + "ata%u: dev %u n_sectors mismatch %llu != %llu\n", + ap->id, dev->devno, (unsigned long long)dev->n_sectors, + (unsigned long long)new_n_sectors); + return 0; + } + + return 1; +} + +/** + * ata_dev_revalidate - Revalidate ATA device + * @ap: port on which the device to revalidate resides + * @dev: device to revalidate + * @post_reset: is this revalidation after reset? + * + * Re-read IDENTIFY page and make sure @dev is still attached to + * the port. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * 0 on success, negative errno otherwise + */ +int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, + int post_reset) { - printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", - ap->id, dev->devno); + unsigned int class; + u16 *id; + int rc; + + if (!ata_dev_present(dev)) + return -ENODEV; + + class = dev->class; + id = NULL; + + /* allocate & read ID data */ + rc = ata_dev_read_id(ap, dev, &class, post_reset, &id); + if (rc) + goto fail; + + /* is the device still there? 
*/ + if (!ata_dev_same_device(ap, dev, class, id)) { + rc = -ENODEV; + goto fail; + } + + kfree(dev->id); + dev->id = id; + + /* configure device according to the new ID */ + return ata_dev_configure(ap, dev, 0); + + fail: + printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n", + ap->id, dev->devno, rc); + kfree(id); + return rc; } static const char * const ata_dma_blacklist [] = { @@ -2378,128 +2602,45 @@ static int ata_dma_blacklisted(const struct ata_device *dev) return 0; } -static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift) -{ - const struct ata_device *master, *slave; - unsigned int mask; - - master = &ap->device[0]; - slave = &ap->device[1]; - - WARN_ON(!ata_dev_present(master) && !ata_dev_present(slave)); - - if (shift == ATA_SHIFT_UDMA) { - mask = ap->udma_mask; - if (ata_dev_present(master)) { - mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); - if (ata_dma_blacklisted(master)) { - mask = 0; - ata_pr_blacklisted(ap, master); - } - } - if (ata_dev_present(slave)) { - mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); - if (ata_dma_blacklisted(slave)) { - mask = 0; - ata_pr_blacklisted(ap, slave); - } - } - } - else if (shift == ATA_SHIFT_MWDMA) { - mask = ap->mwdma_mask; - if (ata_dev_present(master)) { - mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07); - if (ata_dma_blacklisted(master)) { - mask = 0; - ata_pr_blacklisted(ap, master); - } - } - if (ata_dev_present(slave)) { - mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07); - if (ata_dma_blacklisted(slave)) { - mask = 0; - ata_pr_blacklisted(ap, slave); - } - } - } - else if (shift == ATA_SHIFT_PIO) { - mask = ap->pio_mask; - if (ata_dev_present(master)) { - /* spec doesn't return explicit support for - * PIO0-2, so we fake it - */ - u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03; - tmp_mode <<= 3; - tmp_mode |= 0x7; - mask &= tmp_mode; - } - if (ata_dev_present(slave)) { - /* spec doesn't return explicit support for - * PIO0-2, so we fake it - */ - u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03; - tmp_mode <<= 3; - tmp_mode |= 0x7; - mask &= tmp_mode; - } - } - else { - mask = 0xffffffff; /* shut up compiler warning */ - BUG(); - } - - return mask; -} - -/* find greatest bit */ -static int fgb(u32 bitmap) -{ - unsigned int i; - int x = -1; - - for (i = 0; i < 32; i++) - if (bitmap & (1 << i)) - x = i; - - return x; -} - /** - * ata_choose_xfer_mode - attempt to find best transfer mode - * @ap: Port for which an xfer mode will be selected - * @xfer_mode_out: (output) SET FEATURES - XFER MODE code - * @xfer_shift_out: (output) bit shift that selects this mode + * ata_dev_xfermask - Compute supported xfermask of the given device + * @ap: Port on which the device to compute xfermask for resides + * @dev: Device to compute xfermask for * - * Based on host and device capabilities, determine the - * maximum transfer mode that is amenable to all. + * Compute supported xfermask of @dev. This function is + * responsible for applying all known limits including host + * controller limits, device blacklist, etc... * * LOCKING: - * PCI/etc. bus probe sem. + * None. * * RETURNS: - * Zero on success, negative on error. + * Computed xfermask. 
*/ - -static int ata_choose_xfer_mode(const struct ata_port *ap, - u8 *xfer_mode_out, - unsigned int *xfer_shift_out) +static unsigned int ata_dev_xfermask(struct ata_port *ap, + struct ata_device *dev) { - unsigned int mask, shift; - int x, i; + unsigned long xfer_mask; + int i; - for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) { - shift = xfer_mode_classes[i].shift; - mask = ata_get_mode_mask(ap, shift); + xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, + ap->udma_mask); - x = fgb(mask); - if (x >= 0) { - *xfer_mode_out = xfer_mode_classes[i].base + x; - *xfer_shift_out = shift; - return 0; - } + /* use port-wide xfermask for now */ + for (i = 0; i < ATA_MAX_DEVICES; i++) { + struct ata_device *d = &ap->device[i]; + if (!ata_dev_present(d)) + continue; + xfer_mask &= ata_id_xfermask(d->id); + if (ata_dma_blacklisted(d)) + xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); } - return -1; + if (ata_dma_blacklisted(dev)) + printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, " + "disabling DMA\n", ap->id, dev->devno); + + return xfer_mask; } /** @@ -3667,12 +3808,84 @@ fsm_start: } if (timeout) - ata_queue_delayed_pio_task(ap, timeout); - else if (has_next) + ata_port_queue_task(ap, ata_pio_task, ap, timeout); + else if (!qc_completed) goto fsm_start; } /** + * atapi_packet_task - Write CDB bytes to hardware + * @_data: Port to which ATAPI device is attached. + * + * When device has indicated its readiness to accept + * a CDB, this function is called. Send the CDB. + * If DMA is to be performed, exit immediately. + * Otherwise, we are in polling mode, so poll + * status under operation succeeds or fails. + * + * LOCKING: + * Kernel thread context (may sleep) + */ + +static void atapi_packet_task(void *_data) +{ + struct ata_port *ap = _data; + struct ata_queued_cmd *qc; + u8 status; + + qc = ata_qc_from_tag(ap, ap->active_tag); + WARN_ON(qc == NULL); + WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { + qc->err_mask |= AC_ERR_TIMEOUT; + goto err_out; + } + + /* make sure DRQ is set */ + status = ata_chk_status(ap); + if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { + qc->err_mask |= AC_ERR_HSM; + goto err_out; + } + + /* send SCSI cdb */ + DPRINTK("send cdb\n"); + WARN_ON(qc->dev->cdb_len < 12); + + if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || + qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { + unsigned long flags; + + /* Once we're done issuing command and kicking bmdma, + * irq handler takes over. To not lose irq, we need + * to clear NOINTR flag before sending cdb, but + * interrupt handler shouldn't be invoked before we're + * finished. Hence, the following locking. 
+ */ + spin_lock_irqsave(&ap->host_set->lock, flags); + ap->flags &= ~ATA_FLAG_NOINTR; + ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); + if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) + ap->ops->bmdma_start(qc); /* initiate bmdma */ + spin_unlock_irqrestore(&ap->host_set->lock, flags); + } else { + ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1); + + /* PIO commands are handled by polling */ + ap->hsm_task_state = HSM_ST; + ata_port_queue_task(ap, ata_pio_task, ap, 0); + } + + return; + +err_out: + ata_poll_qc_complete(qc); +} + +/** * ata_qc_timeout - Handle timeout of queued command * @qc: Command that timed out * @@ -3700,7 +3913,6 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) DPRINTK("ENTER\n"); - ata_flush_pio_tasks(ap); ap->hsm_task_state = HSM_ST_IDLE; spin_lock_irqsave(&host_set->lock, flags); @@ -4010,7 +4222,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) if (qc->tf.flags & ATA_TFLAG_WRITE) { /* PIO data out protocol */ ap->hsm_task_state = HSM_ST_FIRST; - ata_queue_pio_task(ap); + ata_port_queue_task(ap, ata_pio_task, ap, 0); /* always send first data block using * the ata_pio_task() codepath. @@ -4020,7 +4232,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) ap->hsm_task_state = HSM_ST; if (qc->tf.flags & ATA_TFLAG_POLLING) - ata_queue_pio_task(ap); + ata_port_queue_task(ap, ata_pio_task, ap, 0); /* if polling, ata_pio_task() handles the rest. * otherwise, interrupt handler takes over from here. @@ -4041,7 +4253,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) /* send cdb by polling if no cdb interrupt */ if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || (qc->tf.flags & ATA_TFLAG_POLLING)) - ata_queue_pio_task(ap); + ata_port_queue_task(ap, atapi_packet_task, ap, 0); break; case ATA_PROT_ATAPI_DMA: @@ -4053,7 +4265,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) /* send cdb by polling if no cdb interrupt */ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - ata_queue_pio_task(ap); + ata_port_queue_task(ap, atapi_packet_task, ap, 0); break; default: @@ -4533,6 +4745,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) return IRQ_RETVAL(handled); } + /* * Execute a 'simple' command, that only consists of the opcode 'cmd' itself, * without filling any other registers @@ -4752,7 +4965,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, ap->active_tag = ATA_TAG_POISON; ap->last_ctl = 0xFF; - INIT_WORK(&ap->pio_task, ata_pio_task, ap); + INIT_WORK(&ap->port_task, NULL, NULL); INIT_LIST_HEAD(&ap->eh_done_q); for (i = 0; i < ATA_MAX_DEVICES; i++) @@ -5007,11 +5220,14 @@ void ata_host_set_remove(struct ata_host_set *host_set) int ata_scsi_release(struct Scsi_Host *host) { struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; + int i; DPRINTK("ENTER\n"); ap->ops->port_disable(ap); ata_host_remove(ap, 0); + for (i = 0; i < ATA_MAX_DEVICES; i++) + kfree(ap->device[i].id); DPRINTK("EXIT\n"); return 1; @@ -5215,9 +5431,11 @@ EXPORT_SYMBOL_GPL(sata_std_hardreset); EXPORT_SYMBOL_GPL(ata_std_postreset); EXPORT_SYMBOL_GPL(ata_std_probe_reset); EXPORT_SYMBOL_GPL(ata_drive_probe_reset); +EXPORT_SYMBOL_GPL(ata_dev_revalidate); EXPORT_SYMBOL_GPL(ata_port_disable); EXPORT_SYMBOL_GPL(ata_ratelimit); EXPORT_SYMBOL_GPL(ata_busy_sleep); +EXPORT_SYMBOL_GPL(ata_port_queue_task); EXPORT_SYMBOL_GPL(ata_scsi_ioctl); EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); EXPORT_SYMBOL_GPL(ata_scsi_timed_out); @@ -5228,7 +5446,6 @@ EXPORT_SYMBOL_GPL(ata_host_intr); EXPORT_SYMBOL_GPL(ata_dev_classify); 
EXPORT_SYMBOL_GPL(ata_id_string); EXPORT_SYMBOL_GPL(ata_id_c_string); -EXPORT_SYMBOL_GPL(ata_dev_config); EXPORT_SYMBOL_GPL(ata_scsi_simulate); EXPORT_SYMBOL_GPL(ata_eh_qc_complete); EXPORT_SYMBOL_GPL(ata_eh_qc_retry); diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index d0bd94abb41..ccedb453697 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c @@ -785,6 +785,8 @@ int ata_scsi_error(struct Scsi_Host *host) WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); spin_unlock_irqrestore(&ap->host_set->lock, flags); + ata_port_flush_task(ap); + ap->ops->eng_timeout(ap); WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h index d822eba05f3..f4c48c91b63 100644 --- a/drivers/scsi/libata.h +++ b/drivers/scsi/libata.h @@ -45,6 +45,7 @@ extern int libata_fua; extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, struct ata_device *dev); extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc); +extern void ata_port_flush_task(struct ata_port *ap); extern void ata_qc_free(struct ata_queued_cmd *qc); extern unsigned int ata_qc_issue(struct ata_queued_cmd *qc); extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index 7fa807d7d9e..e5862ad5e6c 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c @@ -256,6 +256,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = { board_20319 }, { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_20319 }, + { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_20319 }, { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, board_20319 }, { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0, diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c index cdf800e3ec1..4f2a67ed39d 100644 --- a/drivers/scsi/sata_sil.c +++ b/drivers/scsi/sata_sil.c @@ -49,24 +49,30 @@ #define DRV_VERSION "0.9" enum { + /* + * host flags + */ SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), SIL_FLAG_MOD15WRITE = (1 << 30), + SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_MMIO, + /* + * Controller IDs + */ sil_3112 = 0, - sil_3112_m15w = 1, - sil_3512 = 2, - sil_3114 = 3, - - SIL_FIFO_R0 = 0x40, - SIL_FIFO_W0 = 0x41, - SIL_FIFO_R1 = 0x44, - SIL_FIFO_W1 = 0x45, - SIL_FIFO_R2 = 0x240, - SIL_FIFO_W2 = 0x241, - SIL_FIFO_R3 = 0x244, - SIL_FIFO_W3 = 0x245, + sil_3512 = 1, + sil_3114 = 2, + /* + * Register offsets + */ SIL_SYSCFG = 0x48, + + /* + * Register bits + */ + /* SYSCFG */ SIL_MASK_IDE0_INT = (1 << 22), SIL_MASK_IDE1_INT = (1 << 23), SIL_MASK_IDE2_INT = (1 << 24), @@ -75,9 +81,12 @@ enum { SIL_MASK_4PORT = SIL_MASK_2PORT | SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT, - SIL_IDE2_BMDMA = 0x200, - + /* BMDMA/BMDMA2 */ SIL_INTR_STEERING = (1 << 1), + + /* + * Others + */ SIL_QUIRK_MOD15WRITE = (1 << 0), SIL_QUIRK_UDMA5MAX = (1 << 1), }; @@ -90,13 +99,13 @@ static void sil_post_set_mode (struct ata_port *ap); static const struct pci_device_id sil_pci_tbl[] = { - { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, - { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, + { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, + { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, - { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, - { 0x1002, 
0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, - { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w }, + { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, + { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, + { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, { } /* terminate list */ }; @@ -181,18 +190,7 @@ static const struct ata_port_info sil_port_info[] = { /* sil_3112 */ { .sht = &sil_sht, - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO, - .pio_mask = 0x1f, /* pio0-4 */ - .mwdma_mask = 0x07, /* mwdma0-2 */ - .udma_mask = 0x3f, /* udma0-5 */ - .port_ops = &sil_ops, - }, - /* sil_3112_15w - keep it sync'd w/ sil_3112 */ - { - .sht = &sil_sht, - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO | SIL_FLAG_MOD15WRITE, + .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x3f, /* udma0-5 */ @@ -201,9 +199,7 @@ static const struct ata_port_info sil_port_info[] = { /* sil_3512 */ { .sht = &sil_sht, - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO | - SIL_FLAG_RERR_ON_DMA_ACT, + .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x3f, /* udma0-5 */ @@ -212,9 +208,7 @@ static const struct ata_port_info sil_port_info[] = { /* sil_3114 */ { .sht = &sil_sht, - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_MMIO | - SIL_FLAG_RERR_ON_DMA_ACT, + .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x3f, /* udma0-5 */ @@ -228,16 +222,17 @@ static const struct { unsigned long tf; /* ATA taskfile register block */ unsigned long ctl; /* ATA control/altstatus register block */ unsigned long bmdma; /* DMA register block */ + unsigned long fifo_cfg; /* FIFO Valid Byte Count and Control */ unsigned long scr; /* SATA control register block */ unsigned long sien; /* SATA Interrupt Enable register */ unsigned long xfer_mode;/* data transfer mode register */ unsigned long sfis_cfg; /* SATA FIS reception config register */ } sil_port[] = { /* port 0 ... */ - { 0x80, 0x8A, 0x00, 0x100, 0x148, 0xb4, 0x14c }, - { 0xC0, 0xCA, 0x08, 0x180, 0x1c8, 0xf4, 0x1cc }, - { 0x280, 0x28A, 0x200, 0x300, 0x348, 0x2b4, 0x34c }, - { 0x2C0, 0x2CA, 0x208, 0x380, 0x3c8, 0x2f4, 0x3cc }, + { 0x80, 0x8A, 0x00, 0x40, 0x100, 0x148, 0xb4, 0x14c }, + { 0xC0, 0xCA, 0x08, 0x44, 0x180, 0x1c8, 0xf4, 0x1cc }, + { 0x280, 0x28A, 0x200, 0x240, 0x300, 0x348, 0x2b4, 0x34c }, + { 0x2C0, 0x2CA, 0x208, 0x244, 0x380, 0x3c8, 0x2f4, 0x3cc }, /* ... 
port 3 */ }; @@ -418,13 +413,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto err_out_regions; - probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); if (probe_ent == NULL) { rc = -ENOMEM; goto err_out_regions; } - memset(probe_ent, 0, sizeof(*probe_ent)); INIT_LIST_HEAD(&probe_ent->node); probe_ent->dev = pci_dev_to_dev(pdev); probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops; @@ -461,19 +455,12 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (cls) { cls >>= 3; cls++; /* cls = (line_size/8)+1 */ - writeb(cls, mmio_base + SIL_FIFO_R0); - writeb(cls, mmio_base + SIL_FIFO_W0); - writeb(cls, mmio_base + SIL_FIFO_R1); - writeb(cls, mmio_base + SIL_FIFO_W1); - if (ent->driver_data == sil_3114) { - writeb(cls, mmio_base + SIL_FIFO_R2); - writeb(cls, mmio_base + SIL_FIFO_W2); - writeb(cls, mmio_base + SIL_FIFO_R3); - writeb(cls, mmio_base + SIL_FIFO_W3); - } + for (i = 0; i < probe_ent->n_ports; i++) + writew(cls << 8 | cls, + mmio_base + sil_port[i].fifo_cfg); } else dev_printk(KERN_WARNING, &pdev->dev, - "cache line size not set. Driver may not function\n"); + "cache line size not set. Driver may not function\n"); /* Apply R_ERR on DMA activate FIS errata workaround */ if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { @@ -496,10 +483,10 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) irq_mask = SIL_MASK_4PORT; /* flip the magic "make 4 ports work" bit */ - tmp = readl(mmio_base + SIL_IDE2_BMDMA); + tmp = readl(mmio_base + sil_port[2].bmdma); if ((tmp & SIL_INTR_STEERING) == 0) writel(tmp | SIL_INTR_STEERING, - mmio_base + SIL_IDE2_BMDMA); + mmio_base + sil_port[2].bmdma); } else { irq_mask = SIL_MASK_2PORT; diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c index 5c5822e33a4..8fb62427be8 100644 --- a/drivers/scsi/sata_sil24.c +++ b/drivers/scsi/sata_sil24.c @@ -892,6 +892,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) probe_ent->sht = pinfo->sht; probe_ent->host_flags = pinfo->host_flags; probe_ent->pio_mask = pinfo->pio_mask; + probe_ent->mwdma_mask = pinfo->mwdma_mask; probe_ent->udma_mask = pinfo->udma_mask; probe_ent->port_ops = pinfo->port_ops; probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags); diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 5d02ff4db6c..b65462f7648 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -192,7 +192,7 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) SDev = cd->device; if (!sense) { - sense = kmalloc(sizeof(*sense), GFP_KERNEL); + sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); if (!sense) { err = -ENOMEM; goto out; |
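One more reading aid, this time for the ata_piix.c portion of the merge: the old combined-mode special cases are replaced by per-controller map tables (ich5_map_db, ich6_map_db, ich6m_map_db) indexed by the low bits of the PMR register. Below is a minimal sketch of the decode using the ICH6 table values quoted in the patch; the function and variable names are illustrative, and no PCI config access is performed.

```c
/* Sketch of the ICH6-style MAP decoding added in the patch.  The table
 * contents mirror ich6_map_db above; everything else is illustrative. */
#include <stdio.h>

enum { P0 = 0, P1 = 1, P2 = 2, P3 = 3, IDE = -1, NA = -2, RV = -3 };

static const int ich6_map[4][4] = {
	/* PM   PS   SM   SS            MAP */
	{ P0,  P1,  P2,  P3  },	/* 00b */
	{ IDE, IDE, P1,  P3  },	/* 01b */
	{ P0,  P2,  IDE, IDE },	/* 10b */
	{ RV,  RV,  RV,  RV  },	/* 11b: reserved */
};

static void print_map(unsigned int map_value)
{
	const int *map = ich6_map[map_value & 0x3];
	int i;

	printf("MAP value %u -> [", map_value & 0x3);
	for (i = 0; i < 4; i++) {
		switch (map[i]) {
		case RV:  printf(" XX"); break;			/* reserved layout */
		case NA:  printf(" --"); break;			/* not wired up    */
		case IDE: printf(" IDE IDE"); i++; break;	/* PATA channel    */
		default:  printf(" P%d", map[i]); break;	/* SATA port       */
		}
	}
	printf(" ]\n");
}

int main(void)
{
	unsigned int v;

	for (v = 0; v < 4; v++)
		print_map(v);
	return 0;
}
```

In the driver itself, piix_init_sata_map() additionally swaps in the ich5_pata port info for an IDE pair and reports reserved MAP values with a KERN_ERR "invalid MAP value" message, as the hunk above shows.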