Diffstat (limited to 'drivers'):

 drivers/scsi/libata-core.c  | 576
 drivers/scsi/pdc_adma.c     |   8
 drivers/scsi/sata_mv.c      |   7
 drivers/scsi/sata_nv.c      |   4
 drivers/scsi/sata_promise.c |   7
 drivers/scsi/sata_qstor.c   |  11
 drivers/scsi/sata_sx4.c     |   6
 drivers/scsi/sata_vsc.c     |   6

 8 files changed, 454 insertions, 171 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 93ae2dcc837..c67c7a49c8b 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -73,6 +73,7 @@ static int fgb(u32 bitmap); static int ata_choose_xfer_mode(const struct ata_port *ap, u8 *xfer_mode_out, unsigned int *xfer_shift_out); +static void ata_pio_error(struct ata_port *ap); static unsigned int ata_unique_id = 1; static struct workqueue_struct *ata_wq; @@ -1385,6 +1386,12 @@ retry: } + if (dev->id[59] & 0x100) { + dev->multi_count = dev->id[59] & 0xff; + DPRINTK("ata%u: dev %u multi count %u\n", + ap->id, device, dev->multi_count); + } + ap->host->max_cmd_len = 16; } @@ -1401,6 +1408,9 @@ retry: ap->cdb_len = (unsigned int) rc; ap->host->max_cmd_len = (unsigned char) ap->cdb_len; + if (ata_id_cdb_intr(dev->id)) + dev->flags |= ATA_DFLAG_CDB_INTR; + /* print device info to dmesg */ printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", ap->id, device, @@ -2870,7 +2880,6 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc) unsigned long flags; spin_lock_irqsave(&ap->host_set->lock, flags); - ap->flags &= ~ATA_FLAG_NOINTR; ata_irq_on(ap); ata_qc_complete(qc); spin_unlock_irqrestore(&ap->host_set->lock, flags); @@ -2936,7 +2945,8 @@ static unsigned long ata_pio_poll(struct ata_port *ap) * None. (executing in kernel thread context) * * RETURNS: - * Non-zero if qc completed, zero otherwise. + * Zero if qc completed. + * Non-zero if has next. */ static int ata_pio_complete (struct ata_port *ap) @@ -2949,7 +2959,7 @@ static int ata_pio_complete (struct ata_port *ap) * we enter, BSY will be cleared in a chk-status or two. If not, * the drive is probably seeking or something. Snooze for a couple * msecs, then chk-status again. If still busy, fall back to - * HSM_ST_POLL state. + * HSM_ST_LAST_POLL state. */ drv_stat = ata_busy_wait(ap, ATA_BUSY, 10); if (drv_stat & ATA_BUSY) { @@ -2958,7 +2968,7 @@ static int ata_pio_complete (struct ata_port *ap) if (drv_stat & ATA_BUSY) { ap->hsm_task_state = HSM_ST_LAST_POLL; ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; - return 0; + return 1; } } @@ -2969,7 +2979,7 @@ static int ata_pio_complete (struct ata_port *ap) if (!ata_ok(drv_stat)) { qc->err_mask |= __ac_err_mask(drv_stat); ap->hsm_task_state = HSM_ST_ERR; - return 0; + return 1; } ap->hsm_task_state = HSM_ST_IDLE; @@ -2979,7 +2989,7 @@ static int ata_pio_complete (struct ata_port *ap) /* another command may start at this point */ - return 1; + return 0; } @@ -3151,7 +3161,23 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) page = nth_page(page, (offset >> PAGE_SHIFT)); offset %= PAGE_SIZE; - buf = kmap(page) + offset; + DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); + + if (PageHighMem(page)) { + unsigned long flags; + + local_irq_save(flags); + buf = kmap_atomic(page, KM_IRQ0); + + /* do the actual data transfer */ + ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write); + + kunmap_atomic(buf, KM_IRQ0); + local_irq_restore(flags); + } else { + buf = page_address(page); + ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write); + } qc->cursect++; qc->cursg_ofs++; @@ -3160,14 +3186,153 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) qc->cursg++; qc->cursg_ofs = 0; } +} - DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); +/** + * ata_pio_sectors - Transfer one or many 512-byte sectors. + * @qc: Command on going + * + * Transfer one or many ATA_SECT_SIZE of data from/to the + * ATA device for the DRQ request. 
+ * + * LOCKING: + * Inherited from caller. + */ + +static void ata_pio_sectors(struct ata_queued_cmd *qc) +{ + if (is_multi_taskfile(&qc->tf)) { + /* READ/WRITE MULTIPLE */ + unsigned int nsect; - /* do the actual data transfer */ - do_write = (qc->tf.flags & ATA_TFLAG_WRITE); - ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); + assert(qc->dev->multi_count); - kunmap(page); + nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count); + while (nsect--) + ata_pio_sector(qc); + } else + ata_pio_sector(qc); +} + +/** + * atapi_send_cdb - Write CDB bytes to hardware + * @ap: Port to which ATAPI device is attached. + * @qc: Taskfile currently active + * + * When device has indicated its readiness to accept + * a CDB, this function is called. Send the CDB. + * + * LOCKING: + * caller. + */ + +static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + /* send SCSI cdb */ + DPRINTK("send cdb\n"); + assert(ap->cdb_len >= 12); + + ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); + ata_altstatus(ap); /* flush */ + + switch (qc->tf.protocol) { + case ATA_PROT_ATAPI: + ap->hsm_task_state = HSM_ST; + break; + case ATA_PROT_ATAPI_NODATA: + ap->hsm_task_state = HSM_ST_LAST; + break; + case ATA_PROT_ATAPI_DMA: + ap->hsm_task_state = HSM_ST_LAST; + /* initiate bmdma */ + ap->ops->bmdma_start(qc); + break; + } +} + +/** + * ata_pio_first_block - Write first data block to hardware + * @ap: Port to which ATA/ATAPI device is attached. + * + * When device has indicated its readiness to accept + * the data, this function sends out the CDB or + * the first data block by PIO. + * After this, + * - If polling, ata_pio_task() handles the rest. + * - Otherwise, interrupt handler takes over. + * + * LOCKING: + * Kernel thread context (may sleep) + * + * RETURNS: + * Zero if irq handler takes over + * Non-zero if has next (polling). + */ + +static int ata_pio_first_block(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + u8 status; + unsigned long flags; + int has_next; + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + assert(qc->flags & ATA_QCFLAG_ACTIVE); + + /* if polling, we will stay in the work queue after sending the data. + * otherwise, interrupt handler takes over after sending the data. + */ + has_next = (qc->tf.flags & ATA_TFLAG_POLLING); + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) { + qc->err_mask |= AC_ERR_ATA_BUS; + ap->hsm_task_state = HSM_ST_TMOUT; + goto err_out; + } + + /* make sure DRQ is set */ + status = ata_chk_status(ap); + if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { + /* device status error */ + qc->err_mask |= AC_ERR_ATA_BUS; + ap->hsm_task_state = HSM_ST_ERR; + goto err_out; + } + + /* Send the CDB (atapi) or the first data block (ata pio out). + * During the state transition, interrupt handler shouldn't + * be invoked before the data transfer is complete and + * hsm_task_state is changed. Hence, the following locking. + */ + spin_lock_irqsave(&ap->host_set->lock, flags); + + if (qc->tf.protocol == ATA_PROT_PIO) { + /* PIO data out protocol. + * send first data block. + */ + + /* ata_pio_sectors() might change the state to HSM_ST_LAST. + * so, the state is changed here before ata_pio_sectors(). + */ + ap->hsm_task_state = HSM_ST; + ata_pio_sectors(qc); + ata_altstatus(ap); /* flush */ + } else + /* send CDB */ + atapi_send_cdb(ap, qc); + + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + /* if polling, ata_pio_task() handles the rest. 
+ * otherwise, interrupt handler takes over from here. + */ + return has_next; + +err_out: + return 1; /* has next */ } /** @@ -3233,7 +3398,23 @@ next_sg: /* don't cross page boundaries */ count = min(count, (unsigned int)PAGE_SIZE - offset); - buf = kmap(page) + offset; + DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); + + if (PageHighMem(page)) { + unsigned long flags; + + local_irq_save(flags); + buf = kmap_atomic(page, KM_IRQ0); + + /* do the actual data transfer */ + ata_data_xfer(ap, buf + offset, count, do_write); + + kunmap_atomic(buf, KM_IRQ0); + local_irq_restore(flags); + } else { + buf = page_address(page); + ata_data_xfer(ap, buf + offset, count, do_write); + } bytes -= count; qc->curbytes += count; @@ -3244,13 +3425,6 @@ next_sg: qc->cursg_ofs = 0; } - DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); - - /* do the actual data transfer */ - ata_data_xfer(ap, buf, count, do_write); - - kunmap(page); - if (bytes) goto next_sg; } @@ -3287,6 +3461,8 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) if (do_write != i_write) goto err_out; + VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes); + __atapi_pio_bytes(qc, bytes); return; @@ -3357,19 +3533,22 @@ static void ata_pio_block(struct ata_port *ap) return; } - ata_pio_sector(qc); + ata_pio_sectors(qc); } + + ata_altstatus(ap); /* flush */ } static void ata_pio_error(struct ata_port *ap) { struct ata_queued_cmd *qc; - printk(KERN_WARNING "ata%u: PIO error\n", ap->id); - qc = ata_qc_from_tag(ap, ap->active_tag); assert(qc != NULL); + if (qc->tf.command != ATA_CMD_PACKET) + printk(KERN_WARNING "ata%u: PIO error\n", ap->id); + /* make sure qc->err_mask is available to * know what's wrong and recover */ @@ -3384,22 +3563,23 @@ static void ata_pio_task(void *_data) { struct ata_port *ap = _data; unsigned long timeout; - int qc_completed; + int has_next; fsm_start: timeout = 0; - qc_completed = 0; + has_next = 1; switch (ap->hsm_task_state) { - case HSM_ST_IDLE: - return; + case HSM_ST_FIRST: + has_next = ata_pio_first_block(ap); + break; case HSM_ST: ata_pio_block(ap); break; case HSM_ST_LAST: - qc_completed = ata_pio_complete(ap); + has_next = ata_pio_complete(ap); break; case HSM_ST_POLL: @@ -3411,11 +3591,15 @@ fsm_start: case HSM_ST_ERR: ata_pio_error(ap); return; + + default: + BUG(); + return; } if (timeout) queue_delayed_work(ata_wq, &ap->pio_task, timeout); - else if (!qc_completed) + else if (has_next) goto fsm_start; } @@ -3470,6 +3654,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", ap->id, qc->tf.command, drv_stat, host_stat); + ap->hsm_task_state = HSM_ST_IDLE; + /* complete taskfile transaction */ qc->err_mask |= ac_err_mask(drv_stat); break; @@ -3720,43 +3906,103 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + /* Use polling pio if the LLD doesn't handle + * interrupt driven pio and atapi CDB interrupt. 
+ */ + if (ap->flags & ATA_FLAG_PIO_POLLING) { + switch (qc->tf.protocol) { + case ATA_PROT_PIO: + case ATA_PROT_ATAPI: + case ATA_PROT_ATAPI_NODATA: + qc->tf.flags |= ATA_TFLAG_POLLING; + break; + case ATA_PROT_ATAPI_DMA: + if (qc->dev->flags & ATA_DFLAG_CDB_INTR) + BUG(); + break; + default: + break; + } + } + + /* select the device */ ata_dev_select(ap, qc->dev->devno, 1, 0); + /* start the command */ switch (qc->tf.protocol) { case ATA_PROT_NODATA: + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_qc_set_polling(qc); + ata_tf_to_host(ap, &qc->tf); + ap->hsm_task_state = HSM_ST_LAST; + + if (qc->tf.flags & ATA_TFLAG_POLLING) + queue_work(ata_wq, &ap->pio_task); + break; case ATA_PROT_DMA: + assert(!(qc->tf.flags & ATA_TFLAG_POLLING)); + ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ ap->ops->bmdma_setup(qc); /* set up bmdma */ ap->ops->bmdma_start(qc); /* initiate bmdma */ + ap->hsm_task_state = HSM_ST_LAST; break; - case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ - ata_qc_set_polling(qc); - ata_tf_to_host(ap, &qc->tf); - ap->hsm_task_state = HSM_ST; - queue_work(ata_wq, &ap->pio_task); - break; + case ATA_PROT_PIO: + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_qc_set_polling(qc); - case ATA_PROT_ATAPI: - ata_qc_set_polling(qc); ata_tf_to_host(ap, &qc->tf); - queue_work(ata_wq, &ap->packet_task); + + if (qc->tf.flags & ATA_TFLAG_WRITE) { + /* PIO data out protocol */ + ap->hsm_task_state = HSM_ST_FIRST; + queue_work(ata_wq, &ap->pio_task); + + /* always send first data block using + * the ata_pio_task() codepath. + */ + } else { + /* PIO data in protocol */ + ap->hsm_task_state = HSM_ST; + + if (qc->tf.flags & ATA_TFLAG_POLLING) + queue_work(ata_wq, &ap->pio_task); + + /* if polling, ata_pio_task() handles the rest. + * otherwise, interrupt handler takes over from here. + */ + } + break; + case ATA_PROT_ATAPI: case ATA_PROT_ATAPI_NODATA: - ap->flags |= ATA_FLAG_NOINTR; + if (qc->tf.flags & ATA_TFLAG_POLLING) + ata_qc_set_polling(qc); + ata_tf_to_host(ap, &qc->tf); - queue_work(ata_wq, &ap->packet_task); + ap->hsm_task_state = HSM_ST_FIRST; + + /* send cdb by polling if no cdb interrupt */ + if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || + (qc->tf.flags & ATA_TFLAG_POLLING)) + queue_work(ata_wq, &ap->pio_task); break; case ATA_PROT_ATAPI_DMA: - ap->flags |= ATA_FLAG_NOINTR; + assert(!(qc->tf.flags & ATA_TFLAG_POLLING)); + ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ ap->ops->bmdma_setup(qc); /* set up bmdma */ - queue_work(ata_wq, &ap->packet_task); + ap->hsm_task_state = HSM_ST_FIRST; + + /* send cdb by polling if no cdb interrupt */ + if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + queue_work(ata_wq, &ap->pio_task); break; default: @@ -4017,48 +4263,160 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc) inline unsigned int ata_host_intr (struct ata_port *ap, struct ata_queued_cmd *qc) { - u8 status, host_stat; + u8 status, host_stat = 0; - switch (qc->tf.protocol) { - - case ATA_PROT_DMA: - case ATA_PROT_ATAPI_DMA: - case ATA_PROT_ATAPI: - /* check status of DMA engine */ - host_stat = ap->ops->bmdma_status(ap); - VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); + VPRINTK("ata%u: protocol %d task_state %d\n", + ap->id, qc->tf.protocol, ap->hsm_task_state); - /* if it's not our irq... */ - if (!(host_stat & ATA_DMA_INTR)) + /* Check whether we are expecting interrupt in this state */ + switch (ap->hsm_task_state) { + case HSM_ST_FIRST: + /* Check the ATA_DFLAG_CDB_INTR flag is enough here. + * The flag was turned on only for atapi devices. 
+ * No need to check is_atapi_taskfile(&qc->tf) again. + */ + if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) goto idle_irq; + break; + case HSM_ST_LAST: + if (qc->tf.protocol == ATA_PROT_DMA || + qc->tf.protocol == ATA_PROT_ATAPI_DMA) { + /* check status of DMA engine */ + host_stat = ap->ops->bmdma_status(ap); + VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); + + /* if it's not our irq... */ + if (!(host_stat & ATA_DMA_INTR)) + goto idle_irq; + + /* before we do anything else, clear DMA-Start bit */ + ap->ops->bmdma_stop(qc); + + if (unlikely(host_stat & ATA_DMA_ERR)) { + /* error when transfering data to/from memory */ + qc->err_mask |= AC_ERR_HOST_BUS; + ap->hsm_task_state = HSM_ST_ERR; + } + } + break; + case HSM_ST: + break; + default: + goto idle_irq; + } - /* before we do anything else, clear DMA-Start bit */ - ap->ops->bmdma_stop(qc); + /* check altstatus */ + status = ata_altstatus(ap); + if (status & ATA_BUSY) + goto idle_irq; - /* fall through */ + /* check main status, clearing INTRQ */ + status = ata_chk_status(ap); + if (unlikely(status & ATA_BUSY)) + goto idle_irq; - case ATA_PROT_ATAPI_NODATA: - case ATA_PROT_NODATA: - /* check altstatus */ - status = ata_altstatus(ap); - if (status & ATA_BUSY) - goto idle_irq; + DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", + ap->id, qc->tf.protocol, ap->hsm_task_state, status); - /* check main status, clearing INTRQ */ - status = ata_chk_status(ap); - if (unlikely(status & ATA_BUSY)) - goto idle_irq; - DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", - ap->id, qc->tf.protocol, status); + /* ack bmdma irq events */ + ap->ops->irq_clear(ap); - /* ack bmdma irq events */ - ap->ops->irq_clear(ap); + /* check error */ + if (unlikely(status & (ATA_ERR | ATA_DF))) { + qc->err_mask |= AC_ERR_DEV; + ap->hsm_task_state = HSM_ST_ERR; + } + +fsm_start: + switch (ap->hsm_task_state) { + case HSM_ST_FIRST: + /* Some pre-ATAPI-4 devices assert INTRQ + * at this state when ready to receive CDB. + */ + + /* check device status */ + if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) { + /* Wrong status. 
Let EH handle this */ + qc->err_mask |= AC_ERR_ATA_BUS; + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + atapi_send_cdb(ap, qc); + + break; + + case HSM_ST: + /* complete command or read/write the data register */ + if (qc->tf.protocol == ATA_PROT_ATAPI) { + /* ATAPI PIO protocol */ + if ((status & ATA_DRQ) == 0) { + /* no more data to transfer */ + ap->hsm_task_state = HSM_ST_LAST; + goto fsm_start; + } + + atapi_pio_bytes(qc); + + if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) + /* bad ireason reported by device */ + goto fsm_start; + + } else { + /* ATA PIO protocol */ + if (unlikely((status & ATA_DRQ) == 0)) { + /* handle BSY=0, DRQ=0 as error */ + qc->err_mask |= AC_ERR_ATA_BUS; + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + ata_pio_sectors(qc); + + if (ap->hsm_task_state == HSM_ST_LAST && + (!(qc->tf.flags & ATA_TFLAG_WRITE))) { + /* all data read */ + ata_altstatus(ap); + status = ata_chk_status(ap); + goto fsm_start; + } + } + + ata_altstatus(ap); /* flush */ + break; + + case HSM_ST_LAST: + if (unlikely(status & ATA_DRQ)) { + /* handle DRQ=1 as error */ + qc->err_mask |= AC_ERR_ATA_BUS; + ap->hsm_task_state = HSM_ST_ERR; + goto fsm_start; + } + + /* no more data to transfer */ + DPRINTK("ata%u: command complete, drv_stat 0x%x\n", + ap->id, status); + + ap->hsm_task_state = HSM_ST_IDLE; /* complete taskfile transaction */ qc->err_mask |= ac_err_mask(status); ata_qc_complete(qc); break; + case HSM_ST_ERR: + if (qc->tf.command != ATA_CMD_PACKET) + printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n", + ap->id, status, host_stat); + + /* make sure qc->err_mask is available to + * know what's wrong and recover + */ + assert(qc->err_mask); + + ap->hsm_task_state = HSM_ST_IDLE; + ata_qc_complete(qc); + break; default: goto idle_irq; } @@ -4109,11 +4467,11 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) ap = host_set->ports[i]; if (ap && - !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { + !(ap->flags & ATA_FLAG_PORT_DISABLED)) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN)) && + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && (qc->flags & ATA_QCFLAG_ACTIVE)) handled |= ata_host_intr(ap, qc); } @@ -4125,79 +4483,6 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) } /** - * atapi_packet_task - Write CDB bytes to hardware - * @_data: Port to which ATAPI device is attached. - * - * When device has indicated its readiness to accept - * a CDB, this function is called. Send the CDB. - * If DMA is to be performed, exit immediately. - * Otherwise, we are in polling mode, so poll - * status under operation succeeds or fails. 
- * - * LOCKING: - * Kernel thread context (may sleep) - */ - -static void atapi_packet_task(void *_data) -{ - struct ata_port *ap = _data; - struct ata_queued_cmd *qc; - u8 status; - - qc = ata_qc_from_tag(ap, ap->active_tag); - assert(qc != NULL); - assert(qc->flags & ATA_QCFLAG_ACTIVE); - - /* sleep-wait for BSY to clear */ - DPRINTK("busy wait\n"); - if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { - qc->err_mask |= AC_ERR_TIMEOUT; - goto err_out; - } - - /* make sure DRQ is set */ - status = ata_chk_status(ap); - if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) { - qc->err_mask |= AC_ERR_HSM; - goto err_out; - } - - /* send SCSI cdb */ - DPRINTK("send cdb\n"); - assert(ap->cdb_len >= 12); - - if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || - qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { - unsigned long flags; - - /* Once we're done issuing command and kicking bmdma, - * irq handler takes over. To not lose irq, we need - * to clear NOINTR flag before sending cdb, but - * interrupt handler shouldn't be invoked before we're - * finished. Hence, the following locking. - */ - spin_lock_irqsave(&ap->host_set->lock, flags); - ap->flags &= ~ATA_FLAG_NOINTR; - ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); - if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) - ap->ops->bmdma_start(qc); /* initiate bmdma */ - spin_unlock_irqrestore(&ap->host_set->lock, flags); - } else { - ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); - - /* PIO commands are handled by polling */ - ap->hsm_task_state = HSM_ST; - queue_work(ata_wq, &ap->pio_task); - } - - return; - -err_out: - ata_poll_qc_complete(qc); -} - - -/** * ata_port_start - Set port up for dma. * @ap: Port to initialize * @@ -4413,7 +4698,6 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, ap->active_tag = ATA_TAG_POISON; ap->last_ctl = 0xFF; - INIT_WORK(&ap->packet_task, atapi_packet_task, ap); INIT_WORK(&ap->pio_task, ata_pio_task, ap); INIT_LIST_HEAD(&ap->eh_done_q); diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c index 3a6bf58dc37..2dca6c53868 100644 --- a/drivers/scsi/pdc_adma.c +++ b/drivers/scsi/pdc_adma.c @@ -457,13 +457,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set) continue; handled = 1; adma_enter_reg_mode(ap); - if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)) + if (ap->flags & ATA_FLAG_PORT_DISABLED) continue; pp = ap->private_data; if (!pp || pp->state != adma_state_pkt) continue; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN))) { + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { if ((status & (aPERR | aPSD | aUIRQ))) qc->err_mask |= AC_ERR_OTHER; else if (pp->pkt[0] != cDONE) @@ -482,13 +482,13 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set) for (port_no = 0; port_no < host_set->n_ports; ++port_no) { struct ata_port *ap; ap = host_set->ports[port_no]; - if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) { + if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { struct ata_queued_cmd *qc; struct adma_port_priv *pp = ap->private_data; if (!pp || pp->state != adma_state_mmio) continue; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN))) { + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { /* check main status, clearing INTRQ */ u8 status = ata_check_status(ap); diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index d9a554ca45c..0042c7840d8 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c @@ -87,7 +87,7 @@ enum { 
MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | - ATA_FLAG_NO_ATAPI), + ATA_FLAG_PIO_POLLING), MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, CRQB_FLAG_READ = (1 << 0), @@ -1220,8 +1220,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, handled++; } - if (ap && - (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) + if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED)) continue; err_mask = ac_err_mask(ata_status); @@ -1242,7 +1241,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, VPRINTK("port %u IRQ found for qc, " "ata_status 0x%x\n", port,ata_status); /* mark qc status appropriately */ - if (!(qc->tf.ctl & ATA_NIEN)) { + if (!(qc->tf.flags & ATA_TFLAG_POLLING)) { qc->err_mask |= err_mask; ata_qc_complete(qc); } diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c index bbbb55eeb73..945194b7699 100644 --- a/drivers/scsi/sata_nv.c +++ b/drivers/scsi/sata_nv.c @@ -309,11 +309,11 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance, ap = host_set->ports[i]; if (ap && - !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { + !(ap->flags & ATA_FLAG_PORT_DISABLED)) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN))) + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) handled += ata_host_intr(ap, qc); else // No request pending? Clear interrupt status diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index 77ef646b0f9..760c56f7163 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c @@ -73,7 +73,8 @@ enum { PDC_RESET = (1 << 11), /* HDMA reset */ PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | - ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI, + ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | + ATA_FLAG_PIO_POLLING, }; @@ -502,11 +503,11 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r ap = host_set->ports[i]; tmp = mask & (1 << (i + 1)); if (tmp && ap && - !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { + !(ap->flags & ATA_FLAG_PORT_DISABLED)) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN))) + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) handled += pdc_host_intr(ap, qc); } } diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c index 2afbeb77f6f..b2f87da7573 100644 --- a/drivers/scsi/sata_qstor.c +++ b/drivers/scsi/sata_qstor.c @@ -177,7 +177,7 @@ static const struct ata_port_info qs_port_info[] = { .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET | //FIXME ATA_FLAG_SRST | - ATA_FLAG_MMIO, + ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, .pio_mask = 0x10, /* pio4 */ .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &qs_ata_ops, @@ -396,14 +396,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set) DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", sff1, sff0, port_no, sHST, sDST); handled = 1; - if (ap && !(ap->flags & - (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) { + if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) { struct ata_queued_cmd *qc; struct qs_port_priv *pp = ap->private_data; if (!pp || pp->state != qs_state_pkt) continue; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN))) { + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { switch (sHST) { case 0: /* successful CPB */ case 3: /* device error */ @@ -430,13 +429,13 @@ static inline 
unsigned int qs_intr_mmio(struct ata_host_set *host_set) struct ata_port *ap; ap = host_set->ports[port_no]; if (ap && - !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { + !(ap->flags & ATA_FLAG_PORT_DISABLED)) { struct ata_queued_cmd *qc; struct qs_port_priv *pp = ap->private_data; if (!pp || pp->state != qs_state_mmio) continue; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN))) { + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { /* check main status, clearing INTRQ */ u8 status = ata_check_status(ap); diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c index 9f992fbcf2e..c3975fada63 100644 --- a/drivers/scsi/sata_sx4.c +++ b/drivers/scsi/sata_sx4.c @@ -220,7 +220,7 @@ static const struct ata_port_info pdc_port_info[] = { .sht = &pdc_sata_sht, .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST | ATA_FLAG_MMIO | - ATA_FLAG_NO_ATAPI, + ATA_FLAG_PIO_POLLING, .pio_mask = 0x1f, /* pio0-4 */ .mwdma_mask = 0x07, /* mwdma0-2 */ .udma_mask = 0x7f, /* udma0-6 ; FIXME */ @@ -835,11 +835,11 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re tmp = mask & (1 << i); VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); if (tmp && ap && - !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { + !(ap->flags & ATA_FLAG_PORT_DISABLED)) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN))) + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) handled += pdc20621_host_intr(ap, qc, (i > 4), mmio_base); } diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index 2e2c3b7acb0..3e34fedd210 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c @@ -201,12 +201,12 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, struct ata_port *ap; ap = host_set->ports[i]; - if (ap && !(ap->flags & - (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) { + if (ap && + !(ap->flags & ATA_FLAG_PORT_DISABLED)) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (!(qc->tf.ctl & ATA_NIEN))) + if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) handled += ata_host_intr(ap, qc); } } |
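
The ATA_FLAG_NOINTR / `qc->tf.ctl & ATA_NIEN` checks removed from the low-level drivers above are replaced by a per-command ATA_TFLAG_POLLING flag: a controller that cannot deliver interrupt-driven PIO (or an ATAPI CDB interrupt) now advertises ATA_FLAG_PIO_POLLING once in its port info, and ata_qc_issue_prot() then marks the affected taskfiles as polled so the HSM runs from ata_pio_task() rather than ata_host_intr(). A minimal sketch of that opt-in, modeled on the sata_promise.c and sata_qstor.c hunks; the `example_*` names are hypothetical stand-ins for a real driver's structures:

	/*
	 * Hypothetical LLDD port_info (2.6-era libata), assuming the same
	 * struct ata_port_info layout used by the drivers in this diff.
	 * ATA_FLAG_PIO_POLLING tells the core to run PIO and ATAPI-PIO
	 * commands from the workqueue-based polling path.
	 */
	static const struct ata_port_info example_port_info = {
		.sht		= &example_sht,		/* hypothetical scsi_host_template */
		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
				  ATA_FLAG_PIO_POLLING,
		.pio_mask	= 0x1f,			/* pio0-4 */
		.udma_mask	= 0x7f,			/* udma0-6 */
		.port_ops	= &example_ops,		/* hypothetical ata_port_operations */
	};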