Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r--  drivers/scsi/libata-core.c  247
 1 file changed, 207 insertions(+), 40 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 4154e5b6bad..dee4b12b034 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1314,12 +1314,12 @@ static inline u8 ata_dev_knobble(struct ata_port *ap)
/**
* ata_dev_config - Run device specific handlers and check for
* SATA->PATA bridges
- * @ap: Bus
+ * @ap: Bus
* @i: Device
*
* LOCKING:
*/
-
+
void ata_dev_config(struct ata_port *ap, unsigned int i)
{
/* limit bridge transfers to udma5, 200 sectors */
@@ -2387,6 +2387,27 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
}
/**
+ * ata_poll_qc_complete - turn irq back on and finish qc
+ * @qc: Command to complete
+ * @drv_stat: ATA status register content
+ *
+ * LOCKING:
+ * None. (grabs host lock)
+ */
+
+void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ ap->flags &= ~ATA_FLAG_NOINTR;
+ ata_irq_on(ap);
+ ata_qc_complete(qc, drv_stat);
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+}
+
+/**
* ata_pio_poll -
* @ap:
*
@@ -2448,11 +2469,10 @@ static void ata_pio_complete (struct ata_port *ap)
u8 drv_stat;
/*
- * This is purely hueristic. This is a fast path.
- * Sometimes when we enter, BSY will be cleared in
- * a chk-status or two. If not, the drive is probably seeking
- * or something. Snooze for a couple msecs, then
- * chk-status again. If still busy, fall back to
+ * This is purely heuristic. This is a fast path. Sometimes when
+ * we enter, BSY will be cleared in a chk-status or two. If not,
+ * the drive is probably seeking or something. Snooze for a couple
+ * msecs, then chk-status again. If still busy, fall back to
* PIO_ST_POLL state.
*/
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
@@ -2477,9 +2497,7 @@ static void ata_pio_complete (struct ata_port *ap)
ap->pio_task_state = PIO_ST_IDLE;
- ata_irq_on(ap);
-
- ata_qc_complete(qc, drv_stat);
+ ata_poll_qc_complete(qc, drv_stat);
}
@@ -2504,6 +2522,20 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
#endif /* __BIG_ENDIAN */
}
+/**
+ * ata_mmio_data_xfer - Transfer data by MMIO
+ * @ap: port to read/write
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @write_data: non-zero to write to the device, zero to read
+ *
+ * Transfer data from/to the device data register by MMIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int write_data)
{
@@ -2512,6 +2544,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
u16 *buf16 = (u16 *) buf;
void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
+ /* Transfer multiple of 2 bytes */
if (write_data) {
for (i = 0; i < words; i++)
writew(le16_to_cpu(buf16[i]), mmio);
@@ -2519,19 +2552,76 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
for (i = 0; i < words; i++)
buf16[i] = cpu_to_le16(readw(mmio));
}
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ u16 align_buf[1] = { 0 };
+ unsigned char *trailing_buf = buf + buflen - 1;
+
+ if (write_data) {
+ memcpy(align_buf, trailing_buf, 1);
+ writew(le16_to_cpu(align_buf[0]), mmio);
+ } else {
+ align_buf[0] = cpu_to_le16(readw(mmio));
+ memcpy(trailing_buf, align_buf, 1);
+ }
+ }
}
+/**
+ * ata_pio_data_xfer - Transfer data by PIO
+ * @ap: port to read/write
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @write_data: non-zero to write to the device, zero to read
+ *
+ * Transfer data from/to the device data register by PIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int write_data)
{
- unsigned int dwords = buflen >> 1;
+ unsigned int words = buflen >> 1;
+ /* Transfer multiple of 2 bytes */
if (write_data)
- outsw(ap->ioaddr.data_addr, buf, dwords);
+ outsw(ap->ioaddr.data_addr, buf, words);
else
- insw(ap->ioaddr.data_addr, buf, dwords);
+ insw(ap->ioaddr.data_addr, buf, words);
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ u16 align_buf[1] = { 0 };
+ unsigned char *trailing_buf = buf + buflen - 1;
+
+ if (write_data) {
+ memcpy(align_buf, trailing_buf, 1);
+ outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
+ } else {
+ align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
+ memcpy(trailing_buf, align_buf, 1);
+ }
+ }
}
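
Both word-transfer helpers above move the buffer in 16-bit units and push the odd
trailing byte, if any, through a zero-padded word. The following userspace model is
only a sketch of that split: fifo_write()/fifo_read() and the fifo[] array are
hypothetical stand-ins for the device data register, and the le16/cpu byte swapping
done by the real helpers is omitted. It shows that an odd-length buffer still
round-trips intact.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t fifo[64];		/* stand-in for the 16-bit data register */

static void fifo_write(const unsigned char *buf, unsigned int buflen)
{
	unsigned int i, words = buflen >> 1;

	/* transfer multiple of 2 bytes */
	for (i = 0; i < words; i++)
		memcpy(&fifo[i], buf + 2 * i, 2);

	/* transfer trailing 1 byte, if any, padded out to a full word */
	if (buflen & 0x01) {
		uint16_t align_buf = 0;

		memcpy(&align_buf, buf + buflen - 1, 1);
		fifo[words] = align_buf;
	}
}

static void fifo_read(unsigned char *buf, unsigned int buflen)
{
	unsigned int i, words = buflen >> 1;

	for (i = 0; i < words; i++)
		memcpy(buf + 2 * i, &fifo[i], 2);

	/* keep only the valid byte of the final padded word */
	if (buflen & 0x01)
		memcpy(buf + buflen - 1, &fifo[words], 1);
}

int main(void)
{
	unsigned char out[7] = "libata", in[7] = { 0 };

	fifo_write(out, sizeof(out));	/* 3 full words + 1 padded word */
	fifo_read(in, sizeof(in));
	assert(memcmp(out, in, sizeof(out)) == 0);
	printf("odd-length buffer round-tripped through a 16-bit register\n");
	return 0;
}
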
+/**
+ * ata_data_xfer - Transfer data from/to the data register.
+ * @ap: port to read/write
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @do_write: non-zero to write to the device, zero to read
+ *
+ * Transfer data from/to the device data register.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int do_write)
{
@@ -2541,6 +2631,16 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
ata_pio_data_xfer(ap, buf, buflen, do_write);
}
+/**
+ * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
+ * @qc: Command in progress
+ *
+ * Transfer ATA_SECT_SIZE of data from/to the ATA device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
@@ -2579,6 +2679,18 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
kunmap(page);
}
+/**
+ * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ * @qc: Command in progress
+ * @bytes: number of bytes
+ *
+ * Transfer data from/to the ATAPI device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
@@ -2588,10 +2700,33 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
unsigned char *buf;
unsigned int offset, count;
- if (qc->curbytes == qc->nbytes - bytes)
+ if (qc->curbytes + bytes >= qc->nbytes)
ap->pio_task_state = PIO_ST_LAST;
next_sg:
+ if (unlikely(qc->cursg >= qc->n_elem)) {
+ /*
+ * The end of qc->sg is reached and the device expects
+ * more data to transfer. In order not to overrun qc->sg
+ * and to fulfill the length specified in the byte count register:
+ * - for reads, discard the trailing data from the device
+ * - for writes, pad the device with zero data
+ */
+ u16 pad_buf[1] = { 0 };
+ unsigned int words = bytes >> 1;
+ unsigned int i;
+
+ if (words) /* warning if bytes > 1 */
+ printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
+ ap->id, bytes);
+
+ for (i = 0; i < words; i++)
+ ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
+
+ ap->pio_task_state = PIO_ST_LAST;
+ return;
+ }
+
sg = &qc->sg[qc->cursg];
page = sg->page;
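
The guard added above stops __atapi_pio_bytes() from running past qc->sg when the
device's byte count requests more data than the scatterlist holds: reads drain the
excess words, writes pad with zero words, so the transfer still matches what the
device announced. A small userspace sketch of that decision follows; handle_overrun()
and xfer_word() are hypothetical stand-ins for the loop above and ata_data_xfer().

#include <stdint.h>
#include <stdio.h>

static void xfer_word(uint16_t *word, int do_write)
{
	if (do_write)
		printf("wrote 0x%04x to the device\n", *word);
	else
		*word = 0xffff;		/* pretend the device sent junk */
}

static void handle_overrun(unsigned int bytes, int do_write)
{
	uint16_t pad_buf = 0;
	unsigned int i, words = bytes >> 1;

	if (words)
		fprintf(stderr, "warning: %u bytes trailing data\n", bytes);

	/* drain (read) or pad with zeros (write), one word at a time */
	for (i = 0; i < words; i++)
		xfer_word(&pad_buf, do_write);
}

int main(void)
{
	handle_overrun(6, 0);		/* read: discard 3 words from the device */
	handle_overrun(6, 1);		/* write: send 3 zero words */
	return 0;
}
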
@@ -2625,11 +2760,21 @@ next_sg:
kunmap(page);
- if (bytes) {
+ if (bytes)
goto next_sg;
- }
}
+/**
+ * atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ * @qc: Command in progress
+ *
+ * Transfer data from/to the ATAPI device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -2702,9 +2847,7 @@ static void ata_pio_block(struct ata_port *ap)
if ((status & ATA_DRQ) == 0) {
ap->pio_task_state = PIO_ST_IDLE;
- ata_irq_on(ap);
-
- ata_qc_complete(qc, status);
+ ata_poll_qc_complete(qc, status);
return;
}
@@ -2734,9 +2877,7 @@ static void ata_pio_error(struct ata_port *ap)
ap->pio_task_state = PIO_ST_IDLE;
- ata_irq_on(ap);
-
- ata_qc_complete(qc, drv_stat | ATA_ERR);
+ ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
}
static void ata_pio_task(void *_data)
@@ -2842,8 +2983,10 @@ static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_host_set *host_set = ap->host_set;
struct ata_device *dev = qc->dev;
u8 host_stat = 0, drv_stat;
+ unsigned long flags;
DPRINTK("ENTER\n");
@@ -2854,7 +2997,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
/* finish completing original command */
+ spin_lock_irqsave(&host_set->lock, flags);
__ata_qc_complete(qc);
+ spin_unlock_irqrestore(&host_set->lock, flags);
atapi_request_sense(ap, dev, cmd);
@@ -2865,6 +3010,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
}
}
+ spin_lock_irqsave(&host_set->lock, flags);
+
/* hack alert! We cannot use the supplied completion
* function from inside the ->eh_strategy_handler() thread.
* libata is the only user of ->eh_strategy_handler() in
@@ -2880,7 +3027,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
host_stat = ap->ops->bmdma_status(ap);
/* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(ap);
+ ap->ops->bmdma_stop(qc);
/* fall through */
@@ -2898,6 +3045,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
ata_qc_complete(qc, drv_stat);
break;
}
+
+ spin_unlock_irqrestore(&host_set->lock, flags);
+
out:
DPRINTK("EXIT\n");
}
@@ -3071,9 +3221,14 @@ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
ata_sg_clean(qc);
+ /* atapi: mark qc as inactive so the interrupt handler cannot
+ * complete the command a second time later, before the error
+ * handler runs (when rc != 0 and an atapi request sense is needed).
+ */
+ qc->flags &= ~ATA_QCFLAG_ACTIVE;
+
/* call completion callback */
rc = qc->complete_fn(qc, drv_stat);
- qc->flags &= ~ATA_QCFLAG_ACTIVE;
/* if callback indicates not to complete command (non-zero),
* return immediately
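
A compressed, single-threaded sketch of the double completion this reordering avoids:
complete_fn() below stands in for the ATAPI check-condition path during which the
interrupt handler can look at the same qc. Every name here is hypothetical, and the
real problem involves the error handler rather than a nested call; only the ordering
of the flag clear relative to the callback mirrors the patch.

#include <assert.h>
#include <stdio.h>

#define QCFLAG_ACTIVE	0x1

struct qc {
	unsigned int flags;
	int completions;
};

static void irq_handler(struct qc *qc)
{
	if (qc->flags & QCFLAG_ACTIVE)	/* qc still looks in flight */
		qc->completions++;
}

static void complete_fn(struct qc *qc)
{
	qc->completions++;
	irq_handler(qc);		/* interrupt arriving while the callback runs */
}

static void qc_complete(struct qc *qc, int clear_active_first)
{
	if (clear_active_first)
		qc->flags &= ~QCFLAG_ACTIVE;	/* new ordering */
	complete_fn(qc);
	if (!clear_active_first)
		qc->flags &= ~QCFLAG_ACTIVE;	/* old ordering */
}

int main(void)
{
	struct qc buggy = { QCFLAG_ACTIVE, 0 }, fixed = { QCFLAG_ACTIVE, 0 };

	qc_complete(&buggy, 0);
	qc_complete(&fixed, 1);
	printf("old order: %d completions, new order: %d completion\n",
	       buggy.completions, fixed.completions);
	assert(fixed.completions == 1);
	return 0;
}
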
@@ -3203,11 +3358,13 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
break;
case ATA_PROT_ATAPI_NODATA:
+ ap->flags |= ATA_FLAG_NOINTR;
ata_tf_to_host_nolock(ap, &qc->tf);
queue_work(ata_wq, &ap->packet_task);
break;
case ATA_PROT_ATAPI_DMA:
+ ap->flags |= ATA_FLAG_NOINTR;
ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
ap->ops->bmdma_setup(qc); /* set up bmdma */
queue_work(ata_wq, &ap->packet_task);
@@ -3252,7 +3409,7 @@ static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
}
/**
- * ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
@@ -3423,7 +3580,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
/**
* ata_bmdma_stop - Stop PCI IDE BMDMA transfer
- * @ap: Port associated with this ATA transaction.
+ * @qc: Command we are ending DMA for
*
* Clears the ATA_DMA_START flag in the dma control register
*
@@ -3433,8 +3590,9 @@ u8 ata_bmdma_status(struct ata_port *ap)
* spin_lock_irqsave(host_set lock)
*/
-void ata_bmdma_stop(struct ata_port *ap)
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
+ struct ata_port *ap = qc->ap;
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
@@ -3486,7 +3644,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
goto idle_irq;
/* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(ap);
+ ap->ops->bmdma_stop(qc);
/* fall through */
@@ -3561,7 +3719,8 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
struct ata_port *ap;
ap = host_set->ports[i];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -3613,19 +3772,27 @@ static void atapi_packet_task(void *_data)
/* send SCSI cdb */
DPRINTK("send cdb\n");
assert(ap->cdb_len >= 12);
- ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
- /* if we are DMA'ing, irq handler takes over from here */
- if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
- ap->ops->bmdma_start(qc); /* initiate bmdma */
+ if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
+ qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
+ unsigned long flags;
- /* non-data commands are also handled via irq */
- else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
- /* do nothing */
- }
+ /* Once we're done issuing the command and kicking off bmdma,
+ * the irq handler takes over. So as not to lose the irq, we
+ * need to clear the NOINTR flag before sending the cdb, but
+ * the interrupt handler must not be invoked before we're
+ * finished. Hence the following locking.
+ */
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ ap->flags &= ~ATA_FLAG_NOINTR;
+ ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+ if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
+ ap->ops->bmdma_start(qc); /* initiate bmdma */
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+ } else {
+ ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
- /* PIO commands are handled by polling */
- else {
+ /* PIO commands are handled by polling */
ap->pio_task_state = PIO_ST;
queue_work(ata_wq, &ap->pio_task);
}
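
The locking comment above can be modeled in userspace with a mutex standing in for
host_set->lock: the "interrupt" only gets past the lock once NOINTR has been cleared,
and by then the cdb has been sent and bmdma started. A sketch under those assumptions
(irq_thread(), the flag bit and the file name are hypothetical scaffolding; only the
ordering mirrors the patch):

/* build: cc -pthread nointr_demo.c */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define FLAG_NOINTR	0x1

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int port_flags = FLAG_NOINTR;	/* set when the qc was queued */
static int cdb_sent, bmdma_started;

static void *irq_thread(void *arg)
{
	int done = 0;

	(void)arg;
	while (!done) {
		pthread_mutex_lock(&host_lock);
		if (!(port_flags & FLAG_NOINTR)) {
			/* NOINTR clear => the packet task finished issuing */
			assert(cdb_sent && bmdma_started);
			done = 1;	/* "complete the qc" */
		}
		pthread_mutex_unlock(&host_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, irq_thread, NULL);
	usleep(1000);			/* let "interrupts" fire early */

	/* packet task: issue everything under the lock, then let irqs in */
	pthread_mutex_lock(&host_lock);
	port_flags &= ~FLAG_NOINTR;
	cdb_sent = 1;			/* ata_data_xfer(ap, qc->cdb, ...) */
	bmdma_started = 1;		/* ap->ops->bmdma_start(qc) */
	pthread_mutex_unlock(&host_lock);

	pthread_join(irq, NULL);
	printf("irq handler saw a fully issued command\n");
	return 0;
}
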
@@ -3633,7 +3800,7 @@ static void atapi_packet_task(void *_data)
return;
err_out:
- ata_qc_complete(qc, ATA_ERR);
+ ata_poll_qc_complete(qc, ATA_ERR);
}