author     Ingo Molnar <mingo@elte.hu>    2008-07-14 10:31:59 +0200
committer  Ingo Molnar <mingo@elte.hu>    2008-07-14 10:31:59 +0200
commit     b4ba0ba24b57ec975482f4ba2d350fbee7557240
tree       c076e4c4e446180d6a36df3d55ae2ba7b0d7736e
parent     a033c332e047397904ed74816946b2edd9b0d5cd
parent     bce7f793daec3e65ec5c5705d2457b81fe7b5725
Merge commit 'v2.6.26' into core/locking
Diffstat (limited to 'drivers/ata/libata-sff.c')
-rw-r--r--   drivers/ata/libata-sff.c | 145
1 file changed, 121 insertions(+), 24 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 3c2d2289f85..c0908c22548 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -247,7 +247,7 @@ u8 ata_sff_check_status(struct ata_port *ap)
* LOCKING:
* Inherited from caller.
*/
-u8 ata_sff_altstatus(struct ata_port *ap)
+static u8 ata_sff_altstatus(struct ata_port *ap)
{
if (ap->ops->sff_check_altstatus)
return ap->ops->sff_check_altstatus(ap);
@@ -256,6 +256,93 @@ u8 ata_sff_altstatus(struct ata_port *ap)
}
/**
+ * ata_sff_irq_status - Check if the device is busy
+ * @ap: port where the device is
+ *
+ * Determine if the port is currently busy. Uses altstatus
+ * if available in order to avoid clearing the shared IRQ status
+ * when looking for an IRQ source. Fortunately, devices without
+ * a ctl register do not share interrupt lines.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static u8 ata_sff_irq_status(struct ata_port *ap)
+{
+ u8 status;
+
+ if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+ status = ata_sff_altstatus(ap);
+ /* Not us: We are busy */
+ if (status & ATA_BUSY)
+ return status;
+ }
+ /* Clear INTRQ latch */
+ status = ap->ops->sff_check_status(ap);
+ return status;
+}
+
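Background for the helper above: reading the Status register clears the device's INTRQ latch, while reading Alternate Status does not, so a port that shares its interrupt line must not touch Status while it is still BSY. A minimal sketch of a polling caller living in the same file, assuming nothing beyond what this patch adds (the wait loop itself is illustrative, not part of the patch):

/* Illustrative only: poll with ata_sff_irq_status() so that the INTRQ
 * latch is cleared only once the port has actually dropped BSY. */
static int example_wait_not_busy(struct ata_port *ap, unsigned long timeout)
{
	unsigned long deadline = jiffies + timeout;

	while (time_before(jiffies, deadline)) {
		u8 status = ata_sff_irq_status(ap);

		if (!(status & ATA_BUSY))
			return 0;	/* Status was read, INTRQ is cleared */
		cpu_relax();
	}
	return -EBUSY;
}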
+/**
+ * ata_sff_sync - Flush writes
+ * @ap: Port to wait for.
+ *
+ * CAUTION:
+ * If we have an mmio device with no ctl and no altstatus
+ * method, this will fail. No such devices are known to exist.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+static void ata_sff_sync(struct ata_port *ap)
+{
+ if (ap->ops->sff_check_altstatus)
+ ap->ops->sff_check_altstatus(ap);
+ else if (ap->ioaddr.altstatus_addr)
+ ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ * ata_sff_pause - Flush writes and wait 400ns
+ * @ap: Port to pause for.
+ *
+ * CAUTION:
+ * If we have an mmio device with no ctl and no altstatus
+ * method, this will fail. No such devices are known to exist.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
+void ata_sff_pause(struct ata_port *ap)
+{
+ ata_sff_sync(ap);
+ ndelay(400);
+}
+
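Typical callers write a taskfile register and then need both the flush and the 400ns settle time before Status can be trusted. A sketch modeled loosely on the in-tree ata_sff_dev_select(); the wrapper name is hypothetical:

/* Hypothetical wrapper: select a device, then flush the posted write and
 * honour the 400ns rule before the next status read. */
static void example_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp = device ? (ATA_DEVICE_OBS | ATA_DEV1) : ATA_DEVICE_OBS;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* flush + wait 400ns, per the ATA spec */
}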
+/**
+ * ata_sff_dma_pause - Pause before commencing DMA
+ * @ap: Port to pause for.
+ *
+ * Perform I/O fencing and ensure sufficient cycle delays occur
+ * for the HDMA1:0 transition
+ */
+
+void ata_sff_dma_pause(struct ata_port *ap)
+{
+ if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+ /* An altstatus read will cause the needed delay without
+ messing up the IRQ status */
+ ata_sff_altstatus(ap);
+ return;
+ }
+ /* There are no DMA controllers without ctl. BUG here to ensure
+ we never violate the HDMA1:0 transition timing and risk
+ corruption. */
+ BUG();
+}
+
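The canonical user of this fence is the busmaster-DMA stop path, which a later hunk in this patch converts. A condensed sketch of that sequence, simplified from ata_bmdma_stop() with error handling omitted:

/* Sketch: stop busmaster DMA, then fence with ata_sff_dma_pause() so the
 * one-PIO-cycle wait happens without disturbing the IRQ status latch. */
static void example_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}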
+/**
* ata_sff_busy_sleep - sleep until BSY clears, or timeout
* @ap: port containing status register to be polled
* @tmout_pat: impatience timeout
@@ -742,7 +829,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
} else
ata_pio_sector(qc);
- ata_sff_altstatus(qc->ap); /* flush */
+ ata_sff_sync(qc->ap); /* flush */
}
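The switch above matters because MMIO writes can be posted: the readback inside ata_sff_sync() is what pushes the just-transferred sector out to the device, without clearing the IRQ latch the way a Status read would. A small illustrative pattern; the wrapper function is hypothetical, while the ops call matches the one used elsewhere in this patch:

/* Hypothetical helper: push one PIO sector, then flush posted MMIO writes
 * with a harmless Alternate Status readback before checking state again. */
static void example_xfer_and_flush(struct ata_queued_cmd *qc, unsigned char *buf)
{
	struct ata_port *ap = qc->ap;

	ap->ops->sff_data_xfer(qc->dev, buf, ATA_SECT_SIZE, 1);
	ata_sff_sync(ap);	/* flush; does not clear the INTRQ latch */
}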
/**
@@ -763,8 +850,9 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
WARN_ON(qc->dev->cdb_len < 12);
ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
- ata_sff_altstatus(ap); /* flush */
-
+ ata_sff_sync(ap);
+ /* FIXME: If the CDB is for DMA do we need to do the transition delay
+ or is bmdma_start guaranteed to do it ? */
switch (qc->tf.protocol) {
case ATAPI_PROT_PIO:
ap->hsm_task_state = HSM_ST;
@@ -905,7 +993,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
if (unlikely(__atapi_pio_bytes(qc, bytes)))
goto err_out;
- ata_sff_altstatus(ap); /* flush */
+ ata_sff_sync(ap); /* flush */
return;
@@ -1006,6 +1094,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq)
{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags = 0;
int poll_next;
@@ -1037,9 +1126,12 @@ fsm_start:
if (likely(status & (ATA_ERR | ATA_DF)))
/* device stops HSM for abort/error */
qc->err_mask |= AC_ERR_DEV;
- else
+ else {
/* HSM violation. Let EH handle this */
+ ata_ehi_push_desc(ehi,
+ "ST_FIRST: !(DRQ|ERR|DF)");
qc->err_mask |= AC_ERR_HSM;
+ }
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
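For context on the conversion from ata_port_printk() to ata_ehi_push_desc() in these hunks: the pushed text accumulates in the link's EH info and is emitted later by libata EH as part of the failure report, instead of being printed immediately. A minimal sketch of the pattern; the wrapper function is hypothetical:

/* Hypothetical helper: record extra context for the EH report rather than
 * printing it immediately; EH prints the accumulated description later. */
static void example_note_hsm_violation(struct ata_port *ap, u8 status)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_push_desc(ehi, "example: HSM violation, dev_stat 0x%X", status);
	ap->hsm_task_state = HSM_ST_ERR;	/* let EH take over */
}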
@@ -1058,9 +1150,9 @@ fsm_start:
* the CDB.
*/
if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
- ata_port_printk(ap, KERN_WARNING,
- "DRQ=1 with device error, "
- "dev_stat 0x%X\n", status);
+ ata_ehi_push_desc(ehi, "ST_FIRST: "
+ "DRQ=1 with device error, "
+ "dev_stat 0x%X", status);
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
@@ -1117,9 +1209,9 @@ fsm_start:
* let the EH abort the command or reset the device.
*/
if (unlikely(status & (ATA_ERR | ATA_DF))) {
- ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
- "device error, dev_stat 0x%X\n",
- status);
+ ata_ehi_push_desc(ehi, "ST-ATAPI: "
+ "DRQ=1 with device error, "
+ "dev_stat 0x%X", status);
qc->err_mask |= AC_ERR_HSM;
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
@@ -1138,13 +1230,17 @@ fsm_start:
if (likely(status & (ATA_ERR | ATA_DF)))
/* device stops HSM for abort/error */
qc->err_mask |= AC_ERR_DEV;
- else
+ else {
/* HSM violation. Let EH handle this.
* Phantom devices also trigger this
* condition. Mark hint.
*/
+ ata_ehi_push_desc(ehi, "ST-ATA: "
+ "DRQ=1 with device error, "
+ "dev_stat 0x%X", status);
qc->err_mask |= AC_ERR_HSM |
AC_ERR_NODEV_HINT;
+ }
ap->hsm_task_state = HSM_ST_ERR;
goto fsm_start;
@@ -1169,8 +1265,12 @@ fsm_start:
status = ata_wait_idle(ap);
}
- if (status & (ATA_BUSY | ATA_DRQ))
+ if (status & (ATA_BUSY | ATA_DRQ)) {
+ ata_ehi_push_desc(ehi, "ST-ATA: "
+ "BUSY|DRQ persists on ERR|DF, "
+ "dev_stat 0x%X", status);
qc->err_mask |= AC_ERR_HSM;
+ }
/* ata_pio_sectors() might change the
* state to HSM_ST_LAST. so, the state
@@ -1489,14 +1589,10 @@ inline unsigned int ata_sff_host_intr(struct ata_port *ap,
goto idle_irq;
}
- /* check altstatus */
- status = ata_sff_altstatus(ap);
- if (status & ATA_BUSY)
- goto idle_irq;
- /* check main status, clearing INTRQ */
- status = ap->ops->sff_check_status(ap);
- if (unlikely(status & ATA_BUSY))
+ /* check main status, clearing INTRQ if needed */
+ status = ata_sff_irq_status(ap);
+ if (status & ATA_BUSY)
goto idle_irq;
/* ack bmdma irq events */
@@ -2030,7 +2126,7 @@ void ata_sff_error_handler(struct ata_port *ap)
ap->ops->bmdma_stop(qc);
}
- ata_sff_altstatus(ap);
+ ata_sff_sync(ap); /* FIXME: We don't need this */
ap->ops->sff_check_status(ap);
ap->ops->sff_irq_clear(ap);
@@ -2203,7 +2299,7 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
mmio + ATA_DMA_CMD);
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_altstatus(ap); /* dummy read */
+ ata_sff_dma_pause(ap);
}
/**
@@ -2722,7 +2818,8 @@ EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_sff_dev_select);
EXPORT_SYMBOL_GPL(ata_sff_check_status);
-EXPORT_SYMBOL_GPL(ata_sff_altstatus);
+EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
+EXPORT_SYMBOL_GPL(ata_sff_pause);
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
EXPORT_SYMBOL_GPL(ata_sff_tf_load);