Diffstat (limited to 'drivers/ide/ide-io.c')
-rw-r--r--	drivers/ide/ide-io.c	14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 9e9cf140731..ecfafcdafea 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -1101,6 +1101,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 	ide_hwif_t *hwif;
 	struct request *rq;
 	ide_startstop_t startstop;
+	int loops = 0;
 
 	/* for atari only: POSSIBLY BROKEN HERE(?) */
 	ide_get_lock(ide_intr, hwgroup);
@@ -1153,6 +1154,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 			/* no more work for this hwgroup (for now) */
 			return;
 		}
+	again:
 		hwif = HWIF(drive);
 		if (hwgroup->hwif->sharing_irq &&
 		    hwif != hwgroup->hwif &&
@@ -1192,8 +1194,14 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		 * though. I hope that doesn't happen too much, hopefully not
 		 * unless the subdriver triggers such a thing in its own PM
 		 * state machine.
+		 *
+		 * We count how many times we loop here to make sure we service
+		 * all drives in the hwgroup without looping forever.
 		 */
 		if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) {
+			drive = drive->next ? drive->next : hwgroup->drive;
+			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
+				goto again;
 			/* We clear busy, there should be no pending ATA command at this point. */
 			hwgroup->busy = 0;
 			break;
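
The hunk above caps a round-robin walk over the hwgroup's circular drive
list: when the chosen drive is blocked, step to the next drive and retry, at
most four extra passes, before giving up and clearing hwgroup->busy. Below
is a minimal userspace sketch of that pattern; the struct drive and struct
hwgroup types and the pick_drive() helper are illustrative stand-ins for the
kernel's ide_drive_t/ide_hwgroup_t, and the sketch omits the
blk_pm_request()/REQ_PREEMPT and blk_queue_plugged() checks the real code
performs.

#include <stdio.h>

struct drive {
	const char *name;
	int blocked;		/* drive is suspended, cannot take commands */
	struct drive *next;	/* circular list within the hwgroup */
};

struct hwgroup {
	struct drive *drive;	/* head of the circular drive list */
};

/*
 * Look for a drive that is not blocked, stepping around the hwgroup.
 * The loop counter bounds the walk so a hwgroup whose drives are all
 * blocked cannot spin forever; returning NULL mirrors the patch
 * falling through to "hwgroup->busy = 0; break;".
 */
static struct drive *pick_drive(struct hwgroup *hwg, struct drive *drive)
{
	int loops = 0;
again:
	if (!drive->blocked)
		return drive;
	drive = drive->next ? drive->next : hwg->drive;
	if (loops++ < 4)
		goto again;
	return NULL;
}

int main(void)
{
	struct drive hdb = { "hdb", 0, NULL };
	struct drive hda = { "hda", 1, &hdb };	/* hda is blocked */
	struct hwgroup hwg = { &hda };

	hdb.next = &hda;	/* close the circular list */

	struct drive *d = pick_drive(&hwg, &hda);
	printf("servicing %s\n", d ? d->name : "no drive");
	return 0;
}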
@@ -1621,12 +1629,6 @@ EXPORT_SYMBOL(ide_init_drive_cmd);
  *	for the new rq to be completed. This is VERY DANGEROUS, and is
  *	intended for careful use by the ATAPI tape/cdrom driver code.
  *
- *	If action is ide_next, then the rq is queued immediately after
- *	the currently-being-processed-request (if any), and the function
- *	returns without waiting for the new rq to be completed. As above,
- *	This is VERY DANGEROUS, and is intended for careful use by the
- *	ATAPI tape/cdrom driver code.
- *
  *	If action is ide_end, then the rq is queued at the end of the
  *	request queue, and the function returns immediately without waiting
  *	for the new rq to be completed. This is again intended for careful
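
The last hunk deletes the stale ide_next paragraph from this documentation
block (it documents ide_do_drive_cmd), presumably because that action is no
longer offered. The contract in the surviving text is simple: ide_end
enqueues the rq at the tail of the request queue and returns at once, while
an ide_wait submission blocks until the rq completes. Here is a toy
userspace sketch of that distinction; the submit() and service_one() helpers
and the ACT_WAIT/ACT_END names are hypothetical stand-ins, not kernel API.

#include <stdio.h>

enum action { ACT_WAIT, ACT_END };	/* stand-ins for ide_wait / ide_end */

struct req { const char *name; int done; struct req *next; };
struct queue { struct req *head, *tail; };

static void enqueue_tail(struct queue *q, struct req *rq)
{
	rq->next = NULL;
	if (q->tail)
		q->tail->next = rq;
	else
		q->head = rq;
	q->tail = rq;
}

/* stand-in for the driver completing the request at the queue head */
static void service_one(struct queue *q)
{
	struct req *rq = q->head;

	if (!rq)
		return;
	q->head = rq->next;
	if (!q->head)
		q->tail = NULL;
	rq->done = 1;
	printf("completed %s\n", rq->name);
}

static int submit(struct queue *q, struct req *rq, enum action act)
{
	enqueue_tail(q, rq);
	if (act == ACT_END)
		return 0;	/* ide_end: return now; rq must outlive the call */
	while (!rq->done)	/* ide_wait: block (here, poll) until completed */
		service_one(q);
	return 0;
}

int main(void)
{
	struct queue q = { NULL, NULL };
	struct req rq = { "packet-cmd", 0, NULL };

	submit(&q, &rq, ACT_WAIT);	/* prints "completed packet-cmd" */
	return 0;
}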