author	James Bottomley <JBottomley@Parallels.com>	2013-07-02 15:05:26 +0200
committer	James Bottomley <JBottomley@Parallels.com>	2013-07-09 12:14:08 +0100
commit	e2eb7244bc9e4fd130fc8a961224968e22ba48ee
tree	a1ba31c44721a9a106967e16b6065d4b98c0abeb
parent	fec3c1b4575431e2020c5c6502d18b281925ca45
[SCSI] Fix race between starved list and device removal
scsi_run_queue() examines all SCSI devices that are present on
the starved list. Since scsi_run_queue() drops the SCSI host lock
while running a queue, a SCSI device can be removed after it has
been taken off the starved list but before its queue is run.
Protect against that race condition by holding a reference on the
request queue while running it.
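
To make the race concrete, here is a condensed sketch of the
pre-patch loop (simplified from scsi_lib.c; the host/device busy
checks are omitted, so treat this as an illustration rather than
the literal kernel source):

	/* Simplified pre-patch loop -- not the literal source. */
	while (!list_empty(&starved_list)) {
		struct scsi_device *sdev;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);

		spin_unlock(shost->host_lock);
		/*
		 * Race window: with the host lock dropped, a
		 * concurrent scsi_remove_device() may free sdev and
		 * its request queue, so the dereferences below can
		 * touch freed memory.
		 */
		spin_lock(sdev->request_queue->queue_lock);
		__blk_run_queue(sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);
		spin_lock(shost->host_lock);
	}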
Reported-by: Chanho Min <chanho.min@lge.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
 drivers/scsi/scsi_lib.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 86d522004a2..df8bd5ab3c0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -434,6 +434,8 @@ static void scsi_run_queue(struct request_queue *q)
 	list_splice_init(&shost->starved_list, &starved_list);
 
 	while (!list_empty(&starved_list)) {
+		struct request_queue *slq;
+
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -456,11 +458,25 @@ static void scsi_run_queue(struct request_queue *q)
 			continue;
 		}
 
-		spin_unlock(shost->host_lock);
-		spin_lock(sdev->request_queue->queue_lock);
-		__blk_run_queue(sdev->request_queue);
-		spin_unlock(sdev->request_queue->queue_lock);
-		spin_lock(shost->host_lock);
+		/*
+		 * Once we drop the host lock, a racing scsi_remove_device()
+		 * call may remove the sdev from the starved list and destroy
+		 * it and the queue. Mitigate by taking a reference to the
+		 * queue and never touching the sdev again after we drop the
+		 * host lock. Note: if __scsi_remove_device() invokes
+		 * blk_cleanup_queue() before the queue is run from this
+		 * function then blk_run_queue() will return immediately since
+		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+		 */
+		slq = sdev->request_queue;
+		if (!blk_get_queue(slq))
+			continue;
+
+		spin_unlock_irqrestore(shost->host_lock, flags);
+
+		blk_run_queue(slq);
+		blk_put_queue(slq);
+
+		spin_lock_irqsave(shost->host_lock, flags);
 	}
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);
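
The blk_get_queue() call is what makes the new loop safe: it only
takes a reference while the queue has not yet been marked dying. For
reference, the block-layer helper of that era looks approximately
like this (based on block/blk-core.c of the 3.10 timeframe;
reproduced from memory, so treat it as a sketch rather than the
exact source):

	bool blk_get_queue(struct request_queue *q)
	{
		if (likely(!blk_queue_dying(q))) {
			__blk_get_queue(q);	/* takes a kobject reference */
			return true;
		}

		return false;
	}

Once blk_cleanup_queue() has set QUEUE_FLAG_DYING, blk_get_queue()
fails and the starved-list loop simply skips that device, which is
exactly the behavior the comment in the patch relies on.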