Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r-- | drivers/scsi/scsi_lib.c | 82
1 file changed, 54 insertions, 28 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52abd2..6d5c7ff43f5 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -67,6 +67,13 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 
 struct kmem_cache *scsi_sdb_cache;
 
+/*
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs to
+ * not change behaviour from the previous unplug mechanism, experimentation
+ * may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY	3
+
 static void scsi_run_queue(struct request_queue *q);
 
 /*
@@ -149,14 +156,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 	/*
 	 * Requeue this command.  It will go before all other commands
 	 * that are already in the queue.
-	 *
-	 * NOTE: there is magic here about the way the queue is plugged if
-	 * we have no outstanding commands.
-	 *
-	 * Although we *don't* plug the queue, we call the request
-	 * function.  The SCSI request function detects the blocked condition
-	 * and plugs the queue appropriately.
-	 */
+	 */
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, cmd->request);
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
 				       &sdev->request_queue->queue_flags);
 		if (flagset)
 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
+		__blk_run_queue(sdev->request_queue, false);
 		if (flagset)
 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
 		spin_unlock(sdev->request_queue->queue_lock);
@@ -667,6 +667,30 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
 }
 EXPORT_SYMBOL(scsi_release_buffers);
 
+static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+{
+	int error = 0;
+
+	switch(host_byte(result)) {
+	case DID_TRANSPORT_FAILFAST:
+		error = -ENOLINK;
+		break;
+	case DID_TARGET_FAILURE:
+		cmd->result |= (DID_OK << 16);
+		error = -EREMOTEIO;
+		break;
+	case DID_NEXUS_FAILURE:
+		cmd->result |= (DID_OK << 16);
+		error = -EBADE;
+		break;
+	default:
+		error = -EIO;
+		break;
+	}
+
+	return error;
+}
+
 /*
  * Function:    scsi_io_completion()
  *
@@ -737,7 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				req->sense_len = len;
 		}
 		if (!sense_deferred)
-			error = -EIO;
+			error = __scsi_error_from_host_byte(cmd, result);
 	}
 
 	req->resid_len = scsi_get_resid(cmd);
@@ -796,7 +820,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
 		return;
 
-	error = -EIO;
+	error = __scsi_error_from_host_byte(cmd, result);
 
 	if (host_byte(result) == DID_RESET) {
 		/* Third party bus reset or reset for error recovery
@@ -843,6 +867,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			description = "Host Data Integrity Failure";
 			action = ACTION_FAIL;
 			error = -EILSEQ;
+		/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+		} else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
+			   (cmd->cmnd[0] == UNMAP ||
+			    cmd->cmnd[0] == WRITE_SAME_16 ||
+			    cmd->cmnd[0] == WRITE_SAME)) {
+			description = "Discard failure";
+			action = ACTION_FAIL;
 		} else
 			action = ACTION_FAIL;
 		break;
@@ -1038,6 +1069,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 	cmd->request = req;
 
 	cmd->cmnd = req->cmd;
+	cmd->prot_op = SCSI_PROT_NORMAL;
 
 	return cmd;
 }
@@ -1194,11 +1226,11 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 	case BLKPREP_DEFER:
 		/*
 		 * If we defer, the blk_peek_request() returns NULL, but the
-		 * queue must be restarted, so we plug here if no returning
-		 * command will automatically do that.
+		 * queue must be restarted, so we schedule a callback to happen
+		 * shortly.
 		 */
 		if (sdev->device_busy == 0)
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 		break;
 	default:
 		req->cmd_flags |= REQ_DONTPREP;
@@ -1237,7 +1269,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
 				   sdev_printk(KERN_INFO, sdev,
 				   "unblocking device at zero depth\n"));
 		} else {
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 			return 0;
 		}
 	}
@@ -1467,7 +1499,7 @@ static void scsi_request_fn(struct request_queue *q)
 	 * the host is no longer able to accept any more requests.
 	 */
 	shost = sdev->host;
-	while (!blk_queue_plugged(q)) {
+	for (;;) {
 		int rtn;
 		/*
 		 * get next queueable request.  We do this early to make sure
@@ -1546,15 +1578,8 @@ static void scsi_request_fn(struct request_queue *q)
 		 */
 		rtn = scsi_dispatch_cmd(cmd);
 		spin_lock_irq(q->queue_lock);
-		if(rtn) {
-			/* we're refusing the command; because of
-			 * the way locks get dropped, we need to
-			 * check here if plugging is required */
-			if(sdev->device_busy == 0)
-				blk_plug_device(q);
-
-			break;
-		}
+		if (rtn)
+			goto out_delay;
 	}
 
 	goto out;
@@ -1573,9 +1598,10 @@ static void scsi_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 	blk_requeue_request(q, req);
 	sdev->device_busy--;
-	if(sdev->device_busy == 0)
-		blk_plug_device(q);
- out:
+out_delay:
+	if (sdev->device_busy == 0)
+		blk_delay_queue(q, SCSI_QUEUE_DELAY);
+out:
 	/* must be careful here...if we trigger the ->remove() function
 	 * we cannot be holding the q lock */
 	spin_unlock_irq(q->queue_lock);
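Editor's note on the new __scsi_error_from_host_byte() helper added above: the sketch below is a minimal, self-contained userspace illustration of the same host-byte-to-errno mapping. It is not part of the patch; the DID_* values and the host_byte() shift are duplicated here for illustration only and are assumed to match include/scsi/scsi.h of this kernel generation, while error_from_host_byte() and main() are hypothetical names used just for this demo.

/*
 * Illustrative userspace sketch only -- not part of the patch above.
 * DID_* values and host_byte() are assumed to mirror include/scsi/scsi.h.
 */
#include <errno.h>
#include <stdio.h>

#define DID_OK			0x00
#define DID_TRANSPORT_FAILFAST	0x0f
#define DID_TARGET_FAILURE	0x10
#define DID_NEXUS_FAILURE	0x11

/* The host byte lives in bits 16-23 of the SCSI result word. */
#define host_byte(result)	(((result) >> 16) & 0xff)

/* Same mapping as the new __scsi_error_from_host_byte() helper. */
static int error_from_host_byte(int result)
{
	switch (host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		return -ENOLINK;	/* transport fast-failed the command */
	case DID_TARGET_FAILURE:
		return -EREMOTEIO;	/* permanent target failure */
	case DID_NEXUS_FAILURE:
		return -EBADE;		/* permanent nexus failure */
	default:
		return -EIO;		/* everything else stays a generic I/O error */
	}
}

int main(void)
{
	int results[] = {
		DID_OK << 16,
		DID_TRANSPORT_FAILFAST << 16,
		DID_TARGET_FAILURE << 16,
		DID_NEXUS_FAILURE << 16,
	};

	for (unsigned int i = 0; i < sizeof(results) / sizeof(results[0]); i++)
		printf("host byte 0x%02x -> errno %d\n",
		       host_byte(results[i]), error_from_host_byte(results[i]));
	return 0;
}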