Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c | 310
1 file changed, 224 insertions(+), 86 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dc249cb970e..ba93d6e66d4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -63,39 +63,6 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 };
 #undef SP
 
-
-/*
- * Function:    scsi_insert_special_req()
- *
- * Purpose:     Insert pre-formed request into request queue.
- *
- * Arguments:   sreq      - request that is ready to be queued.
- *              at_head   - boolean.  True if we should insert at head
- *                          of queue, false if we should insert at tail.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     Nothing
- *
- * Notes:       This function is called from character device and from
- *              ioctl types of functions where the caller knows exactly
- *              what SCSI command needs to be issued.  The idea is that
- *              we merely inject the command into the queue (at the head
- *              for now), and then call the queue request function to actually
- *              process it.
- */
-int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
-{
-	/*
-	 * Because users of this function are apt to reuse requests with no
-	 * modification, we have to sanitise the request flags here
-	 */
-	sreq->sr_request->flags &= ~REQ_DONTPREP;
-	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
-			   at_head, sreq);
-	return 0;
-}
-
 static void scsi_run_queue(struct request_queue *q);
 
 /*
@@ -249,8 +216,13 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
 
 	/*
 	 * head injection *required* here otherwise quiesce won't work
+	 *
+	 * Because users of this function are apt to reuse requests with no
+	 * modification, we have to sanitise the request flags here
 	 */
-	scsi_insert_special_req(sreq, 1);
+	sreq->sr_request->flags &= ~REQ_DONTPREP;
+	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
+			   1, sreq);
 }
 EXPORT_SYMBOL(scsi_do_req);
 
@@ -287,6 +259,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	memcpy(req->cmd, cmd, req->cmd_len);
 	req->sense = sense;
 	req->sense_len = 0;
+	req->retries = retries;
 	req->timeout = timeout;
 	req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
 
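
The hunks above fold scsi_insert_special_req() into its one remaining caller and, more importantly, make scsi_execute() record its retries argument in the request, so the retry count travels with the request instead of being hardcoded later in the prep path. A minimal caller-side sketch, assuming the 2.6.15-era scsi_execute() signature shown above; the example_* name is hypothetical:

	#include <linux/dma-mapping.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_device.h>

	/* Hypothetical example: issue TEST UNIT READY via scsi_execute().
	 * With the hunk above applied, the retry count is stored in
	 * req->retries rather than being fixed at prep time. */
	static int example_test_unit_ready(struct scsi_device *sdev)
	{
		unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
		unsigned char sense[SCSI_SENSE_BUFFERSIZE];

		/* DMA_NONE: no data phase; 3 retries, 10 second timeout.
		 * Returns the request's errors field (0 on success). */
		return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense,
				    10 * HZ, 3, 0);
	}
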
@@ -327,6 +300,200 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute_req);
 
+struct scsi_io_context {
+	void *data;
+	void (*done)(void *data, char *sense, int result, int resid);
+	char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
+static kmem_cache_t *scsi_io_context_cache;
+
+static void scsi_end_async(struct request *req, int uptodate)
+{
+	struct scsi_io_context *sioc = req->end_io_data;
+
+	if (sioc->done)
+		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
+
+	kmem_cache_free(scsi_io_context_cache, sioc);
+	__blk_put_request(req->q, req);
+}
+
+static int scsi_merge_bio(struct request *rq, struct bio *bio)
+{
+	struct request_queue *q = rq->q;
+
+	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+	if (rq_data_dir(rq) == WRITE)
+		bio->bi_rw |= (1 << BIO_RW);
+	blk_queue_bounce(q, &bio);
+
+	if (!rq->bio)
+		blk_rq_bio_prep(q, rq, bio);
+	else if (!q->back_merge_fn(q, rq, bio))
+		return -EINVAL;
+	else {
+		rq->biotail->bi_next = bio;
+		rq->biotail = bio;
+		rq->hard_nr_sectors += bio_sectors(bio);
+		rq->nr_sectors = rq->hard_nr_sectors;
+	}
+
+	return 0;
+}
+
+static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
+{
+	if (bio->bi_size)
+		return 1;
+
+	bio_put(bio);
+	return 0;
+}
+
+/**
+ * scsi_req_map_sg - map a scatterlist into a request
+ * @rq:		request to fill
+ * @sgl:	scatterlist
+ * @nsegs:	number of elements
+ * @bufflen:	len of buffer
+ * @gfp:	memory allocation flags
+ *
+ * scsi_req_map_sg maps a scatterlist into a request so that the
+ * request can be sent to the block layer. We do not trust the scatterlist
+ * sent to us, as some ULDs use that struct to only organize the pages.
+ */
+static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
+			   int nsegs, unsigned bufflen, gfp_t gfp)
+{
+	struct request_queue *q = rq->q;
+	int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned int data_len = 0, len, bytes, off;
+	struct page *page;
+	struct bio *bio = NULL;
+	int i, err, nr_vecs = 0;
+
+	for (i = 0; i < nsegs; i++) {
+		page = sgl[i].page;
+		off = sgl[i].offset;
+		len = sgl[i].length;
+		data_len += len;
+
+		while (len > 0) {
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+			if (!bio) {
+				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+				nr_pages -= nr_vecs;
+
+				bio = bio_alloc(gfp, nr_vecs);
+				if (!bio) {
+					err = -ENOMEM;
+					goto free_bios;
+				}
+				bio->bi_end_io = scsi_bi_endio;
+			}
+
+			if (bio_add_pc_page(q, bio, page, bytes, off) !=
+			    bytes) {
+				bio_put(bio);
+				err = -EINVAL;
+				goto free_bios;
+			}
+
+			if (bio->bi_vcnt >= nr_vecs) {
+				err = scsi_merge_bio(rq, bio);
+				if (err) {
+					bio_endio(bio, bio->bi_size, 0);
+					goto free_bios;
+				}
+				bio = NULL;
+			}
+
+			page++;
+			len -= bytes;
+			off = 0;
+		}
+	}
+
+	rq->buffer = rq->data = NULL;
+	rq->data_len = data_len;
+	return 0;
+
+free_bios:
+	while ((bio = rq->bio) != NULL) {
+		rq->bio = bio->bi_next;
+		/*
+		 * call endio instead of bio_put in case it was bounced
+		 */
+		bio_endio(bio, bio->bi_size, 0);
+	}
+
+	return err;
+}
+
+/**
+ * scsi_execute_async - insert request
+ * @sdev:	scsi device
+ * @cmd:	scsi command
+ * @data_direction: data direction
+ * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
+ * @bufflen:	len of buffer
+ * @use_sg:	if buffer is a scatterlist this is the number of elements
+ * @timeout:	request timeout in seconds
+ * @retries:	number of times to retry request
+ * @flags:	or into request flags
+ **/
+int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
+		       int data_direction, void *buffer, unsigned bufflen,
+		       int use_sg, int timeout, int retries, void *privdata,
+		       void (*done)(void *, char *, int, int), gfp_t gfp)
+{
+	struct request *req;
+	struct scsi_io_context *sioc;
+	int err = 0;
+	int write = (data_direction == DMA_TO_DEVICE);
+
+	sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
+	if (!sioc)
+		return DRIVER_ERROR << 24;
+	memset(sioc, 0, sizeof(*sioc));
+
+	req = blk_get_request(sdev->request_queue, write, gfp);
+	if (!req)
+		goto free_sense;
+	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
+
+	if (use_sg)
+		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
+	else if (bufflen)
+		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
+
+	if (err)
+		goto free_req;
+
+	req->cmd_len = COMMAND_SIZE(cmd[0]);
+	memcpy(req->cmd, cmd, req->cmd_len);
+	req->sense = sioc->sense;
+	req->sense_len = 0;
+	req->timeout = timeout;
+	req->retries = retries;
+	req->end_io_data = sioc;
+
+	sioc->data = privdata;
+	sioc->done = done;
+
+	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
+	return 0;
+
+free_req:
+	blk_put_request(req);
+free_sense:
+	kfree(sioc);
+	return DRIVER_ERROR << 24;
+}
+EXPORT_SYMBOL_GPL(scsi_execute_async);
+
 /*
  * Function:    scsi_init_cmd_errh()
 *
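
This hunk is the core of the diff: scsi_execute_async() lets an upper-layer driver fire a SCSI command without sleeping, with the status, sense data and residual count delivered to a done() callback from the block layer's end_io path. (Note that the kernel-doc still lists a @flags parameter, while the final signature actually takes privdata, done and gfp.) A usage sketch under those assumptions; the example_* names are hypothetical and the callback must not sleep:

	#include <linux/completion.h>
	#include <linux/dma-mapping.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_device.h>

	struct example_ctx {
		struct completion comp;
		int result;
	};

	/* Runs from the block layer's completion path; no sleeping here. */
	static void example_done(void *data, char *sense, int result, int resid)
	{
		struct example_ctx *ctx = data;

		ctx->result = result;	/* same value as req->errors */
		complete(&ctx->comp);
	}

	/* Hypothetical: fetch 96 bytes of standard INQUIRY data without
	 * blocking the caller. */
	static int example_inquiry_async(struct scsi_device *sdev, void *buf,
					 struct example_ctx *ctx)
	{
		unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 96, 0 };

		init_completion(&ctx->comp);
		return scsi_execute_async(sdev, cmd, DMA_FROM_DEVICE, buf, 96,
					  0, 10 * HZ, 3, ctx, example_done,
					  GFP_KERNEL);
	}

With use_sg == 0 the buffer is a plain kernel buffer and is mapped with blk_rq_map_kern(); passing a scatterlist with use_sg > 0 routes it through scsi_req_map_sg() instead.
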
@@ -624,7 +791,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 		spin_lock_irqsave(q->queue_lock, flags);
 		if (blk_rq_tagged(req))
 			blk_queue_end_tag(q, req);
-		end_that_request_last(req);
+		end_that_request_last(req, uptodate);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 
 		/*
@@ -765,9 +932,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
 	int sense_valid = 0;
 	int sense_deferred = 0;
 
-	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
-		return;
-
 	/*
 	 * Free up any indirection buffers we allocated for DMA purposes.
 	 * For the case of a READ, we need to copy the data out of the
@@ -884,7 +1048,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
 			 * system where READ CAPACITY failed, we may have read
 			 * past the end of the disk.
 			 */
-			if (cmd->device->use_10_for_rw &&
+			if ((cmd->device->use_10_for_rw &&
+			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
 			    (cmd->cmnd[0] == READ_10 ||
 			     cmd->cmnd[0] == WRITE_10)) {
 				cmd->device->use_10_for_rw = 0;
@@ -1031,38 +1196,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	return BLKPREP_KILL;
 }
 
-static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct scsi_driver *drv;
-
-	if (sdev->sdev_state == SDEV_RUNNING) {
-		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-
-		if (drv->prepare_flush)
-			return drv->prepare_flush(q, rq);
-	}
-
-	return 0;
-}
-
-static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
-{
-	struct scsi_device *sdev = q->queuedata;
-	struct request *flush_rq = rq->end_io_data;
-	struct scsi_driver *drv;
-
-	if (flush_rq->errors) {
-		printk("scsi: barrier error, disabling flush support\n");
-		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
-	}
-
-	if (sdev->sdev_state == SDEV_RUNNING) {
-		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-		drv->end_flush(q, rq);
-	}
-}
-
 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 			       sector_t *error_sector)
 {
@@ -1082,10 +1215,16 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
 static void scsi_generic_done(struct scsi_cmnd *cmd)
 {
 	BUG_ON(!blk_pc_request(cmd->request));
-	scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
+	/*
+	 * This will complete the whole command with uptodate=1 so
+	 * as far as the block layer is concerned the command completed
+	 * successfully. Since this is a REQ_BLOCK_PC command the
+	 * caller should check the request's errors value
+	 */
+	scsi_io_completion(cmd, cmd->bufflen, 0);
 }
 
-void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries)
+void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
 {
 	struct request *req = cmd->request;
 
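
The completion-path hunks above track a block-layer API change (end_that_request_last() now takes an uptodate argument), drop the midlayer's barrier and flush hooks as ordering moves into the block layer, and narrow the READ(10)/WRITE(10) fallback: the device is only downgraded to 6-byte commands when the sense data actually reports INVALID COMMAND OPERATION CODE (asc 0x20, ascq 0x00), not on any ILLEGAL REQUEST. A sketch of the equivalent sense test; the helper name is hypothetical, scsi_normalize_sense() is the real midlayer decoder:

	#include <scsi/scsi.h>
	#include <scsi/scsi_eh.h>

	/* Hypothetical helper mirroring the narrowed check above: treat the
	 * command as unsupported only when the sense data says so. */
	static int example_is_invalid_opcode(const unsigned char *sense, int len)
	{
		struct scsi_sense_hdr sshdr;

		if (!scsi_normalize_sense(sense, len, &sshdr))
			return 0;
		return sshdr.sense_key == ILLEGAL_REQUEST &&
		       sshdr.asc == 0x20 && sshdr.ascq == 0x00;
	}
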
@@ -1100,7 +1239,7 @@ void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd, int retries)
 		cmd->sc_data_direction = DMA_FROM_DEVICE;
 
 	cmd->transfersize = req->data_len;
-	cmd->allowed = retries;
+	cmd->allowed = req->retries;
 	cmd->timeout_per_command = req->timeout;
 }
 EXPORT_SYMBOL_GPL(scsi_setup_blk_pc_cmnd);
@@ -1240,7 +1379,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
 				goto kill;
 			}
 		} else {
-			scsi_setup_blk_pc_cmnd(cmd, 3);
+			scsi_setup_blk_pc_cmnd(cmd);
 			cmd->done = scsi_generic_done;
 		}
 	}
@@ -1529,17 +1668,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
 
-	/*
-	 * ordered tags are superior to flush ordering
-	 */
-	if (shost->ordered_tag)
-		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
-	else if (shost->ordered_flush) {
-		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
-		q->prepare_flush_fn = scsi_prepare_flush_fn;
-		q->end_flush_fn = scsi_end_flush_fn;
-	}
-
 	if (!shost->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	return q;
@@ -1603,6 +1731,14 @@ int __init scsi_init_queue(void)
 {
 	int i;
 
+	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
+					sizeof(struct scsi_io_context),
+					0, 0, NULL, NULL);
+	if (!scsi_io_context_cache) {
+		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
 		int size = sgp->size * sizeof(struct scatterlist);
@@ -1630,6 +1766,8 @@ void scsi_exit_queue(void)
 {
 	int i;
 
+	kmem_cache_destroy(scsi_io_context_cache);
+
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
 		mempool_destroy(sgp->pool);
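
The remaining hunks make the prep path consume the per-request retry count (cmd->allowed = req->retries) in place of the old hardcoded 3, and add create/destroy calls for the io-context slab cache. The practical consequence is that any code building its own REQ_BLOCK_PC request must now fill in req->retries itself. A sketch, assuming the 2.6.15-era block-layer calls used elsewhere in this diff; the example_* name is hypothetical:

	#include <linux/blkdev.h>
	#include <linux/string.h>
	#include <scsi/scsi.h>

	/* Hypothetical: hand-rolled TEST UNIT READY as a REQ_BLOCK_PC
	 * request.  After this diff the prep path reads req->retries, so
	 * a request that leaves it zero gets no retries at all. */
	static int example_raw_tur(struct request_queue *q)
	{
		unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
		struct request *req;
		int err;

		req = blk_get_request(q, READ, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->flags |= REQ_BLOCK_PC | REQ_QUIET;
		req->cmd_len = COMMAND_SIZE(cmd[0]);
		memcpy(req->cmd, cmd, req->cmd_len);
		req->timeout = 10 * HZ;
		req->retries = 5;	/* becomes cmd->allowed at prep time */

		err = blk_execute_rq(q, NULL, req, 1);	/* at head, waits */
		blk_put_request(req);
		return err;
	}
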