author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2006-11-16 19:24:10 +0900
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>   2006-11-25 13:08:56 -0600
commit     b58d91547fb17c65ad621f3f98b1f2c228c812a5
tree       4fafd4db96cb7931577f87b02a79cc6f52986333 /drivers
parent     84ad58e4efcf80c154f693d4cc8f5c913511b760
[SCSI] export scsi-ml functions needed by tgt_scsi_lib and its LLDs
This patch contains the changes to scsi-ml needed for target mode support.

Note, per the last review we moved almost all of the fields we had added to scsi_cmnd into our internal data structure, which we are going to try to kill off once we can replace it with support from other parts of the kernel. The one field we left on scsi_cmnd is the offset variable. It is needed to handle the case where the target gets a request so large that it cannot be executed in one DMA operation: max_sectors or a segment limit may cap the size of the transfer. In that case our tgt core code breaks the command up into manageable transfers and sends them to the LLD one at a time. The offset then tells the LLD where in the command we are. Is there another field on the scsi_cmnd for that?

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
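[Illustration, not part of the patch: a rough sketch of the chunking described above. tgt_transfer_in_pieces() and max_xfer_bytes are invented names for this example; cmd->offset is the field the description says was kept on scsi_cmnd for exactly this purpose, and request_bufflen is the 2.6.19-era transfer-length field.]

#include <linux/kernel.h>
#include <scsi/scsi_cmnd.h>

/* Walk one large command in pieces the LLD can actually move in one go. */
static void tgt_transfer_in_pieces(struct scsi_cmnd *cmd,
				   unsigned int total_len,
				   unsigned int max_xfer_bytes)
{
	unsigned int done = 0;

	while (done < total_len) {
		unsigned int len = min(total_len - done, max_xfer_bytes);

		cmd->offset = done;		/* tell the LLD where in the command we are */
		cmd->request_bufflen = len;	/* size of this piece */

		/* hand this piece to the LLD and wait for it to complete ... */

		done += len;
	}
}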
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/scsi/hosts.c      4
-rw-r--r--   drivers/scsi/scsi.c      43
-rw-r--r--   drivers/scsi/scsi_lib.c  33
3 files changed, 54 insertions, 26 deletions
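[For orientation only, not part of the patch: a rough sketch of how a target-mode consumer such as the tgt core might use the helpers exported below. my_tgt_request_fn() and my_tgt_setup() are invented names and error handling is minimal; the sketch only leans on symbols visible in the diff: __scsi_alloc_queue(), __scsi_get_command(), __scsi_put_command() and shost->uspace_req_q.]

#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* request_fn for the per-host userspace request queue (illustrative) */
static void my_tgt_request_fn(struct request_queue *q)
{
	/* pull requests off q and turn them into target commands ... */
}

static int my_tgt_setup(struct Scsi_Host *shost)
{
	struct scsi_cmnd *cmd;

	/* per-host queue, mirroring the shost->uspace_req_q freed in hosts.c below */
	shost->uspace_req_q = __scsi_alloc_queue(shost, my_tgt_request_fn);
	if (!shost->uspace_req_q)
		return -ENOMEM;

	/*
	 * __scsi_put_command() does a put_device() on the device passed in,
	 * so take the matching reference before allocating the command.
	 */
	get_device(&shost->shost_gendev);
	cmd = __scsi_get_command(shost, GFP_KERNEL);
	if (!cmd) {
		put_device(&shost->shost_gendev);
		return -ENOMEM;
	}

	/* ... build and run the command, then hand it back to the host pool */
	__scsi_put_command(shost, cmd, &shost->shost_gendev);
	return 0;
}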
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 2ffdc9e0532..38c3a291efa 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -263,6 +263,10 @@ static void scsi_host_dev_release(struct device *dev)
kthread_stop(shost->ehandler);
if (shost->work_q)
destroy_workqueue(shost->work_q);
+ if (shost->uspace_req_q) {
+ kfree(shost->uspace_req_q->queuedata);
+ scsi_free_queue(shost->uspace_req_q);
+ }
scsi_destroy_command_freelist(shost);
if (shost->bqt)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 780d6dc92b4..fafc00deaad 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -156,8 +156,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
static DEFINE_MUTEX(host_cmd_pool_mutex);
-static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
- gfp_t gfp_mask)
+struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
struct scsi_cmnd *cmd;
@@ -178,6 +177,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
return cmd;
}
+EXPORT_SYMBOL_GPL(__scsi_get_command);
/*
* Function: scsi_get_command()
@@ -214,9 +214,29 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
put_device(&dev->sdev_gendev);
return cmd;
-}
+}
EXPORT_SYMBOL(scsi_get_command);
+void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
+ struct device *dev)
+{
+ unsigned long flags;
+
+ /* changing locks here, don't need to restore the irq state */
+ spin_lock_irqsave(&shost->free_list_lock, flags);
+ if (unlikely(list_empty(&shost->free_list))) {
+ list_add(&cmd->list, &shost->free_list);
+ cmd = NULL;
+ }
+ spin_unlock_irqrestore(&shost->free_list_lock, flags);
+
+ if (likely(cmd != NULL))
+ kmem_cache_free(shost->cmd_pool->slab, cmd);
+
+ put_device(dev);
+}
+EXPORT_SYMBOL(__scsi_put_command);
+
/*
* Function: scsi_put_command()
*
@@ -231,26 +251,15 @@ EXPORT_SYMBOL(scsi_get_command);
void scsi_put_command(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
unsigned long flags;
-
+
/* serious error if the command hasn't come from a device list */
spin_lock_irqsave(&cmd->device->list_lock, flags);
BUG_ON(list_empty(&cmd->list));
list_del_init(&cmd->list);
- spin_unlock(&cmd->device->list_lock);
- /* changing locks here, don't need to restore the irq state */
- spin_lock(&shost->free_list_lock);
- if (unlikely(list_empty(&shost->free_list))) {
- list_add(&cmd->list, &shost->free_list);
- cmd = NULL;
- }
- spin_unlock_irqrestore(&shost->free_list_lock, flags);
+ spin_unlock_irqrestore(&cmd->device->list_lock, flags);
- if (likely(cmd != NULL))
- kmem_cache_free(shost->cmd_pool->slab, cmd);
-
- put_device(&sdev->sdev_gendev);
+ __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2f12f9f12fc..fb616c69151 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -704,7 +704,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
return NULL;
}
-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
struct scsi_host_sg_pool *sgp;
struct scatterlist *sgl;
@@ -745,7 +745,9 @@ static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_m
return sgl;
}
-static void scsi_free_sgtable(struct scatterlist *sgl, int index)
+EXPORT_SYMBOL(scsi_alloc_sgtable);
+
+void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
struct scsi_host_sg_pool *sgp;
@@ -755,6 +757,8 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
mempool_free(sgl, sgp->pool);
}
+EXPORT_SYMBOL(scsi_free_sgtable);
+
/*
* Function: scsi_release_buffers()
*
@@ -1567,29 +1571,40 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+ request_fn_proc *request_fn)
{
- struct Scsi_Host *shost = sdev->host;
struct request_queue *q;
- q = blk_init_queue(scsi_request_fn, NULL);
+ q = blk_init_queue(request_fn, NULL);
if (!q)
return NULL;
- blk_queue_prep_rq(q, scsi_prep_fn);
-
blk_queue_max_hw_segments(q, shost->sg_tablesize);
blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
blk_queue_max_sectors(q, shost->max_sectors);
blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
blk_queue_segment_boundary(q, shost->dma_boundary);
- blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
- blk_queue_softirq_done(q, scsi_softirq_done);
if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
return q;
}
+EXPORT_SYMBOL(__scsi_alloc_queue);
+
+struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+{
+ struct request_queue *q;
+
+ q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+ if (!q)
+ return NULL;
+
+ blk_queue_prep_rq(q, scsi_prep_fn);
+ blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+ blk_queue_softirq_done(q, scsi_softirq_done);
+ return q;
+}
void scsi_free_queue(struct request_queue *q)
{