Diffstat (limited to 'drivers/scsi/virtio_scsi.c')
 -rw-r--r--  drivers/scsi/virtio_scsi.c | 91
 1 file changed, 18 insertions(+), 73 deletions(-)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index db3b494e592..99fdb940394 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -73,17 +73,12 @@ struct virtio_scsi_vq {
* queue, and also lets the driver optimize the IRQ affinity for the virtqueues
* (each virtqueue's affinity is set to the CPU that "owns" the queue).
*
- * An interesting effect of this policy is that only writes to req_vq need to
- * take the tgt_lock. Read can be done outside the lock because:
+ * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq
+ * could be done locklessly, but we do not do it yet.
*
- * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
- * In that case, no other CPU is reading req_vq: even if they were in
- * virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
- *
- * - reads of req_vq only occur when the target is not idle (reqs != 0).
- * A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
- *
- * Similarly, decrements of reqs are never concurrent with writes of req_vq.
+ * Decrements of reqs are never concurrent with writes of req_vq: before the
+ * decrement reqs will be != 0; after the decrement the virtqueue completion
+ * routine will not use the req_vq so it can be changed by a new request.
* Thus they can happen outside the tgt_lock, provided of course we make reqs
* an atomic_t.
*/
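For reference, the fields this comment talks about live in the per-target state; a rough sketch of its shape (field names taken from the comment and the hunks below, exact layout assumed):

    /* Sketch only: the per-target state referenced by the comment above.
     * Field names come from this file's hunks; ordering/other members assumed. */
    struct virtio_scsi_target_state {
            spinlock_t tgt_lock;            /* serializes reads and writes of req_vq */
            atomic_t reqs;                  /* outstanding requests on this target */
            struct virtio_scsi_vq *req_vq;  /* virtqueue currently serving this target */
    };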
@@ -204,7 +199,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
set_driver_byte(sc, DRIVER_SENSE);
}
- mempool_free(cmd, virtscsi_cmd_pool);
sc->scsi_done(sc);
atomic_dec(&tgt->reqs);
@@ -238,38 +232,6 @@ static void virtscsi_req_done(struct virtqueue *vq)
int index = vq->index - VIRTIO_SCSI_VQ_BASE;
struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
- /*
- * Read req_vq before decrementing the reqs field in
- * virtscsi_complete_cmd.
- *
- * With barriers:
- *
- * CPU #0 virtscsi_queuecommand_multi (CPU #1)
- * ------------------------------------------------------------
- * lock vq_lock
- * read req_vq
- * read reqs (reqs = 1)
- * write reqs (reqs = 0)
- * increment reqs (reqs = 1)
- * write req_vq
- *
- * Possible reordering without barriers:
- *
- * CPU #0 virtscsi_queuecommand_multi (CPU #1)
- * ------------------------------------------------------------
- * lock vq_lock
- * read reqs (reqs = 1)
- * write reqs (reqs = 0)
- * increment reqs (reqs = 1)
- * write req_vq
- * read (wrong) req_vq
- *
- * We do not need a full smp_rmb, because req_vq is required to get
- * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
- * in the virtqueue as the user token.
- */
- smp_read_barrier_depends();
-
virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
};
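With the barrier comment and smp_read_barrier_depends() gone, the completion path is just a table lookup plus the shared vq_done helper. A sketch of how virtscsi_req_done should read after the patch (the shost/vscsi lookup lines are assumed from surrounding context, they are not part of this hunk):

    static void virtscsi_req_done(struct virtqueue *vq)
    {
            struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);   /* assumed context */
            struct virtio_scsi *vscsi = shost_priv(sh);          /* assumed context */
            int index = vq->index - VIRTIO_SCSI_VQ_BASE;
            struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

            virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
    }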
@@ -279,8 +241,6 @@ static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
if (cmd->comp)
complete_all(cmd->comp);
- else
- mempool_free(cmd, virtscsi_cmd_pool);
}
static void virtscsi_ctrl_done(struct virtqueue *vq)
@@ -433,11 +393,10 @@ static void virtscsi_event_done(struct virtqueue *vq)
* @cmd : command structure
* @req_size : size of the request buffer
* @resp_size : size of the response buffer
- * @gfp : flags to use for memory allocations
*/
static int virtscsi_add_cmd(struct virtqueue *vq,
struct virtio_scsi_cmd *cmd,
- size_t req_size, size_t resp_size, gfp_t gfp)
+ size_t req_size, size_t resp_size)
{
struct scsi_cmnd *sc = cmd->sc;
struct scatterlist *sgs[4], req, resp;
@@ -469,19 +428,19 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
if (in)
sgs[out_num + in_num++] = in->sgl;
- return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
+ return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
struct virtio_scsi_cmd *cmd,
- size_t req_size, size_t resp_size, gfp_t gfp)
+ size_t req_size, size_t resp_size)
{
unsigned long flags;
int err;
bool needs_kick = false;
spin_lock_irqsave(&vq->vq_lock, flags);
- err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
+ err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
if (!err)
needs_kick = virtqueue_kick_prepare(vq->vq);
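Since command submission runs in atomic context under vq_lock, the allocation flag for virtqueue_add_sgs is fixed at GFP_ATOMIC and the gfp parameter disappears from the call chain. For context, a sketch of the full helper after the patch (the unlock/notify tail is assumed, it lies outside this hunk):

    static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
                                 struct virtio_scsi_cmd *cmd,
                                 size_t req_size, size_t resp_size)
    {
            unsigned long flags;
            int err;
            bool needs_kick = false;

            spin_lock_irqsave(&vq->vq_lock, flags);
            err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
            if (!err)
                    needs_kick = virtqueue_kick_prepare(vq->vq);
            spin_unlock_irqrestore(&vq->vq_lock, flags);   /* assumed tail */

            if (needs_kick)
                    virtqueue_notify(vq->vq);               /* assumed tail */
            return err;
    }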
@@ -496,10 +455,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
struct virtio_scsi_vq *req_vq,
struct scsi_cmnd *sc)
{
- struct virtio_scsi_cmd *cmd;
- int ret;
-
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+
BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
/* TODO: check feature bit and fail if unsupported? */
@@ -508,11 +466,6 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
dev_dbg(&sc->device->sdev_gendev,
"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
- ret = SCSI_MLQUEUE_HOST_BUSY;
- cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
- if (!cmd)
- goto out;
-
memset(cmd, 0, sizeof(*cmd));
cmd->sc = sc;
cmd->req.cmd = (struct virtio_scsi_cmd_req){
@@ -530,14 +483,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
if (virtscsi_kick_cmd(req_vq, cmd,
- sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
- GFP_ATOMIC) == 0)
- ret = 0;
- else
- mempool_free(cmd, virtscsi_cmd_pool);
-
-out:
- return ret;
+ sizeof cmd->req.cmd, sizeof cmd->resp.cmd) != 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ return 0;
}
static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
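The mempool goes away because the SCSI midlayer can reserve per-command driver data itself: with .cmd_size set in the host template (see the hunks at the end of this diff), each scsi_cmnd is allocated with room for a struct virtio_scsi_cmd behind it, and scsi_cmd_priv() hands back that area. Roughly (a sketch of the helper's idea, not a copy of include/scsi/scsi_cmnd.h):

    /* Sketch: per-command private data lives immediately after the scsi_cmnd
     * when the host template sets .cmd_size. */
    static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
    {
            return cmd + 1;
    }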
@@ -560,12 +508,8 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
spin_lock_irqsave(&tgt->tgt_lock, flags);
- /*
- * The memory barrier after atomic_inc_return matches
- * the smp_read_barrier_depends() in virtscsi_req_done.
- */
if (atomic_inc_return(&tgt->reqs) > 1)
- vq = ACCESS_ONCE(tgt->req_vq);
+ vq = tgt->req_vq;
else {
queue_num = smp_processor_id();
while (unlikely(queue_num >= vscsi->num_queues))
@@ -596,8 +540,7 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
cmd->comp = &comp;
if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
- sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
- GFP_NOIO) < 0)
+ sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
goto out;
wait_for_completion(&comp);
@@ -683,6 +626,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
.name = "Virtio SCSI HBA",
.proc_name = "virtio_scsi",
.this_id = -1,
+ .cmd_size = sizeof(struct virtio_scsi_cmd),
.queuecommand = virtscsi_queuecommand_single,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
@@ -699,6 +643,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.name = "Virtio SCSI HBA",
.proc_name = "virtio_scsi",
.this_id = -1,
+ .cmd_size = sizeof(struct virtio_scsi_cmd),
.queuecommand = virtscsi_queuecommand_multi,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,