Diffstat (limited to 'drivers/scsi/qla2xxx/qla_iocb.c')
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c  148
1 file changed, 106 insertions, 42 deletions
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index e607568bce4..76093152959 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -936,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
return 1;
}
-static int
+int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -948,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
- uint32_t prot_int;
+ uint32_t prot_int; /* protection interval */
uint32_t partial;
struct qla2_sgx sgx;
dma_addr_t sle_dma;
uint32_t sle_dma_len, tot_prot_dma_len = 0;
- struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-
- prot_int = cmd->device->sector_size;
+ struct scsi_cmnd *cmd;
+ struct scsi_qla_host *vha;
memset(&sgx, 0, sizeof(struct qla2_sgx));
- sgx.tot_bytes = scsi_bufflen(cmd);
- sgx.cur_sg = scsi_sglist(cmd);
- sgx.sp = sp;
-
- sg_prot = scsi_prot_sglist(cmd);
+ if (sp) {
+ vha = sp->fcport->vha;
+ cmd = GET_CMD_SP(sp);
+ prot_int = cmd->device->sector_size;
+
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ sg_prot = scsi_prot_sglist(cmd);
+ } else if (tc) {
+ vha = tc->vha;
+ prot_int = tc->blk_sz;
+ sgx.tot_bytes = tc->bufflen;
+ sgx.cur_sg = tc->sg;
+ sg_prot = tc->prot_sg;
+ } else {
+ BUG();
+ return 1;
+ }
while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
@@ -995,10 +1009,18 @@ alloc_and_fill:
return 1;
}
- list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)
+ sp->u.scmd.ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &(tc->ctx->dsd_list));
+ tc->ctx_dsd_alloced = 1;
+ }
- sp->flags |= SRB_CRC_CTX_DSD_VALID;
/* add new list to cmd iocb or last list */
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1033,21 +1055,35 @@ alloc_and_fill:
return 0;
}
-static int
+int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
- uint16_t tot_dsds)
+ uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
- struct scatterlist *sg;
+ struct scatterlist *sg, *sgl;
uint32_t *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
- struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_cmnd *cmd;
+ struct scsi_qla_host *vha;
+
+ if (sp) {
+ cmd = GET_CMD_SP(sp);
+ sgl = scsi_sglist(cmd);
+ vha = sp->fcport->vha;
+ } else if (tc) {
+ sgl = tc->sg;
+ vha = tc->vha;
+ } else {
+ BUG();
+ return 1;
+ }
- scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+
+ for_each_sg(sgl, sg, tot_dsds, i) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
@@ -1076,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
return 1;
}
- list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)
+ sp->u.scmd.ctx)->dsd_list);
- sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &(tc->ctx->dsd_list));
+ tc->ctx_dsd_alloced = 1;
+ }
/* add new list to cmd iocb or last list */
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1102,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
return 0;
}
-static int
+int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd,
- uint16_t tot_dsds)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
- struct scatterlist *sg;
+ struct scatterlist *sg, *sgl;
int i;
struct scsi_cmnd *cmd;
uint32_t *cur_dsd = dsd;
- uint16_t used_dsds = tot_dsds;
+ uint16_t used_dsds = tot_dsds;
+ struct scsi_qla_host *vha;
+
+ if (sp) {
+ cmd = GET_CMD_SP(sp);
+ sgl = scsi_prot_sglist(cmd);
+ vha = sp->fcport->vha;
+ } else if (tc) {
+ vha = tc->vha;
+ sgl = tc->prot_sg;
+ } else {
+ BUG();
+ return 1;
+ }
- cmd = GET_CMD_SP(sp);
- scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe021,
+ "%s: enter\n", __func__);
+
+ for_each_sg(sgl, sg, tot_dsds, i) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
@@ -1147,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
return 1;
}
- list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+ if (sp) {
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)
+ sp->u.scmd.ctx)->dsd_list);
- sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+ } else {
+ list_add_tail(&dsd_ptr->list,
+ &(tc->ctx->dsd_list));
+ tc->ctx_dsd_alloced = 1;
+ }
/* add new list to cmd iocb or last list */
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1386,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
if (!bundling && tot_prot_dsds) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
- cur_dsd, tot_dsds))
+ cur_dsd, tot_dsds, NULL))
goto crc_queuing_error;
} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
- (tot_dsds - tot_prot_dsds)))
+ (tot_dsds - tot_prot_dsds), NULL))
goto crc_queuing_error;
if (bundling && tot_prot_dsds) {
@@ -1398,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
- tot_prot_dsds))
+ tot_prot_dsds, NULL))
goto crc_queuing_error;
}
return QLA_SUCCESS;
@@ -1478,8 +1542,8 @@ qla24xx_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1697,8 +1761,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
tot_prot_dsds = nseg;
tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2825,8 +2889,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
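
The hunks above drop the "static" from the three DSD-list builders and add a trailing "struct qla_tgt_cmd *tc" argument, so target-mode code can reuse them by passing sp == NULL. Below is a minimal sketch of such a caller, assuming the prototypes are exported in a header as the patch implies; the wrapper name and error handling are illustrative, and only the qla_tgt_cmd fields this diff actually dereferences (sg, prot_sg, bufflen, blk_sz, ctx, vha) are relied upon.

/*
 * Hypothetical target-mode caller (not part of this patch): sp == NULL
 * with a non-NULL qla_tgt_cmd selects the new "tc" branch, so any extra
 * DSD lists are linked into tc->ctx->dsd_list and tc->ctx_dsd_alloced
 * is set instead of touching srb_t state.
 */
static int qlt_fill_data_dsds(struct qla_hw_data *ha, struct qla_tgt_cmd *cmd,
	uint32_t *cur_dsd, uint16_t tot_dsds)
{
	/* Walk cmd->sg and emit one DSD per data segment. */
	if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, tot_dsds, cmd))
		return -ENOMEM;

	return 0;
}

A DIF-capable caller would likewise hand its protection scatterlist (cmd->prot_sg) to qla24xx_walk_and_build_prot_sglist(), using a DSD pointer taken from the bundling area of its CRC context, mirroring what qla24xx_build_scsi_crc_2_iocbs() does for the initiator path with tc == NULL.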