author:    Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 17:37:09 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 17:37:09 -0700
commit:    c9bfa7d75ba7269c97595f03c3152372e8b37823 (patch)
tree:      18ff08a09721a21fda585c716574344220a50518
parent:    f4c16c581766a230c02bec4d513b09fe36264ae2 (diff)
parent:    f80e8ed3951455272c12693e35b259be8eb60b30 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull scsi-target changes from Nicholas Bellinger:
"There has been lots of work in existing code in a number of areas this
past cycle. The major highlights have been:
* Removal of transport_do_task_sg_chain() from core + fabrics
(Roland)
* target-core: Removal of the se_task abstraction from target-core and
enforcement of hw_max_sectors for pSCSI backends (hch)
* Re-factoring of iscsi-target tx immediate/response queues (agrover)
* Conversion of iscsi-target back to using target core memory
allocation logic (agrover)
One last-minute iscsi-target patch also went into for-next to address
a nasty regression related to agrover's target-core allocation logic
conversion; it is not in Friday's linux-next build, but it is included
in this series.
On the new fabric module code front for-3.5, here is a brief status
update for the three currently in flight this round:
* usb-gadget target driver:
Sebastian Siewior's driver for supporting usb-gadget target mode
operation. This will be going out as a separate PULL request from
target-pending/usb-target-merge with subsystem maintainer ACKs. There
is one minor target-core patch in this series that the driver requires
in order to function.
* sbp ieee-1394/firewire target driver:
Chris Boot's driver for supporting the Serial Bus Protocol (SBP-2)
across IEEE-1394 Firewire hardware. This will be going out as a
separate PULL request from target-pending/sbp-target-merge with two
additional drivers/firewire/ patches w/ subsystem maintainer ACKs.
* qla2xxx LLD target mode infrastructure changes + tcm_qla2xxx:
The QLogic >= 24xx series HW target mode LLD infrastructure patch set
and tcm_qla2xxx fabric driver. Support for FC target mode using
qla2xxx LLD code has been officially submitted by QLogic to James
below, and is currently outstanding but not yet merged into
scsi.git/for-next.
[PATCH 00/22] qla2xxx: Updates for scsi "misc" branch
http://www.spinics.net/lists/linux-scsi/msg59350.html
Note there are *zero* direct dependencies upon this for-next series
for the qla2xxx LLD target + tcm_qla2xxx patches submitted above, and
over the last few days the target mode team has been tracking down a
tcm_qla2xxx-specific active I/O shutdown bug that now appears to be
almost squashed for 3.5-rc-fixes."
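One concrete piece of the allocation-logic conversion described above is that
iscsit_handle_scsi_cmd() now open-codes the iSCSI-to-SAM task-attribute
translation before calling transport_init_se_cmd() (see the iscsi_target.c
hunks in the diff below). The following is a minimal standalone sketch of
that mapping, not the kernel code itself; the enum values are hypothetical
stand-ins for the kernel's ISCSI_ATTR_* and MSG_*_TAG constants.

/* Sketch only -- enum values are illustrative stand-ins. */
#include <stdio.h>

enum iscsi_attr { ISCSI_ATTR_UNTAGGED, ISCSI_ATTR_SIMPLE, ISCSI_ATTR_ORDERED,
		  ISCSI_ATTR_HEAD_OF_QUEUE, ISCSI_ATTR_ACA };
enum sam_attr { MSG_SIMPLE_TAG, MSG_ORDERED_TAG, MSG_HEAD_TAG, MSG_ACA_TAG };

static enum sam_attr iscsi_to_sam_task_attr(enum iscsi_attr attr)
{
	switch (attr) {
	case ISCSI_ATTR_UNTAGGED:
	case ISCSI_ATTR_SIMPLE:
		return MSG_SIMPLE_TAG;
	case ISCSI_ATTR_ORDERED:
		return MSG_ORDERED_TAG;
	case ISCSI_ATTR_HEAD_OF_QUEUE:
		return MSG_HEAD_TAG;
	case ISCSI_ATTR_ACA:
		return MSG_ACA_TAG;
	default:
		/* Unknown attributes fall back to SIMPLE, as the driver does. */
		return MSG_SIMPLE_TAG;
	}
}

int main(void)
{
	printf("ORDERED maps to sam_attr %d\n",
	       iscsi_to_sam_task_attr(ISCSI_ATTR_ORDERED));
	return 0;
}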
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (47 commits)
iscsi-target: Fix iov_count calculation bug in iscsit_allocate_iovecs
iscsi-target: remove dead code in iscsi_check_valuelist_for_support
target: Handle ATA_16 passthrough for pSCSI backend devices
target: Add MI_REPORT_TARGET_PGS ext. header + implict_trans_secs attribute
target: Fix MAINTENANCE_IN service action CDB checks to use lower 5 bits
target: add support for the WRITE_VERIFY command
target: make target_put_session void
target: cleanup transport_execute_tasks()
target: Remove max_sectors device attribute for modern se_task less code
target: lock => unlock typo in transport_lun_wait_for_tasks
target: Enforce hw_max_sectors for SCF_SCSI_DATA_SG_IO_CDB
target: remove the t_se_count field in struct se_cmd
target: remove the t_task_cdbs_ex_left field in struct se_cmd
target: remove the t_task_cdbs_left field in struct se_cmd
target: remove struct se_task
target: move the state and execute lists to the command
target: simplify command to task linkage
target: always allocate a single task
target: replace ->execute_task with ->execute_cmd
target: remove the task_sectors field in struct se_task
...
41 files changed, 1315 insertions, 2304 deletions
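Before the raw diff: the last-minute regression fix called out above
("iscsi-target: Fix iov_count calculation bug in iscsit_allocate_iovecs")
sizes the kvec array from the command's expected transfer length rather than
from t_data_nents, so a payload spanning N pages always gets at least N
iovecs. A minimal standalone sketch of the new calculation follows; it is
not the kernel code, and the PAGE_SIZE and ISCSI_IOV_DATA_BUFFER values are
illustrative assumptions.

/* Sketch only -- constants are illustrative. */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define ISCSI_IOV_DATA_BUFFER	5	/* assumed slack for PDU header/digests */

/* Round-up division, as the kernel's DIV_ROUND_UP() macro does. */
static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

/* New scheme: derive the iovec count from the SCSI expected transfer
 * length, with a floor of one for zero-length commands. */
static unsigned long iov_count_for(unsigned long data_length)
{
	unsigned long pages = div_round_up(data_length, PAGE_SIZE);

	if (pages < 1)
		pages = 1;
	return pages + ISCSI_IOV_DATA_BUFFER;
}

int main(void)
{
	unsigned long lengths[] = { 0, 512, 4096, 4097, 65536 };

	for (unsigned int i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
		printf("data_length=%6lu -> iov_count=%lu\n",
		       lengths[i], iov_count_for(lengths[i]));
	return 0;
}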
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index daf21b89999..5f6b7f63cde 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1099,9 +1099,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, dir = cmd->data_direction; BUG_ON(dir == DMA_NONE); - transport_do_task_sg_chain(cmd); - ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained; - ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no; + ioctx->sg = sg = sg_orig = cmd->t_data_sg; + ioctx->sg_cnt = sg_cnt = cmd->t_data_nents; count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt, opposite_dma_dir(dir)); @@ -1769,7 +1768,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch, kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref); goto send_sense; } - ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb); + ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb); if (ret < 0) { kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref); if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) { @@ -4004,9 +4003,6 @@ static int __init srpt_init_module(void) srpt_target->tf_ops = srpt_template; - /* Enable SG chaining */ - srpt_target->tf_ops.task_sg_chaining = true; - /* * Set up default attribute lists. */ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 8b1d5e62ed4..d57d10cb2e4 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -27,8 +27,10 @@ #include <asm/unaligned.h> #include <scsi/scsi_device.h> #include <scsi/iscsi_proto.h> +#include <scsi/scsi_tcq.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> +#include <target/target_core_configfs.h> #include "iscsi_target_core.h" #include "iscsi_target_parameters.h" @@ -593,7 +595,7 @@ static void __exit iscsi_target_cleanup_module(void) kfree(iscsit_global); } -int iscsit_add_reject( +static int iscsit_add_reject( u8 reason, int fail_conn, unsigned char *buf, @@ -622,7 +624,7 @@ int iscsit_add_reject( } spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); cmd->i_state = ISTATE_SEND_REJECT; @@ -669,7 +671,7 @@ int iscsit_add_reject_from_cmd( if (add_to_conn) { spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); } @@ -685,9 +687,7 @@ int iscsit_add_reject_from_cmd( /* * Map some portion of the allocated scatterlist to an iovec, suitable for - * kernel sockets to copy data in/out. This handles both pages and slab-allocated - * buffers, since we have been tricky and mapped t_mem_sg to the buffer in - * either case (see iscsit_alloc_buffs) + * kernel sockets to copy data in/out. */ static int iscsit_map_iovec( struct iscsi_cmd *cmd, @@ -700,10 +700,9 @@ static int iscsit_map_iovec( unsigned int page_off; /* - * We have a private mapping of the allocated pages in t_mem_sg. - * At this point, we also know each contains a page. + * We know each entry in t_data_sg contains a page. 
*/ - sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE]; + sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; page_off = (data_offset % PAGE_SIZE); cmd->first_data_sg = sg; @@ -744,7 +743,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) conn->exp_statsn = exp_statsn; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { spin_lock(&cmd->istate_lock); if ((cmd->i_state == ISTATE_SENT_STATUS) && (cmd->stat_sn < exp_statsn)) { @@ -761,8 +760,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) { - u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 : - cmd->se_cmd.t_data_nents; + u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE)); iov_count += ISCSI_IOV_DATA_BUFFER; @@ -776,64 +774,6 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) return 0; } -static int iscsit_alloc_buffs(struct iscsi_cmd *cmd) -{ - struct scatterlist *sgl; - u32 length = cmd->se_cmd.data_length; - int nents = DIV_ROUND_UP(length, PAGE_SIZE); - int i = 0, j = 0, ret; - /* - * If no SCSI payload is present, allocate the default iovecs used for - * iSCSI PDU Header - */ - if (!length) - return iscsit_allocate_iovecs(cmd); - - sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL); - if (!sgl) - return -ENOMEM; - - sg_init_table(sgl, nents); - - while (length) { - int buf_size = min_t(int, length, PAGE_SIZE); - struct page *page; - - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - goto page_alloc_failed; - - sg_set_page(&sgl[i], page, buf_size, 0); - - length -= buf_size; - i++; - } - - cmd->t_mem_sg = sgl; - cmd->t_mem_sg_nents = nents; - - /* BIDI ops not supported */ - - /* Tell the core about our preallocated memory */ - transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0); - /* - * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd - * so that cmd->se_cmd.t_tasks_se_num has been set. - */ - ret = iscsit_allocate_iovecs(cmd); - if (ret < 0) - return -ENOMEM; - - return 0; - -page_alloc_failed: - while (j < i) - __free_page(sg_page(&sgl[j++])); - - kfree(sgl); - return -ENOMEM; -} - static int iscsit_handle_scsi_cmd( struct iscsi_conn *conn, unsigned char *buf) @@ -842,6 +782,8 @@ static int iscsit_handle_scsi_cmd( int dump_immediate_data = 0, send_check_condition = 0, payload_length; struct iscsi_cmd *cmd = NULL; struct iscsi_scsi_req *hdr; + int iscsi_task_attr; + int sam_task_attr; spin_lock_bh(&conn->sess->session_stats_lock); conn->sess->cmd_pdus++; @@ -958,15 +900,30 @@ done: (hdr->flags & ISCSI_FLAG_CMD_READ) ? 
DMA_FROM_DEVICE : DMA_NONE; - cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction, - (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK)); + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); if (!cmd) return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, - buf, conn); + buf, conn); - pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," - " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, - hdr->cmdsn, hdr->data_length, payload_length, conn->cid); + cmd->data_direction = data_direction; + iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK; + /* + * Figure out the SAM Task Attribute for the incoming SCSI CDB + */ + if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || + (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) + sam_task_attr = MSG_SIMPLE_TAG; + else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) + sam_task_attr = MSG_ORDERED_TAG; + else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) + sam_task_attr = MSG_HEAD_TAG; + else if (iscsi_task_attr == ISCSI_ATTR_ACA) + sam_task_attr = MSG_ACA_TAG; + else { + pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" + " MSG_SIMPLE_TAG\n", iscsi_task_attr); + sam_task_attr = MSG_SIMPLE_TAG; + } cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; cmd->i_state = ISTATE_NEW_CMD; @@ -1003,6 +960,17 @@ done: } /* + * Initialize struct se_cmd descriptor from target_core_mod infrastructure + */ + transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops, + conn->sess->se_sess, hdr->data_length, cmd->data_direction, + sam_task_attr, &cmd->sense_buffer[0]); + + pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," + " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, + hdr->cmdsn, hdr->data_length, payload_length, conn->cid); + + /* * The CDB is going to an se_device_t. */ ret = transport_lookup_cmd_lun(&cmd->se_cmd, @@ -1016,13 +984,8 @@ done: send_check_condition = 1; goto attach_cmd; } - /* - * The Initiator Node has access to the LUN (the addressing method - * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to - * allocate 1->N transport tasks (depending on sector count and - * maximum request size the physical HBA(s) can handle. - */ - transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb); + + transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb); if (transport_ret == -ENOMEM) { return iscsit_add_reject_from_cmd( ISCSI_REASON_BOOKMARK_NO_RESOURCES, @@ -1035,9 +998,7 @@ done: */ send_check_condition = 1; } else { - cmd->data_length = cmd->se_cmd.data_length; - - if (iscsit_decide_list_to_build(cmd, payload_length) < 0) + if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) return iscsit_add_reject_from_cmd( ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 1, buf, cmd); @@ -1045,18 +1006,15 @@ done: attach_cmd: spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); /* * Check if we need to delay processing because of ALUA * Active/NonOptimized primary access state.. */ core_alua_check_nonop_delay(&cmd->se_cmd); - /* - * Allocate and setup SGL used with transport_generic_map_mem_to_cmd(). 
- * also call iscsit_allocate_iovecs() - */ - ret = iscsit_alloc_buffs(cmd); + + ret = iscsit_allocate_iovecs(cmd); if (ret < 0) return iscsit_add_reject_from_cmd( ISCSI_REASON_BOOKMARK_NO_RESOURCES, @@ -1303,10 +1261,10 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) se_cmd = &cmd->se_cmd; iscsit_mod_dataout_timer(cmd); - if ((hdr->offset + payload_length) > cmd->data_length) { + if ((hdr->offset + payload_length) > cmd->se_cmd.data_length) { pr_err("DataOut Offset: %u, Length %u greater than" " iSCSI Command EDTL %u, protocol error.\n", - hdr->offset, payload_length, cmd->data_length); + hdr->offset, payload_length, cmd->se_cmd.data_length); return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd); } @@ -1442,7 +1400,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) return 0; else if (ret == DATAOUT_SEND_R2T) { iscsit_set_dataout_sequence_values(cmd); - iscsit_build_r2ts_for_cmd(cmd, conn, 0); + iscsit_build_r2ts_for_cmd(cmd, conn, false); } else if (ret == DATAOUT_SEND_TO_TRANSPORT) { /* * Handle extra special case for out of order @@ -1617,7 +1575,7 @@ static int iscsit_handle_nop_out( * Initiator is expecting a NopIN ping reply, */ spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); @@ -1723,10 +1681,75 @@ static int iscsit_handle_task_mgt_cmd( (hdr->refcmdsn != ISCSI_RESERVED_TAG)) hdr->refcmdsn = ISCSI_RESERVED_TAG; - cmd = iscsit_allocate_se_cmd_for_tmr(conn, function); + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); if (!cmd) return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, buf, conn); + 1, buf, conn); + + cmd->data_direction = DMA_NONE; + + cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL); + if (!cmd->tmr_req) { + pr_err("Unable to allocate memory for" + " Task Management command!\n"); + return iscsit_add_reject_from_cmd( + ISCSI_REASON_BOOKMARK_NO_RESOURCES, + 1, 1, buf, cmd); + } + + /* + * TASK_REASSIGN for ERL=2 / connection stays inside of + * LIO-Target $FABRIC_MOD + */ + if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { + + u8 tcm_function; + int ret; + + transport_init_se_cmd(&cmd->se_cmd, + &lio_target_fabric_configfs->tf_ops, + conn->sess->se_sess, 0, DMA_NONE, + MSG_SIMPLE_TAG, &cmd->sense_buffer[0]); + + switch (function) { + case ISCSI_TM_FUNC_ABORT_TASK: + tcm_function = TMR_ABORT_TASK; + break; + case ISCSI_TM_FUNC_ABORT_TASK_SET: + tcm_function = TMR_ABORT_TASK_SET; + break; + case ISCSI_TM_FUNC_CLEAR_ACA: + tcm_function = TMR_CLEAR_ACA; + break; + case ISCSI_TM_FUNC_CLEAR_TASK_SET: + tcm_function = TMR_CLEAR_TASK_SET; + break; + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + tcm_function = TMR_LUN_RESET; + break; + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + tcm_function = TMR_TARGET_WARM_RESET; + break; + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + tcm_function = TMR_TARGET_COLD_RESET; + break; + default: + pr_err("Unknown iSCSI TMR Function:" + " 0x%02x\n", function); + return iscsit_add_reject_from_cmd( + ISCSI_REASON_BOOKMARK_NO_RESOURCES, + 1, 1, buf, cmd); + } + + ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, + tcm_function, GFP_KERNEL); + if (ret < 0) + return iscsit_add_reject_from_cmd( + ISCSI_REASON_BOOKMARK_NO_RESOURCES, + 1, 1, buf, cmd); + + cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; + } cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; cmd->i_state = 
ISTATE_SEND_TASKMGTRSP; @@ -1804,7 +1827,7 @@ static int iscsit_handle_task_mgt_cmd( se_tmr->call_transport = 1; attach: spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { @@ -1980,7 +2003,7 @@ static int iscsit_handle_text_cmd( cmd->data_direction = DMA_NONE; spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); @@ -2168,7 +2191,7 @@ static int iscsit_handle_logout_cmd( logout_remove = 1; spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) @@ -2178,7 +2201,7 @@ static int iscsit_handle_logout_cmd( * Immediate commands are executed, well, immediately. * Non-Immediate Logout Commands are executed in CmdSN order. */ - if (hdr->opcode & ISCSI_OP_IMMEDIATE) { + if (cmd->immediate_cmd) { int ret = iscsit_execute_cmd(cmd, 0); if (ret < 0) @@ -2336,7 +2359,7 @@ static int iscsit_handle_immediate_data( cmd->write_data_done += length; - if (cmd->write_data_done == cmd->data_length) { + if (cmd->write_data_done == cmd->se_cmd.data_length) { spin_lock_bh(&cmd->istate_lock); cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; @@ -2381,7 +2404,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) cmd->i_state = ISTATE_SEND_ASYNCMSG; spin_lock_bh(&conn_p->cmd_lock); - list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list); spin_unlock_bh(&conn_p->cmd_lock); iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state); @@ -2434,10 +2457,19 @@ static int iscsit_send_conn_drop_async_message( return 0; } +static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) +{ + if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || + (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { + wait_for_completion_interruptible_timeout( + &conn->tx_half_close_comp, + ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); + } +} + static int iscsit_send_data_in( struct iscsi_cmd *cmd, - struct iscsi_conn *conn, - int *eodr) + struct iscsi_conn *conn) { int iov_ret = 0, set_statsn = 0; u32 iov_count = 0, tx_size = 0; @@ -2445,6 +2477,8 @@ static int iscsit_send_data_in( struct iscsi_datain_req *dr; struct iscsi_data_rsp *hdr; struct kvec *iov; + int eodr = 0; + int ret; memset(&datain, 0, sizeof(struct iscsi_datain)); dr = iscsit_get_datain_values(cmd, &datain); @@ -2457,11 +2491,11 @@ static int iscsit_send_data_in( /* * Be paranoid and double check the logic for now. 
*/ - if ((datain.offset + datain.length) > cmd->data_length) { + if ((datain.offset + datain.length) > cmd->se_cmd.data_length) { pr_err("Command ITT: 0x%08x, datain.offset: %u and" " datain.length: %u exceeds cmd->data_length: %u\n", cmd->init_task_tag, datain.offset, datain.length, - cmd->data_length); + cmd->se_cmd.data_length); return -1; } @@ -2577,13 +2611,26 @@ static int iscsit_send_data_in( cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), ntohl(hdr->offset), datain.length, conn->cid); + /* sendpage is preferred but can't insert markers */ + if (!conn->conn_ops->IFMarker) + ret = iscsit_fe_sendpage_sg(cmd, conn); + else + ret = iscsit_send_tx_data(cmd, conn, 0); + + iscsit_unmap_iovec(cmd); + + if (ret < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + if (dr->dr_complete) { - *eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? + eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? 2 : 1; iscsit_free_datain_req(cmd, dr); } - return 0; + return eodr; } static int iscsit_send_logout_response( @@ -2715,6 +2762,7 @@ static int iscsit_send_unsolicited_nopin( { int tx_size = ISCSI_HDR_LEN; struct iscsi_nopin *hdr; + int ret; hdr = (struct iscsi_nopin *) cmd->pdu; memset(hdr, 0, ISCSI_HDR_LEN); @@ -2747,6 +2795,17 @@ static int iscsit_send_unsolicited_nopin( pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); + ret = iscsit_send_tx_data(cmd, conn, 1); + if (ret < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = want_response ? + ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + return 0; } @@ -2837,13 +2896,14 @@ static int iscsit_send_nopin_response( return 0; } -int iscsit_send_r2t( +static int iscsit_send_r2t( struct iscsi_cmd *cmd, struct iscsi_conn *conn) { int tx_size = 0; struct iscsi_r2t *r2t; struct iscsi_r2t_rsp *hdr; + int ret; r2t = iscsit_get_r2t_from_list(cmd); if (!r2t) @@ -2899,19 +2959,27 @@ int iscsit_send_r2t( r2t->sent_r2t = 1; spin_unlock_bh(&cmd->r2t_lock); + ret = iscsit_send_tx_data(cmd, conn, 1); + if (ret < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + + spin_lock_bh(&cmd->dataout_timeout_lock); + iscsit_start_dataout_timer(cmd, conn); + spin_unlock_bh(&cmd->dataout_timeout_lock); + return 0; } /* - * type 0: Normal Operation. - * type 1: Called from Storage Transport. - * type 2: Called from iscsi_task_reassign_complete_write() for - * connection recovery. + * @recovery: If called from iscsi_task_reassign_complete_write() for + * connection recovery. */ int iscsit_build_r2ts_for_cmd( struct iscsi_cmd *cmd, struct iscsi_conn *conn, - int type) + bool recovery) { int first_r2t = 1; u32 offset = 0, xfer_len = 0; @@ -2922,32 +2990,37 @@ int iscsit_build_r2ts_for_cmd( return 0; } - if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2)) - if (cmd->r2t_offset < cmd->write_data_done) - cmd->r2t_offset = cmd->write_data_done; + if (conn->sess->sess_ops->DataSequenceInOrder && + !recovery) + cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done); while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) { if (conn->sess->sess_ops->DataSequenceInOrder) { offset = cmd->r2t_offset; - if (first_r2t && (type == 2)) { - xfer_len = ((offset + - (conn->sess->sess_ops->MaxBurstLength - - cmd->next_burst_len) > - cmd->data_length) ? 
- (cmd->data_length - offset) : - (conn->sess->sess_ops->MaxBurstLength - - cmd->next_burst_len)); + if (first_r2t && recovery) { + int new_data_end = offset + + conn->sess->sess_ops->MaxBurstLength - + cmd->next_burst_len; + + if (new_data_end > cmd->se_cmd.data_length) + xfer_len = cmd->se_cmd.data_length - offset; + else + xfer_len = + conn->sess->sess_ops->MaxBurstLength - + cmd->next_burst_len; } else { - xfer_len = ((offset + - conn->sess->sess_ops->MaxBurstLength) > - cmd->data_length) ? - (cmd->data_length - offset) : - conn->sess->sess_ops->MaxBurstLength; + int new_data_end = offset + + conn->sess->sess_ops->MaxBurstLength; + + if (new_data_end > cmd->se_cmd.data_length) + xfer_len = cmd->se_cmd.data_length - offset; + else + xfer_len = conn->sess->sess_ops->MaxBurstLength; } cmd->r2t_offset += xfer_len; - if (cmd->r2t_offset == cmd->data_length) + if (cmd->r2t_offset == cmd->se_cmd.data_length) cmd->cmd_flags |= ICF_SENT_LAST_R2T; } else { struct iscsi_seq *seq; @@ -3179,6 +3252,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np) return ret; } +#define SENDTARGETS_BUF_LIMIT 32768U + static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) { char *payload = NULL; @@ -3187,12 +3262,10 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_tiqn *tiqn; struct iscsi_tpg_np *tpg_np; int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; - unsigned char buf[256]; + unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ - buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ? - 32768 : conn->conn_ops->MaxRecvDataSegmentLength; - - memset(buf, 0, 256); + buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength, + SENDTARGETS_BUF_LIMIT); payload = kzalloc(buffer_len, GFP_KERNEL); if (!payload) { @@ -3408,18 +3481,6 @@ static int iscsit_send_reject( return 0; } -static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) -{ - if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || - (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { - wait_for_completion_interruptible_timeout( - &conn->tx_half_close_comp, - ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); - } -} - -#ifdef CONFIG_SMP - void iscsit_thread_get_cpumask(struct iscsi_conn *conn) { struct iscsi_thread_set *ts = conn->thread_set; @@ -3433,10 +3494,6 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn) * execute upon. 
*/ ord = ts->thread_id % cpumask_weight(cpu_online_mask); -#if 0 - pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from" - " thread_id: %d\n", ord, ts->thread_id); -#endif for_each_online_cpu(cpu) { if (ord-- == 0) { cpumask_set_cpu(cpu, conn->conn_cpumask); @@ -3476,34 +3533,196 @@ static inline void iscsit_thread_check_cpumask( */ memset(buf, 0, 128); cpumask_scnprintf(buf, 128, conn->conn_cpumask); -#if 0 - pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():" - " %s for %s\n", buf, p->comm); -#endif set_cpus_allowed_ptr(p, conn->conn_cpumask); } -#else - -void iscsit_thread_get_cpumask(struct iscsi_conn *conn) +static int handle_immediate_queue(struct iscsi_conn *conn) { - return; + struct iscsi_queue_req *qr; + struct iscsi_cmd *cmd; + u8 state; + int ret; + + while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) { + atomic_set(&conn->check_immediate_queue, 0); + cmd = qr->cmd; + state = qr->state; + kmem_cache_free(lio_qr_cache, qr); + + switch (state) { + case ISTATE_SEND_R2T: + ret = iscsit_send_r2t(cmd, conn); + if (ret < 0) + goto err; + break; + case ISTATE_REMOVE: + if (cmd->data_direction == DMA_TO_DEVICE) + iscsit_stop_dataout_timer(cmd); + + spin_lock_bh(&conn->cmd_lock); + list_del(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + iscsit_free_cmd(cmd); + continue; + case ISTATE_SEND_NOPIN_WANT_RESPONSE: + iscsit_mod_nopin_response_timer(conn); + ret = iscsit_send_unsolicited_nopin(cmd, + conn, 1); + if (ret < 0) + goto err; + break; + case ISTATE_SEND_NOPIN_NO_RESPONSE: + ret = iscsit_send_unsolicited_nopin(cmd, + conn, 0); + if (ret < 0) + goto err; + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, state, + conn->cid); + goto err; + } + } + + return 0; + +err: + return -1; } -#define iscsit_thread_check_cpumask(X, Y, Z) ({}) -#endif /* CONFIG_SMP */ +static int handle_response_queue(struct iscsi_conn *conn) +{ + struct iscsi_queue_req *qr; + struct iscsi_cmd *cmd; + u8 state; + int ret; + + while ((qr = iscsit_get_cmd_from_response_queue(conn))) { + cmd = qr->cmd; + state = qr->state; + kmem_cache_free(lio_qr_cache, qr); + +check_rsp_state: + switch (state) { + case ISTATE_SEND_DATAIN: + ret = iscsit_send_data_in(cmd, conn); + if (ret < 0) + goto err; + else if (!ret) + /* more drs */ + goto check_rsp_state; + else if (ret == 1) { + /* all done */ + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + continue; + } else if (ret == 2) { + /* Still must send status, + SCF_TRANSPORT_TASK_SENSE was set */ + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SEND_STATUS; + spin_unlock_bh(&cmd->istate_lock); + state = ISTATE_SEND_STATUS; + goto check_rsp_state; + } + + break; + case ISTATE_SEND_STATUS: + case ISTATE_SEND_STATUS_RECOVERY: + ret = iscsit_send_status(cmd, conn); + break; + case ISTATE_SEND_LOGOUTRSP: + ret = iscsit_send_logout_response(cmd, conn); + break; + case ISTATE_SEND_ASYNCMSG: + ret = iscsit_send_conn_drop_async_message( + cmd, conn); + break; + case ISTATE_SEND_NOPIN: + ret = iscsit_send_nopin_response(cmd, conn); + break; + case ISTATE_SEND_REJECT: + ret = iscsit_send_reject(cmd, conn); + break; + case ISTATE_SEND_TASKMGTRSP: + ret = iscsit_send_task_mgt_rsp(cmd, conn); + if (ret != 0) + break; + ret = iscsit_tmr_post_handler(cmd, conn); + if (ret != 0) + iscsit_fall_back_to_erl0(conn->sess); + break; + case ISTATE_SEND_TEXTRSP: + ret = iscsit_send_text_rsp(cmd, conn); + break; + default: 
+ pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, + state, conn->cid); + goto err; + } + if (ret < 0) + goto err; + + if (iscsit_send_tx_data(cmd, conn, 1) < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + iscsit_unmap_iovec(cmd); + goto err; + } + iscsit_unmap_iovec(cmd); + + switch (state) { + case ISTATE_SEND_LOGOUTRSP: + if (!iscsit_logout_post_handler(cmd, conn)) + goto restart; + /* fall through */ + case ISTATE_SEND_STATUS: + case ISTATE_SEND_ASYNCMSG: + case ISTATE_SEND_NOPIN: + case ISTATE_SEND_STATUS_RECOVERY: + case ISTATE_SEND_TEXTRSP: + case ISTATE_SEND_TASKMGTRSP: + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + break; + case ISTATE_SEND_REJECT: + if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { + cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; + complete(&cmd->reject_comp); + goto err; + } + complete(&cmd->reject_comp); + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, + cmd->i_state, conn->cid); + goto err; + } + + if (atomic_read(&conn->check_immediate_queue)) + break; + } + + return 0; + +err: + return -1; +restart: + return -EAGAIN; +} int iscsi_target_tx_thread(void *arg) { - u8 state; - int eodr = 0; int ret = 0; - int sent_status = 0; - int use_misc = 0; - int map_sg = 0; - struct iscsi_cmd *cmd = NULL; struct iscsi_conn *conn; - struct iscsi_queue_req *qr = NULL; struct iscsi_thread_set *ts = arg; /* * Allow ourselves to be interrupted by SIGINT so that a @@ -3516,7 +3735,7 @@ restart: if (!conn) goto out; - eodr = map_sg = ret = sent_status = use_misc = 0; + ret = 0; while (!kthread_should_stop()) { /* @@ -3531,251 +3750,15 @@ restart: signal_pending(current)) goto transport_err; -get_immediate: - qr = iscsit_get_cmd_from_immediate_queue(conn); - if (qr) { - atomic_set(&conn->check_immediate_queue, 0); - cmd = qr->cmd; - state = qr->state; - kmem_cache_free(lio_qr_cache, qr); - - spin_lock_bh(&cmd->istate_lock); - switch (state) { - case ISTATE_SEND_R2T: - spin_unlock_bh(&cmd->istate_lock); - ret = iscsit_send_r2t(cmd, conn); - break; - case ISTATE_REMOVE: - spin_unlock_bh(&cmd->istate_lock); - - if (cmd->data_direction == DMA_TO_DEVICE) - iscsit_stop_dataout_timer(cmd); - - spin_lock_bh(&conn->cmd_lock); - list_del(&cmd->i_list); - spin_unlock_bh(&conn->cmd_lock); - - iscsit_free_cmd(cmd); - goto get_immediate; - case ISTATE_SEND_NOPIN_WANT_RESPONSE: - spin_unlock_bh(&cmd->istate_lock); - iscsit_mod_nopin_response_timer(conn); - ret = iscsit_send_unsolicited_nopin(cmd, - conn, 1); - break; - case ISTATE_SEND_NOPIN_NO_RESPONSE: - spin_unlock_bh(&cmd->istate_lock); - ret = iscsit_send_unsolicited_nopin(cmd, - conn, 0); - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, state, - conn->cid); - spin_unlock_bh(&cmd->istate_lock); - goto transport_err; - } - if (ret < 0) { - conn->tx_immediate_queue = 0; - goto transport_err; - } - - if (iscsit_send_tx_data(cmd, conn, 1) < 0) { - conn->tx_immediate_queue = 0; - iscsit_tx_thread_wait_for_tcp(conn); - goto transport_err; - } - - spin_lock_bh(&cmd->istate_lock); - switch (state) { - case ISTATE_SEND_R2T: - spin_unlock_bh(&cmd->istate_lock); - spin_lock_bh(&cmd->dataout_timeout_lock); - iscsit_start_dataout_timer(cmd, conn); - spin_unlock_bh(&cmd->dataout_timeout_lock); - break; - case ISTATE_SEND_NOPIN_WANT_RESPONSE: - cmd->i_state 
= ISTATE_SENT_NOPIN_WANT_RESPONSE; - spin_unlock_bh(&cmd->istate_lock); - break; - case ISTATE_SEND_NOPIN_NO_RESPONSE: - cmd->i_state = ISTATE_SENT_STATUS; - spin_unlock_bh(&cmd->istate_lock); - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, - state, conn->cid); - spin_unlock_bh(&cmd->istate_lock); - goto transport_err; - } - goto get_immediate; - } else - conn->tx_immediate_queue = 0; - -get_response: - qr = iscsit_get_cmd_from_response_queue(conn); - if (qr) { - cmd = qr->cmd; - state = qr->state; - kmem_cache_free(lio_qr_cache, qr); - - spin_lock_bh(&cmd->istate_lock); -check_rsp_state: - switch (state) { - case ISTATE_SEND_DATAIN: - spin_unlock_bh(&cmd->istate_lock); - ret = iscsit_send_data_in(cmd, conn, - &eodr); - map_sg = 1; - break; - case ISTATE_SEND_STATUS: - case ISTATE_SEND_STATUS_RECOVERY: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_status(cmd, conn); - break; - case ISTATE_SEND_LOGOUTRSP: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_logout_response(cmd, conn); - break; - case ISTATE_SEND_ASYNCMSG: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_conn_drop_async_message( - cmd, conn); - break; - case ISTATE_SEND_NOPIN: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_nopin_response(cmd, conn); - break; - case ISTATE_SEND_REJECT: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_reject(cmd, conn); - break; - case ISTATE_SEND_TASKMGTRSP: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_task_mgt_rsp(cmd, conn); - if (ret != 0) - break; - ret = iscsit_tmr_post_handler(cmd, conn); - if (ret != 0) - iscsit_fall_back_to_erl0(conn->sess); - break; - case ISTATE_SEND_TEXTRSP: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_text_rsp(cmd, conn); - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, - state, conn->cid); - spin_unlock_bh(&cmd->istate_lock); - goto transport_err; - } - if (ret < 0) { - conn->tx_response_queue = 0; - goto transport_err; - } - - if (map_sg && !conn->conn_ops->IFMarker) { - if (iscsit_fe_sendpage_sg(cmd, conn) < 0) { - conn->tx_response_queue = 0; - iscsit_tx_thread_wait_for_tcp(conn); - iscsit_unmap_iovec(cmd); - goto transport_err; - } - } else { - if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) { - conn->tx_response_queue = 0; - iscsit_tx_thread_wait_for_tcp(conn); - iscsit_unmap_iovec(cmd); - goto transport_err; - } - } - map_sg = 0; - iscsit_unmap_iovec(cmd); - - spin_lock_bh(&cmd->istate_lock); - switch (state) { - case ISTATE_SEND_DATAIN: - if (!eodr) - goto check_rsp_state; - - if (eodr == 1) { - cmd->i_state = ISTATE_SENT_LAST_DATAIN; - sent_status = 1; - eodr = use_misc = 0; - } else if (eodr == 2) { - cmd->i_state = state = - ISTATE_SEND_STATUS; - sent_status = 0; - eodr = use_misc = 0; - goto check_rsp_state; - } - break; - case ISTATE_SEND_STATUS: - use_misc = 0; - sent_status = 1; - break; - case ISTATE_SEND_ASYNCMSG: - case ISTATE_SEND_NOPIN: - case ISTATE_SEND_STATUS_RECOVERY: - case ISTATE_SEND_TEXTRSP: - use_misc = 0; - sent_status = 1; - break; - case ISTATE_SEND_REJECT: - use_misc = 0; - if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { - cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; - spin_unlock_bh(&cmd->istate_lock); - complete(&cmd->reject_comp); - goto transport_err; - } - 
complete(&cmd->reject_comp); - break; - case ISTATE_SEND_TASKMGTRSP: - use_misc = 0; - sent_status = 1; - break; - case ISTATE_SEND_LOGOUTRSP: - spin_unlock_bh(&cmd->istate_lock); - if (!iscsit_logout_post_handler(cmd, conn)) - goto restart; - spin_lock_bh(&cmd->istate_lock); - use_misc = 0; - sent_status = 1; - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, - cmd->i_state, conn->cid); - spin_unlock_bh(&cmd->istate_lock); - goto transport_err; - } - - if (sent_status) { - cmd->i_state = ISTATE_SENT_STATUS; - sent_status = 0; - } - spin_unlock_bh(&cmd->istate_lock); - - if (atomic_read(&conn->check_immediate_queue)) - goto get_immediate; + ret = handle_immediate_queue(conn); + if (ret < 0) + goto transport_err; - goto get_response; - } else - conn->tx_response_queue = 0; + ret = handle_response_queue(conn); + if (ret == -EAGAIN) + goto restart; + else if (ret < 0) + goto transport_err; } transport_err: @@ -3952,9 +3935,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) * has been reset -> returned sleeping pre-handler state. */ spin_lock_bh(&conn->cmd_lock); - list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { + list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); @@ -3972,7 +3955,7 @@ static void iscsit_stop_timers_for_cmds( struct iscsi_cmd *cmd; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->data_direction == DMA_TO_DEVICE) iscsit_stop_dataout_timer(cmd); } diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index 5db2ddeed5e..12abb4c9e34 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h @@ -18,8 +18,7 @@ extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *); extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *); extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *); extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8); -extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *); -extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int); +extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, bool recovery); extern void iscsit_thread_get_cpumask(struct iscsi_conn *); extern int iscsi_target_tx_thread(void *); extern int iscsi_target_rx_thread(void *); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 00c58cc82c8..69dc8e35c03 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1538,7 +1538,7 @@ static int lio_write_pending(struct se_cmd *se_cmd) struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); if (!cmd->immediate_data && !cmd->unsolicited_data) - return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1); + return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); return 0; } diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 2aaee7efa68..1c70144cdaf 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -296,12 +296,11 @@ struct 
iscsi_datain_req { u32 runlength; u32 data_length; u32 data_offset; - u32 data_offset_end; u32 data_sn; u32 next_burst_len; u32 read_data_done; u32 seq_send_order; - struct list_head dr_list; + struct list_head cmd_datain_node; } ____cacheline_aligned; struct iscsi_ooo_cmdsn { @@ -381,8 +380,6 @@ struct iscsi_cmd { u32 buf_ptr_size; /* Used to store DataDigest */ u32 data_crc; - /* Total size in bytes associated with command */ - u32 data_length; /* Counter for MaxOutstandingR2T */ u32 outstanding_r2ts; /* Next R2T Offset when DataSequenceInOrder=Yes */ @@ -464,16 +461,13 @@ struct iscsi_cmd { /* Session the command is part of, used for connection recovery */ struct iscsi_session *sess; /* list_head for connection list */ - struct list_head i_list; + struct list_head i_conn_node; /* The TCM I/O descriptor that is accessed via container_of() */ struct se_cmd se_cmd; /* Sense buffer that will be mapped into outgoing status */ #define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2) unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN]; - struct scatterlist *t_mem_sg; - u32 t_mem_sg_nents; - u32 padding; u8 pad_bytes[4]; @@ -500,8 +494,6 @@ struct iscsi_conn { u8 network_transport; enum iscsi_timer_flags_table nopin_timer_flags; enum iscsi_timer_flags_table nopin_response_timer_flags; - u8 tx_immediate_queue; - u8 tx_response_queue; /* Used to know what thread encountered a transport failure */ u8 which_thread; /* connection id assigned by the Initiator */ diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c index 8c049512951..848fee76894 100644 --- a/drivers/target/iscsi/iscsi_target_datain_values.c +++ b/drivers/target/iscsi/iscsi_target_datain_values.c @@ -37,7 +37,7 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void) " struct iscsi_datain_req\n"); return NULL; } - INIT_LIST_HEAD(&dr->dr_list); + INIT_LIST_HEAD(&dr->cmd_datain_node); return dr; } @@ -45,14 +45,14 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void) void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr) { spin_lock(&cmd->datain_lock); - list_add_tail(&dr->dr_list, &cmd->datain_list); + list_add_tail(&dr->cmd_datain_node, &cmd->datain_list); spin_unlock(&cmd->datain_lock); } void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr) { spin_lock(&cmd->datain_lock); - list_del(&dr->dr_list); + list_del(&dr->cmd_datain_node); spin_unlock(&cmd->datain_lock); kmem_cache_free(lio_dr_cache, dr); @@ -63,8 +63,8 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd) struct iscsi_datain_req *dr, *dr_tmp; spin_lock(&cmd->datain_lock); - list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) { - list_del(&dr->dr_list); + list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) { + list_del(&dr->cmd_datain_node); kmem_cache_free(lio_dr_cache, dr); } spin_unlock(&cmd->datain_lock); @@ -72,17 +72,14 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd) struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd) { - struct iscsi_datain_req *dr; - if (list_empty(&cmd->datain_list)) { pr_err("cmd->datain_list is empty for ITT:" " 0x%08x\n", cmd->init_task_tag); return NULL; } - list_for_each_entry(dr, &cmd->datain_list, dr_list) - break; - return dr; + return list_first_entry(&cmd->datain_list, struct iscsi_datain_req, + cmd_datain_node); } /* @@ -113,7 +110,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes( read_data_done = (!dr->recovery) ? 
cmd->read_data_done : dr->read_data_done; - read_data_left = (cmd->data_length - read_data_done); + read_data_left = (cmd->se_cmd.data_length - read_data_done); if (!read_data_left) { pr_err("ITT: 0x%08x read_data_left is zero!\n", cmd->init_task_tag); @@ -212,7 +209,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes( seq_send_order = (!dr->recovery) ? cmd->seq_send_order : dr->seq_send_order; - read_data_left = (cmd->data_length - read_data_done); + read_data_left = (cmd->se_cmd.data_length - read_data_done); if (!read_data_left) { pr_err("ITT: 0x%08x read_data_left is zero!\n", cmd->init_task_tag); @@ -231,8 +228,8 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes( offset = (seq->offset + seq->next_burst_len); if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= - cmd->data_length) { - datain->length = (cmd->data_length - offset); + cmd->se_cmd.data_length) { + datain->length = (cmd->se_cmd.data_length - offset); datain->offset = offset; datain->flags |= ISCSI_FLAG_CMD_FINAL; @@ -264,7 +261,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes( } } - if ((read_data_done + datain->length) == cmd->data_length) + if ((read_data_done + datain->length) == cmd->se_cmd.data_length) datain->flags |= ISCSI_FLAG_DATA_STATUS; datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++; @@ -333,7 +330,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no( read_data_done = (!dr->recovery) ? cmd->read_data_done : dr->read_data_done; - read_data_left = (cmd->data_length - read_data_done); + read_data_left = (cmd->se_cmd.data_length - read_data_done); if (!read_data_left) { pr_err("ITT: 0x%08x read_data_left is zero!\n", cmd->init_task_tag); @@ -344,7 +341,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no( if (!pdu) return dr; - if ((read_data_done + pdu->length) == cmd->data_length) { + if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) { pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS); if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) pdu->flags |= ISCSI_FLAG_DATA_ACK; @@ -433,7 +430,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no( seq_send_order = (!dr->recovery) ? cmd->seq_send_order : dr->seq_send_order; - read_data_left = (cmd->data_length - read_data_done); + read_data_left = (cmd->se_cmd.data_length - read_data_done); if (!read_data_left) { pr_err("ITT: 0x%08x read_data_left is zero!\n", cmd->init_task_tag); @@ -463,7 +460,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no( } else seq->next_burst_len += pdu->length; - if ((read_data_done + pdu->length) == cmd->data_length) + if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) pdu->flags |= ISCSI_FLAG_DATA_STATUS; pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++; diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 1ab0560b092..1a02016ecda 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -48,9 +48,9 @@ void iscsit_set_dataout_sequence_values( if (cmd->unsolicited_data) { cmd->seq_start_offset = cmd->write_data_done; cmd->seq_end_offset = (cmd->write_data_done + - (cmd->data_length > + (cmd->se_cmd.data_length > conn->sess->sess_ops->FirstBurstLength) ? 
- conn->sess->sess_ops->FirstBurstLength : cmd->data_length); + conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length); return; } @@ -59,15 +59,15 @@ void iscsit_set_dataout_sequence_values( if (!cmd->seq_start_offset && !cmd->seq_end_offset) { cmd->seq_start_offset = cmd->write_data_done; - cmd->seq_end_offset = (cmd->data_length > + cmd->seq_end_offset = (cmd->se_cmd.data_length > conn->sess->sess_ops->MaxBurstLength) ? (cmd->write_data_done + - conn->sess->sess_ops->MaxBurstLength) : cmd->data_length; + conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length; } else { cmd->seq_start_offset = cmd->seq_end_offset; cmd->seq_end_offset = ((cmd->seq_end_offset + conn->sess->sess_ops->MaxBurstLength) >= - cmd->data_length) ? cmd->data_length : + cmd->se_cmd.data_length) ? cmd->se_cmd.data_length : (cmd->seq_end_offset + conn->sess->sess_ops->MaxBurstLength); } @@ -182,13 +182,13 @@ static int iscsit_dataout_check_unsolicited_sequence( if (!conn->sess->sess_ops->DataPDUInOrder) goto out; - if ((first_burst_len != cmd->data_length) && + if ((first_burst_len != cmd->se_cmd.data_length) && (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) { pr_err("Unsolicited non-immediate data" " received %u does not equal FirstBurstLength: %u, and" " does not equal ExpXferLen %u.\n", first_burst_len, conn->sess->sess_ops->FirstBurstLength, - cmd->data_length); + cmd->se_cmd.data_length); transport_send_check_condition_and_sense(&cmd->se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return DATAOUT_CANNOT_RECOVER; @@ -201,10 +201,10 @@ static int iscsit_dataout_check_unsolicited_sequence( conn->sess->sess_ops->FirstBurstLength); return DATAOUT_CANNOT_RECOVER; } - if (first_burst_len == cmd->data_length) { + if (first_burst_len == cmd->se_cmd.data_length) { pr_err("Command ITT: 0x%08x reached" " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol" - " error.\n", cmd->init_task_tag, cmd->data_length); + " error.\n", cmd->init_task_tag, cmd->se_cmd.data_length); return DATAOUT_CANNOT_RECOVER; } } @@ -294,7 +294,7 @@ static int iscsit_dataout_check_sequence( if ((next_burst_len < conn->sess->sess_ops->MaxBurstLength) && ((cmd->write_data_done + payload_length) < - cmd->data_length)) { + cmd->se_cmd.data_length)) { pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL" " before end of DataOUT sequence, protocol" " error.\n", cmd->init_task_tag); @@ -319,7 +319,7 @@ static int iscsit_dataout_check_sequence( return DATAOUT_CANNOT_RECOVER; } if ((cmd->write_data_done + payload_length) == - cmd->data_length) { + cmd->se_cmd.data_length) { pr_err("Command ITT: 0x%08x reached" " last DataOUT PDU in sequence but ISCSI_FLAG_" "CMD_FINAL is not set, protocol error.\n", @@ -640,9 +640,12 @@ static int iscsit_dataout_post_crc_passed( cmd->write_data_done += payload_length; - return (cmd->write_data_done == cmd->data_length) ? - DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ? - DATAOUT_SEND_R2T : DATAOUT_NORMAL; + if (cmd->write_data_done == cmd->se_cmd.data_length) + return DATAOUT_SEND_TO_TRANSPORT; + else if (send_r2t) + return DATAOUT_SEND_R2T; + else + return DATAOUT_NORMAL; } static int iscsit_dataout_post_crc_failed( diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 006f605edb0..ecdd46deedd 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -279,11 +279,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no( * seq->first_datasn and seq->last_datasn have not been set. 
*/ if (!seq->sent) { -#if 0 pr_err("Ignoring non-sent sequence 0x%08x ->" " 0x%08x\n\n", seq->first_datasn, seq->last_datasn); -#endif continue; } @@ -294,11 +292,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no( */ if ((seq->first_datasn < begrun) && (seq->last_datasn < begrun)) { -#if 0 pr_err("Pre BegRun sequence 0x%08x ->" " 0x%08x\n", seq->first_datasn, seq->last_datasn); -#endif + read_data_done += cmd->seq_list[i].xfer_len; seq->next_burst_len = seq->pdu_send_order = 0; continue; @@ -309,11 +306,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no( */ if ((seq->first_datasn <= begrun) && (seq->last_datasn >= begrun)) { -#if 0 pr_err("Found sequence begrun: 0x%08x in" " 0x%08x -> 0x%08x\n", begrun, seq->first_datasn, seq->last_datasn); -#endif + seq_send_order = seq->seq_send_order; data_sn = seq->first_datasn; seq->next_burst_len = seq->pdu_send_order = 0; @@ -369,10 +365,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no( */ if ((seq->first_datasn > begrun) || (seq->last_datasn > begrun)) { -#if 0 pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n", seq->first_datasn, seq->last_datasn); -#endif + seq->next_burst_len = seq->pdu_send_order = 0; continue; } @@ -526,7 +521,7 @@ int iscsit_handle_status_snack( found_cmd = 0; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->stat_sn == begrun) { found_cmd = 1; break; @@ -987,7 +982,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) return 0; iscsit_set_dataout_sequence_values(cmd); - iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0); + iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); } return 0; } @@ -1121,8 +1116,8 @@ static int iscsit_set_dataout_timeout_values( if (cmd->unsolicited_data) { *offset = 0; *length = (conn->sess->sess_ops->FirstBurstLength > - cmd->data_length) ? - cmd->data_length : + cmd->se_cmd.data_length) ? 
+ cmd->se_cmd.data_length : conn->sess->sess_ops->FirstBurstLength; return 0; } @@ -1193,8 +1188,8 @@ static void iscsit_handle_dataout_timeout(unsigned long data) if (conn->sess->sess_ops->DataPDUInOrder) { pdu_offset = cmd->write_data_done; if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength - - cmd->next_burst_len)) > cmd->data_length) - pdu_length = (cmd->data_length - + cmd->next_burst_len)) > cmd->se_cmd.data_length) + pdu_length = (cmd->se_cmd.data_length - cmd->write_data_done); else pdu_length = (conn->sess->sess_ops->MaxBurstLength - diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 1af1f21af21..65aac14fd83 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -138,9 +138,9 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) spin_lock(&cr->conn_recovery_cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, - &cr->conn_recovery_cmd_list, i_list) { + &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd); @@ -160,9 +160,9 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) spin_lock(&cr->conn_recovery_cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, - &cr->conn_recovery_cmd_list, i_list) { + &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd); @@ -220,7 +220,7 @@ int iscsit_remove_cmd_from_connection_recovery( } cr = cmd->cr; - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); return --cr->cmd_count; } @@ -234,7 +234,7 @@ void iscsit_discard_cr_cmds_by_expstatsn( spin_lock(&cr->conn_recovery_cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, - &cr->conn_recovery_cmd_list, i_list) { + &cr->conn_recovery_cmd_list, i_conn_node) { if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) && (cmd->deferred_i_state != ISTATE_REMOVE)) || @@ -297,11 +297,11 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) mutex_unlock(&sess->cmdsn_mutex); spin_lock_bh(&conn->cmd_lock); - list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { + list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) continue; - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd); @@ -339,14 +339,14 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) /* * Only perform connection recovery on ISCSI_OP_SCSI_CMD or * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call - * list_del(&cmd->i_list); to release the command to the + * list_del(&cmd->i_conn_node); to release the command to the * session pool and remove it from the connection's list. * * Also stop the DataOUT timer, which will be restarted after * sending the TMR response. 
*/ spin_lock_bh(&conn->cmd_lock); - list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { + list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) && (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) { @@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); @@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) */ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) { - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); @@ -397,7 +397,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) cmd->sess = conn->sess; - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_all_datain_reqs(cmd); @@ -407,7 +407,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) * Add the struct iscsi_cmd to the connection recovery cmd list */ spin_lock(&cr->conn_recovery_cmd_lock); - list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list); + list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list); spin_unlock(&cr->conn_recovery_cmd_lock); spin_lock_bh(&conn->cmd_lock); diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index eb05c9d751e..ed5241e7f12 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -803,14 +803,6 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt value = simple_strtoul(value_ptr, &tmpptr, 0); -/* #warning FIXME: Fix this */ -#if 0 - if (strspn(endptr, WHITE_SPACE) != strlen(endptr)) { - pr_err("Illegal value \"%s\" for \"%s\".\n", - value, param->name); - return -1; - } -#endif if (IS_TYPERANGE_0_TO_2(param)) { if ((value < 0) || (value > 2)) { pr_err("Illegal value for \"%s\", must be" @@ -1045,13 +1037,6 @@ static char *iscsi_check_valuelist_for_support( tmp2 = strchr(acceptor_values, ','); if (tmp2) *tmp2 = '\0'; - if (!acceptor_values || !proposer_values) { - if (tmp1) - *tmp1 = ','; - if (tmp2) - *tmp2 = ','; - return NULL; - } if (!strcmp(acceptor_values, proposer_values)) { if (tmp2) *tmp2 = ','; @@ -1061,8 +1046,6 @@ static char *iscsi_check_valuelist_for_support( *tmp2++ = ','; acceptor_values = tmp2; - if (!acceptor_values) - break; } while (acceptor_values); if (tmp1) *tmp1++ = ','; diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c index fc694082bfc..85a306e067b 100644 --- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c +++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c @@ -24,11 +24,13 @@ #include "iscsi_target_core.h" #include "iscsi_target_util.h" +#include "iscsi_target_tpg.h" #include "iscsi_target_seq_pdu_list.h" #define OFFLOAD_BUF_SIZE 32768 -void iscsit_dump_seq_list(struct iscsi_cmd *cmd) +#ifdef DEBUG +static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) { int i; struct iscsi_seq *seq; @@ -46,7 +48,7 @@ void iscsit_dump_seq_list(struct iscsi_cmd *cmd) } } -void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) +static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) { int i; struct 
iscsi_pdu *pdu; @@ -61,6 +63,10 @@ void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) pdu->length, pdu->pdu_send_order, pdu->seq_no); } } +#else +static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {} +static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {} +#endif static void iscsit_ordered_seq_lists( struct iscsi_cmd *cmd, @@ -135,11 +141,11 @@ redo: seq_count++; continue; } - array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); + array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL); if (!array) { pr_err("Unable to allocate memory" " for random array.\n"); - return -1; + return -ENOMEM; } iscsit_create_random_array(array, seq_count); @@ -155,11 +161,11 @@ redo: } if (seq_count) { - array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); + array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL); if (!array) { pr_err("Unable to allocate memory for" " random array.\n"); - return -1; + return -ENOMEM; } iscsit_create_random_array(array, seq_count); @@ -187,10 +193,10 @@ static int iscsit_randomize_seq_lists( if (!seq_count) return 0; - array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); + array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL); if (!array) { pr_err("Unable to allocate memory for random array.\n"); - return -1; + return -ENOMEM; } iscsit_create_random_array(array, seq_count); @@ -221,11 +227,10 @@ static void iscsit_determine_counts_for_list( if ((bl->type == PDULIST_UNSOLICITED) || (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) - unsolicited_data_length = (cmd->data_length > - conn->sess->sess_ops->FirstBurstLength) ? - conn->sess->sess_ops->FirstBurstLength : cmd->data_length; + unsolicited_data_length = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); - while (offset < cmd->data_length) { + while (offset < cmd->se_cmd.data_length) { *pdu_count += 1; if (check_immediate) { @@ -239,10 +244,10 @@ static void iscsit_determine_counts_for_list( } if (unsolicited_data_length > 0) { if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) - >= cmd->data_length) { + >= cmd->se_cmd.data_length) { unsolicited_data_length -= - (cmd->data_length - offset); - offset += (cmd->data_length - offset); + (cmd->se_cmd.data_length - offset); + offset += (cmd->se_cmd.data_length - offset); continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) @@ -263,8 +268,8 @@ static void iscsit_determine_counts_for_list( continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= - cmd->data_length) { - offset += (cmd->data_length - offset); + cmd->se_cmd.data_length) { + offset += (cmd->se_cmd.data_length - offset); continue; } if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= @@ -283,10 +288,10 @@ static void iscsit_determine_counts_for_list( /* - * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No - * and DataPDUInOrder=No. + * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No + * or DataPDUInOrder=No. */ -static int iscsit_build_pdu_and_seq_list( +static int iscsit_do_build_pdu_and_seq_lists( struct iscsi_cmd *cmd, struct iscsi_build_list *bl) { @@ -306,11 +311,10 @@ static int iscsit_build_pdu_and_seq_list( if ((bl->type == PDULIST_UNSOLICITED) || (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) - unsolicited_data_length = (cmd->data_length > - conn->sess->sess_ops->FirstBurstLength) ? 
- conn->sess->sess_ops->FirstBurstLength : cmd->data_length; + unsolicited_data_length = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); - while (offset < cmd->data_length) { + while (offset < cmd->se_cmd.data_length) { pdu_count++; if (!datapduinorder) { pdu[i].offset = offset; @@ -346,21 +350,21 @@ static int iscsit_build_pdu_and_seq_list( if (unsolicited_data_length > 0) { if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= - cmd->data_length) { + cmd->se_cmd.data_length) { if (!datapduinorder) { pdu[i].type = PDUTYPE_UNSOLICITED; pdu[i].length = - (cmd->data_length - offset); + (cmd->se_cmd.data_length - offset); } if (!datasequenceinorder) { seq[seq_no].type = SEQTYPE_UNSOLICITED; seq[seq_no].pdu_count = pdu_count; seq[seq_no].xfer_len = (burstlength + - (cmd->data_length - offset)); + (cmd->se_cmd.data_length - offset)); } unsolicited_data_length -= - (cmd->data_length - offset); - offset += (cmd->data_length - offset); + (cmd->se_cmd.data_length - offset); + offset += (cmd->se_cmd.data_length - offset); continue; } if ((offset + @@ -402,18 +406,18 @@ static int iscsit_build_pdu_and_seq_list( continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= - cmd->data_length) { + cmd->se_cmd.data_length) { if (!datapduinorder) { pdu[i].type = PDUTYPE_NORMAL; - pdu[i].length = (cmd->data_length - offset); + pdu[i].length = (cmd->se_cmd.data_length - offset); } if (!datasequenceinorder) { seq[seq_no].type = SEQTYPE_NORMAL; seq[seq_no].pdu_count = pdu_count; seq[seq_no].xfer_len = (burstlength + - (cmd->data_length - offset)); + (cmd->se_cmd.data_length - offset)); } - offset += (cmd->data_length - offset); + offset += (cmd->se_cmd.data_length - offset); continue; } if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= @@ -464,9 +468,8 @@ static int iscsit_build_pdu_and_seq_list( } else iscsit_ordered_seq_lists(cmd, bl->type); } -#if 0 + iscsit_dump_seq_list(cmd); -#endif } if (!datapduinorder) { if (bl->data_direction & ISCSI_PDU_WRITE) { @@ -484,50 +487,86 @@ static int iscsit_build_pdu_and_seq_list( } else iscsit_ordered_pdu_lists(cmd, bl->type); } -#if 0 + iscsit_dump_pdu_list(cmd); -#endif } return 0; } -/* - * Only called while DataSequenceInOrder=No or DataPDUInOrder=No. 
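Editor's note, not part of the diff: the kzalloc(count * size) to kcalloc(count, size) conversions above are more than style. kcalloc() fails any request whose count and size multiply past SIZE_MAX instead of silently wrapping, and the bare -1 returns become a real -ENOMEM that callers can propagate. A minimal userspace sketch of the overflow guarantee; xcalloc_checked() is an invented name, not a kernel API:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/*
 * Userspace analogue of kcalloc(): reject any count/size pair whose
 * product would overflow size_t, then hand out zeroed memory -- the
 * guarantee kcalloc(n, size) adds over kzalloc(n * size).
 */
static void *xcalloc_checked(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;	/* n * size would wrap; kcalloc fails here too */
	return calloc(n, size);	/* calloc() zeroes, like kzalloc() */
}

int main(void)
{
	uint32_t *array = xcalloc_checked(16, sizeof(uint32_t));

	if (!array)
		return 1;
	printf("allocated %zu zeroed bytes\n", 16 * sizeof(uint32_t));
	free(array);
	return 0;
}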
- */
-int iscsit_do_build_list(
+int iscsit_build_pdu_and_seq_lists(
 	struct iscsi_cmd *cmd,
-	struct iscsi_build_list *bl)
+	u32 immediate_data_length)
 {
+	struct iscsi_build_list bl;
 	u32 pdu_count = 0, seq_count = 1;
 	struct iscsi_conn *conn = cmd->conn;
 	struct iscsi_pdu *pdu = NULL;
 	struct iscsi_seq *seq = NULL;
 
-	iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na;
+
+	/*
+	 * Do nothing if no OOO shenanigans
+	 */
+	if (sess->sess_ops->DataSequenceInOrder &&
+	    sess->sess_ops->DataPDUInOrder)
+		return 0;
+
+	if (cmd->data_direction == DMA_NONE)
+		return 0;
+
+	na = iscsit_tpg_get_node_attrib(sess);
+	memset(&bl, 0, sizeof(struct iscsi_build_list));
+
+	if (cmd->data_direction == DMA_FROM_DEVICE) {
+		bl.data_direction = ISCSI_PDU_READ;
+		bl.type = PDULIST_NORMAL;
+		if (na->random_datain_pdu_offsets)
+			bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
+		if (na->random_datain_seq_offsets)
+			bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
+	} else {
+		bl.data_direction = ISCSI_PDU_WRITE;
+		bl.immediate_data_length = immediate_data_length;
+		if (na->random_r2t_offsets)
+			bl.randomize |= RANDOM_R2T_OFFSETS;
+
+		if (!cmd->immediate_data && !cmd->unsolicited_data)
+			bl.type = PDULIST_NORMAL;
+		else if (cmd->immediate_data && !cmd->unsolicited_data)
+			bl.type = PDULIST_IMMEDIATE;
+		else if (!cmd->immediate_data && cmd->unsolicited_data)
+			bl.type = PDULIST_UNSOLICITED;
+		else if (cmd->immediate_data && cmd->unsolicited_data)
+			bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
+	}
+
+	iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count);
 
 	if (!conn->sess->sess_ops->DataSequenceInOrder) {
-		seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
+		seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC);
 		if (!seq) {
 			pr_err("Unable to allocate struct iscsi_seq list\n");
-			return -1;
+			return -ENOMEM;
 		}
 		cmd->seq_list = seq;
 		cmd->seq_count = seq_count;
 	}
 
 	if (!conn->sess->sess_ops->DataPDUInOrder) {
-		pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
+		pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC);
 		if (!pdu) {
 			pr_err("Unable to allocate struct iscsi_pdu list.\n");
 			kfree(seq);
-			return -1;
+			return -ENOMEM;
 		}
 		cmd->pdu_list = pdu;
 		cmd->pdu_count = pdu_count;
 	}
 
-	return iscsit_build_pdu_and_seq_list(cmd, bl);
+	return iscsit_do_build_pdu_and_seq_lists(cmd, &bl);
 }
 
 struct iscsi_pdu *iscsit_get_pdu_holder(
@@ -572,13 +611,12 @@ redo:
 		pdu = &cmd->pdu_list[cmd->pdu_start];
 
 		for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
-#if 0
 			pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
 				"_send_order: %d, pdu[i].offset: %d,"
 				" pdu[i].length: %d\n", pdu[i].seq_no,
 				pdu[i].pdu_send_order, pdu[i].offset,
 				pdu[i].length);
-#endif
+
 			if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
 				cmd->pdu_send_order++;
 				return &pdu[i];
@@ -601,11 +639,11 @@ redo:
 			pr_err("struct iscsi_seq is NULL!\n");
 			return NULL;
 		}
-#if 0
+
 		pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
 			" seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
 			seq->seq_no);
-#endif
+
 		pdu = &cmd->pdu_list[seq->pdu_start];
 
 		if (seq->pdu_send_order == seq->pdu_count) {
@@ -645,12 +683,11 @@ struct iscsi_seq *iscsit_get_seq_holder(
 	}
 
 	for (i = 0; i < cmd->seq_count; i++) {
-#if 0
 		pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
			"xfer_len: %d, seq_list[i].seq_no %u\n",
 			cmd->seq_list[i].orig_offset,
 			cmd->seq_list[i].xfer_len,
 			cmd->seq_list[i].seq_no);
-#endif
+
 		if ((cmd->seq_list[i].orig_offset +
 				cmd->seq_list[i].xfer_len) >=
 				(offset + length))
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
index 0d52a10e306..d5b153751a8 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -78,7 +78,7 @@ struct iscsi_seq {
 	u32	xfer_len;
 } ____cacheline_aligned;
 
-extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
+extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
 extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
 extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
 extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index e01da9d2b37..f4e640b51fd 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -78,10 +78,7 @@ int iscsit_tmr_task_warm_reset(
 {
 	struct iscsi_session *sess = conn->sess;
 	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
-#if 0
-	struct iscsi_init_task_mgt_cmnd *hdr =
-		(struct iscsi_init_task_mgt_cmnd *) buf;
-#endif
+
 	if (!na->tmr_warm_reset) {
 		pr_err("TMR Opcode TARGET_WARM_RESET authorization"
 			" failed for Initiator Node: %s\n",
@@ -216,7 +213,7 @@ static int iscsit_task_reassign_complete_nop_out(
 	iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	cmd->i_state = ISTATE_SEND_NOPIN;
@@ -272,9 +269,9 @@ static int iscsit_task_reassign_complete_write(
 	offset = cmd->next_burst_len = cmd->write_data_done;
 
 	if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
-	     cmd->data_length) {
+	     cmd->se_cmd.data_length) {
 		no_build_r2ts = 1;
-		length = (cmd->data_length - offset);
+		length = (cmd->se_cmd.data_length - offset);
 	} else
 		length = (conn->sess->sess_ops->FirstBurstLength - offset);
 
@@ -292,7 +289,7 @@ static int iscsit_task_reassign_complete_write(
 	/*
 	 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
*/ - return iscsit_build_r2ts_for_cmd(cmd, conn, 2); + return iscsit_build_r2ts_for_cmd(cmd, conn, true); } static int iscsit_task_reassign_complete_read( @@ -385,7 +382,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd( iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess); spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 4eba86d2bd8..b42cdeb153d 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -163,7 +163,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) } cmd->conn = conn; - INIT_LIST_HEAD(&cmd->i_list); + INIT_LIST_HEAD(&cmd->i_conn_node); INIT_LIST_HEAD(&cmd->datain_list); INIT_LIST_HEAD(&cmd->cmd_r2t_list); init_completion(&cmd->reject_comp); @@ -176,174 +176,6 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) return cmd; } -/* - * Called from iscsi_handle_scsi_cmd() - */ -struct iscsi_cmd *iscsit_allocate_se_cmd( - struct iscsi_conn *conn, - u32 data_length, - int data_direction, - int iscsi_task_attr) -{ - struct iscsi_cmd *cmd; - struct se_cmd *se_cmd; - int sam_task_attr; - - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); - if (!cmd) - return NULL; - - cmd->data_direction = data_direction; - cmd->data_length = data_length; - /* - * Figure out the SAM Task Attribute for the incoming SCSI CDB - */ - if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || - (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) - sam_task_attr = MSG_SIMPLE_TAG; - else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) - sam_task_attr = MSG_ORDERED_TAG; - else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) - sam_task_attr = MSG_HEAD_TAG; - else if (iscsi_task_attr == ISCSI_ATTR_ACA) - sam_task_attr = MSG_ACA_TAG; - else { - pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" - " MSG_SIMPLE_TAG\n", iscsi_task_attr); - sam_task_attr = MSG_SIMPLE_TAG; - } - - se_cmd = &cmd->se_cmd; - /* - * Initialize struct se_cmd descriptor from target_core_mod infrastructure - */ - transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops, - conn->sess->se_sess, data_length, data_direction, - sam_task_attr, &cmd->sense_buffer[0]); - return cmd; -} - -struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( - struct iscsi_conn *conn, - u8 function) -{ - struct iscsi_cmd *cmd; - struct se_cmd *se_cmd; - int rc; - u8 tcm_function; - - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); - if (!cmd) - return NULL; - - cmd->data_direction = DMA_NONE; - - cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL); - if (!cmd->tmr_req) { - pr_err("Unable to allocate memory for" - " Task Management command!\n"); - goto out; - } - /* - * TASK_REASSIGN for ERL=2 / connection stays inside of - * LIO-Target $FABRIC_MOD - */ - if (function == ISCSI_TM_FUNC_TASK_REASSIGN) - return cmd; - - se_cmd = &cmd->se_cmd; - /* - * Initialize struct se_cmd descriptor from target_core_mod infrastructure - */ - transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops, - conn->sess->se_sess, 0, DMA_NONE, - MSG_SIMPLE_TAG, &cmd->sense_buffer[0]); - - switch (function) { - case ISCSI_TM_FUNC_ABORT_TASK: - tcm_function = TMR_ABORT_TASK; - break; - case ISCSI_TM_FUNC_ABORT_TASK_SET: - tcm_function = TMR_ABORT_TASK_SET; - break; - case ISCSI_TM_FUNC_CLEAR_ACA: - 
tcm_function = TMR_CLEAR_ACA; - break; - case ISCSI_TM_FUNC_CLEAR_TASK_SET: - tcm_function = TMR_CLEAR_TASK_SET; - break; - case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: - tcm_function = TMR_LUN_RESET; - break; - case ISCSI_TM_FUNC_TARGET_WARM_RESET: - tcm_function = TMR_TARGET_WARM_RESET; - break; - case ISCSI_TM_FUNC_TARGET_COLD_RESET: - tcm_function = TMR_TARGET_COLD_RESET; - break; - default: - pr_err("Unknown iSCSI TMR Function:" - " 0x%02x\n", function); - goto out; - } - - rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL); - if (rc < 0) - goto out; - - cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req; - - return cmd; -out: - iscsit_release_cmd(cmd); - return NULL; -} - -int iscsit_decide_list_to_build( - struct iscsi_cmd *cmd, - u32 immediate_data_length) -{ - struct iscsi_build_list bl; - struct iscsi_conn *conn = cmd->conn; - struct iscsi_session *sess = conn->sess; - struct iscsi_node_attrib *na; - - if (sess->sess_ops->DataSequenceInOrder && - sess->sess_ops->DataPDUInOrder) - return 0; - - if (cmd->data_direction == DMA_NONE) - return 0; - - na = iscsit_tpg_get_node_attrib(sess); - memset(&bl, 0, sizeof(struct iscsi_build_list)); - - if (cmd->data_direction == DMA_FROM_DEVICE) { - bl.data_direction = ISCSI_PDU_READ; - bl.type = PDULIST_NORMAL; - if (na->random_datain_pdu_offsets) - bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS; - if (na->random_datain_seq_offsets) - bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS; - } else { - bl.data_direction = ISCSI_PDU_WRITE; - bl.immediate_data_length = immediate_data_length; - if (na->random_r2t_offsets) - bl.randomize |= RANDOM_R2T_OFFSETS; - - if (!cmd->immediate_data && !cmd->unsolicited_data) - bl.type = PDULIST_NORMAL; - else if (cmd->immediate_data && !cmd->unsolicited_data) - bl.type = PDULIST_IMMEDIATE; - else if (!cmd->immediate_data && cmd->unsolicited_data) - bl.type = PDULIST_UNSOLICITED; - else if (cmd->immediate_data && cmd->unsolicited_data) - bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED; - } - - return iscsit_do_build_list(cmd, &bl); -} - struct iscsi_seq *iscsit_get_seq_holder_for_datain( struct iscsi_cmd *cmd, u32 seq_send_order) @@ -502,14 +334,14 @@ int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf) if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) return 0; - if (((cmd->first_burst_len + payload_length) != cmd->data_length) && + if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) && ((cmd->first_burst_len + payload_length) != conn->sess->sess_ops->FirstBurstLength)) { pr_err("Unsolicited non-immediate data received %u" " does not equal FirstBurstLength: %u, and does" " not equal ExpXferLen %u.\n", (cmd->first_burst_len + payload_length), - conn->sess->sess_ops->FirstBurstLength, cmd->data_length); + conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length); transport_send_check_condition_and_sense(se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return -1; @@ -524,7 +356,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt( struct iscsi_cmd *cmd; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->init_task_tag == init_task_tag) { spin_unlock_bh(&conn->cmd_lock); return cmd; @@ -545,7 +377,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( struct iscsi_cmd *cmd; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->init_task_tag == init_task_tag) { 
spin_unlock_bh(&conn->cmd_lock); return cmd; @@ -568,7 +400,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_ttt( struct iscsi_cmd *cmd = NULL; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->targ_xfer_tag == targ_xfer_tag) { spin_unlock_bh(&conn->cmd_lock); return cmd; @@ -596,7 +428,7 @@ int iscsit_find_cmd_for_recovery( spin_lock(&sess->cr_i_lock); list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) { spin_lock(&cr->conn_recovery_cmd_lock); - list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) { + list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) { if (cmd->init_task_tag == init_task_tag) { spin_unlock(&cr->conn_recovery_cmd_lock); spin_unlock(&sess->cr_i_lock); @@ -616,7 +448,7 @@ int iscsit_find_cmd_for_recovery( spin_lock(&sess->cr_a_lock); list_for_each_entry(cr, &sess->cr_active_list, cr_list) { spin_lock(&cr->conn_recovery_cmd_lock); - list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) { + list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) { if (cmd->init_task_tag == init_task_tag) { spin_unlock(&cr->conn_recovery_cmd_lock); spin_unlock(&sess->cr_a_lock); @@ -813,7 +645,6 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn) void iscsit_release_cmd(struct iscsi_cmd *cmd) { struct iscsi_conn *conn = cmd->conn; - int i; iscsit_free_r2ts_from_list(cmd); iscsit_free_all_datain_reqs(cmd); @@ -824,11 +655,6 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) kfree(cmd->tmr_req); kfree(cmd->iov_data); - for (i = 0; i < cmd->t_mem_sg_nents; i++) - __free_page(sg_page(&cmd->t_mem_sg[i])); - - kfree(cmd->t_mem_sg); - if (conn) { iscsit_remove_cmd_from_immediate_queue(cmd, conn); iscsit_remove_cmd_from_response_queue(cmd, conn); @@ -1038,7 +864,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response) spin_unlock_bh(&conn->sess->ttt_lock); spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); if (want_response) diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 835bf7de028..e1c729b8a1c 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -9,9 +9,6 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *); extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); -extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int); -extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8); -extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32); extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index a9b4eeefe9f..38dfac2b0a1 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -213,7 +213,7 @@ static void tcm_loop_submission_work(struct work_struct *work) * associated read buffers, go ahead and do that here for type * SCF_SCSI_CONTROL_SG_IO_CDB. 
Also note that this is currently * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB - * by target core in transport_generic_allocate_tasks() -> + * by target core in target_setup_cmd_from_cdb() -> * transport_generic_cmd_sequencer(). */ if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB && @@ -227,7 +227,7 @@ static void tcm_loop_submission_work(struct work_struct *work) } } - ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); + ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd); if (ret == -ENOMEM) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index c7746a3339d..e624b836469 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -59,26 +59,31 @@ struct t10_alua_lu_gp *default_lu_gp; * * See spc4r17 section 6.27 */ -int target_emulate_report_target_port_groups(struct se_task *task) +int target_emulate_report_target_port_groups(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct se_port *port; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; unsigned char *buf; - u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first - Target port group descriptor */ + u32 rd_len = 0, off; + int ext_hdr = (cmd->t_task_cdb[1] & 0x20); /* - * Need at least 4 bytes of response data or else we can't - * even fit the return data length. + * Skip over RESERVED area to first Target port group descriptor + * depending on the PARAMETER DATA FORMAT type.. */ - if (cmd->data_length < 4) { - pr_warn("REPORT TARGET PORT GROUPS allocation length %u" - " too small\n", cmd->data_length); + if (ext_hdr != 0) + off = 8; + else + off = 4; + + if (cmd->data_length < off) { + pr_warn("REPORT TARGET PORT GROUPS allocation length %u too" + " small for %s header\n", cmd->data_length, + (ext_hdr) ? "extended" : "normal"); + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; return -EINVAL; } - buf = transport_kmap_data_sg(cmd); spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); @@ -159,15 +164,34 @@ int target_emulate_report_target_port_groups(struct se_task *task) /* * Set the RETURN DATA LENGTH set in the header of the DataIN Payload */ - buf[0] = ((rd_len >> 24) & 0xff); - buf[1] = ((rd_len >> 16) & 0xff); - buf[2] = ((rd_len >> 8) & 0xff); - buf[3] = (rd_len & 0xff); + put_unaligned_be32(rd_len, &buf[0]); + /* + * Fill in the Extended header parameter data format if requested + */ + if (ext_hdr != 0) { + buf[4] = 0x10; + /* + * Set the implict transition time (in seconds) for the application + * client to use as a base for it's transition timeout value. + * + * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN + * this CDB was received upon to determine this value individually + * for ALUA target port group. 
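Editor's note, not part of the diff: the extended-header path above fills SCSI payload bytes in big-endian wire order, put_unaligned_be32() for the RETURN DATA LENGTH, buf[4] = 0x10 for the extended format, buf[5] for the implicit transition time. A userspace sketch of the same encoding; put_be32() is a local stand-in for the kernel helper, and the field values are examples:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's put_unaligned_be32(): SCSI payloads are
 * big-endian on the wire regardless of host byte order. */
static void put_be32(uint32_t val, uint8_t *buf)
{
	buf[0] = val >> 24;
	buf[1] = val >> 16;
	buf[2] = val >> 8;
	buf[3] = val;
}

int main(void)
{
	uint8_t hdr[8] = { 0 };

	put_be32(0x24, &hdr[0]);	/* RETURN DATA LENGTH (example value) */
	hdr[4] = 0x10;			/* extended header format, as in the diff */
	hdr[5] = 7;			/* implict_trans_secs (example value) */

	for (int i = 0; i < 8; i++)
		printf("%02x ", hdr[i]);
	printf("\n");
	return 0;
}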
+	 */
+	port = cmd->se_lun->lun_sep;
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (tg_pt_gp_mem) {
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+		if (tg_pt_gp)
+			buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	}
+	}
 	transport_kunmap_data_sg(cmd);
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
@@ -176,9 +200,8 @@ int target_emulate_report_target_port_groups(struct se_task *task)
  *
  * See spc4r17 section 6.35
  */
-int target_emulate_set_target_port_groups(struct se_task *task)
+int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
@@ -351,8 +374,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
 out:
 	transport_kunmap_data_sg(cmd);
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
@@ -391,7 +413,7 @@ static inline int core_alua_state_standby(
 	case RECEIVE_DIAGNOSTIC:
 	case SEND_DIAGNOSTIC:
 	case MAINTENANCE_IN:
-		switch (cdb[1]) {
+		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
@@ -433,7 +455,7 @@ static inline int core_alua_state_unavailable(
 	case INQUIRY:
 	case REPORT_LUNS:
 	case MAINTENANCE_IN:
-		switch (cdb[1]) {
+		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
@@ -473,7 +495,7 @@ static inline int core_alua_state_transition(
 	case INQUIRY:
 	case REPORT_LUNS:
 	case MAINTENANCE_IN:
-		switch (cdb[1]) {
+		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
@@ -1359,6 +1381,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	 */
 	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
 	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+	tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
 
 	if (def_group) {
 		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
@@ -1855,6 +1878,37 @@ ssize_t core_alua_store_trans_delay_msecs(
 	return count;
 }
 
+ssize_t core_alua_show_implict_trans_secs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
+}
+
+ssize_t core_alua_store_implict_trans_secs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract implict_trans_secs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
+		pr_err("Passed implict_trans_secs: %lu, exceeds"
+			" ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
+			ALUA_MAX_IMPLICT_TRANS_SECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
+
+	return count;
+}
+
 ssize_t core_alua_show_preferred_bit(
 	struct t10_alua_tg_pt_gp *tg_pt_gp,
 	char *page)
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index c5b4ecd3e74..f920c170d47 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -52,6 +52,12 @@
 #define ALUA_DEFAULT_TRANS_DELAY_MSECS		0
 #define ALUA_MAX_TRANS_DELAY_MSECS		30000 /* 30 seconds */
 /*
+ * Used for the recommended application client implict transition timeout
+ * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
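Editor's note, not part of the diff: the three core_alua_state_*() hunks above mask with cdb[1] & 0x1f because only the low five bits of byte 1 carry the MAINTENANCE_IN service action; the diff itself uses the 0x20 bit of the same byte for the extended-header request. A compilable illustration; the opcode and service-action values are, to the best of my knowledge, the ones from SPC-4 and include/scsi/scsi.h:

#include <stdio.h>
#include <stdint.h>

#define MAINTENANCE_IN		0xa3	/* SCSI opcode */
#define MI_REPORT_TARGET_PGS	0x0a	/* service action, bits 4:0 of byte 1 */

/* The diff's fix: compare only bits 4:0 of cdb[1], since the upper
 * bits of that byte are not part of the service action. */
static int is_report_target_pgs(const uint8_t *cdb)
{
	return cdb[0] == MAINTENANCE_IN &&
	       (cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS;
}

int main(void)
{
	/* Byte 1 carries 0x20 (the extended-header request) on top of
	 * the service action; an unmasked switch would miss this CDB. */
	uint8_t cdb[12] = { MAINTENANCE_IN, 0x20 | MI_REPORT_TARGET_PGS };

	printf("matches REPORT TARGET PORT GROUPS: %s\n",
	       is_report_target_pgs(cdb) ? "yes" : "no");
	return 0;
}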
+ */ +#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0 +#define ALUA_MAX_IMPLICT_TRANS_SECS 255 +/* * Used by core_alua_update_tpg_primary_metadata() and * core_alua_update_tpg_secondary_metadata() */ @@ -66,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; -extern int target_emulate_report_target_port_groups(struct se_task *); -extern int target_emulate_set_target_port_groups(struct se_task *); +extern int target_emulate_report_target_port_groups(struct se_cmd *); +extern int target_emulate_set_target_port_groups(struct se_cmd *); extern int core_alua_check_nonop_delay(struct se_cmd *); extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, struct se_device *, struct se_port *, @@ -107,6 +113,10 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *, char *); extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, const char *, size_t); +extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *, + char *); +extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *, + const char *, size_t); extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, char *); extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *, diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 30a67707036..9888693a18f 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -432,6 +432,7 @@ static int target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; + u32 max_sectors; int have_tp = 0; /* @@ -456,7 +457,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set MAXIMUM TRANSFER LENGTH */ - put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]); + max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, + dev->se_sub_dev->se_dev_attrib.hw_max_sectors); + put_unaligned_be32(max_sectors, &buf[8]); /* * Set OPTIMAL TRANSFER LENGTH @@ -598,9 +601,8 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) return 0; } -int target_emulate_inquiry(struct se_task *task) +int target_emulate_inquiry(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; unsigned char *buf, *map_buf; @@ -664,16 +666,13 @@ out: } transport_kunmap_data_sg(cmd); - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } -int target_emulate_readcapacity(struct se_task *task) +int target_emulate_readcapacity(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned char *buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); @@ -697,14 +696,12 @@ int target_emulate_readcapacity(struct se_task *task) transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } -int target_emulate_readcapacity_16(struct se_task *task) +int target_emulate_readcapacity_16(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned char *buf; unsigned long long blocks = dev->transport->get_blocks(dev); @@ -732,8 +729,7 @@ int target_emulate_readcapacity_16(struct se_task 
*task) transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } @@ -872,9 +868,8 @@ target_modesense_dpofua(unsigned char *buf, int type) } } -int target_emulate_modesense(struct se_task *task) +int target_emulate_modesense(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; char *cdb = cmd->t_task_cdb; unsigned char *rbuf; @@ -947,14 +942,12 @@ int target_emulate_modesense(struct se_task *task) memcpy(rbuf, buf, offset); transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } -int target_emulate_request_sense(struct se_task *task) +int target_emulate_request_sense(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; unsigned char *cdb = cmd->t_task_cdb; unsigned char *buf; u8 ua_asc = 0, ua_ascq = 0; @@ -1008,8 +1001,7 @@ int target_emulate_request_sense(struct se_task *task) end: transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } @@ -1017,9 +1009,8 @@ end: * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. * Note this is not used for TCM/pSCSI passthrough */ -int target_emulate_unmap(struct se_task *task) +int target_emulate_unmap(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned char *buf, *ptr = NULL; unsigned char *cdb = &cmd->t_task_cdb[0]; @@ -1066,10 +1057,8 @@ int target_emulate_unmap(struct se_task *task) err: transport_kunmap_data_sg(cmd); - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } @@ -1077,9 +1066,8 @@ err: * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. * Note this is not used for TCM/pSCSI passthrough */ -int target_emulate_write_same(struct se_task *task) +int target_emulate_write_same(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; sector_t range; sector_t lba = cmd->t_task_lba; @@ -1118,79 +1106,25 @@ int target_emulate_write_same(struct se_task *task) return ret; } - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } -int target_emulate_synchronize_cache(struct se_task *task) +int target_emulate_synchronize_cache(struct se_cmd *cmd) { - struct se_device *dev = task->task_se_cmd->se_dev; - struct se_cmd *cmd = task->task_se_cmd; - - if (!dev->transport->do_sync_cache) { + if (!cmd->se_dev->transport->do_sync_cache) { pr_err("SYNCHRONIZE_CACHE emulation not supported" - " for: %s\n", dev->transport->name); + " for: %s\n", cmd->se_dev->transport->name); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; return -ENOSYS; } - dev->transport->do_sync_cache(task); + cmd->se_dev->transport->do_sync_cache(cmd); return 0; } -int target_emulate_noop(struct se_task *task) +int target_emulate_noop(struct se_cmd *cmd) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } - -/* - * Write a CDB into @cdb that is based on the one the intiator sent us, - * but updated to only cover the sectors that the current task handles. 
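Editor's note, not part of the diff: the next hunk removes target_get_task_cdb(), which rewrote the initiator's CDB so each se_task covered only its slice of the transfer, a job that disappears along with the se_task layer. For reference, a standalone version of its 10-byte case, following the put_unaligned_be32()/be16() encoding visible in the removed code:

#include <stdio.h>
#include <stdint.h>

/* READ(10)/WRITE(10): 32-bit LBA at bytes 2-5, 16-bit transfer length
 * at bytes 7-8, both big-endian -- the same layout the removed helper
 * produced with put_unaligned_be32()/put_unaligned_be16(). */
static void fill_rw10(uint8_t *cdb, uint32_t lba, uint16_t sectors)
{
	cdb[2] = lba >> 24;
	cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;
	cdb[5] = lba;
	cdb[7] = sectors >> 8;
	cdb[8] = sectors;
}

int main(void)
{
	uint8_t cdb[10] = { 0x28 };	/* READ(10) opcode */

	fill_rw10(cdb, 0x12345678, 128);
	for (int i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}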
- */ -void target_get_task_cdb(struct se_task *task, unsigned char *cdb) -{ - struct se_cmd *cmd = task->task_se_cmd; - unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb); - - memcpy(cdb, cmd->t_task_cdb, cdb_len); - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - unsigned long long lba = task->task_lba; - u32 sectors = task->task_sectors; - - switch (cdb_len) { - case 6: - /* 21-bit LBA and 8-bit sectors */ - cdb[1] = (lba >> 16) & 0x1f; - cdb[2] = (lba >> 8) & 0xff; - cdb[3] = lba & 0xff; - cdb[4] = sectors & 0xff; - break; - case 10: - /* 32-bit LBA and 16-bit sectors */ - put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be16(sectors, &cdb[7]); - break; - case 12: - /* 32-bit LBA and 32-bit sectors */ - put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be32(sectors, &cdb[6]); - break; - case 16: - /* 64-bit LBA and 32-bit sectors */ - put_unaligned_be64(lba, &cdb[2]); - put_unaligned_be32(sectors, &cdb[10]); - break; - case 32: - /* 64-bit LBA and 32-bit sectors, extended CDB */ - put_unaligned_be64(lba, &cdb[12]); - put_unaligned_be32(sectors, &cdb[28]); - break; - default: - BUG(); - } - } -} -EXPORT_SYMBOL(target_get_task_cdb); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index cbb66537d23..801efa89204 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -683,9 +683,6 @@ SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB_RO(hw_max_sectors); SE_DEV_ATTR_RO(hw_max_sectors); -DEF_DEV_ATTRIB(max_sectors); -SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR); - DEF_DEV_ATTRIB(fabric_max_sectors); SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR); @@ -727,7 +724,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_hw_block_size.attr, &target_core_dev_attrib_block_size.attr, &target_core_dev_attrib_hw_max_sectors.attr, - &target_core_dev_attrib_max_sectors.attr, &target_core_dev_attrib_fabric_max_sectors.attr, &target_core_dev_attrib_optimal_sectors.attr, &target_core_dev_attrib_hw_queue_depth.attr, @@ -2451,6 +2447,26 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); /* + * implict_trans_secs + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return core_alua_show_implict_trans_secs(tg_pt_gp, page); +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); +} + +SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); + +/* * preferred */ @@ -2574,6 +2590,7 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { &target_core_alua_tg_pt_gp_alua_write_metadata.attr, &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, + &target_core_alua_tg_pt_gp_implict_trans_secs.attr, &target_core_alua_tg_pt_gp_preferred.attr, &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, &target_core_alua_tg_pt_gp_members.attr, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index aa626774638..5ad972856a8 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -643,9 +643,8 @@ void core_dev_unexport( lun->lun_se_dev = NULL; } -int target_report_luns(struct se_task *se_task) +int 
target_report_luns(struct se_cmd *se_cmd) { - struct se_cmd *se_cmd = se_task->task_se_cmd; struct se_dev_entry *deve; struct se_session *se_sess = se_cmd->se_sess; unsigned char *buf; @@ -696,8 +695,7 @@ done: buf[3] = (lun_count & 0xff); transport_kunmap_data_sg(se_cmd); - se_task->task_scsi_status = GOOD; - transport_complete_task(se_task, 1); + target_complete_cmd(se_cmd, GOOD); return 0; } @@ -878,15 +876,12 @@ void se_dev_set_default_attribs( dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; /* - * max_sectors is based on subsystem plugin dependent requirements. + * Align max_hw_sectors down to PAGE_SIZE I/O transfers */ - dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; - /* - * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() - */ - limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, + limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors, limits->logical_block_size); - dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; + dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; + /* * Set fabric_max_sectors, which is reported in block limits * VPD page (B0h). @@ -1170,64 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) return 0; } -int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) -{ - int force = 0; /* Force setting for VDEVS */ - - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - pr_err("dev[%p]: Unable to change SE Device" - " max_sectors while dev_export_obj: %d count exists\n", - dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -EINVAL; - } - if (!max_sectors) { - pr_err("dev[%p]: Illegal ZERO value for" - " max_sectors\n", dev); - return -EINVAL; - } - if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { - pr_err("dev[%p]: Passed max_sectors: %u less than" - " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, - DA_STATUS_MAX_SECTORS_MIN); - return -EINVAL; - } - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { - pr_err("dev[%p]: Passed max_sectors: %u" - " greater than TCM/SE_Device max_sectors:" - " %u\n", dev, max_sectors, - dev->se_sub_dev->se_dev_attrib.hw_max_sectors); - return -EINVAL; - } - } else { - if (!force && (max_sectors > - dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { - pr_err("dev[%p]: Passed max_sectors: %u" - " greater than TCM/SE_Device max_sectors" - ": %u, use force=1 to override.\n", dev, - max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); - return -EINVAL; - } - if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { - pr_err("dev[%p]: Passed max_sectors: %u" - " greater than DA_STATUS_MAX_SECTORS_MAX:" - " %u\n", dev, max_sectors, - DA_STATUS_MAX_SECTORS_MAX); - return -EINVAL; - } - } - /* - * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() - */ - max_sectors = se_dev_align_max_sectors(max_sectors, - dev->se_sub_dev->se_dev_attrib.block_size); - - dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; - pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", - dev, max_sectors); - return 0; -} - int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { @@ -1341,7 +1278,6 @@ struct se_lun *core_dev_add_lun( u32 lun) { struct se_lun *lun_p; - u32 lun_access = 0; 
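Editor's note, not part of the diff: se_dev_set_default_attribs() above now aligns max_hw_sectors down before exporting hw_max_sectors. The sketch below is my reading of what se_dev_align_max_sectors() is used for, keeping sectors * block_size a whole number of pages; the helper name here is invented, the exact rounding is assumed, and a 4 KiB page is assumed:

#include <stdio.h>
#include <stdint.h>

#define ASSUMED_PAGE_SIZE 4096	/* assumption: 4 KiB pages */

/*
 * Round a sector limit down so (sectors * block_size) is a whole
 * number of pages. Not the kernel implementation, only a sketch.
 */
static uint32_t align_max_sectors(uint32_t max_sectors, uint32_t block_size)
{
	uint64_t bytes = (uint64_t)max_sectors * block_size;

	bytes -= bytes % ASSUMED_PAGE_SIZE;	/* drop the partial page */
	return (uint32_t)(bytes / block_size);
}

int main(void)
{
	printf("%u\n", (unsigned)align_max_sectors(1023, 512)); /* 1016 */
	return 0;
}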
int rc; if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { @@ -1354,12 +1290,8 @@ struct se_lun *core_dev_add_lun( if (IS_ERR(lun_p)) return lun_p; - if (dev->dev_flags & DF_READ_ONLY) - lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; - else - lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; - - rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev); + rc = core_tpg_post_addlun(tpg, lun_p, + TRANSPORT_LUNFLAGS_READ_WRITE, dev); if (rc < 0) return ERR_PTR(rc); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index f286955331a..686dba189f8 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -133,15 +133,10 @@ static struct se_device *fd_create_virtdevice( ret = PTR_ERR(dev_p); goto fail; } -#if 0 - if (di->no_create_file) - flags = O_RDWR | O_LARGEFILE; - else - flags = O_RDWR | O_CREAT | O_LARGEFILE; -#else + + /* O_DIRECT too? */ flags = O_RDWR | O_CREAT | O_LARGEFILE; -#endif -/* flags |= O_DIRECT; */ + /* * If fd_buffered_io=1 has not been set explicitly (the default), * use O_SYNC to force FILEIO writes to disk. @@ -249,53 +244,33 @@ static void fd_free_device(void *p) kfree(fd_dev); } -static inline struct fd_request *FILE_REQ(struct se_task *task) -{ - return container_of(task, struct fd_request, fd_task); -} - - -static struct se_task * -fd_alloc_task(unsigned char *cdb) +static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents) { - struct fd_request *fd_req; - - fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); - if (!fd_req) { - pr_err("Unable to allocate struct fd_request\n"); - return NULL; - } - - return &fd_req->fd_task; -} - -static int fd_do_readv(struct se_task *task) -{ - struct fd_request *req = FILE_REQ(task); - struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; + struct se_device *se_dev = cmd->se_dev; struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; - struct scatterlist *sg = task->task_sg; + struct scatterlist *sg; struct iovec *iov; mm_segment_t old_fs; - loff_t pos = (task->task_lba * + loff_t pos = (cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size); int ret = 0, i; - iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); + iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); if (!iov) { pr_err("Unable to allocate fd_do_readv iov[]\n"); return -ENOMEM; } - for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + for_each_sg(sgl, sg, sgl_nents, i) { iov[i].iov_len = sg->length; iov[i].iov_base = sg_virt(sg); } old_fs = get_fs(); set_fs(get_ds()); - ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); + ret = vfs_readv(fd, &iov[0], sgl_nents, &pos); set_fs(old_fs); kfree(iov); @@ -305,10 +280,10 @@ static int fd_do_readv(struct se_task *task) * block_device. */ if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { - if (ret < 0 || ret != task->task_size) { + if (ret < 0 || ret != cmd->data_length) { pr_err("vfs_readv() returned %d," " expecting %d for S_ISBLK\n", ret, - (int)task->task_size); + (int)cmd->data_length); return (ret < 0 ? 
ret : -EINVAL); } } else { @@ -322,38 +297,38 @@ static int fd_do_readv(struct se_task *task) return 1; } -static int fd_do_writev(struct se_task *task) +static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents) { - struct fd_request *req = FILE_REQ(task); - struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; + struct se_device *se_dev = cmd->se_dev; struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; - struct scatterlist *sg = task->task_sg; + struct scatterlist *sg; struct iovec *iov; mm_segment_t old_fs; - loff_t pos = (task->task_lba * + loff_t pos = (cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size); int ret, i = 0; - iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); + iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); if (!iov) { pr_err("Unable to allocate fd_do_writev iov[]\n"); return -ENOMEM; } - for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + for_each_sg(sgl, sg, sgl_nents, i) { iov[i].iov_len = sg->length; iov[i].iov_base = sg_virt(sg); } old_fs = get_fs(); set_fs(get_ds()); - ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); + ret = vfs_writev(fd, &iov[0], sgl_nents, &pos); set_fs(old_fs); kfree(iov); - if (ret < 0 || ret != task->task_size) { + if (ret < 0 || ret != cmd->data_length) { pr_err("vfs_writev() returned %d\n", ret); return (ret < 0 ? ret : -EINVAL); } @@ -361,9 +336,8 @@ static int fd_do_writev(struct se_task *task) return 1; } -static void fd_emulate_sync_cache(struct se_task *task) +static void fd_emulate_sync_cache(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; int immed = (cmd->t_task_cdb[1] & 0x2); @@ -375,7 +349,7 @@ static void fd_emulate_sync_cache(struct se_task *task) * for this SYNCHRONIZE_CACHE op */ if (immed) - transport_complete_sync_cache(cmd, 1); + target_complete_cmd(cmd, SAM_STAT_GOOD); /* * Determine if we will be flushing the entire device. @@ -395,33 +369,37 @@ static void fd_emulate_sync_cache(struct se_task *task) if (ret != 0) pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); - if (!immed) - transport_complete_sync_cache(cmd, ret == 0); + if (immed) + return; + + if (ret) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); + } else { + target_complete_cmd(cmd, SAM_STAT_GOOD); + } } -/* - * WRITE Force Unit Access (FUA) emulation on a per struct se_task - * LBA range basis.. 
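Editor's note, not part of the diff: fd_do_readv()/fd_do_writev() above now take the scatterlist directly and map each segment to one struct iovec entry before a single vectored call, treating a short transfer as an error. The same gather pattern in plain POSIX userspace, writing to stdout instead of a backing file:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	/* Three discontiguous buffers stand in for scatterlist segments */
	char a[] = "scatter", b[] = "-", c[] = "gather\n";
	struct iovec iov[3] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
		{ .iov_base = c, .iov_len = strlen(c) },
	};
	ssize_t expected = (ssize_t)(strlen(a) + strlen(b) + strlen(c));

	/* One vectored call covers all segments, as fd_do_writev() does
	 * with vfs_writev(); a short or failed write is an error. */
	ssize_t ret = writev(STDOUT_FILENO, iov, 3);

	return ret == expected ? 0 : 1;
}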
- */ -static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) +static void fd_emulate_write_fua(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; - loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; - loff_t end = start + task->task_size; + loff_t start = cmd->t_task_lba * + dev->se_sub_dev->se_dev_attrib.block_size; + loff_t end = start + cmd->data_length; int ret; pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", - task->task_lba, task->task_size); + cmd->t_task_lba, cmd->data_length); ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); if (ret != 0) pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); } -static int fd_do_task(struct se_task *task) +static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; int ret = 0; @@ -429,10 +407,10 @@ static int fd_do_task(struct se_task *task) * Call vectorized fileio functions to map struct scatterlist * physical memory addresses to struct iovec virtual memory. */ - if (task->task_data_direction == DMA_FROM_DEVICE) { - ret = fd_do_readv(task); + if (data_direction == DMA_FROM_DEVICE) { + ret = fd_do_readv(cmd, sgl, sgl_nents); } else { - ret = fd_do_writev(task); + ret = fd_do_writev(cmd, sgl, sgl_nents); if (ret > 0 && dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && @@ -443,7 +421,7 @@ static int fd_do_task(struct se_task *task) * and return some sense data to let the initiator * know the FUA WRITE cache sync failed..? */ - fd_emulate_write_fua(cmd, task); + fd_emulate_write_fua(cmd); } } @@ -452,24 +430,11 @@ static int fd_do_task(struct se_task *task) cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return ret; } - if (ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (ret) + target_complete_cmd(cmd, SAM_STAT_GOOD); return 0; } -/* fd_free_task(): (Part of se_subsystem_api_t template) - * - * - */ -static void fd_free_task(struct se_task *task) -{ - struct fd_request *req = FILE_REQ(task); - - kfree(req); -} - enum { Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err }; @@ -632,10 +597,8 @@ static struct se_subsystem_api fileio_template = { .allocate_virtdevice = fd_allocate_virtdevice, .create_virtdevice = fd_create_virtdevice, .free_device = fd_free_device, - .alloc_task = fd_alloc_task, - .do_task = fd_do_task, + .execute_cmd = fd_execute_cmd, .do_sync_cache = fd_emulate_sync_cache, - .free_task = fd_free_task, .check_configfs_dev_params = fd_check_configfs_dev_params, .set_configfs_dev_params = fd_set_configfs_dev_params, .show_configfs_dev_params = fd_show_configfs_dev_params, diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 59e6e73106c..fbd59ef7d8b 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -12,10 +12,6 @@ #define RRF_EMULATE_CDB 0x01 #define RRF_GOT_LBA 0x02 -struct fd_request { - struct se_task fd_task; -}; - #define FBDF_HAS_PATH 0x01 #define FBDF_HAS_SIZE 0x02 #define FDBD_USE_BUFFERED_IO 0x04 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 2ec299e8a73..fd47950727b 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -189,26 +189,6 @@ static void iblock_free_device(void *p) kfree(ib_dev); } -static inline struct iblock_req *IBLOCK_REQ(struct se_task 
*task) -{ - return container_of(task, struct iblock_req, ib_task); -} - -static struct se_task * -iblock_alloc_task(unsigned char *cdb) -{ - struct iblock_req *ib_req; - - ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); - if (!ib_req) { - pr_err("Unable to allocate memory for struct iblock_req\n"); - return NULL; - } - - atomic_set(&ib_req->pending, 1); - return &ib_req->ib_task; -} - static unsigned long long iblock_emulate_read_cap_with_block_size( struct se_device *dev, struct block_device *bd, @@ -295,8 +275,16 @@ static void iblock_end_io_flush(struct bio *bio, int err) if (err) pr_err("IBLOCK: cache flush failed: %d\n", err); - if (cmd) - transport_complete_sync_cache(cmd, err == 0); + if (cmd) { + if (err) { + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); + } else { + target_complete_cmd(cmd, SAM_STAT_GOOD); + } + } + bio_put(bio); } @@ -304,9 +292,8 @@ static void iblock_end_io_flush(struct bio *bio, int err) * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must * always flush the whole cache. */ -static void iblock_emulate_sync_cache(struct se_task *task) +static void iblock_emulate_sync_cache(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; int immed = (cmd->t_task_cdb[1] & 0x2); struct bio *bio; @@ -316,7 +303,7 @@ static void iblock_emulate_sync_cache(struct se_task *task) * for this SYNCHRONIZE_CACHE op. */ if (immed) - transport_complete_sync_cache(cmd, 1); + target_complete_cmd(cmd, SAM_STAT_GOOD); bio = bio_alloc(GFP_KERNEL, 0); bio->bi_end_io = iblock_end_io_flush; @@ -335,11 +322,6 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); } -static void iblock_free_task(struct se_task *task) -{ - kfree(IBLOCK_REQ(task)); -} - enum { Opt_udev_path, Opt_force, Opt_err }; @@ -448,19 +430,35 @@ static ssize_t iblock_show_configfs_dev_params( return bl; } +static void iblock_complete_cmd(struct se_cmd *cmd) +{ + struct iblock_req *ibr = cmd->priv; + u8 status; + + if (!atomic_dec_and_test(&ibr->pending)) + return; + + if (atomic_read(&ibr->ib_bio_err_cnt)) + status = SAM_STAT_CHECK_CONDITION; + else + status = SAM_STAT_GOOD; + + target_complete_cmd(cmd, status); + kfree(ibr); +} + static void iblock_bio_destructor(struct bio *bio) { - struct se_task *task = bio->bi_private; - struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; + struct se_cmd *cmd = bio->bi_private; + struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; bio_free(bio, ib_dev->ibd_bio_set); } static struct bio * -iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) +iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) { - struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; - struct iblock_req *ib_req = IBLOCK_REQ(task); + struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; struct bio *bio; /* @@ -476,19 +474,11 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) return NULL; } - pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:" - " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set); - pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size); - bio->bi_bdev = ib_dev->ibd_bd; - bio->bi_private = task; + bio->bi_private = cmd; bio->bi_destructor = iblock_bio_destructor; bio->bi_end_io = &iblock_bio_done; bio->bi_sector = lba; - atomic_inc(&ib_req->pending); - - pr_debug("Set 
bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); - pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending)); return bio; } @@ -503,20 +493,21 @@ static void iblock_submit_bios(struct bio_list *list, int rw) blk_finish_plug(&plug); } -static int iblock_do_task(struct se_task *task) +static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; - struct iblock_req *ibr = IBLOCK_REQ(task); + struct iblock_req *ibr; struct bio *bio; struct bio_list list; struct scatterlist *sg; - u32 i, sg_num = task->task_sg_nents; + u32 sg_num = sgl_nents; sector_t block_lba; unsigned bio_cnt; int rw; + int i; - if (task->task_data_direction == DMA_TO_DEVICE) { + if (data_direction == DMA_TO_DEVICE) { /* * Force data to disk if we pretend to not have a volatile * write cache, or the initiator set the Force Unit Access bit. @@ -532,17 +523,17 @@ static int iblock_do_task(struct se_task *task) } /* - * Do starting conversion up from non 512-byte blocksize with - * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. + * Convert the blocksize advertised to the initiator to the 512 byte + * units unconditionally used by the Linux block layer. */ if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) - block_lba = (task->task_lba << 3); + block_lba = (cmd->t_task_lba << 3); else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) - block_lba = (task->task_lba << 2); + block_lba = (cmd->t_task_lba << 2); else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) - block_lba = (task->task_lba << 1); + block_lba = (cmd->t_task_lba << 1); else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) - block_lba = task->task_lba; + block_lba = cmd->t_task_lba; else { pr_err("Unsupported SCSI -> BLOCK LBA conversion:" " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); @@ -550,17 +541,22 @@ static int iblock_do_task(struct se_task *task) return -ENOSYS; } - bio = iblock_get_bio(task, block_lba, sg_num); - if (!bio) { - cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return -ENOMEM; - } + ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); + if (!ibr) + goto fail; + cmd->priv = ibr; + + bio = iblock_get_bio(cmd, block_lba, sgl_nents); + if (!bio) + goto fail_free_ibr; bio_list_init(&list); bio_list_add(&list, bio); + + atomic_set(&ibr->pending, 2); bio_cnt = 1; - for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + for_each_sg(sgl, sg, sgl_nents, i) { /* * XXX: if the length the device accepts is shorter than the * length of the S/G list entry this will cause and @@ -573,9 +569,11 @@ static int iblock_do_task(struct se_task *task) bio_cnt = 0; } - bio = iblock_get_bio(task, block_lba, sg_num); + bio = iblock_get_bio(cmd, block_lba, sg_num); if (!bio) - goto fail; + goto fail_put_bios; + + atomic_inc(&ibr->pending); bio_list_add(&list, bio); bio_cnt++; } @@ -586,17 +584,16 @@ static int iblock_do_task(struct se_task *task) } iblock_submit_bios(&list, rw); - - if (atomic_dec_and_test(&ibr->pending)) { - transport_complete_task(task, - !atomic_read(&ibr->ib_bio_err_cnt)); - } + iblock_complete_cmd(cmd); return 0; -fail: +fail_put_bios: while ((bio = bio_list_pop(&list))) bio_put(bio); +fail_free_ibr: + kfree(ibr); cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +fail: return -ENOMEM; } @@ -621,8 +618,8 @@ static sector_t iblock_get_blocks(struct se_device *dev) static void 
iblock_bio_done(struct bio *bio, int err) { - struct se_task *task = bio->bi_private; - struct iblock_req *ibr = IBLOCK_REQ(task); + struct se_cmd *cmd = bio->bi_private; + struct iblock_req *ibr = cmd->priv; /* * Set -EIO if !BIO_UPTODATE and the passed is still err=0 @@ -642,14 +639,7 @@ static void iblock_bio_done(struct bio *bio, int err) bio_put(bio); - if (!atomic_dec_and_test(&ibr->pending)) - return; - - pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", - task, bio, task->task_lba, - (unsigned long long)bio->bi_sector, err); - - transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt)); + iblock_complete_cmd(cmd); } static struct se_subsystem_api iblock_template = { @@ -663,11 +653,9 @@ static struct se_subsystem_api iblock_template = { .allocate_virtdevice = iblock_allocate_virtdevice, .create_virtdevice = iblock_create_virtdevice, .free_device = iblock_free_device, - .alloc_task = iblock_alloc_task, - .do_task = iblock_do_task, + .execute_cmd = iblock_execute_cmd, .do_discard = iblock_do_discard, .do_sync_cache = iblock_emulate_sync_cache, - .free_task = iblock_free_task, .check_configfs_dev_params = iblock_check_configfs_dev_params, .set_configfs_dev_params = iblock_set_configfs_dev_params, .show_configfs_dev_params = iblock_show_configfs_dev_params, diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index e929370b6fd..66cf7b9e205 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -7,7 +7,6 @@ #define IBLOCK_LBA_SHIFT 9 struct iblock_req { - struct se_task ib_task; atomic_t pending; atomic_t ib_bio_err_cnt; } ____cacheline_aligned; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 21c05638f15..165e8242968 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -5,15 +5,15 @@ extern struct t10_alua_lu_gp *default_lu_gp; /* target_core_cdb.c */ -int target_emulate_inquiry(struct se_task *task); -int target_emulate_readcapacity(struct se_task *task); -int target_emulate_readcapacity_16(struct se_task *task); -int target_emulate_modesense(struct se_task *task); -int target_emulate_request_sense(struct se_task *task); -int target_emulate_unmap(struct se_task *task); -int target_emulate_write_same(struct se_task *task); -int target_emulate_synchronize_cache(struct se_task *task); -int target_emulate_noop(struct se_task *task); +int target_emulate_inquiry(struct se_cmd *cmd); +int target_emulate_readcapacity(struct se_cmd *cmd); +int target_emulate_readcapacity_16(struct se_cmd *cmd); +int target_emulate_modesense(struct se_cmd *cmd); +int target_emulate_request_sense(struct se_cmd *cmd); +int target_emulate_unmap(struct se_cmd *cmd); +int target_emulate_write_same(struct se_cmd *cmd); +int target_emulate_synchronize_cache(struct se_cmd *cmd); +int target_emulate_noop(struct se_cmd *cmd); /* target_core_device.c */ struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); @@ -28,7 +28,7 @@ int core_dev_export(struct se_device *, struct se_portal_group *, struct se_lun *); void core_dev_unexport(struct se_device *, struct se_portal_group *, struct se_lun *); -int target_report_luns(struct se_task *); +int target_report_luns(struct se_cmd *); void se_release_device_for_hba(struct se_device *); void se_release_vpd_for_dev(struct se_device *); int se_free_virtual_device(struct se_device *, struct se_hba *); @@ -104,8 +104,7 @@ void release_se_kmem_caches(void); u32 
scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); void transport_cmd_finish_abort(struct se_cmd *, int); -void __transport_remove_task_from_execute_queue(struct se_task *, - struct se_device *); +void __target_remove_from_execute_list(struct se_cmd *); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, @@ -114,7 +113,7 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); -bool target_stop_task(struct se_task *task, unsigned long *flags); +bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); int transport_clear_lun_from_sessions(struct se_lun *); void transport_send_task_abort(struct se_cmd *); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index c3148b10b4b..85564998500 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -193,9 +193,8 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd) return 0; } -int target_scsi2_reservation_release(struct se_task *task) +int target_scsi2_reservation_release(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; @@ -237,16 +236,13 @@ int target_scsi2_reservation_release(struct se_task *task) out_unlock: spin_unlock(&dev->dev_reservation_lock); out: - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } -int target_scsi2_reservation_reserve(struct se_task *task) +int target_scsi2_reservation_reserve(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; @@ -307,10 +303,8 @@ int target_scsi2_reservation_reserve(struct se_task *task) out_unlock: spin_unlock(&dev->dev_reservation_lock); out: - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } @@ -503,11 +497,10 @@ static int core_scsi3_pr_seq_non_holder( * statement. */ if (!ret && !other_cdb) { -#if 0 pr_debug("Allowing explict CDB: 0x%02x for %s" " reservation holder\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); -#endif + return ret; } /* @@ -535,14 +528,14 @@ static int core_scsi3_pr_seq_non_holder( * as we expect registered non-reservation holding * nexuses to issue CDBs. */ -#if 0 + if (!registered_nexus) { pr_debug("Allowing implict CDB: 0x%02x" " for %s reservation on unregistered" " nexus\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); } -#endif + return 0; } } else if ((reg_only) || (all_reg)) { @@ -551,11 +544,11 @@ static int core_scsi3_pr_seq_non_holder( * For PR_*_REG_ONLY and PR_*_ALL_REG reservations, * allow commands from registered nexuses. 
*/ -#if 0 + pr_debug("Allowing implict CDB: 0x%02x for %s" " reservation\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); -#endif + return 0; } } @@ -1669,12 +1662,12 @@ static int core_scsi3_decode_spec_i_port( ret = -EINVAL; goto out; } -#if 0 + pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" " tid_len: %d for %s + %s\n", dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, tpdl, tid_len, i_str, iport_ptr); -#endif + if (tid_len > tpdl) { pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:" " %u for Transport ID: %s\n", tid_len, ptr); @@ -1717,12 +1710,12 @@ static int core_scsi3_decode_spec_i_port( ret = -EINVAL; goto out; } -#if 0 + pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" " dest_se_deve mapped_lun: %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, dest_se_deve->mapped_lun); -#endif + /* * Skip any TransportIDs that already have a registration for * this target port. @@ -3476,10 +3469,10 @@ static int core_scsi3_emulate_pro_register_and_move( buf = transport_kmap_data_sg(cmd); proto_ident = (buf[24] & 0x0f); -#if 0 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" " 0x%02x\n", proto_ident); -#endif + if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" " proto_ident: 0x%02x does not match ident: 0x%02x" @@ -3578,11 +3571,11 @@ after_iport_check: ret = -EINVAL; goto out; } -#if 0 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" " %s from TransportID\n", dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname); -#endif + /* * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET * PORT IDENTIFIER. @@ -3606,12 +3599,12 @@ after_iport_check: ret = -EINVAL; goto out; } -#if 0 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" " ACL for dest_se_deve->mapped_lun: %u\n", dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, dest_se_deve->mapped_lun); -#endif + /* * A persistent reservation needs to already existing in order to * successfully complete the REGISTER_AND_MOVE service action.. 
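The RESERVE/RELEASE hunks above, and the PERSISTENT RESERVE IN/OUT hunks that follow, all repeat one mechanical conversion: emulation handlers now take the struct se_cmd directly instead of a struct se_task, and they report status through a single target_complete_cmd() call rather than poking task_scsi_status and calling transport_complete_task(). Below is a minimal user-space sketch of that new contract; struct cmd, complete_cmd() and reservation_release() are simplified stand-ins invented for illustration, not the kernel's types.

/* Build: cc -std=c11 -o pr_complete pr_complete.c */
#include <stdio.h>

enum { GOOD = 0 };

struct cmd {
	unsigned char cdb[16];
	int scsi_status;
	int completed;
};

/* Stand-in for target_complete_cmd(): record status, complete once. */
static void complete_cmd(struct cmd *cmd, int scsi_status)
{
	cmd->scsi_status = scsi_status;
	cmd->completed = 1;
}

/* New-style handler: takes the command, not a per-task wrapper. */
static int reservation_release(struct cmd *cmd)
{
	int ret = 0;	/* reservation bookkeeping would happen here */

	if (!ret)
		complete_cmd(cmd, GOOD);
	return ret;
}

int main(void)
{
	struct cmd c = { .cdb = { 0x17 } };	/* RELEASE(6) */

	reservation_release(&c);
	printf("status=%d completed=%d\n", c.scsi_status, c.completed);
	return 0;
}

The shape is the point: a handler either fails with a negative return, or completes the command exactly once with a SAM status.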
@@ -3802,9 +3795,8 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) /* * See spc4r17 section 6.14 Table 170 */ -int target_scsi3_emulate_pr_out(struct se_task *task) +int target_scsi3_emulate_pr_out(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; unsigned char *cdb = &cmd->t_task_cdb[0]; unsigned char *buf; u64 res_key, sa_res_key; @@ -3944,10 +3936,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) } out: - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } @@ -4302,9 +4292,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) return 0; } -int target_scsi3_emulate_pr_in(struct se_task *task) +int target_scsi3_emulate_pr_in(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; int ret; /* @@ -4345,10 +4334,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task) break; } - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index 7a233feb7e9..af6c460d886 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache; extern int core_pr_dump_initiator_port(struct t10_pr_registration *, char *, u32); -extern int target_scsi2_reservation_release(struct se_task *task); -extern int target_scsi2_reservation_reserve(struct se_task *task); +extern int target_scsi2_reservation_release(struct se_cmd *); +extern int target_scsi2_reservation_reserve(struct se_cmd *); extern int core_scsi3_alloc_aptpl_registration( struct t10_reservation *, u64, unsigned char *, unsigned char *, u32, @@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, extern void core_scsi3_free_all_registrations(struct se_device *); extern unsigned char *core_scsi3_pr_dump_type(int); -extern int target_scsi3_emulate_pr_in(struct se_task *task); -extern int target_scsi3_emulate_pr_out(struct se_task *task); +extern int target_scsi3_emulate_pr_in(struct se_cmd *); +extern int target_scsi3_emulate_pr_out(struct se_cmd *); extern int core_setup_reservations(struct se_device *, int); #endif /* TARGET_CORE_PR_H */ diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 94c905fcbce..4ce2cf642fc 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -663,22 +663,12 @@ static void pscsi_free_device(void *p) kfree(pdv); } -static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) +static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg) { - return container_of(task, struct pscsi_plugin_task, pscsi_task); -} - - -/* pscsi_transport_complete(): - * - * - */ -static int pscsi_transport_complete(struct se_task *task) -{ - struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; + struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; struct scsi_device *sd = pdv->pdv_sd; int result; - struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_plugin_task *pt = cmd->priv; unsigned char *cdb = &pt->pscsi_cdb[0]; result = pt->pscsi_result; @@ -688,12 +678,11 @@ static int pscsi_transport_complete(struct se_task *task) */ if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && (status_byte(result) << 1) == SAM_STAT_GOOD) { - if (!task->task_se_cmd->se_deve) + if (!cmd->se_deve) 
goto after_mode_sense; - if (task->task_se_cmd->se_deve->lun_flags & - TRANSPORT_LUNFLAGS_READ_ONLY) { - unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd); + if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { + unsigned char *buf = transport_kmap_data_sg(cmd); if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -703,7 +692,7 @@ static int pscsi_transport_complete(struct se_task *task) buf[2] |= 0x80; } - transport_kunmap_data_sg(task->task_se_cmd); + transport_kunmap_data_sg(cmd); } } after_mode_sense: @@ -722,7 +711,6 @@ after_mode_sense: if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && (status_byte(result) << 1) == SAM_STAT_GOOD) { unsigned char *buf; - struct scatterlist *sg = task->task_sg; u16 bdl; u32 blocksize; @@ -757,35 +745,6 @@ after_mode_select: return 0; } -static struct se_task * -pscsi_alloc_task(unsigned char *cdb) -{ - struct pscsi_plugin_task *pt; - - /* - * Dynamically alloc cdb space, since it may be larger than - * TCM_MAX_COMMAND_SIZE - */ - pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL); - if (!pt) { - pr_err("Unable to allocate struct pscsi_plugin_task\n"); - return NULL; - } - - return &pt->pscsi_task; -} - -static void pscsi_free_task(struct se_task *task) -{ - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - - /* - * We do not release the bio(s) here associated with this task, as - * this is handled by bio_put() and pscsi_bi_endio(). - */ - kfree(pt); -} - enum { Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, Opt_scsi_lun_id, Opt_err @@ -958,26 +917,25 @@ static inline struct bio *pscsi_get_bio(int sg_num) return bio; } -static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, +static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction, struct bio **hbio) { - struct se_cmd *cmd = task->task_se_cmd; - struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; - u32 task_sg_num = task->task_sg_nents; + struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; struct bio *bio = NULL, *tbio = NULL; struct page *page; struct scatterlist *sg; - u32 data_len = task->task_size, i, len, bytes, off; - int nr_pages = (task->task_size + task_sg[0].offset + + u32 data_len = cmd->data_length, i, len, bytes, off; + int nr_pages = (cmd->data_length + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; int nr_vecs = 0, rc; - int rw = (task->task_data_direction == DMA_TO_DEVICE); + int rw = (data_direction == DMA_TO_DEVICE); *hbio = NULL; pr_debug("PSCSI: nr_pages: %d\n", nr_pages); - for_each_sg(task_sg, sg, task_sg_num, i) { + for_each_sg(sgl, sg, sgl_nents, i) { page = sg_page(sg); off = sg->offset; len = sg->length; @@ -1009,7 +967,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, * Set *hbio pointer to handle the case: * nr_pages > BIO_MAX_PAGES, where additional * bios need to be added to complete a given - * struct se_task + * command. 
*/ if (!*hbio) *hbio = tbio = bio; @@ -1049,7 +1007,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, } } - return task->task_sg_nents; + return sgl_nents; fail: while (*hbio) { bio = *hbio; @@ -1061,52 +1019,61 @@ fail: return -ENOMEM; } -static int pscsi_do_task(struct se_task *task) +static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct se_cmd *cmd = task->task_se_cmd; - struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; - struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; + struct pscsi_plugin_task *pt; struct request *req; struct bio *hbio; int ret; - target_get_task_cdb(task, pt->pscsi_cdb); + /* + * Dynamically alloc cdb space, since it may be larger than + * TCM_MAX_COMMAND_SIZE + */ + pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL); + if (!pt) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; + } + cmd->priv = pt; + + memcpy(pt->pscsi_cdb, cmd->t_task_cdb, + scsi_command_size(cmd->t_task_cdb)); - if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { + if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { req = blk_get_request(pdv->pdv_sd->request_queue, - (task->task_data_direction == DMA_TO_DEVICE), + (data_direction == DMA_TO_DEVICE), GFP_KERNEL); if (!req || IS_ERR(req)) { pr_err("PSCSI: blk_get_request() failed: %ld\n", req ? IS_ERR(req) : -ENOMEM); cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return -ENODEV; + goto fail; } } else { - BUG_ON(!task->task_size); + BUG_ON(!cmd->data_length); - /* - * Setup the main struct request for the task->task_sg[] payload - */ - ret = pscsi_map_sg(task, task->task_sg, &hbio); + ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio); if (ret < 0) { cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return ret; + goto fail; } req = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL); if (IS_ERR(req)) { pr_err("pSCSI: blk_make_request() failed\n"); - goto fail; + goto fail_free_bio; } } req->cmd_type = REQ_TYPE_BLOCK_PC; req->end_io = pscsi_req_done; - req->end_io_data = task; + req->end_io_data = cmd; req->cmd_len = scsi_command_size(pt->pscsi_cdb); req->cmd = &pt->pscsi_cdb[0]; req->sense = &pt->pscsi_sense[0]; @@ -1118,12 +1085,12 @@ static int pscsi_do_task(struct se_task *task) req->retries = PS_RETRY; blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, - (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), + (cmd->sam_task_attr == MSG_HEAD_TAG), pscsi_req_done); return 0; -fail: +fail_free_bio: while (hbio) { struct bio *bio = hbio; hbio = hbio->bi_next; @@ -1131,16 +1098,14 @@ fail: bio_endio(bio, 0); /* XXX: should be error */ } cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +fail: + kfree(pt); return -ENOMEM; } -/* pscsi_get_sense_buffer(): - * - * - */ -static unsigned char *pscsi_get_sense_buffer(struct se_task *task) +static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd) { - struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_plugin_task *pt = cmd->priv; return pt->pscsi_sense; } @@ -1180,48 +1145,36 @@ static sector_t pscsi_get_blocks(struct se_device *dev) return 0; } -/* pscsi_handle_SAM_STATUS_failures(): - * - * - */ -static inline void pscsi_process_SAM_status( - struct se_task *task, - struct pscsi_plugin_task *pt) +static void pscsi_req_done(struct request *req, int 
uptodate) { - task->task_scsi_status = status_byte(pt->pscsi_result); - if (task->task_scsi_status) { - task->task_scsi_status <<= 1; - pr_debug("PSCSI Status Byte exception at task: %p CDB:" - " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], + struct se_cmd *cmd = req->end_io_data; + struct pscsi_plugin_task *pt = cmd->priv; + + pt->pscsi_result = req->errors; + pt->pscsi_resid = req->resid_len; + + cmd->scsi_status = status_byte(pt->pscsi_result) << 1; + if (cmd->scsi_status) { + pr_debug("PSCSI Status Byte exception at cmd: %p CDB:" + " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], pt->pscsi_result); } switch (host_byte(pt->pscsi_result)) { case DID_OK: - transport_complete_task(task, (!task->task_scsi_status)); + target_complete_cmd(cmd, cmd->scsi_status); break; default: - pr_debug("PSCSI Host Byte exception at task: %p CDB:" - " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], + pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" + " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], pt->pscsi_result); - task->task_scsi_status = SAM_STAT_CHECK_CONDITION; - task->task_se_cmd->scsi_sense_reason = - TCM_UNSUPPORTED_SCSI_OPCODE; - transport_complete_task(task, 0); + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); break; } -} -static void pscsi_req_done(struct request *req, int uptodate) -{ - struct se_task *task = req->end_io_data; - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - - pt->pscsi_result = req->errors; - pt->pscsi_resid = req->resid_len; - - pscsi_process_SAM_status(task, pt); __blk_put_request(req->q, req); + kfree(pt); } static struct se_subsystem_api pscsi_template = { @@ -1235,9 +1188,7 @@ static struct se_subsystem_api pscsi_template = { .create_virtdevice = pscsi_create_virtdevice, .free_device = pscsi_free_device, .transport_complete = pscsi_transport_complete, - .alloc_task = pscsi_alloc_task, - .do_task = pscsi_do_task, - .free_task = pscsi_free_task, + .execute_cmd = pscsi_execute_cmd, .check_configfs_dev_params = pscsi_check_configfs_dev_params, .set_configfs_dev_params = pscsi_set_configfs_dev_params, .show_configfs_dev_params = pscsi_show_configfs_dev_params, diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index 43f1c419e8e..bc1e5e11eca 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -22,7 +22,6 @@ #include <linux/kobject.h> struct pscsi_plugin_task { - struct se_task pscsi_task; unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; int pscsi_direction; int pscsi_result; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 8b68f7b8263..d0ceb873c0e 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -64,9 +64,6 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); - pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" - " MaxSectors: %u\n", hba->hba_id, - rd_host->rd_host_id, RD_MAX_SECTORS); return 0; } @@ -199,10 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) return 0; } -static void *rd_allocate_virtdevice( - struct se_hba *hba, - const char *name, - int rd_direct) +static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name) { struct rd_dev *rd_dev; struct rd_host *rd_host = hba->hba_ptr; @@ -214,25 +208,12 @@ static void *rd_allocate_virtdevice( } 
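A few hunks below, rd_execute_cmd() absorbs the LBA-to-page arithmetic that used to live in rd_MEMCPY_do_task(): the byte address of the starting LBA is split with do_div() into a backing-page index and an offset within that page. Here is a user-space sketch of just that arithmetic, with plain 64-bit division standing in for the kernel's do_div() helper; rd_map() is an invented name.

/* Build: cc -std=c11 -o rdmap rdmap.c */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

static void rd_map(uint64_t lba, uint32_t block_size,
		   uint32_t *rd_page, uint32_t *rd_offset)
{
	uint64_t tmp = lba * block_size;	/* byte address of the LBA */

	*rd_offset = (uint32_t)(tmp % PAGE_SIZE);	/* do_div() remainder */
	*rd_page   = (uint32_t)(tmp / PAGE_SIZE);	/* do_div() quotient */
}

int main(void)
{
	uint32_t page, offset;

	rd_map(9, 512, &page, &offset);		/* byte 4608 */
	printf("page %u offset %u\n", page, offset);	/* page 1, offset 512 */
	return 0;
}

With a 512-byte block size and 4 KiB pages, LBA 9 lands at page 1, offset 512, which is exactly what the rd_get_sg_table() lookup that follows consumes.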
rd_dev->rd_host = rd_host; - rd_dev->rd_direct = rd_direct; return rd_dev; } -static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) -{ - return rd_allocate_virtdevice(hba, name, 0); -} - -/* rd_create_virtdevice(): - * - * - */ -static struct se_device *rd_create_virtdevice( - struct se_hba *hba, - struct se_subsystem_dev *se_dev, - void *p, - int rd_direct) +static struct se_device *rd_create_virtdevice(struct se_hba *hba, + struct se_subsystem_dev *se_dev, void *p) { struct se_device *dev; struct se_dev_limits dev_limits; @@ -247,13 +228,12 @@ static struct se_device *rd_create_virtdevice( if (ret < 0) goto fail; - snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); - snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION : - RD_MCP_VERSION); + snprintf(prod, 16, "RAMDISK-MCP"); + snprintf(rev, 4, "%s", RD_MCP_VERSION); dev_limits.limits.logical_block_size = RD_BLOCKSIZE; - dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS; - dev_limits.limits.max_sectors = RD_MAX_SECTORS; + dev_limits.limits.max_hw_sectors = UINT_MAX; + dev_limits.limits.max_sectors = UINT_MAX; dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; @@ -264,12 +244,10 @@ static struct se_device *rd_create_virtdevice( goto fail; rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; - rd_dev->rd_queue_depth = dev->queue_depth; - pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" + pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of" " %u pages in %u tables, %lu total bytes\n", - rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : - "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, + rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count, rd_dev->sg_table_count, (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); @@ -280,18 +258,6 @@ fail: return ERR_PTR(ret); } -static struct se_device *rd_MEMCPY_create_virtdevice( - struct se_hba *hba, - struct se_subsystem_dev *se_dev, - void *p) -{ - return rd_create_virtdevice(hba, se_dev, p, 0); -} - -/* rd_free_device(): (Part of se_subsystem_api_t template) - * - * - */ static void rd_free_device(void *p) { struct rd_dev *rd_dev = p; @@ -300,29 +266,6 @@ static void rd_free_device(void *p) kfree(rd_dev); } -static inline struct rd_request *RD_REQ(struct se_task *task) -{ - return container_of(task, struct rd_request, rd_task); -} - -static struct se_task * -rd_alloc_task(unsigned char *cdb) -{ - struct rd_request *rd_req; - - rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); - if (!rd_req) { - pr_err("Unable to allocate struct rd_request\n"); - return NULL; - } - - return &rd_req->rd_task; -} - -/* rd_get_sg_table(): - * - * - */ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) { u32 i; @@ -341,31 +284,41 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return NULL; } -static int rd_MEMCPY(struct rd_request *req, u32 read_rd) +static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; + struct se_device *se_dev = cmd->se_dev; + struct rd_dev *dev = se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *rd_sg; struct sg_mapping_iter m; - u32 rd_offset = req->rd_offset; + u32 rd_offset; + u32 rd_size; + u32 rd_page; u32 src_len; + u64 tmp; + + tmp = cmd->t_task_lba * 
se_dev->se_sub_dev->se_dev_attrib.block_size; + rd_offset = do_div(tmp, PAGE_SIZE); + rd_page = tmp; + rd_size = cmd->data_length; - table = rd_get_sg_table(dev, req->rd_page); + table = rd_get_sg_table(dev, rd_page); if (!table) return -EINVAL; - rd_sg = &table->sg_table[req->rd_page - table->page_start_offset]; + rd_sg = &table->sg_table[rd_page - table->page_start_offset]; pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", - dev->rd_dev_id, read_rd ? "Read" : "Write", - task->task_lba, req->rd_size, req->rd_page, - rd_offset); + dev->rd_dev_id, + data_direction == DMA_FROM_DEVICE ? "Read" : "Write", + cmd->t_task_lba, rd_size, rd_page, rd_offset); src_len = PAGE_SIZE - rd_offset; - sg_miter_start(&m, task->task_sg, task->task_sg_nents, - read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG); - while (req->rd_size) { + sg_miter_start(&m, sgl, sgl_nents, + data_direction == DMA_FROM_DEVICE ? + SG_MITER_TO_SG : SG_MITER_FROM_SG); + while (rd_size) { u32 len; void *rd_addr; @@ -375,13 +328,13 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd) rd_addr = sg_virt(rd_sg) + rd_offset; - if (read_rd) + if (data_direction == DMA_FROM_DEVICE) memcpy(m.addr, rd_addr, len); else memcpy(rd_addr, m.addr, len); - req->rd_size -= len; - if (!req->rd_size) + rd_size -= len; + if (!rd_size) continue; src_len -= len; @@ -391,15 +344,15 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd) } /* rd page completed, next one please */ - req->rd_page++; + rd_page++; rd_offset = 0; src_len = PAGE_SIZE; - if (req->rd_page <= table->page_end_offset) { + if (rd_page <= table->page_end_offset) { rd_sg++; continue; } - table = rd_get_sg_table(dev, req->rd_page); + table = rd_get_sg_table(dev, rd_page); if (!table) { sg_miter_stop(&m); return -EINVAL; @@ -409,43 +362,11 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd) rd_sg = table->sg_table; } sg_miter_stop(&m); - return 0; -} -/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template) - * - * - */ -static int rd_MEMCPY_do_task(struct se_task *task) -{ - struct se_device *dev = task->task_se_cmd->se_dev; - struct rd_request *req = RD_REQ(task); - u64 tmp; - int ret; - - tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; - req->rd_offset = do_div(tmp, PAGE_SIZE); - req->rd_page = tmp; - req->rd_size = task->task_size; - - ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE); - if (ret != 0) - return ret; - - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, SAM_STAT_GOOD); return 0; } -/* rd_free_task(): (Part of se_subsystem_api_t template) - * - * - */ -static void rd_free_task(struct se_task *task) -{ - kfree(RD_REQ(task)); -} - enum { Opt_rd_pages, Opt_err }; @@ -512,9 +433,8 @@ static ssize_t rd_show_configfs_dev_params( char *b) { struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; - ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n", - rd_dev->rd_dev_id, (rd_dev->rd_direct) ? 
- "rd_direct" : "rd_mcp"); + ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", + rd_dev->rd_dev_id); bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" " SG_table_count: %u\n", rd_dev->rd_page_count, PAGE_SIZE, rd_dev->sg_table_count); @@ -545,12 +465,10 @@ static struct se_subsystem_api rd_mcp_template = { .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, .attach_hba = rd_attach_hba, .detach_hba = rd_detach_hba, - .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice, - .create_virtdevice = rd_MEMCPY_create_virtdevice, + .allocate_virtdevice = rd_allocate_virtdevice, + .create_virtdevice = rd_create_virtdevice, .free_device = rd_free_device, - .alloc_task = rd_alloc_task, - .do_task = rd_MEMCPY_do_task, - .free_task = rd_free_task, + .execute_cmd = rd_execute_cmd, .check_configfs_dev_params = rd_check_configfs_dev_params, .set_configfs_dev_params = rd_set_configfs_dev_params, .show_configfs_dev_params = rd_show_configfs_dev_params, diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 784e56a0410..21458125fe5 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -2,7 +2,6 @@ #define TARGET_CORE_RD_H #define RD_HBA_VERSION "v4.0" -#define RD_DR_VERSION "4.0" #define RD_MCP_VERSION "4.0" /* Largest piece of memory kmalloc can allocate */ @@ -10,28 +9,11 @@ #define RD_DEVICE_QUEUE_DEPTH 32 #define RD_MAX_DEVICE_QUEUE_DEPTH 128 #define RD_BLOCKSIZE 512 -#define RD_MAX_SECTORS 1024 /* Used in target_core_init_configfs() for virtual LUN 0 access */ int __init rd_module_init(void); void rd_module_exit(void); -#define RRF_EMULATE_CDB 0x01 -#define RRF_GOT_LBA 0x02 - -struct rd_request { - struct se_task rd_task; - - /* Offset from start of page */ - u32 rd_offset; - /* Starting page in Ramdisk for request */ - u32 rd_page; - /* Total number of pages needed for request */ - u32 rd_page_count; - /* Scatterlist count */ - u32 rd_size; -} ____cacheline_aligned; - struct rd_dev_sg_table { u32 page_start_offset; u32 page_end_offset; @@ -42,7 +24,6 @@ struct rd_dev_sg_table { #define RDF_HAS_PAGE_COUNT 0x01 struct rd_dev { - int rd_direct; u32 rd_flags; /* Unique Ramdisk Device ID in Ramdisk HBA */ u32 rd_dev_id; @@ -50,7 +31,6 @@ struct rd_dev { u32 rd_page_count; /* Number of SG tables in sg_table_array */ u32 sg_table_count; - u32 rd_queue_depth; /* Array of rd_dev_sg_table_t containing scatterlists */ struct rd_dev_sg_table *sg_table_array; /* Ramdisk HBA device is connected to */ diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index f015839aef8..84caf1bed9a 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -244,7 +244,7 @@ static void core_tmr_drain_tmr_list( } } -static void core_tmr_drain_task_list( +static void core_tmr_drain_state_list( struct se_device *dev, struct se_cmd *prout_cmd, struct se_node_acl *tmr_nacl, @@ -252,12 +252,13 @@ static void core_tmr_drain_task_list( struct list_head *preempt_and_abort_list) { LIST_HEAD(drain_task_list); - struct se_cmd *cmd; - struct se_task *task, *task_tmp; + struct se_cmd *cmd, *next; unsigned long flags; int fe_count; + /* - * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. + * Complete outstanding commands with TASK_ABORTED SAM status. + * * This is following sam4r17, section 5.6 Aborting commands, Table 38 * for TMR LUN_RESET: * @@ -278,56 +279,43 @@ static void core_tmr_drain_task_list( * in the Control Mode Page. 
*/ spin_lock_irqsave(&dev->execute_task_lock, flags); - list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, - t_state_list) { - if (!task->task_se_cmd) { - pr_err("task->task_se_cmd is NULL!\n"); - continue; - } - cmd = task->task_se_cmd; - + list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) { /* * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. */ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) continue; + /* * Not aborting PROUT PREEMPT_AND_ABORT CDB.. */ if (prout_cmd == cmd) continue; - list_move_tail(&task->t_state_list, &drain_task_list); - task->t_state_active = false; - /* - * Remove from task execute list before processing drain_task_list - */ - if (!list_empty(&task->t_execute_list)) - __transport_remove_task_from_execute_queue(task, dev); + list_move_tail(&cmd->state_list, &drain_task_list); + cmd->state_active = false; + + if (!list_empty(&cmd->execute_list)) + __target_remove_from_execute_list(cmd); } spin_unlock_irqrestore(&dev->execute_task_lock, flags); while (!list_empty(&drain_task_list)) { - task = list_entry(drain_task_list.next, struct se_task, t_state_list); - list_del(&task->t_state_list); - cmd = task->task_se_cmd; + cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); + list_del(&cmd->state_list); - pr_debug("LUN_RESET: %s cmd: %p task: %p" + pr_debug("LUN_RESET: %s cmd: %p" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" "cdb: 0x%02x\n", - (preempt_and_abort_list) ? "Preempt" : "", cmd, task, + (preempt_and_abort_list) ? "Preempt" : "", cmd, cmd->se_tfo->get_task_tag(cmd), 0, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->t_task_cdb[0]); pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" - " t_task_cdbs: %d t_task_cdbs_left: %d" - " t_task_cdbs_sent: %d -- CMD_T_ACTIVE: %d" + " -- CMD_T_ACTIVE: %d" " CMD_T_STOP: %d CMD_T_SENT: %d\n", cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, - cmd->t_task_list_num, - atomic_read(&cmd->t_task_cdbs_left), - atomic_read(&cmd->t_task_cdbs_sent), (cmd->transport_state & CMD_T_ACTIVE) != 0, (cmd->transport_state & CMD_T_STOP) != 0, (cmd->transport_state & CMD_T_SENT) != 0); @@ -343,20 +331,13 @@ static void core_tmr_drain_task_list( cancel_work_sync(&cmd->work); spin_lock_irqsave(&cmd->t_state_lock, flags); - target_stop_task(task, &flags); + target_stop_cmd(cmd, &flags); - if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" - " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&cmd->t_task_cdbs_ex_left)); - continue; - } fe_count = atomic_read(&cmd->t_fe_count); if (!(cmd->transport_state & CMD_T_ACTIVE)) { pr_debug("LUN_RESET: got CMD_T_ACTIVE for" - " task: %p, t_fe_count: %d dev: %p\n", task, + " cdb: %p, t_fe_count: %d dev: %p\n", cmd, fe_count, dev); cmd->transport_state |= CMD_T_ABORTED; spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -364,8 +345,8 @@ static void core_tmr_drain_task_list( core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); continue; } - pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for task: %p," - " t_fe_count: %d dev: %p\n", task, fe_count, dev); + pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p," + " t_fe_count: %d dev: %p\n", cmd, fe_count, dev); cmd->transport_state |= CMD_T_ABORTED; spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -384,13 +365,11 @@ static void core_tmr_drain_cmd_list( struct se_queue_obj *qobj = &dev->dev_queue_obj; struct se_cmd *cmd, *tcmd; 
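The drain loop above follows a standard two-phase pattern: while holding dev->execute_task_lock, matching commands are moved from the shared state list onto a private drain list, and only after the lock is dropped does the slow per-command abort work run. A compressed user-space model follows, with a pthread mutex for the spinlock and a fixed array instead of the kernel's intrusive lists; drain_state_list() and struct cmd here are invented stand-ins.

/* Build: cc -std=c11 -pthread -o drain drain.c */
#include <pthread.h>
#include <stdio.h>

#define MAX_CMDS 8

struct cmd { int tag; int aborted; };

static struct cmd *state_list[MAX_CMDS];
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_state_list(struct cmd *skip)
{
	struct cmd *drain[MAX_CMDS];
	int i, n = 0;

	pthread_mutex_lock(&state_lock);	/* "execute_task_lock" */
	for (i = 0; i < MAX_CMDS; i++) {
		if (!state_list[i] || state_list[i] == skip)
			continue;		/* e.g. the PROUT command */
		drain[n++] = state_list[i];	/* list_move_tail() */
		state_list[i] = NULL;
	}
	pthread_mutex_unlock(&state_lock);

	for (i = 0; i < n; i++) {		/* slow abort work, unlocked */
		drain[i]->aborted = 1;
		printf("aborted tag %d\n", drain[i]->tag);
	}
}

int main(void)
{
	struct cmd a = { 1, 0 }, b = { 2, 0 };

	state_list[0] = &a;
	state_list[1] = &b;
	drain_state_list(&b);	/* b is exempt, a is drained */
	return 0;
}

Moving entries off the shared list while the lock is held is what makes it safe to block in the second phase.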
unsigned long flags; + /* - * Release all commands remaining in the struct se_device cmd queue. + * Release all commands remaining in the per-device command queue. * - * This follows the same logic as above for the struct se_device - * struct se_task state list, where commands are returned with - * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD - * reference, otherwise the struct se_cmd is released. + * This follows the same logic as above for the state list. */ spin_lock_irqsave(&qobj->cmd_queue_lock, flags); list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { @@ -466,7 +445,7 @@ int core_tmr_lun_reset( dev->transport->name, tas); core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); - core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas, + core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, preempt_and_abort_list); core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, preempt_and_abort_list); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index e320ec24aa1..8bd58e28418 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -153,10 +153,7 @@ void core_tpg_add_node_to_devs( * demo_mode_write_protect is ON, or READ_ONLY; */ if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { - if (dev->dev_flags & DF_READ_ONLY) - lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; - else - lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; + lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; } else { /* * Allow only optical drives to issue R/W in default RO diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 443704f84fd..b05fdc0c05d 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -72,7 +72,6 @@ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *); static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev); -static void transport_free_dev_tasks(struct se_cmd *cmd); static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd); @@ -331,9 +330,9 @@ void target_get_session(struct se_session *se_sess) } EXPORT_SYMBOL(target_get_session); -int target_put_session(struct se_session *se_sess) +void target_put_session(struct se_session *se_sess) { - return kref_put(&se_sess->sess_kref, target_release_session); + kref_put(&se_sess->sess_kref, target_release_session); } EXPORT_SYMBOL(target_put_session); @@ -444,31 +443,23 @@ EXPORT_SYMBOL(transport_deregister_session); /* * Called with cmd->t_state_lock held. 
*/ -static void transport_all_task_dev_remove_state(struct se_cmd *cmd) +static void target_remove_from_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - struct se_task *task; unsigned long flags; if (!dev) return; - list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (task->task_flags & TF_ACTIVE) - continue; - - spin_lock_irqsave(&dev->execute_task_lock, flags); - if (task->t_state_active) { - pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", - cmd->se_tfo->get_task_tag(cmd), dev, task); + if (cmd->transport_state & CMD_T_BUSY) + return; - list_del(&task->t_state_list); - atomic_dec(&cmd->t_task_cdbs_ex_left); - task->t_state_active = false; - } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); + spin_lock_irqsave(&dev->execute_task_lock, flags); + if (cmd->state_active) { + list_del(&cmd->state_list); + cmd->state_active = false; } - + spin_unlock_irqrestore(&dev->execute_task_lock, flags); } /* transport_cmd_check_stop(): @@ -497,7 +488,7 @@ static int transport_cmd_check_stop( cmd->transport_state &= ~CMD_T_ACTIVE; if (transport_off == 2) - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&cmd->transport_lun_stop_comp); @@ -513,7 +504,7 @@ static int transport_cmd_check_stop( cmd->se_tfo->get_task_tag(cmd)); if (transport_off == 2) - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); /* * Clear struct se_cmd->se_lun before the transport_off == 2 handoff @@ -529,7 +520,7 @@ static int transport_cmd_check_stop( if (transport_off) { cmd->transport_state &= ~CMD_T_ACTIVE; if (transport_off == 2) { - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); /* * Clear struct se_cmd->se_lun before the transport_off == 2 * handoff to fabric module. @@ -577,7 +568,7 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->transport_state & CMD_T_DEV_ACTIVE) { cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); } spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -669,29 +660,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd) spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); } -/* - * Completion function used by TCM subsystem plugins (such as FILEIO) - * for queueing up response from struct se_subsystem_api->do_task() - */ -void transport_complete_sync_cache(struct se_cmd *cmd, int good) -{ - struct se_task *task = list_entry(cmd->t_task_list.next, - struct se_task, t_list); - - if (good) { - cmd->scsi_status = SAM_STAT_GOOD; - task->task_scsi_status = GOOD; - } else { - task->task_scsi_status = SAM_STAT_CHECK_CONDITION; - task->task_se_cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - - } - - transport_complete_task(task, good); -} -EXPORT_SYMBOL(transport_complete_sync_cache); - static void target_complete_failure_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); @@ -699,40 +667,32 @@ static void target_complete_failure_work(struct work_struct *work) transport_generic_request_failure(cmd); } -/* transport_complete_task(): - * - * Called from interrupt and non interrupt context depending - * on the transport plugin. 
- */ -void transport_complete_task(struct se_task *task, int success) +void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; + int success = scsi_status == GOOD; unsigned long flags; + cmd->scsi_status = scsi_status; + + spin_lock_irqsave(&cmd->t_state_lock, flags); - task->task_flags &= ~TF_ACTIVE; + cmd->transport_state &= ~CMD_T_BUSY; - /* - * See if any sense data exists, if so set the TASK_SENSE flag. - * Also check for any other post completion work that needs to be - * done by the plugins. - */ if (dev && dev->transport->transport_complete) { - if (dev->transport->transport_complete(task) != 0) { + if (dev->transport->transport_complete(cmd, + cmd->t_data_sg) != 0) { cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; - task->task_flags |= TF_HAS_SENSE; success = 1; } } /* - * See if we are waiting for outstanding struct se_task - * to complete for an exception condition + * See if we are waiting to complete for an exception condition. */ - if (task->task_flags & TF_REQUEST_STOP) { + if (cmd->transport_state & CMD_T_REQUEST_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&task->task_stop_comp); + complete(&cmd->task_stop_comp); return; } @@ -740,15 +700,6 @@ void transport_complete_task(struct se_task *task, int success) cmd->transport_state |= CMD_T_FAILED; /* - * Decrement the outstanding t_task_cdbs_left count. The last - * struct se_task from struct se_cmd will complete itself into the - * device queue depending upon int success. - */ - if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } - /* * Check for case where an explict ABORT_TASK has been received * and transport_wait_for_tasks() will be waiting for completion.. */ @@ -770,157 +721,77 @@ void transport_complete_task(struct se_task *task, int success) queue_work(target_completion_wq, &cmd->work); } -EXPORT_SYMBOL(transport_complete_task); - -/* - * Called by transport_add_tasks_from_cmd() once a struct se_cmd's - * struct se_task list are ready to be added to the active execution list - * struct se_device +EXPORT_SYMBOL(target_complete_cmd); - * Called with se_dev_t->execute_task_lock called. - */ -static inline int transport_add_task_check_sam_attr( - struct se_task *task, - struct se_task *task_prev, - struct se_device *dev) +static void target_add_to_state_list(struct se_cmd *cmd) { - /* - * No SAM Task attribute emulation enabled, add to tail of - * execution queue - */ - if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { - list_add_tail(&task->t_execute_list, &dev->execute_task_list); - return 0; - } - /* - * HEAD_OF_QUEUE attribute for received CDB, which means - * the first task that is associated with a struct se_cmd goes to - * head of the struct se_device->execute_task_list, and task_prev - * after that for each subsequent task - */ - if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) { - list_add(&task->t_execute_list, - (task_prev != NULL) ? 
- &task_prev->t_execute_list : - &dev->execute_task_list); - - pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" - " in execution queue\n", - task->task_se_cmd->t_task_cdb[0]); - return 1; + struct se_device *dev = cmd->se_dev; + unsigned long flags; + + spin_lock_irqsave(&dev->execute_task_lock, flags); + if (!cmd->state_active) { + list_add_tail(&cmd->state_list, &dev->state_list); + cmd->state_active = true; } - /* - * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been - * transitioned from Dermant -> Active state, and are added to the end - * of the struct se_device->execute_task_list - */ - list_add_tail(&task->t_execute_list, &dev->execute_task_list); - return 0; + spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -/* __transport_add_task_to_execute_queue(): - * - * Called with se_dev_t->execute_task_lock called. - */ -static void __transport_add_task_to_execute_queue( - struct se_task *task, - struct se_task *task_prev, - struct se_device *dev) +static void __target_add_to_execute_list(struct se_cmd *cmd) { - int head_of_queue; - - head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); - atomic_inc(&dev->execute_tasks); + struct se_device *dev = cmd->se_dev; + bool head_of_queue = false; - if (task->t_state_active) + if (!list_empty(&cmd->execute_list)) return; - /* - * Determine if this task needs to go to HEAD_OF_QUEUE for the - * state list as well. Running with SAM Task Attribute emulation - * will always return head_of_queue == 0 here - */ - if (head_of_queue) - list_add(&task->t_state_list, (task_prev) ? - &task_prev->t_state_list : - &dev->state_task_list); - else - list_add_tail(&task->t_state_list, &dev->state_task_list); - task->t_state_active = true; + if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED && + cmd->sam_task_attr == MSG_HEAD_TAG) + head_of_queue = true; - pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", - task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), - task, dev); -} + if (head_of_queue) + list_add(&cmd->execute_list, &dev->execute_list); + else + list_add_tail(&cmd->execute_list, &dev->execute_list); -static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - struct se_task *task; - unsigned long flags; + atomic_inc(&dev->execute_tasks); - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_for_each_entry(task, &cmd->t_task_list, t_list) { - spin_lock(&dev->execute_task_lock); - if (!task->t_state_active) { - list_add_tail(&task->t_state_list, - &dev->state_task_list); - task->t_state_active = true; - - pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", - task->task_se_cmd->se_tfo->get_task_tag( - task->task_se_cmd), task, dev); - } - spin_unlock(&dev->execute_task_lock); - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); -} + if (cmd->state_active) + return; -static void __transport_add_tasks_from_cmd(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - struct se_task *task, *task_prev = NULL; + if (head_of_queue) + list_add(&cmd->state_list, &dev->state_list); + else + list_add_tail(&cmd->state_list, &dev->state_list); - list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (!list_empty(&task->t_execute_list)) - continue; - /* - * __transport_add_task_to_execute_queue() handles the - * SAM Task Attribute emulation if enabled - */ - __transport_add_task_to_execute_queue(task, task_prev, dev); - task_prev = task; - } + cmd->state_active = true; } -static void transport_add_tasks_from_cmd(struct se_cmd *cmd) +static void 
target_add_to_execute_list(struct se_cmd *cmd) { unsigned long flags; struct se_device *dev = cmd->se_dev; spin_lock_irqsave(&dev->execute_task_lock, flags); - __transport_add_tasks_from_cmd(cmd); + __target_add_to_execute_list(cmd); spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -void __transport_remove_task_from_execute_queue(struct se_task *task, - struct se_device *dev) +void __target_remove_from_execute_list(struct se_cmd *cmd) { - list_del_init(&task->t_execute_list); - atomic_dec(&dev->execute_tasks); + list_del_init(&cmd->execute_list); + atomic_dec(&cmd->se_dev->execute_tasks); } -static void transport_remove_task_from_execute_queue( - struct se_task *task, - struct se_device *dev) +static void target_remove_from_execute_list(struct se_cmd *cmd) { + struct se_device *dev = cmd->se_dev; unsigned long flags; - if (WARN_ON(list_empty(&task->t_execute_list))) + if (WARN_ON(list_empty(&cmd->execute_list))) return; spin_lock_irqsave(&dev->execute_task_lock, flags); - __transport_remove_task_from_execute_queue(task, dev); + __target_remove_from_execute_list(cmd); spin_unlock_irqrestore(&dev->execute_task_lock, flags); } @@ -999,8 +870,9 @@ void transport_dump_dev_state( *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d", atomic_read(&dev->execute_tasks), dev->queue_depth); - *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", - dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); + *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", + dev->se_sub_dev->se_dev_attrib.block_size, + dev->se_sub_dev->se_dev_attrib.hw_max_sectors); *bl += sprintf(b + *bl, " "); } @@ -1344,9 +1216,9 @@ struct se_device *transport_add_device_to_core_hba( INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); INIT_LIST_HEAD(&dev->dev_tmr_list); - INIT_LIST_HEAD(&dev->execute_task_list); + INIT_LIST_HEAD(&dev->execute_list); INIT_LIST_HEAD(&dev->delayed_cmd_list); - INIT_LIST_HEAD(&dev->state_task_list); + INIT_LIST_HEAD(&dev->state_list); INIT_LIST_HEAD(&dev->qf_cmd_list); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); @@ -1457,6 +1329,7 @@ static inline void transport_generic_prepare_cdb( case VERIFY_16: /* SBC - VRProtect */ case WRITE_VERIFY: /* SBC - VRProtect */ case WRITE_VERIFY_12: /* SBC - VRProtect */ + case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ break; default: cdb[1] &= 0x1f; /* clear logical unit number */ @@ -1464,29 +1337,6 @@ static inline void transport_generic_prepare_cdb( } } -static struct se_task * -transport_generic_get_task(struct se_cmd *cmd, - enum dma_data_direction data_direction) -{ - struct se_task *task; - struct se_device *dev = cmd->se_dev; - - task = dev->transport->alloc_task(cmd->t_task_cdb); - if (!task) { - pr_err("Unable to allocate struct se_task\n"); - return NULL; - } - - INIT_LIST_HEAD(&task->t_list); - INIT_LIST_HEAD(&task->t_execute_list); - INIT_LIST_HEAD(&task->t_state_list); - init_completion(&task->task_stop_comp); - task->task_se_cmd = cmd; - task->task_data_direction = data_direction; - - return task; -} - static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); /* @@ -1507,11 +1357,13 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->se_queue_node); INIT_LIST_HEAD(&cmd->se_cmd_list); - INIT_LIST_HEAD(&cmd->t_task_list); + INIT_LIST_HEAD(&cmd->execute_list); + INIT_LIST_HEAD(&cmd->state_list); init_completion(&cmd->transport_lun_fe_stop_comp); 
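The __target_add_to_execute_list() hunk above preserves the old SAM task attribute behaviour in much less code: with attribute emulation active, a HEAD OF QUEUE command is inserted at the front of the device execute list (list_add()), everything else at the tail (list_add_tail()). A runnable sketch of that decision, using a plain array in place of the intrusive list; add_to_execute_list() is an invented stand-in.

/* Build: cc -std=c11 -o hoq hoq.c */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

enum { MSG_SIMPLE_TAG, MSG_HEAD_TAG };

#define QLEN 8
static int queue[QLEN];
static int count;

static void add_to_execute_list(int tag, int task_attr, bool attr_emulated)
{
	bool head_of_queue = attr_emulated && task_attr == MSG_HEAD_TAG;

	if (head_of_queue) {			/* list_add() */
		memmove(&queue[1], &queue[0], count * sizeof(queue[0]));
		queue[0] = tag;
	} else {				/* list_add_tail() */
		queue[count] = tag;
	}
	count++;
}

int main(void)
{
	int i;

	add_to_execute_list(1, MSG_SIMPLE_TAG, true);
	add_to_execute_list(2, MSG_SIMPLE_TAG, true);
	add_to_execute_list(3, MSG_HEAD_TAG, true);	/* overtakes 1 and 2 */

	for (i = 0; i < count; i++)
		printf("%d ", queue[i]);
	printf("\n");		/* prints: 3 1 2 */
	return 0;
}

Running it prints 3 1 2: the HEAD OF QUEUE command overtakes the two simple-tag commands already queued, which is the semantics the removed transport_add_task_check_sam_attr() implemented per task.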
init_completion(&cmd->transport_lun_stop_comp); init_completion(&cmd->t_transport_stop_comp); init_completion(&cmd->cmd_wait_comp); + init_completion(&cmd->task_stop_comp); spin_lock_init(&cmd->t_state_lock); cmd->transport_state = CMD_T_DEV_ACTIVE; @@ -1521,6 +1373,8 @@ void transport_init_se_cmd( cmd->data_direction = data_direction; cmd->sam_task_attr = task_attr; cmd->sense_buffer = sense_buffer; + + cmd->state_active = false; } EXPORT_SYMBOL(transport_init_se_cmd); @@ -1550,11 +1404,11 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) return 0; } -/* transport_generic_allocate_tasks(): +/* target_setup_cmd_from_cdb(): * * Called from fabric RX Thread. */ -int transport_generic_allocate_tasks( +int target_setup_cmd_from_cdb( struct se_cmd *cmd, unsigned char *cdb) { @@ -1620,7 +1474,7 @@ int transport_generic_allocate_tasks( spin_unlock(&cmd->se_lun->lun_sep_lock); return 0; } -EXPORT_SYMBOL(transport_generic_allocate_tasks); +EXPORT_SYMBOL(target_setup_cmd_from_cdb); /* * Used by fabric module frontends to queue tasks directly. @@ -1701,6 +1555,8 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, */ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length, data_dir, task_attr, sense); + if (flags & TARGET_SCF_UNKNOWN_SIZE) + se_cmd->unknown_data_length = 1; /* * Obtain struct se_cmd->cmd_kref reference and add new cmd to * se_sess->sess_cmd_list. A second kref_get here is necessary @@ -1726,11 +1582,18 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, * Sanitize CDBs via transport_generic_cmd_sequencer() and * allocate the necessary tasks to complete the received CDB+data */ - rc = transport_generic_allocate_tasks(se_cmd, cdb); + rc = target_setup_cmd_from_cdb(se_cmd, cdb); if (rc != 0) { transport_generic_request_failure(se_cmd); return; } + + /* + * Check if we need to delay processing because of ALUA + * Active/NonOptimized primary access state.. + */ + core_alua_check_nonop_delay(se_cmd); + /* * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend * for immediate execution of READs, otherwise wait for @@ -1872,72 +1735,30 @@ int transport_generic_handle_tmr( EXPORT_SYMBOL(transport_generic_handle_tmr); /* - * If the task is active, request it to be stopped and sleep until it + * If the cmd is active, request it to be stopped and sleep until it * has completed. 
*/ -bool target_stop_task(struct se_task *task, unsigned long *flags) +bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) { - struct se_cmd *cmd = task->task_se_cmd; bool was_active = false; - if (task->task_flags & TF_ACTIVE) { - task->task_flags |= TF_REQUEST_STOP; + if (cmd->transport_state & CMD_T_BUSY) { + cmd->transport_state |= CMD_T_REQUEST_STOP; spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - pr_debug("Task %p waiting to complete\n", task); - wait_for_completion(&task->task_stop_comp); - pr_debug("Task %p stopped successfully\n", task); + pr_debug("cmd %p waiting to complete\n", cmd); + wait_for_completion(&cmd->task_stop_comp); + pr_debug("cmd %p stopped successfully\n", cmd); spin_lock_irqsave(&cmd->t_state_lock, *flags); - atomic_dec(&cmd->t_task_cdbs_left); - task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); + cmd->transport_state &= ~CMD_T_REQUEST_STOP; + cmd->transport_state &= ~CMD_T_BUSY; was_active = true; } return was_active; } -static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) -{ - struct se_task *task, *task_tmp; - unsigned long flags; - int ret = 0; - - pr_debug("ITT[0x%08x] - Stopping tasks\n", - cmd->se_tfo->get_task_tag(cmd)); - - /* - * No tasks remain in the execution queue - */ - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_for_each_entry_safe(task, task_tmp, - &cmd->t_task_list, t_list) { - pr_debug("Processing task %p\n", task); - /* - * If the struct se_task has not been sent and is not active, - * remove the struct se_task from the execution queue. - */ - if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) { - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - transport_remove_task_from_execute_queue(task, - cmd->se_dev); - - pr_debug("Task %p removed from execute queue\n", task); - spin_lock_irqsave(&cmd->t_state_lock, flags); - continue; - } - - if (!target_stop_task(task, &flags)) { - pr_debug("Task %p - did nothing\n", task); - ret++; - } - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - return ret; -} - /* * Handle SAM-esque emulation for generic transport request failures. 
*/ @@ -1951,13 +1772,7 @@ void transport_generic_request_failure(struct se_cmd *cmd) pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->scsi_sense_reason); - pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" - " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" - " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", - cmd->t_task_list_num, - atomic_read(&cmd->t_task_cdbs_left), - atomic_read(&cmd->t_task_cdbs_sent), - atomic_read(&cmd->t_task_cdbs_ex_left), + pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", (cmd->transport_state & CMD_T_ACTIVE) != 0, (cmd->transport_state & CMD_T_STOP) != 0, (cmd->transport_state & CMD_T_SENT) != 0); @@ -2156,7 +1971,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) * Called from fabric module context in transport_generic_new_cmd() and * transport_generic_process_write() */ -static int transport_execute_tasks(struct se_cmd *cmd) +static void transport_execute_tasks(struct se_cmd *cmd) { int add_tasks; struct se_device *se_dev = cmd->se_dev; @@ -2170,71 +1985,52 @@ static int transport_execute_tasks(struct se_cmd *cmd) * attribute for the tasks of the received struct se_cmd CDB */ add_tasks = transport_execute_task_attr(cmd); - if (!add_tasks) - goto execute_tasks; - /* - * __transport_execute_tasks() -> __transport_add_tasks_from_cmd() - * adds associated se_tasks while holding dev->execute_task_lock - * before I/O dispath to avoid a double spinlock access. - */ - __transport_execute_tasks(se_dev, cmd); - return 0; + if (add_tasks) { + __transport_execute_tasks(se_dev, cmd); + return; + } } - -execute_tasks: __transport_execute_tasks(se_dev, NULL); - return 0; } -/* - * Called to check struct se_device tcq depth window, and once open pull struct se_task - * from struct se_device->execute_task_list and - * - * Called from transport_processing_thread() - */ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd) { int error; struct se_cmd *cmd = NULL; - struct se_task *task = NULL; unsigned long flags; check_depth: spin_lock_irq(&dev->execute_task_lock); if (new_cmd != NULL) - __transport_add_tasks_from_cmd(new_cmd); + __target_add_to_execute_list(new_cmd); - if (list_empty(&dev->execute_task_list)) { + if (list_empty(&dev->execute_list)) { spin_unlock_irq(&dev->execute_task_lock); return 0; } - task = list_first_entry(&dev->execute_task_list, - struct se_task, t_execute_list); - __transport_remove_task_from_execute_queue(task, dev); + cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list); + __target_remove_from_execute_list(cmd); spin_unlock_irq(&dev->execute_task_lock); - cmd = task->task_se_cmd; spin_lock_irqsave(&cmd->t_state_lock, flags); - task->task_flags |= (TF_ACTIVE | TF_SENT); - atomic_inc(&cmd->t_task_cdbs_sent); - - if (atomic_read(&cmd->t_task_cdbs_sent) == - cmd->t_task_list_num) - cmd->transport_state |= CMD_T_SENT; + cmd->transport_state |= CMD_T_BUSY; + cmd->transport_state |= CMD_T_SENT; spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (cmd->execute_task) - error = cmd->execute_task(task); - else - error = dev->transport->do_task(task); + if (cmd->execute_cmd) + error = cmd->execute_cmd(cmd); + else { + error = dev->transport->execute_cmd(cmd, cmd->t_data_sg, + cmd->t_data_nents, cmd->data_direction); + } + if (error != 0) { spin_lock_irqsave(&cmd->t_state_lock, flags); - task->task_flags &= ~TF_ACTIVE; + cmd->transport_state &= ~CMD_T_BUSY; cmd->transport_state &= ~CMD_T_SENT; 
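__transport_execute_tasks() above is now a straightforward pop-and-dispatch loop: take the head of the execute list under the lock, mark the command CMD_T_BUSY|CMD_T_SENT, invoke either cmd->execute_cmd() or the backend's execute_cmd method with the lock dropped, and on failure clear the flags and hand the command to the request-failure path. Below is a single-threaded user-space model of that loop; the locking is reduced to comments, and run_execute_list()/execute_cmd() are invented stand-ins, not the kernel functions.

/* Build: cc -std=c11 -o exec exec.c */
#include <stdio.h>

#define CMD_T_BUSY	0x1
#define CMD_T_SENT	0x2

struct cmd { int tag; unsigned int state; struct cmd *next; };

static struct cmd *execute_list;

static int execute_cmd(struct cmd *c)
{
	printf("executing tag %d\n", c->tag);
	return c->tag == 3 ? -1 : 0;	/* pretend tag 3 fails */
}

static void run_execute_list(void)
{
	struct cmd *c;

check_depth:
	/* spin_lock_irq(&dev->execute_task_lock) would be taken here */
	c = execute_list;
	if (!c)
		return;
	execute_list = c->next;		/* __remove_from_execute_list() */
	/* ... and dropped here, before dispatching */

	c->state |= CMD_T_BUSY | CMD_T_SENT;
	if (execute_cmd(c) != 0) {
		c->state &= ~(CMD_T_BUSY | CMD_T_SENT);
		printf("tag %d failed, taking the failure path\n", c->tag);
	}
	goto check_depth;
}

int main(void)
{
	struct cmd c3 = { 3, 0, NULL };
	struct cmd c2 = { 2, 0, &c3 };
	struct cmd c1 = { 1, 0, &c2 };

	execute_list = &c1;
	run_execute_list();
	return 0;
}

The check_depth label mirrors the kernel's goto-based loop; each iteration re-examines the list, so commands queued while one was executing are drained too.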
spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_stop_tasks_for_cmd(cmd); transport_generic_request_failure(cmd); } @@ -2392,12 +2188,12 @@ static inline u32 transport_get_size( } else /* bytes */ return sectors; } -#if 0 + pr_debug("Returning block_size: %u, sectors: %u == %u for" - " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, - dev->se_sub_dev->se_dev_attrib.block_size * sectors, - dev->transport->name); -#endif + " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, + sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors, + dev->transport->name); + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; } @@ -2462,7 +2258,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) { unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; struct se_device *dev = cmd->se_dev; - struct se_task *task = NULL, *task_tmp; unsigned long flags; u32 offset = 0; @@ -2477,44 +2272,37 @@ static int transport_get_sense_data(struct se_cmd *cmd) return 0; } - list_for_each_entry_safe(task, task_tmp, - &cmd->t_task_list, t_list) { - if (!(task->task_flags & TF_HAS_SENSE)) - continue; - - if (!dev->transport->get_sense_buffer) { - pr_err("dev->transport->get_sense_buffer" - " is NULL\n"); - continue; - } - - sense_buffer = dev->transport->get_sense_buffer(task); - if (!sense_buffer) { - pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate" - " sense buffer for task with sense\n", - cmd->se_tfo->get_task_tag(cmd), task); - continue; - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) + goto out; - offset = cmd->se_tfo->set_fabric_sense_len(cmd, - TRANSPORT_SENSE_BUFFER); + if (!dev->transport->get_sense_buffer) { + pr_err("dev->transport->get_sense_buffer is NULL\n"); + goto out; + } - memcpy(&buffer[offset], sense_buffer, - TRANSPORT_SENSE_BUFFER); - cmd->scsi_status = task->task_scsi_status; - /* Automatically padded */ - cmd->scsi_sense_length = - (TRANSPORT_SENSE_BUFFER + offset); - - pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" - " and sense\n", - dev->se_hba->hba_id, dev->transport->name, - cmd->scsi_status); - return 0; + sense_buffer = dev->transport->get_sense_buffer(cmd); + if (!sense_buffer) { + pr_err("ITT 0x%08x cmd %p: Unable to locate" + " sense buffer for task with sense\n", + cmd->se_tfo->get_task_tag(cmd), cmd); + goto out; } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); + + memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); + + /* Automatically padded */ + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; + + pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n", + dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); + return 0; + +out: + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return -1; } @@ -2581,7 +2369,7 @@ static int target_check_write_same_discard(unsigned char *flags, struct se_devic * Generic Command Sequencer that should work for most DAS transport * drivers. * - * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD + * Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD * RX Thread. * * FIXME: Need to support other SCSI OPCODES where as well. @@ -2615,11 +2403,10 @@ static int transport_generic_cmd_sequencer( * by the ALUA primary or secondary access state.. 
*/ if (ret > 0) { -#if 0 pr_debug("[%s]: ALUA TG Port not available," " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", cmd->se_tfo->get_fabric_name(), alua_ascq); -#endif + transport_set_sense_codes(cmd, 0x04, alua_ascq); cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; @@ -2695,6 +2482,7 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_10: + case WRITE_VERIFY: sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; @@ -2796,7 +2584,7 @@ static int transport_generic_cmd_sequencer( if (target_check_write_same_discard(&cdb[10], dev) < 0) goto out_unsupported_cdb; if (!passthrough) - cmd->execute_task = target_emulate_write_same; + cmd->execute_cmd = target_emulate_write_same; break; default: pr_err("VARIABLE_LENGTH_CMD service action" @@ -2810,9 +2598,9 @@ static int transport_generic_cmd_sequencer( /* * Check for emulated MI_REPORT_TARGET_PGS. */ - if (cdb[1] == MI_REPORT_TARGET_PGS && + if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS && su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { - cmd->execute_task = + cmd->execute_cmd = target_emulate_report_target_port_groups; } size = (cdb[6] << 24) | (cdb[7] << 16) | @@ -2835,13 +2623,13 @@ static int transport_generic_cmd_sequencer( size = cdb[4]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_modesense; + cmd->execute_cmd = target_emulate_modesense; break; case MODE_SENSE_10: size = (cdb[7] << 8) + cdb[8]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_modesense; + cmd->execute_cmd = target_emulate_modesense; break; case GPCMD_READ_BUFFER_CAPACITY: case GPCMD_SEND_OPC: @@ -2863,13 +2651,13 @@ static int transport_generic_cmd_sequencer( break; case PERSISTENT_RESERVE_IN: if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) - cmd->execute_task = target_scsi3_emulate_pr_in; + cmd->execute_cmd = target_scsi3_emulate_pr_in; size = (cdb[7] << 8) + cdb[8]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case PERSISTENT_RESERVE_OUT: if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) - cmd->execute_task = target_scsi3_emulate_pr_out; + cmd->execute_cmd = target_scsi3_emulate_pr_out; size = (cdb[7] << 8) + cdb[8]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; @@ -2890,7 +2678,7 @@ static int transport_generic_cmd_sequencer( */ if (cdb[1] == MO_SET_TARGET_PGS && su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { - cmd->execute_task = + cmd->execute_cmd = target_emulate_set_target_port_groups; } @@ -2912,7 +2700,7 @@ static int transport_generic_cmd_sequencer( cmd->sam_task_attr = MSG_HEAD_TAG; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_inquiry; + cmd->execute_cmd = target_emulate_inquiry; break; case READ_BUFFER: size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; @@ -2922,7 +2710,7 @@ static int transport_generic_cmd_sequencer( size = READ_CAP_LEN; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_readcapacity; + cmd->execute_cmd = target_emulate_readcapacity; break; case READ_MEDIA_SERIAL_NUMBER: case SECURITY_PROTOCOL_IN: @@ -2934,7 +2722,7 @@ static int transport_generic_cmd_sequencer( switch (cmd->t_task_cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: if (!passthrough) - cmd->execute_task = + cmd->execute_cmd = target_emulate_readcapacity_16; 
break; default: @@ -2977,7 +2765,7 @@ static int transport_generic_cmd_sequencer( size = cdb[4]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_request_sense; + cmd->execute_cmd = target_emulate_request_sense; break; case READ_ELEMENT_STATUS: size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; @@ -3006,7 +2794,7 @@ static int transport_generic_cmd_sequencer( * emulation disabled. */ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) - cmd->execute_task = target_scsi2_reservation_reserve; + cmd->execute_cmd = target_scsi2_reservation_reserve; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case RELEASE: @@ -3021,7 +2809,7 @@ static int transport_generic_cmd_sequencer( size = cmd->data_length; if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) - cmd->execute_task = target_scsi2_reservation_release; + cmd->execute_cmd = target_scsi2_reservation_release; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case SYNCHRONIZE_CACHE: @@ -3053,13 +2841,13 @@ static int transport_generic_cmd_sequencer( if (transport_cmd_get_valid_sectors(cmd) < 0) goto out_invalid_cdb_field; } - cmd->execute_task = target_emulate_synchronize_cache; + cmd->execute_cmd = target_emulate_synchronize_cache; break; case UNMAP: size = get_unaligned_be16(&cdb[7]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_unmap; + cmd->execute_cmd = target_emulate_unmap; break; case WRITE_SAME_16: sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; @@ -3079,7 +2867,7 @@ static int transport_generic_cmd_sequencer( if (target_check_write_same_discard(&cdb[1], dev) < 0) goto out_unsupported_cdb; if (!passthrough) - cmd->execute_task = target_emulate_write_same; + cmd->execute_cmd = target_emulate_write_same; break; case WRITE_SAME: sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; @@ -3102,7 +2890,7 @@ static int transport_generic_cmd_sequencer( if (target_check_write_same_discard(&cdb[1], dev) < 0) goto out_unsupported_cdb; if (!passthrough) - cmd->execute_task = target_emulate_write_same; + cmd->execute_cmd = target_emulate_write_same; break; case ALLOW_MEDIUM_REMOVAL: case ERASE: @@ -3115,7 +2903,7 @@ static int transport_generic_cmd_sequencer( case WRITE_FILEMARKS: cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; if (!passthrough) - cmd->execute_task = target_emulate_noop; + cmd->execute_cmd = target_emulate_noop; break; case GPCMD_CLOSE_TRACK: case INITIALIZE_ELEMENT_STATUS: @@ -3125,7 +2913,7 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case REPORT_LUNS: - cmd->execute_task = target_report_luns; + cmd->execute_cmd = target_report_luns; size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; /* * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS @@ -3135,6 +2923,42 @@ static int transport_generic_cmd_sequencer( cmd->sam_task_attr = MSG_HEAD_TAG; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; + case GET_EVENT_STATUS_NOTIFICATION: + size = (cdb[7] << 8) | cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case ATA_16: + /* Only support ATA passthrough to pSCSI backends.. */ + if (!passthrough) + goto out_unsupported_cdb; + + /* T_LENGTH */ + switch (cdb[2] & 0x3) { + case 0x0: + sectors = 0; + break; + case 0x1: + sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4]; + break; + case 0x2: + sectors = (((cdb[1] & 0x1) ? 
cdb[5] : 0) << 8) | cdb[6]; + break; + case 0x3: + pr_err("T_LENGTH=0x3 not supported for ATA_16\n"); + goto out_invalid_cdb_field; + } + + /* BYTE_BLOCK */ + if (cdb[2] & 0x4) { + /* BLOCK T_TYPE: 512 or sector */ + size = sectors * ((cdb[2] & 0x10) ? + dev->se_sub_dev->se_dev_attrib.block_size : 512); + } else { + /* BYTE */ + size = sectors; + } + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; default: pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" " 0x%02x, sending CHECK_CONDITION.\n", @@ -3142,6 +2966,9 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; } + if (cmd->unknown_data_length) + cmd->data_length = size; + if (size != cmd->data_length) { pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" " %u does not match SCSI CDB Length: %u for SAM Opcode:" @@ -3177,15 +3004,25 @@ static int transport_generic_cmd_sequencer( cmd->data_length = size; } - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB && - sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n", - cdb[0], sectors); - goto out_invalid_cdb_field; + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" + " big sectors %u exceeds fabric_max_sectors:" + " %u\n", cdb[0], sectors, + su_dev->se_dev_attrib.fabric_max_sectors); + goto out_invalid_cdb_field; + } + if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" + " big sectors %u exceeds backend hw_max_sectors:" + " %u\n", cdb[0], sectors, + su_dev->se_dev_attrib.hw_max_sectors); + goto out_invalid_cdb_field; + } } /* reject any command that we don't have a handler for */ - if (!(passthrough || cmd->execute_task || + if (!(passthrough || cmd->execute_cmd || (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) goto out_unsupported_cdb; @@ -3250,7 +3087,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) cmd_p->t_task_cdb[0], cmd_p->sam_task_attr, cmd_p->se_ordered_id); - transport_add_tasks_from_cmd(cmd_p); + target_add_to_execute_list(cmd_p); new_active_tasks++; spin_lock(&dev->delayed_cmd_lock); @@ -3346,10 +3183,6 @@ static void target_complete_ok_work(struct work_struct *work) if (transport_get_sense_data(cmd) < 0) reason = TCM_NON_EXISTENT_LUN; - /* - * Only set when an struct se_task->task_scsi_status returned - * a non GOOD status. 
- */ if (cmd->scsi_status) { ret = transport_send_check_condition_and_sense( cmd, reason, 1); @@ -3424,33 +3257,6 @@ queue_full: transport_handle_queue_full(cmd, cmd->se_dev); } -static void transport_free_dev_tasks(struct se_cmd *cmd) -{ - struct se_task *task, *task_tmp; - unsigned long flags; - LIST_HEAD(dispose_list); - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_for_each_entry_safe(task, task_tmp, - &cmd->t_task_list, t_list) { - if (!(task->task_flags & TF_ACTIVE)) - list_move_tail(&task->t_list, &dispose_list); - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - while (!list_empty(&dispose_list)) { - task = list_first_entry(&dispose_list, struct se_task, t_list); - - if (task->task_sg != cmd->t_data_sg && - task->task_sg != cmd->t_bidi_data_sg) - kfree(task->task_sg); - - list_del(&task->t_list); - - cmd->se_dev->transport->free_task(task); - } -} - static inline void transport_free_sgl(struct scatterlist *sgl, int nents) { struct scatterlist *sg; @@ -3511,7 +3317,6 @@ static void transport_release_cmd(struct se_cmd *cmd) static void transport_put_cmd(struct se_cmd *cmd) { unsigned long flags; - int free_tasks = 0; spin_lock_irqsave(&cmd->t_state_lock, flags); if (atomic_read(&cmd->t_fe_count)) { @@ -3519,21 +3324,12 @@ static void transport_put_cmd(struct se_cmd *cmd) goto out_busy; } - if (atomic_read(&cmd->t_se_count)) { - if (!atomic_dec_and_test(&cmd->t_se_count)) - goto out_busy; - } - if (cmd->transport_state & CMD_T_DEV_ACTIVE) { cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - transport_all_task_dev_remove_state(cmd); - free_tasks = 1; + target_remove_from_state_list(cmd); } spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (free_tasks != 0) - transport_free_dev_tasks(cmd); - transport_free_pages(cmd); transport_release_cmd(cmd); return; @@ -3683,245 +3479,14 @@ out: return -ENOMEM; } -/* Reduce sectors if they are too long for the device */ -static inline sector_t transport_limit_task_sectors( - struct se_device *dev, - unsigned long long lba, - sector_t sectors) -{ - sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); - - if (dev->transport->get_device_type(dev) == TYPE_DISK) - if ((lba + sectors) > transport_dev_end_lba(dev)) - sectors = ((transport_dev_end_lba(dev) - lba) + 1); - - return sectors; -} - - -/* - * This function can be used by HW target mode drivers to create a linked - * scatterlist from all contiguously allocated struct se_task->task_sg[]. - * This is intended to be called during the completion path by TCM Core - * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. - */ -void transport_do_task_sg_chain(struct se_cmd *cmd) -{ - struct scatterlist *sg_first = NULL; - struct scatterlist *sg_prev = NULL; - int sg_prev_nents = 0; - struct scatterlist *sg; - struct se_task *task; - u32 chained_nents = 0; - int i; - - BUG_ON(!cmd->se_tfo->task_sg_chaining); - - /* - * Walk the struct se_task list and setup scatterlist chains - * for each contiguously allocated struct se_task->task_sg[]. - */ - list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (!task->task_sg) - continue; - - if (!sg_first) { - sg_first = task->task_sg; - chained_nents = task->task_sg_nents; - } else { - sg_chain(sg_prev, sg_prev_nents, task->task_sg); - chained_nents += task->task_sg_nents; - } - /* - * For the padded tasks, use the extra SGL vector allocated - * in transport_allocate_data_tasks() for the sg_prev_nents - * offset into sg_chain() above. 
- * - * We do not need the padding for the last task (or a single - * task), but in that case we will never use the sg_prev_nents - * value below which would be incorrect. - */ - sg_prev_nents = (task->task_sg_nents + 1); - sg_prev = task->task_sg; - } - /* - * Setup the starting pointer and total t_tasks_sg_linked_no including - * padding SGs for linking and to mark the end. - */ - cmd->t_tasks_sg_chained = sg_first; - cmd->t_tasks_sg_chained_no = chained_nents; - - pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" - " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, - cmd->t_tasks_sg_chained_no); - - for_each_sg(cmd->t_tasks_sg_chained, sg, - cmd->t_tasks_sg_chained_no, i) { - - pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", - i, sg, sg_page(sg), sg->length, sg->offset); - if (sg_is_chain(sg)) - pr_debug("SG: %p sg_is_chain=1\n", sg); - if (sg_is_last(sg)) - pr_debug("SG: %p sg_is_last=1\n", sg); - } -} -EXPORT_SYMBOL(transport_do_task_sg_chain); - -/* - * Break up cmd into chunks transport can handle - */ -static int -transport_allocate_data_tasks(struct se_cmd *cmd, - enum dma_data_direction data_direction, - struct scatterlist *cmd_sg, unsigned int sgl_nents) -{ - struct se_device *dev = cmd->se_dev; - int task_count, i; - unsigned long long lba; - sector_t sectors, dev_max_sectors; - u32 sector_size; - - if (transport_cmd_get_valid_sectors(cmd) < 0) - return -EINVAL; - - dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; - sector_size = dev->se_sub_dev->se_dev_attrib.block_size; - - WARN_ON(cmd->data_length % sector_size); - - lba = cmd->t_task_lba; - sectors = DIV_ROUND_UP(cmd->data_length, sector_size); - task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); - - /* - * If we need just a single task reuse the SG list in the command - * and avoid a lot of work. - */ - if (task_count == 1) { - struct se_task *task; - unsigned long flags; - - task = transport_generic_get_task(cmd, data_direction); - if (!task) - return -ENOMEM; - - task->task_sg = cmd_sg; - task->task_sg_nents = sgl_nents; - - task->task_lba = lba; - task->task_sectors = sectors; - task->task_size = task->task_sectors * sector_size; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task_list); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - return task_count; - } - - for (i = 0; i < task_count; i++) { - struct se_task *task; - unsigned int task_size, task_sg_nents_padded; - struct scatterlist *sg; - unsigned long flags; - int count; - - task = transport_generic_get_task(cmd, data_direction); - if (!task) - return -ENOMEM; - - task->task_lba = lba; - task->task_sectors = min(sectors, dev_max_sectors); - task->task_size = task->task_sectors * sector_size; - - /* - * This now assumes that passed sg_ents are in PAGE_SIZE chunks - * in order to calculate the number per task SGL entries - */ - task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); - /* - * Check if the fabric module driver is requesting that all - * struct se_task->task_sg[] be chained together.. If so, - * then allocate an extra padding SG entry for linking and - * marking the end of the chained SGL for every task except - * the last one for (task_count > 1) operation, or skipping - * the extra padding for the (task_count == 1) case. 
- */ - if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { - task_sg_nents_padded = (task->task_sg_nents + 1); - } else - task_sg_nents_padded = task->task_sg_nents; - - task->task_sg = kmalloc(sizeof(struct scatterlist) * - task_sg_nents_padded, GFP_KERNEL); - if (!task->task_sg) { - cmd->se_dev->transport->free_task(task); - return -ENOMEM; - } - - sg_init_table(task->task_sg, task_sg_nents_padded); - - task_size = task->task_size; - - /* Build new sgl, only up to task_size */ - for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { - if (cmd_sg->length > task_size) - break; - - *sg = *cmd_sg; - task_size -= cmd_sg->length; - cmd_sg = sg_next(cmd_sg); - } - - lba += task->task_sectors; - sectors -= task->task_sectors; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task_list); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - } - - return task_count; -} - -static int -transport_allocate_control_task(struct se_cmd *cmd) -{ - struct se_task *task; - unsigned long flags; - - /* Workaround for handling zero-length control CDBs */ - if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && - !cmd->data_length) - return 0; - - task = transport_generic_get_task(cmd, cmd->data_direction); - if (!task) - return -ENOMEM; - - task->task_sg = cmd->t_data_sg; - task->task_size = cmd->data_length; - task->task_sg_nents = cmd->t_data_nents; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task_list); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - /* Success! Return number of tasks allocated */ - return 1; -} - /* - * Allocate any required ressources to execute the command, and either place - * it on the execution queue if possible. For writes we might not have the - * payload yet, thus notify the fabric via a call to ->write_pending instead. + * Allocate any required resources to execute the command. For writes we + * might not have the payload yet, so notify the fabric via a call to + * ->write_pending instead. Otherwise place it on the execution queue. */ int transport_generic_new_cmd(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - int task_cdbs, task_cdbs_bidi = 0; - int set_counts = 1; int ret = 0; /* @@ -3936,35 +3501,9 @@ int transport_generic_new_cmd(struct se_cmd *cmd) goto out_fail; } - /* - * For BIDI command set up the read tasks first. 
- */ - if (cmd->t_bidi_data_sg && - dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { - BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); - - task_cdbs_bidi = transport_allocate_data_tasks(cmd, - DMA_FROM_DEVICE, cmd->t_bidi_data_sg, - cmd->t_bidi_data_nents); - if (task_cdbs_bidi <= 0) - goto out_fail; - - atomic_inc(&cmd->t_fe_count); - atomic_inc(&cmd->t_se_count); - set_counts = 0; - } - - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - task_cdbs = transport_allocate_data_tasks(cmd, - cmd->data_direction, cmd->t_data_sg, - cmd->t_data_nents); - } else { - task_cdbs = transport_allocate_control_task(cmd); - } - - if (task_cdbs < 0) - goto out_fail; - else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { + /* Workaround for handling zero-length control CDBs */ + if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && + !cmd->data_length) { spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_COMPLETE; cmd->transport_state |= CMD_T_ACTIVE; @@ -3982,29 +3521,31 @@ int transport_generic_new_cmd(struct se_cmd *cmd) return 0; } - if (set_counts) { - atomic_inc(&cmd->t_fe_count); - atomic_inc(&cmd->t_se_count); + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib; + + if (transport_cmd_get_valid_sectors(cmd) < 0) + return -EINVAL; + + BUG_ON(cmd->data_length % attr->block_size); + BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) > + attr->hw_max_sectors); } - cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); - atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num); - atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num); + atomic_inc(&cmd->t_fe_count); /* - * For WRITEs, let the fabric know its buffer is ready.. - * This WRITE struct se_cmd (and all of its associated struct se_task's) - * will be added to the struct se_device execution queue after its WRITE - * data has arrived. (ie: It gets handled by the transport processing - * thread a second time) + * For WRITEs, let the fabric know its buffer is ready. + * + * The command will be added to the execution queue after its write + * data has arrived. */ if (cmd->data_direction == DMA_TO_DEVICE) { - transport_add_tasks_to_state_queue(cmd); + target_add_to_state_list(cmd); return transport_generic_write_pending(cmd); } /* - * Everything else but a WRITE, add the struct se_cmd's struct se_task's - * to the execution queue. + * Everything else but a WRITE, add the command to the execution queue. */ transport_execute_tasks(cmd); return 0; @@ -4091,8 +3632,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) if (cmd->se_lun) transport_lun_remove_cmd(cmd); - transport_free_dev_tasks(cmd); - transport_put_cmd(cmd); } } @@ -4233,7 +3772,8 @@ EXPORT_SYMBOL(target_wait_for_sess_cmds); static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) { unsigned long flags; - int ret; + int ret = 0; + /* * If the frontend has already requested this struct se_cmd to * be stopped, we can safely ignore this struct se_cmd. @@ -4253,10 +3793,21 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); - ret = transport_stop_tasks_for_cmd(cmd); + // XXX: audit task_flags checks. 
+ spin_lock_irqsave(&cmd->t_state_lock, flags); + if ((cmd->transport_state & CMD_T_BUSY) && + (cmd->transport_state & CMD_T_SENT)) { + if (!target_stop_cmd(cmd, &flags)) + ret++; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + } else { + spin_unlock_irqrestore(&cmd->t_state_lock, + flags); + target_remove_from_execute_list(cmd); + } - pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" - " %d\n", cmd, cmd->t_task_list_num, ret); + pr_debug("ConfigFS: cmd: %p stop tasks ret:" + " %d\n", cmd, ret); if (!ret) { pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", cmd->se_tfo->get_task_tag(cmd)); @@ -4328,10 +3879,9 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) goto check_cond; } cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - transport_free_dev_tasks(cmd); /* * The Storage engine stopped this struct se_cmd before it was * send to the fabric frontend for delivery back to the @@ -4444,7 +3994,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) wait_for_completion(&cmd->transport_lun_fe_stop_comp); spin_lock_irqsave(&cmd->t_state_lock, flags); - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); /* * At this point, the frontend who was the originator of this * struct se_cmd, now owns the structure and can be released through @@ -4710,12 +4260,12 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) return 1; -#if 0 + pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" " status for CDB: 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); -#endif + cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; cmd->se_tfo->queue_status(cmd); ret = 1; @@ -4748,11 +4298,11 @@ void transport_send_task_abort(struct se_cmd *cmd) } } cmd->scsi_status = SAM_STAT_TASK_ABORTED; -#if 0 + pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," " ITT: 0x%08x\n", cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); -#endif + cmd->se_tfo->queue_status(cmd); } @@ -4865,7 +4415,7 @@ get_cmd: } out: - WARN_ON(!list_empty(&dev->state_task_list)); + WARN_ON(!list_empty(&dev->state_list)); WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list)); dev->process_thread = NULL; return 0; diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index a375f257aab..f03fb9730f5 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -215,20 +215,10 @@ int ft_write_pending(struct se_cmd *se_cmd) */ if ((ep->xid <= lport->lro_xid) && (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { - if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - /* - * cmd may have been broken up into multiple - * tasks. Link their sgs together so we can - * operate on them all at once. 
- */ - transport_do_task_sg_chain(se_cmd); - cmd->sg = se_cmd->t_tasks_sg_chained; - cmd->sg_cnt = - se_cmd->t_tasks_sg_chained_no; - } - if (cmd->sg && lport->tt.ddp_target(lport, ep->xid, - cmd->sg, - cmd->sg_cnt)) + if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) && + lport->tt.ddp_target(lport, ep->xid, + se_cmd->t_data_sg, + se_cmd->t_data_nents)) cmd->was_ddp_setup = 1; } } diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 2948dc94461..9501844fae2 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -576,9 +576,6 @@ int ft_register_configfs(void) } fabric->tf_ops = ft_fabric_ops; - /* Allowing support for task_sg_chaining */ - fabric->tf_ops.task_sg_chaining = 1; - /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index dc7c0db26e2..071a505f98f 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -228,7 +228,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) "payload, Frame will be dropped if" "'Sequence Initiative' bit in f_ctl is" "not set\n", __func__, ep->xid, f_ctl, - cmd->sg, cmd->sg_cnt); + se_cmd->t_data_sg, se_cmd->t_data_nents); /* * Invalidate HW DDP context if it was setup for respective * command. Invalidation of HW DDP context is requited in both diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 8c9ff1b1439..2d7db85e93a 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -23,12 +23,11 @@ struct se_subsystem_api { struct se_device *(*create_virtdevice)(struct se_hba *, struct se_subsystem_dev *, void *); void (*free_device)(void *); - int (*transport_complete)(struct se_task *task); - struct se_task *(*alloc_task)(unsigned char *cdb); - int (*do_task)(struct se_task *); + int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *); + int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32, + enum dma_data_direction); int (*do_discard)(struct se_device *, sector_t, u32); - void (*do_sync_cache)(struct se_task *); - void (*free_task)(struct se_task *); + void (*do_sync_cache)(struct se_cmd *); ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *); ssize_t (*set_configfs_dev_params)(struct se_hba *, @@ -38,7 +37,7 @@ struct se_subsystem_api { u32 (*get_device_rev)(struct se_device *); u32 (*get_device_type)(struct se_device *); sector_t (*get_blocks)(struct se_device *); - unsigned char *(*get_sense_buffer)(struct se_task *); + unsigned char *(*get_sense_buffer)(struct se_cmd *); }; int transport_subsystem_register(struct se_subsystem_api *); @@ -48,10 +47,7 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *, struct se_subsystem_api *, struct se_subsystem_dev *, u32, void *, struct se_dev_limits *, const char *, const char *); -void transport_complete_sync_cache(struct se_cmd *, int); -void transport_complete_task(struct se_task *, int); - -void target_get_task_cdb(struct se_task *, unsigned char *); +void target_complete_cmd(struct se_cmd *, u8); void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index aaccc5f5fc9..dc35d8660aa 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -73,9 +73,8 @@ /* * struct 
se_device->dev_flags */ -#define DF_READ_ONLY 0x00000001 -#define DF_SPC2_RESERVATIONS 0x00000002 -#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004 +#define DF_SPC2_RESERVATIONS 0x00000001 +#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000002 /* struct se_dev_attrib sanity values */ /* Default max_unmap_lba_count */ @@ -141,14 +140,6 @@ enum transport_tpg_type_table { TRANSPORT_TPG_TYPE_DISCOVERY = 1, }; -/* struct se_task->task_flags */ -enum se_task_flags { - TF_ACTIVE = (1 << 0), - TF_SENT = (1 << 1), - TF_REQUEST_STOP = (1 << 2), - TF_HAS_SENSE = (1 << 3), -}; - /* Special transport agnostic struct se_cmd->t_states */ enum transport_state_table { TRANSPORT_NO_STATE = 0, @@ -234,6 +225,7 @@ enum tcm_sense_reason_table { enum target_sc_flags_table { TARGET_SCF_BIDI_OP = 0x01, TARGET_SCF_ACK_KREF = 0x02, + TARGET_SCF_UNKNOWN_SIZE = 0x04, }; /* fabric independent task management function values */ @@ -338,6 +330,7 @@ struct t10_alua_tg_pt_gp { int tg_pt_gp_alua_access_type; int tg_pt_gp_nonop_delay_msecs; int tg_pt_gp_trans_delay_msecs; + int tg_pt_gp_implict_trans_secs; int tg_pt_gp_pref; int tg_pt_gp_write_metadata; /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */ @@ -485,23 +478,6 @@ struct se_queue_obj { wait_queue_head_t thread_wq; }; -struct se_task { - unsigned long long task_lba; - u32 task_sectors; - u32 task_size; - struct se_cmd *task_se_cmd; - struct scatterlist *task_sg; - u32 task_sg_nents; - u16 task_flags; - u8 task_scsi_status; - enum dma_data_direction task_data_direction; - struct list_head t_list; - struct list_head t_execute_list; - struct list_head t_state_list; - bool t_state_active; - struct completion task_stop_comp; -}; - struct se_tmr_req { /* Task Management function to be performed */ u8 function; @@ -538,6 +514,7 @@ struct se_cmd { /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */ unsigned check_release:1; unsigned cmd_wait_set:1; + unsigned unknown_data_length:1; /* See se_cmd_flags_table */ u32 se_cmd_flags; u32 se_ordered_id; @@ -565,18 +542,13 @@ struct se_cmd { struct completion cmd_wait_comp; struct kref cmd_kref; struct target_core_fabric_ops *se_tfo; - int (*execute_task)(struct se_task *); + int (*execute_cmd)(struct se_cmd *); void (*transport_complete_callback)(struct se_cmd *); unsigned char *t_task_cdb; unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; unsigned long long t_task_lba; - u32 t_tasks_sg_chained_no; atomic_t t_fe_count; - atomic_t t_se_count; - atomic_t t_task_cdbs_left; - atomic_t t_task_cdbs_ex_left; - atomic_t t_task_cdbs_sent; unsigned int transport_state; #define CMD_T_ABORTED (1 << 0) #define CMD_T_ACTIVE (1 << 1) @@ -588,11 +560,12 @@ struct se_cmd { #define CMD_T_LUN_STOP (1 << 7) #define CMD_T_LUN_FE_STOP (1 << 8) #define CMD_T_DEV_ACTIVE (1 << 9) +#define CMD_T_REQUEST_STOP (1 << 10) +#define CMD_T_BUSY (1 << 11) spinlock_t t_state_lock; struct completion t_transport_stop_comp; struct completion transport_lun_fe_stop_comp; struct completion transport_lun_stop_comp; - struct scatterlist *t_tasks_sg_chained; struct work_struct work; @@ -602,10 +575,15 @@ struct se_cmd { struct scatterlist *t_bidi_data_sg; unsigned int t_bidi_data_nents; - /* Used for BIDI READ */ - struct list_head t_task_list; - u32 t_task_list_num; + struct list_head execute_list; + struct list_head state_list; + bool state_active; + + /* old task stop completion, consider merging with some of the above */ + struct completion task_stop_comp; + /* backend private data */ + void *priv; }; struct se_ua { @@ -731,7 +709,6 @@ struct 
se_dev_attrib { u32 hw_block_size; u32 block_size; u32 hw_max_sectors; - u32 max_sectors; u32 fabric_max_sectors; u32 optimal_sectors; u32 hw_queue_depth; @@ -829,8 +806,8 @@ struct se_device { struct task_struct *process_thread; struct work_struct qf_work_queue; struct list_head delayed_cmd_list; - struct list_head execute_task_list; - struct list_head state_task_list; + struct list_head execute_list; + struct list_head state_list; struct list_head qf_cmd_list; /* Pointer to associated SE HBA */ struct se_hba *se_hba; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 10c69080960..116959933f4 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -3,12 +3,6 @@ struct target_core_fabric_ops { struct configfs_subsystem *tf_subsys; - /* - * Optional to signal struct se_task->task_sg[] padding entries - * for scatterlist chaining using transport_do_task_sg_link(), - * disabled by default - */ - bool task_sg_chaining; char *(*get_fabric_name)(void); u8 (*get_fabric_proto_ident)(struct se_portal_group *); char *(*tpg_get_wwn)(struct se_portal_group *); @@ -102,7 +96,7 @@ void __transport_register_session(struct se_portal_group *, void transport_register_session(struct se_portal_group *, struct se_node_acl *, struct se_session *, void *); void target_get_session(struct se_session *); -int target_put_session(struct se_session *); +void target_put_session(struct se_session *); void transport_free_session(struct se_session *); void target_put_nacl(struct se_node_acl *); void transport_deregister_session_configfs(struct se_session *); @@ -112,7 +106,7 @@ void transport_deregister_session(struct se_session *); void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *, struct se_session *, u32, int, int, unsigned char *); int transport_lookup_cmd_lun(struct se_cmd *, u32); -int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); +int target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, unsigned char *, u32, u32, int, int, int); int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, @@ -124,7 +118,6 @@ int transport_generic_handle_cdb_map(struct se_cmd *); int transport_generic_handle_data(struct se_cmd *); int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, struct scatterlist *, u32); -void transport_do_task_sg_chain(struct se_cmd *); int transport_generic_new_cmd(struct se_cmd *); void transport_generic_process_write(struct se_cmd *);
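
A few illustrative notes on the mechanics in the diff above. First, the target_stop_cmd() conversion near the top: the stopper must sleep in wait_for_completion(), which is why it drops t_state_lock (through the flags pointer its caller passes in) before waiting and retakes it afterwards, and why the completion path only signals task_stop_comp when it sees CMD_T_REQUEST_STOP. The shape of that handshake is easier to see outside the kernel; below is a user-space sketch assuming a pthread mutex and condition variable in place of the spinlock and struct completion. fake_cmd, fake_stop_cmd and fake_complete are made-up names for illustration, not kernel API.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	#define CMD_T_BUSY         (1 << 0)
	#define CMD_T_REQUEST_STOP (1 << 1)

	/* Stand-in for the few struct se_cmd fields the handshake uses. */
	struct fake_cmd {
		unsigned int state;
		bool stopped;			/* plays the role of task_stop_comp */
		pthread_mutex_t lock;		/* plays the role of t_state_lock */
		pthread_cond_t stop_comp;
	};

	/*
	 * Analogue of target_stop_cmd(); called with cmd->lock held.
	 * pthread_cond_wait() releases the mutex while sleeping, just as
	 * the kernel code drops t_state_lock around wait_for_completion().
	 */
	static bool fake_stop_cmd(struct fake_cmd *cmd)
	{
		bool was_active = false;

		if (cmd->state & CMD_T_BUSY) {
			cmd->state |= CMD_T_REQUEST_STOP;
			while (!cmd->stopped)
				pthread_cond_wait(&cmd->stop_comp, &cmd->lock);
			cmd->state &= ~(CMD_T_REQUEST_STOP | CMD_T_BUSY);
			was_active = true;
		}
		return was_active;
	}

	/*
	 * Analogue of the completion path: clear the busy bit and, if a
	 * stop was requested in the meantime, wake the stopper.
	 */
	static void *fake_complete(void *arg)
	{
		struct fake_cmd *cmd = arg;

		usleep(100 * 1000);		/* pretend the backend is busy */
		pthread_mutex_lock(&cmd->lock);
		cmd->state &= ~CMD_T_BUSY;
		if (cmd->state & CMD_T_REQUEST_STOP) {
			cmd->stopped = true;
			pthread_cond_signal(&cmd->stop_comp);
		}
		pthread_mutex_unlock(&cmd->lock);
		return NULL;
	}

	int main(void)
	{
		struct fake_cmd cmd = {
			.state = CMD_T_BUSY,
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.stop_comp = PTHREAD_COND_INITIALIZER,
		};
		pthread_t thr;

		pthread_create(&thr, NULL, fake_complete, &cmd);
		pthread_mutex_lock(&cmd.lock);
		/* Timing decides the outcome: if completion wins the race,
		 * CMD_T_BUSY is already clear and this reports "no". */
		printf("stopped an active cmd: %s\n",
		       fake_stop_cmd(&cmd) ? "yes" : "no");
		pthread_mutex_unlock(&cmd.lock);
		pthread_join(thr, NULL);
		return 0;
	}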
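
Second, the rewritten __transport_execute_tasks() captures the core of the se_task removal: dev->execute_list now holds struct se_cmd directly, and dispatch prefers the per-command execute_cmd hook (set by the sequencer for emulated opcodes) before falling back to the backend method. The real backend call also receives the scatterlist, entry count and data direction, as the hunk shows; the sketch below drops those to isolate the fallback logic, with hypothetical struct cmd and struct backend types standing in for se_cmd and se_subsystem_api.

	#include <stdio.h>

	struct cmd;

	/* Hypothetical backend, mirroring se_subsystem_api->execute_cmd. */
	struct backend {
		int (*execute_cmd)(struct cmd *);
	};

	struct cmd {
		int (*execute_cmd)(struct cmd *);	/* set for emulated opcodes */
		struct backend *be;
		const char *name;
	};

	static int emulate_inquiry(struct cmd *c)
	{
		printf("%s: handled by core emulation\n", c->name);
		return 0;
	}

	static int backend_execute(struct cmd *c)
	{
		printf("%s: passed through to the backend\n", c->name);
		return 0;
	}

	/* Mirrors the if/else dispatch in __transport_execute_tasks(). */
	static int dispatch(struct cmd *c)
	{
		if (c->execute_cmd)
			return c->execute_cmd(c);
		return c->be->execute_cmd(c);
	}

	int main(void)
	{
		struct backend be = { .execute_cmd = backend_execute };
		struct cmd inquiry = { emulate_inquiry, &be, "INQUIRY" };
		struct cmd read10 = { NULL, &be, "READ_10" };

		dispatch(&inquiry);
		dispatch(&read10);
		return 0;
	}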
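
Third, the MAINTENANCE_IN hunk replaces a whole-byte compare of cdb[1] with (cdb[1] & 0x1f). The service action occupies only the low five bits of byte 1, so once an initiator sets format bits in the upper part of the byte, the unmasked compare stops matching. A two-assert demonstration, assuming MI_REPORT_TARGET_PGS is 0x0a as in the kernel headers and using 0x2a as a sample byte with a high format bit set:

	#include <assert.h>
	#include <stdint.h>

	#define MI_REPORT_TARGET_PGS 0x0a	/* MAINTENANCE_IN service action */

	int main(void)
	{
		/* Service action 0x0a with a format bit set above bit 4. */
		uint8_t cdb1 = 0x2a;

		assert(cdb1 != MI_REPORT_TARGET_PGS);		/* old check misses */
		assert((cdb1 & 0x1f) == MI_REPORT_TARGET_PGS);	/* new check hits */
		return 0;
	}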
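
Finally, the new ATA_16 case derives the expected transfer length from the passthrough CDB: T_LENGTH (cdb[2] bits 1:0) selects which CDB field carries the count, bit 0 of cdb[1] decides whether a high count byte is included, and BYTE_BLOCK plus T_TYPE (cdb[2] bits 2 and 4) convert the count to bytes. A standalone sketch of the same decoding follows; the ata16_xfer_bytes() helper and the sample CDB are illustrative, with 0x85 being the ATA_16 opcode.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Mirrors the T_LENGTH / BYTE_BLOCK decoding the ATA_16 case adds
	 * to transport_generic_cmd_sequencer().  Returns -1 for the
	 * unsupported T_LENGTH=0x3 encoding, like the kernel hunk.
	 */
	static int ata16_xfer_bytes(const uint8_t *cdb, uint32_t block_size,
				    uint32_t *out_bytes)
	{
		uint32_t sectors;

		switch (cdb[2] & 0x3) {
		case 0x0:			/* no data is transferred */
			sectors = 0;
			break;
		case 0x1:			/* count in the FEATURES field */
			sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
			break;
		case 0x2:			/* count in the SECTOR_COUNT field */
			sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
			break;
		default:			/* T_LENGTH=0x3: rejected */
			return -1;
		}

		if (cdb[2] & 0x4)		/* BYTE_BLOCK: count is in blocks */
			*out_bytes = sectors *
				((cdb[2] & 0x10) ? block_size : 512);
		else				/* count is in bytes */
			*out_bytes = sectors;
		return 0;
	}

	int main(void)
	{
		/* T_LENGTH=0x2, BYTE_BLOCK=1, T_TYPE=0: eight 512-byte sectors. */
		uint8_t cdb[16] = { 0x85, 0x00, 0x06, 0, 0, 0, 8 };
		uint32_t bytes;

		if (ata16_xfer_bytes(cdb, 4096, &bytes) == 0)
			printf("expected transfer: %u bytes\n", bytes);
		return 0;
	}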