Diffstat (limited to 'drivers/target')
43 files changed, 4191 insertions(+), 2265 deletions(-)
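The iscsi-target hunks below repeatedly retire driver-private state in favor of fields already carried by the embedded struct se_cmd: cmd->data_length becomes cmd->se_cmd.data_length, cmd->t_mem_sg gives way to se_cmd.t_data_sg, and the list head i_list is renamed i_conn_node to say which list it sits on. The embedding is also what lets fabric callbacks such as lio_write_pending() recover the iSCSI descriptor from a struct se_cmd pointer via container_of(). A minimal standalone sketch of that pattern (struct layouts here are cut down to illustrative stubs, not the kernel definitions):

	#include <stddef.h>

	/* cut-down stand-ins for the kernel structs */
	struct se_cmd {
		unsigned int data_length;	/* expected transfer length (EDTL) */
	};

	struct iscsi_cmd {
		int i_state;
		struct se_cmd se_cmd;		/* embedded TCM descriptor */
	};

	/* what container_of(se_cmd, struct iscsi_cmd, se_cmd) boils down to */
	static struct iscsi_cmd *iscsi_cmd_from_se(struct se_cmd *se_cmd)
	{
		return (struct iscsi_cmd *)((char *)se_cmd -
				offsetof(struct iscsi_cmd, se_cmd));
	}

With the shadow copy of the length gone, every bounds check in the series reads the one authoritative EDTL instead of a duplicate that could drift.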
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index b28794b7212..18303686eb5 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig @@ -32,5 +32,6 @@ config TCM_PSCSI source "drivers/target/loopback/Kconfig" source "drivers/target/tcm_fc/Kconfig" source "drivers/target/iscsi/Kconfig" +source "drivers/target/sbp/Kconfig" endif diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 62e54053bcd..61648d84fbb 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile @@ -25,3 +25,4 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ obj-$(CONFIG_TCM_FC) += tcm_fc/ obj-$(CONFIG_ISCSI_TARGET) += iscsi/ +obj-$(CONFIG_SBP_TARGET) += sbp/ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 8b1d5e62ed4..d57d10cb2e4 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -27,8 +27,10 @@ #include <asm/unaligned.h> #include <scsi/scsi_device.h> #include <scsi/iscsi_proto.h> +#include <scsi/scsi_tcq.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> +#include <target/target_core_configfs.h> #include "iscsi_target_core.h" #include "iscsi_target_parameters.h" @@ -593,7 +595,7 @@ static void __exit iscsi_target_cleanup_module(void) kfree(iscsit_global); } -int iscsit_add_reject( +static int iscsit_add_reject( u8 reason, int fail_conn, unsigned char *buf, @@ -622,7 +624,7 @@ int iscsit_add_reject( } spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); cmd->i_state = ISTATE_SEND_REJECT; @@ -669,7 +671,7 @@ int iscsit_add_reject_from_cmd( if (add_to_conn) { spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); } @@ -685,9 +687,7 @@ int iscsit_add_reject_from_cmd( /* * Map some portion of the allocated scatterlist to an iovec, suitable for - * kernel sockets to copy data in/out. This handles both pages and slab-allocated - * buffers, since we have been tricky and mapped t_mem_sg to the buffer in - * either case (see iscsit_alloc_buffs) + * kernel sockets to copy data in/out. */ static int iscsit_map_iovec( struct iscsi_cmd *cmd, @@ -700,10 +700,9 @@ static int iscsit_map_iovec( unsigned int page_off; /* - * We have a private mapping of the allocated pages in t_mem_sg. - * At this point, we also know each contains a page. + * We know each entry in t_data_sg contains a page. */ - sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE]; + sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; page_off = (data_offset % PAGE_SIZE); cmd->first_data_sg = sg; @@ -744,7 +743,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) conn->exp_statsn = exp_statsn; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { spin_lock(&cmd->istate_lock); if ((cmd->i_state == ISTATE_SENT_STATUS) && (cmd->stat_sn < exp_statsn)) { @@ -761,8 +760,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) { - u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 
1 : - cmd->se_cmd.t_data_nents; + u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE)); iov_count += ISCSI_IOV_DATA_BUFFER; @@ -776,64 +774,6 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) return 0; } -static int iscsit_alloc_buffs(struct iscsi_cmd *cmd) -{ - struct scatterlist *sgl; - u32 length = cmd->se_cmd.data_length; - int nents = DIV_ROUND_UP(length, PAGE_SIZE); - int i = 0, j = 0, ret; - /* - * If no SCSI payload is present, allocate the default iovecs used for - * iSCSI PDU Header - */ - if (!length) - return iscsit_allocate_iovecs(cmd); - - sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL); - if (!sgl) - return -ENOMEM; - - sg_init_table(sgl, nents); - - while (length) { - int buf_size = min_t(int, length, PAGE_SIZE); - struct page *page; - - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - goto page_alloc_failed; - - sg_set_page(&sgl[i], page, buf_size, 0); - - length -= buf_size; - i++; - } - - cmd->t_mem_sg = sgl; - cmd->t_mem_sg_nents = nents; - - /* BIDI ops not supported */ - - /* Tell the core about our preallocated memory */ - transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0); - /* - * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd - * so that cmd->se_cmd.t_tasks_se_num has been set. - */ - ret = iscsit_allocate_iovecs(cmd); - if (ret < 0) - return -ENOMEM; - - return 0; - -page_alloc_failed: - while (j < i) - __free_page(sg_page(&sgl[j++])); - - kfree(sgl); - return -ENOMEM; -} - static int iscsit_handle_scsi_cmd( struct iscsi_conn *conn, unsigned char *buf) @@ -842,6 +782,8 @@ static int iscsit_handle_scsi_cmd( int dump_immediate_data = 0, send_check_condition = 0, payload_length; struct iscsi_cmd *cmd = NULL; struct iscsi_scsi_req *hdr; + int iscsi_task_attr; + int sam_task_attr; spin_lock_bh(&conn->sess->session_stats_lock); conn->sess->cmd_pdus++; @@ -958,15 +900,30 @@ done: (hdr->flags & ISCSI_FLAG_CMD_READ) ? 
DMA_FROM_DEVICE : DMA_NONE; - cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction, - (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK)); + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); if (!cmd) return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, - buf, conn); + buf, conn); - pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," - " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, - hdr->cmdsn, hdr->data_length, payload_length, conn->cid); + cmd->data_direction = data_direction; + iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK; + /* + * Figure out the SAM Task Attribute for the incoming SCSI CDB + */ + if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || + (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) + sam_task_attr = MSG_SIMPLE_TAG; + else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) + sam_task_attr = MSG_ORDERED_TAG; + else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) + sam_task_attr = MSG_HEAD_TAG; + else if (iscsi_task_attr == ISCSI_ATTR_ACA) + sam_task_attr = MSG_ACA_TAG; + else { + pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" + " MSG_SIMPLE_TAG\n", iscsi_task_attr); + sam_task_attr = MSG_SIMPLE_TAG; + } cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; cmd->i_state = ISTATE_NEW_CMD; @@ -1003,6 +960,17 @@ done: } /* + * Initialize struct se_cmd descriptor from target_core_mod infrastructure + */ + transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops, + conn->sess->se_sess, hdr->data_length, cmd->data_direction, + sam_task_attr, &cmd->sense_buffer[0]); + + pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," + " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, + hdr->cmdsn, hdr->data_length, payload_length, conn->cid); + + /* * The CDB is going to an se_device_t. */ ret = transport_lookup_cmd_lun(&cmd->se_cmd, @@ -1016,13 +984,8 @@ done: send_check_condition = 1; goto attach_cmd; } - /* - * The Initiator Node has access to the LUN (the addressing method - * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to - * allocate 1->N transport tasks (depending on sector count and - * maximum request size the physical HBA(s) can handle. - */ - transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb); + + transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb); if (transport_ret == -ENOMEM) { return iscsit_add_reject_from_cmd( ISCSI_REASON_BOOKMARK_NO_RESOURCES, @@ -1035,9 +998,7 @@ done: */ send_check_condition = 1; } else { - cmd->data_length = cmd->se_cmd.data_length; - - if (iscsit_decide_list_to_build(cmd, payload_length) < 0) + if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) return iscsit_add_reject_from_cmd( ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 1, buf, cmd); @@ -1045,18 +1006,15 @@ done: attach_cmd: spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); /* * Check if we need to delay processing because of ALUA * Active/NonOptimized primary access state.. */ core_alua_check_nonop_delay(&cmd->se_cmd); - /* - * Allocate and setup SGL used with transport_generic_map_mem_to_cmd(). 
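 * Earlier in this hunk the iSCSI task attribute from the CDB header is
 * folded onto a SAM task attribute before transport_init_se_cmd(). A
 * hedged sketch of that mapping as a pure function (the constants are
 * the real ones from iscsi_proto.h/scsi_tcq.h, the helper name is not):
 */

static int iscsi_attr_to_sam(int iscsi_task_attr)
{
	switch (iscsi_task_attr) {
	case ISCSI_ATTR_UNTAGGED:
	case ISCSI_ATTR_SIMPLE:
		return MSG_SIMPLE_TAG;
	case ISCSI_ATTR_ORDERED:
		return MSG_ORDERED_TAG;
	case ISCSI_ATTR_HEAD_OF_QUEUE:
		return MSG_HEAD_TAG;
	case ISCSI_ATTR_ACA:
		return MSG_ACA_TAG;
	default:
		/* unknown attributes degrade to SIMPLE, as above */
		return MSG_SIMPLE_TAG;
	}
}

/*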
- * also call iscsit_allocate_iovecs() - */ - ret = iscsit_alloc_buffs(cmd); + + ret = iscsit_allocate_iovecs(cmd); if (ret < 0) return iscsit_add_reject_from_cmd( ISCSI_REASON_BOOKMARK_NO_RESOURCES, @@ -1303,10 +1261,10 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) se_cmd = &cmd->se_cmd; iscsit_mod_dataout_timer(cmd); - if ((hdr->offset + payload_length) > cmd->data_length) { + if ((hdr->offset + payload_length) > cmd->se_cmd.data_length) { pr_err("DataOut Offset: %u, Length %u greater than" " iSCSI Command EDTL %u, protocol error.\n", - hdr->offset, payload_length, cmd->data_length); + hdr->offset, payload_length, cmd->se_cmd.data_length); return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd); } @@ -1442,7 +1400,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) return 0; else if (ret == DATAOUT_SEND_R2T) { iscsit_set_dataout_sequence_values(cmd); - iscsit_build_r2ts_for_cmd(cmd, conn, 0); + iscsit_build_r2ts_for_cmd(cmd, conn, false); } else if (ret == DATAOUT_SEND_TO_TRANSPORT) { /* * Handle extra special case for out of order @@ -1617,7 +1575,7 @@ static int iscsit_handle_nop_out( * Initiator is expecting a NopIN ping reply, */ spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); @@ -1723,10 +1681,75 @@ static int iscsit_handle_task_mgt_cmd( (hdr->refcmdsn != ISCSI_RESERVED_TAG)) hdr->refcmdsn = ISCSI_RESERVED_TAG; - cmd = iscsit_allocate_se_cmd_for_tmr(conn, function); + cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); if (!cmd) return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, buf, conn); + 1, buf, conn); + + cmd->data_direction = DMA_NONE; + + cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL); + if (!cmd->tmr_req) { + pr_err("Unable to allocate memory for" + " Task Management command!\n"); + return iscsit_add_reject_from_cmd( + ISCSI_REASON_BOOKMARK_NO_RESOURCES, + 1, 1, buf, cmd); + } + + /* + * TASK_REASSIGN for ERL=2 / connection stays inside of + * LIO-Target $FABRIC_MOD + */ + if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { + + u8 tcm_function; + int ret; + + transport_init_se_cmd(&cmd->se_cmd, + &lio_target_fabric_configfs->tf_ops, + conn->sess->se_sess, 0, DMA_NONE, + MSG_SIMPLE_TAG, &cmd->sense_buffer[0]); + + switch (function) { + case ISCSI_TM_FUNC_ABORT_TASK: + tcm_function = TMR_ABORT_TASK; + break; + case ISCSI_TM_FUNC_ABORT_TASK_SET: + tcm_function = TMR_ABORT_TASK_SET; + break; + case ISCSI_TM_FUNC_CLEAR_ACA: + tcm_function = TMR_CLEAR_ACA; + break; + case ISCSI_TM_FUNC_CLEAR_TASK_SET: + tcm_function = TMR_CLEAR_TASK_SET; + break; + case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: + tcm_function = TMR_LUN_RESET; + break; + case ISCSI_TM_FUNC_TARGET_WARM_RESET: + tcm_function = TMR_TARGET_WARM_RESET; + break; + case ISCSI_TM_FUNC_TARGET_COLD_RESET: + tcm_function = TMR_TARGET_COLD_RESET; + break; + default: + pr_err("Unknown iSCSI TMR Function:" + " 0x%02x\n", function); + return iscsit_add_reject_from_cmd( + ISCSI_REASON_BOOKMARK_NO_RESOURCES, + 1, 1, buf, cmd); + } + + ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, + tcm_function, GFP_KERNEL); + if (ret < 0) + return iscsit_add_reject_from_cmd( + ISCSI_REASON_BOOKMARK_NO_RESOURCES, + 1, 1, buf, cmd); + + cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; + } cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; cmd->i_state = 
ISTATE_SEND_TASKMGTRSP; @@ -1804,7 +1827,7 @@ static int iscsit_handle_task_mgt_cmd( se_tmr->call_transport = 1; attach: spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { @@ -1980,7 +2003,7 @@ static int iscsit_handle_text_cmd( cmd->data_direction = DMA_NONE; spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); @@ -2168,7 +2191,7 @@ static int iscsit_handle_logout_cmd( logout_remove = 1; spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) @@ -2178,7 +2201,7 @@ static int iscsit_handle_logout_cmd( * Immediate commands are executed, well, immediately. * Non-Immediate Logout Commands are executed in CmdSN order. */ - if (hdr->opcode & ISCSI_OP_IMMEDIATE) { + if (cmd->immediate_cmd) { int ret = iscsit_execute_cmd(cmd, 0); if (ret < 0) @@ -2336,7 +2359,7 @@ static int iscsit_handle_immediate_data( cmd->write_data_done += length; - if (cmd->write_data_done == cmd->data_length) { + if (cmd->write_data_done == cmd->se_cmd.data_length) { spin_lock_bh(&cmd->istate_lock); cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; @@ -2381,7 +2404,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) cmd->i_state = ISTATE_SEND_ASYNCMSG; spin_lock_bh(&conn_p->cmd_lock); - list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list); spin_unlock_bh(&conn_p->cmd_lock); iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state); @@ -2434,10 +2457,19 @@ static int iscsit_send_conn_drop_async_message( return 0; } +static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) +{ + if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || + (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { + wait_for_completion_interruptible_timeout( + &conn->tx_half_close_comp, + ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); + } +} + static int iscsit_send_data_in( struct iscsi_cmd *cmd, - struct iscsi_conn *conn, - int *eodr) + struct iscsi_conn *conn) { int iov_ret = 0, set_statsn = 0; u32 iov_count = 0, tx_size = 0; @@ -2445,6 +2477,8 @@ static int iscsit_send_data_in( struct iscsi_datain_req *dr; struct iscsi_data_rsp *hdr; struct kvec *iov; + int eodr = 0; + int ret; memset(&datain, 0, sizeof(struct iscsi_datain)); dr = iscsit_get_datain_values(cmd, &datain); @@ -2457,11 +2491,11 @@ static int iscsit_send_data_in( /* * Be paranoid and double check the logic for now. 
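 * The double check bounds the requested DataIN window against the
 * command's expected transfer length. A hedged standalone sketch of
 * the same test (u32 is the kernel typedef; the helper name is
 * illustrative, not a kernel API):
 */

static inline int datain_in_bounds(u32 offset, u32 length, u32 edtl)
{
	/* offset + length may not run past the EDTL */
	return (offset + length) <= edtl;
}

/*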
*/ - if ((datain.offset + datain.length) > cmd->data_length) { + if ((datain.offset + datain.length) > cmd->se_cmd.data_length) { pr_err("Command ITT: 0x%08x, datain.offset: %u and" " datain.length: %u exceeds cmd->data_length: %u\n", cmd->init_task_tag, datain.offset, datain.length, - cmd->data_length); + cmd->se_cmd.data_length); return -1; } @@ -2577,13 +2611,26 @@ static int iscsit_send_data_in( cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), ntohl(hdr->offset), datain.length, conn->cid); + /* sendpage is preferred but can't insert markers */ + if (!conn->conn_ops->IFMarker) + ret = iscsit_fe_sendpage_sg(cmd, conn); + else + ret = iscsit_send_tx_data(cmd, conn, 0); + + iscsit_unmap_iovec(cmd); + + if (ret < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + if (dr->dr_complete) { - *eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? + eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? 2 : 1; iscsit_free_datain_req(cmd, dr); } - return 0; + return eodr; } static int iscsit_send_logout_response( @@ -2715,6 +2762,7 @@ static int iscsit_send_unsolicited_nopin( { int tx_size = ISCSI_HDR_LEN; struct iscsi_nopin *hdr; + int ret; hdr = (struct iscsi_nopin *) cmd->pdu; memset(hdr, 0, ISCSI_HDR_LEN); @@ -2747,6 +2795,17 @@ static int iscsit_send_unsolicited_nopin( pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); + ret = iscsit_send_tx_data(cmd, conn, 1); + if (ret < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = want_response ? + ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + return 0; } @@ -2837,13 +2896,14 @@ static int iscsit_send_nopin_response( return 0; } -int iscsit_send_r2t( +static int iscsit_send_r2t( struct iscsi_cmd *cmd, struct iscsi_conn *conn) { int tx_size = 0; struct iscsi_r2t *r2t; struct iscsi_r2t_rsp *hdr; + int ret; r2t = iscsit_get_r2t_from_list(cmd); if (!r2t) @@ -2899,19 +2959,27 @@ int iscsit_send_r2t( r2t->sent_r2t = 1; spin_unlock_bh(&cmd->r2t_lock); + ret = iscsit_send_tx_data(cmd, conn, 1); + if (ret < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + return ret; + } + + spin_lock_bh(&cmd->dataout_timeout_lock); + iscsit_start_dataout_timer(cmd, conn); + spin_unlock_bh(&cmd->dataout_timeout_lock); + return 0; } /* - * type 0: Normal Operation. - * type 1: Called from Storage Transport. - * type 2: Called from iscsi_task_reassign_complete_write() for - * connection recovery. + * @recovery: If called from iscsi_task_reassign_complete_write() for + * connection recovery. */ int iscsit_build_r2ts_for_cmd( struct iscsi_cmd *cmd, struct iscsi_conn *conn, - int type) + bool recovery) { int first_r2t = 1; u32 offset = 0, xfer_len = 0; @@ -2922,32 +2990,37 @@ int iscsit_build_r2ts_for_cmd( return 0; } - if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2)) - if (cmd->r2t_offset < cmd->write_data_done) - cmd->r2t_offset = cmd->write_data_done; + if (conn->sess->sess_ops->DataSequenceInOrder && + !recovery) + cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done); while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) { if (conn->sess->sess_ops->DataSequenceInOrder) { offset = cmd->r2t_offset; - if (first_r2t && (type == 2)) { - xfer_len = ((offset + - (conn->sess->sess_ops->MaxBurstLength - - cmd->next_burst_len) > - cmd->data_length) ? 
- (cmd->data_length - offset) : - (conn->sess->sess_ops->MaxBurstLength - - cmd->next_burst_len)); + if (first_r2t && recovery) { + int new_data_end = offset + + conn->sess->sess_ops->MaxBurstLength - + cmd->next_burst_len; + + if (new_data_end > cmd->se_cmd.data_length) + xfer_len = cmd->se_cmd.data_length - offset; + else + xfer_len = + conn->sess->sess_ops->MaxBurstLength - + cmd->next_burst_len; } else { - xfer_len = ((offset + - conn->sess->sess_ops->MaxBurstLength) > - cmd->data_length) ? - (cmd->data_length - offset) : - conn->sess->sess_ops->MaxBurstLength; + int new_data_end = offset + + conn->sess->sess_ops->MaxBurstLength; + + if (new_data_end > cmd->se_cmd.data_length) + xfer_len = cmd->se_cmd.data_length - offset; + else + xfer_len = conn->sess->sess_ops->MaxBurstLength; } cmd->r2t_offset += xfer_len; - if (cmd->r2t_offset == cmd->data_length) + if (cmd->r2t_offset == cmd->se_cmd.data_length) cmd->cmd_flags |= ICF_SENT_LAST_R2T; } else { struct iscsi_seq *seq; @@ -3179,6 +3252,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np) return ret; } +#define SENDTARGETS_BUF_LIMIT 32768U + static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) { char *payload = NULL; @@ -3187,12 +3262,10 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_tiqn *tiqn; struct iscsi_tpg_np *tpg_np; int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; - unsigned char buf[256]; + unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ - buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ? - 32768 : conn->conn_ops->MaxRecvDataSegmentLength; - - memset(buf, 0, 256); + buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength, + SENDTARGETS_BUF_LIMIT); payload = kzalloc(buffer_len, GFP_KERNEL); if (!payload) { @@ -3408,18 +3481,6 @@ static int iscsit_send_reject( return 0; } -static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) -{ - if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || - (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { - wait_for_completion_interruptible_timeout( - &conn->tx_half_close_comp, - ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); - } -} - -#ifdef CONFIG_SMP - void iscsit_thread_get_cpumask(struct iscsi_conn *conn) { struct iscsi_thread_set *ts = conn->thread_set; @@ -3433,10 +3494,6 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn) * execute upon. 
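 * The ord'th online CPU is chosen by reducing the thread id modulo the
 * number of online CPUs, spreading connection threads round-robin
 * across cores. A minimal sketch of the selection with plain ints (the
 * kernel walks the online cpumask rather than indexing directly):
 */

static int pick_ord(int thread_id, int online_cpus)
{
	/* thread 0 -> cpu 0, thread online_cpus -> cpu 0 again, ... */
	return thread_id % online_cpus;
}

/*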
*/ ord = ts->thread_id % cpumask_weight(cpu_online_mask); -#if 0 - pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from" - " thread_id: %d\n", ord, ts->thread_id); -#endif for_each_online_cpu(cpu) { if (ord-- == 0) { cpumask_set_cpu(cpu, conn->conn_cpumask); @@ -3476,34 +3533,196 @@ static inline void iscsit_thread_check_cpumask( */ memset(buf, 0, 128); cpumask_scnprintf(buf, 128, conn->conn_cpumask); -#if 0 - pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():" - " %s for %s\n", buf, p->comm); -#endif set_cpus_allowed_ptr(p, conn->conn_cpumask); } -#else - -void iscsit_thread_get_cpumask(struct iscsi_conn *conn) +static int handle_immediate_queue(struct iscsi_conn *conn) { - return; + struct iscsi_queue_req *qr; + struct iscsi_cmd *cmd; + u8 state; + int ret; + + while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) { + atomic_set(&conn->check_immediate_queue, 0); + cmd = qr->cmd; + state = qr->state; + kmem_cache_free(lio_qr_cache, qr); + + switch (state) { + case ISTATE_SEND_R2T: + ret = iscsit_send_r2t(cmd, conn); + if (ret < 0) + goto err; + break; + case ISTATE_REMOVE: + if (cmd->data_direction == DMA_TO_DEVICE) + iscsit_stop_dataout_timer(cmd); + + spin_lock_bh(&conn->cmd_lock); + list_del(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + iscsit_free_cmd(cmd); + continue; + case ISTATE_SEND_NOPIN_WANT_RESPONSE: + iscsit_mod_nopin_response_timer(conn); + ret = iscsit_send_unsolicited_nopin(cmd, + conn, 1); + if (ret < 0) + goto err; + break; + case ISTATE_SEND_NOPIN_NO_RESPONSE: + ret = iscsit_send_unsolicited_nopin(cmd, + conn, 0); + if (ret < 0) + goto err; + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, state, + conn->cid); + goto err; + } + } + + return 0; + +err: + return -1; } -#define iscsit_thread_check_cpumask(X, Y, Z) ({}) -#endif /* CONFIG_SMP */ +static int handle_response_queue(struct iscsi_conn *conn) +{ + struct iscsi_queue_req *qr; + struct iscsi_cmd *cmd; + u8 state; + int ret; + + while ((qr = iscsit_get_cmd_from_response_queue(conn))) { + cmd = qr->cmd; + state = qr->state; + kmem_cache_free(lio_qr_cache, qr); + +check_rsp_state: + switch (state) { + case ISTATE_SEND_DATAIN: + ret = iscsit_send_data_in(cmd, conn); + if (ret < 0) + goto err; + else if (!ret) + /* more drs */ + goto check_rsp_state; + else if (ret == 1) { + /* all done */ + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + continue; + } else if (ret == 2) { + /* Still must send status, + SCF_TRANSPORT_TASK_SENSE was set */ + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SEND_STATUS; + spin_unlock_bh(&cmd->istate_lock); + state = ISTATE_SEND_STATUS; + goto check_rsp_state; + } + + break; + case ISTATE_SEND_STATUS: + case ISTATE_SEND_STATUS_RECOVERY: + ret = iscsit_send_status(cmd, conn); + break; + case ISTATE_SEND_LOGOUTRSP: + ret = iscsit_send_logout_response(cmd, conn); + break; + case ISTATE_SEND_ASYNCMSG: + ret = iscsit_send_conn_drop_async_message( + cmd, conn); + break; + case ISTATE_SEND_NOPIN: + ret = iscsit_send_nopin_response(cmd, conn); + break; + case ISTATE_SEND_REJECT: + ret = iscsit_send_reject(cmd, conn); + break; + case ISTATE_SEND_TASKMGTRSP: + ret = iscsit_send_task_mgt_rsp(cmd, conn); + if (ret != 0) + break; + ret = iscsit_tmr_post_handler(cmd, conn); + if (ret != 0) + iscsit_fall_back_to_erl0(conn->sess); + break; + case ISTATE_SEND_TEXTRSP: + ret = iscsit_send_text_rsp(cmd, conn); + break; + default: 
+ pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, + state, conn->cid); + goto err; + } + if (ret < 0) + goto err; + + if (iscsit_send_tx_data(cmd, conn, 1) < 0) { + iscsit_tx_thread_wait_for_tcp(conn); + iscsit_unmap_iovec(cmd); + goto err; + } + iscsit_unmap_iovec(cmd); + + switch (state) { + case ISTATE_SEND_LOGOUTRSP: + if (!iscsit_logout_post_handler(cmd, conn)) + goto restart; + /* fall through */ + case ISTATE_SEND_STATUS: + case ISTATE_SEND_ASYNCMSG: + case ISTATE_SEND_NOPIN: + case ISTATE_SEND_STATUS_RECOVERY: + case ISTATE_SEND_TEXTRSP: + case ISTATE_SEND_TASKMGTRSP: + spin_lock_bh(&cmd->istate_lock); + cmd->i_state = ISTATE_SENT_STATUS; + spin_unlock_bh(&cmd->istate_lock); + break; + case ISTATE_SEND_REJECT: + if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { + cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; + complete(&cmd->reject_comp); + goto err; + } + complete(&cmd->reject_comp); + break; + default: + pr_err("Unknown Opcode: 0x%02x ITT:" + " 0x%08x, i_state: %d on CID: %hu\n", + cmd->iscsi_opcode, cmd->init_task_tag, + cmd->i_state, conn->cid); + goto err; + } + + if (atomic_read(&conn->check_immediate_queue)) + break; + } + + return 0; + +err: + return -1; +restart: + return -EAGAIN; +} int iscsi_target_tx_thread(void *arg) { - u8 state; - int eodr = 0; int ret = 0; - int sent_status = 0; - int use_misc = 0; - int map_sg = 0; - struct iscsi_cmd *cmd = NULL; struct iscsi_conn *conn; - struct iscsi_queue_req *qr = NULL; struct iscsi_thread_set *ts = arg; /* * Allow ourselves to be interrupted by SIGINT so that a @@ -3516,7 +3735,7 @@ restart: if (!conn) goto out; - eodr = map_sg = ret = sent_status = use_misc = 0; + ret = 0; while (!kthread_should_stop()) { /* @@ -3531,251 +3750,15 @@ restart: signal_pending(current)) goto transport_err; -get_immediate: - qr = iscsit_get_cmd_from_immediate_queue(conn); - if (qr) { - atomic_set(&conn->check_immediate_queue, 0); - cmd = qr->cmd; - state = qr->state; - kmem_cache_free(lio_qr_cache, qr); - - spin_lock_bh(&cmd->istate_lock); - switch (state) { - case ISTATE_SEND_R2T: - spin_unlock_bh(&cmd->istate_lock); - ret = iscsit_send_r2t(cmd, conn); - break; - case ISTATE_REMOVE: - spin_unlock_bh(&cmd->istate_lock); - - if (cmd->data_direction == DMA_TO_DEVICE) - iscsit_stop_dataout_timer(cmd); - - spin_lock_bh(&conn->cmd_lock); - list_del(&cmd->i_list); - spin_unlock_bh(&conn->cmd_lock); - - iscsit_free_cmd(cmd); - goto get_immediate; - case ISTATE_SEND_NOPIN_WANT_RESPONSE: - spin_unlock_bh(&cmd->istate_lock); - iscsit_mod_nopin_response_timer(conn); - ret = iscsit_send_unsolicited_nopin(cmd, - conn, 1); - break; - case ISTATE_SEND_NOPIN_NO_RESPONSE: - spin_unlock_bh(&cmd->istate_lock); - ret = iscsit_send_unsolicited_nopin(cmd, - conn, 0); - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, state, - conn->cid); - spin_unlock_bh(&cmd->istate_lock); - goto transport_err; - } - if (ret < 0) { - conn->tx_immediate_queue = 0; - goto transport_err; - } - - if (iscsit_send_tx_data(cmd, conn, 1) < 0) { - conn->tx_immediate_queue = 0; - iscsit_tx_thread_wait_for_tcp(conn); - goto transport_err; - } - - spin_lock_bh(&cmd->istate_lock); - switch (state) { - case ISTATE_SEND_R2T: - spin_unlock_bh(&cmd->istate_lock); - spin_lock_bh(&cmd->dataout_timeout_lock); - iscsit_start_dataout_timer(cmd, conn); - spin_unlock_bh(&cmd->dataout_timeout_lock); - break; - case ISTATE_SEND_NOPIN_WANT_RESPONSE: - cmd->i_state 
= ISTATE_SENT_NOPIN_WANT_RESPONSE; - spin_unlock_bh(&cmd->istate_lock); - break; - case ISTATE_SEND_NOPIN_NO_RESPONSE: - cmd->i_state = ISTATE_SENT_STATUS; - spin_unlock_bh(&cmd->istate_lock); - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, - state, conn->cid); - spin_unlock_bh(&cmd->istate_lock); - goto transport_err; - } - goto get_immediate; - } else - conn->tx_immediate_queue = 0; - -get_response: - qr = iscsit_get_cmd_from_response_queue(conn); - if (qr) { - cmd = qr->cmd; - state = qr->state; - kmem_cache_free(lio_qr_cache, qr); - - spin_lock_bh(&cmd->istate_lock); -check_rsp_state: - switch (state) { - case ISTATE_SEND_DATAIN: - spin_unlock_bh(&cmd->istate_lock); - ret = iscsit_send_data_in(cmd, conn, - &eodr); - map_sg = 1; - break; - case ISTATE_SEND_STATUS: - case ISTATE_SEND_STATUS_RECOVERY: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_status(cmd, conn); - break; - case ISTATE_SEND_LOGOUTRSP: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_logout_response(cmd, conn); - break; - case ISTATE_SEND_ASYNCMSG: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_conn_drop_async_message( - cmd, conn); - break; - case ISTATE_SEND_NOPIN: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_nopin_response(cmd, conn); - break; - case ISTATE_SEND_REJECT: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_reject(cmd, conn); - break; - case ISTATE_SEND_TASKMGTRSP: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_task_mgt_rsp(cmd, conn); - if (ret != 0) - break; - ret = iscsit_tmr_post_handler(cmd, conn); - if (ret != 0) - iscsit_fall_back_to_erl0(conn->sess); - break; - case ISTATE_SEND_TEXTRSP: - spin_unlock_bh(&cmd->istate_lock); - use_misc = 1; - ret = iscsit_send_text_rsp(cmd, conn); - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, - state, conn->cid); - spin_unlock_bh(&cmd->istate_lock); - goto transport_err; - } - if (ret < 0) { - conn->tx_response_queue = 0; - goto transport_err; - } - - if (map_sg && !conn->conn_ops->IFMarker) { - if (iscsit_fe_sendpage_sg(cmd, conn) < 0) { - conn->tx_response_queue = 0; - iscsit_tx_thread_wait_for_tcp(conn); - iscsit_unmap_iovec(cmd); - goto transport_err; - } - } else { - if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) { - conn->tx_response_queue = 0; - iscsit_tx_thread_wait_for_tcp(conn); - iscsit_unmap_iovec(cmd); - goto transport_err; - } - } - map_sg = 0; - iscsit_unmap_iovec(cmd); - - spin_lock_bh(&cmd->istate_lock); - switch (state) { - case ISTATE_SEND_DATAIN: - if (!eodr) - goto check_rsp_state; - - if (eodr == 1) { - cmd->i_state = ISTATE_SENT_LAST_DATAIN; - sent_status = 1; - eodr = use_misc = 0; - } else if (eodr == 2) { - cmd->i_state = state = - ISTATE_SEND_STATUS; - sent_status = 0; - eodr = use_misc = 0; - goto check_rsp_state; - } - break; - case ISTATE_SEND_STATUS: - use_misc = 0; - sent_status = 1; - break; - case ISTATE_SEND_ASYNCMSG: - case ISTATE_SEND_NOPIN: - case ISTATE_SEND_STATUS_RECOVERY: - case ISTATE_SEND_TEXTRSP: - use_misc = 0; - sent_status = 1; - break; - case ISTATE_SEND_REJECT: - use_misc = 0; - if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { - cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; - spin_unlock_bh(&cmd->istate_lock); - complete(&cmd->reject_comp); - goto transport_err; - } - 
complete(&cmd->reject_comp); - break; - case ISTATE_SEND_TASKMGTRSP: - use_misc = 0; - sent_status = 1; - break; - case ISTATE_SEND_LOGOUTRSP: - spin_unlock_bh(&cmd->istate_lock); - if (!iscsit_logout_post_handler(cmd, conn)) - goto restart; - spin_lock_bh(&cmd->istate_lock); - use_misc = 0; - sent_status = 1; - break; - default: - pr_err("Unknown Opcode: 0x%02x ITT:" - " 0x%08x, i_state: %d on CID: %hu\n", - cmd->iscsi_opcode, cmd->init_task_tag, - cmd->i_state, conn->cid); - spin_unlock_bh(&cmd->istate_lock); - goto transport_err; - } - - if (sent_status) { - cmd->i_state = ISTATE_SENT_STATUS; - sent_status = 0; - } - spin_unlock_bh(&cmd->istate_lock); - - if (atomic_read(&conn->check_immediate_queue)) - goto get_immediate; + ret = handle_immediate_queue(conn); + if (ret < 0) + goto transport_err; - goto get_response; - } else - conn->tx_response_queue = 0; + ret = handle_response_queue(conn); + if (ret == -EAGAIN) + goto restart; + else if (ret < 0) + goto transport_err; } transport_err: @@ -3952,9 +3935,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) * has been reset -> returned sleeping pre-handler state. */ spin_lock_bh(&conn->cmd_lock); - list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { + list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); @@ -3972,7 +3955,7 @@ static void iscsit_stop_timers_for_cmds( struct iscsi_cmd *cmd; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->data_direction == DMA_TO_DEVICE) iscsit_stop_dataout_timer(cmd); } diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h index 5db2ddeed5e..12abb4c9e34 100644 --- a/drivers/target/iscsi/iscsi_target.h +++ b/drivers/target/iscsi/iscsi_target.h @@ -18,8 +18,7 @@ extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *); extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *); extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *); extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8); -extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *); -extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int); +extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, bool recovery); extern void iscsit_thread_get_cpumask(struct iscsi_conn *); extern int iscsi_target_tx_thread(void *); extern int iscsi_target_rx_thread(void *); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 00c58cc82c8..69dc8e35c03 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1538,7 +1538,7 @@ static int lio_write_pending(struct se_cmd *se_cmd) struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); if (!cmd->immediate_data && !cmd->unsolicited_data) - return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1); + return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); return 0; } diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 2aaee7efa68..1c70144cdaf 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -296,12 +296,11 @@ struct 
iscsi_datain_req { u32 runlength; u32 data_length; u32 data_offset; - u32 data_offset_end; u32 data_sn; u32 next_burst_len; u32 read_data_done; u32 seq_send_order; - struct list_head dr_list; + struct list_head cmd_datain_node; } ____cacheline_aligned; struct iscsi_ooo_cmdsn { @@ -381,8 +380,6 @@ struct iscsi_cmd { u32 buf_ptr_size; /* Used to store DataDigest */ u32 data_crc; - /* Total size in bytes associated with command */ - u32 data_length; /* Counter for MaxOutstandingR2T */ u32 outstanding_r2ts; /* Next R2T Offset when DataSequenceInOrder=Yes */ @@ -464,16 +461,13 @@ struct iscsi_cmd { /* Session the command is part of, used for connection recovery */ struct iscsi_session *sess; /* list_head for connection list */ - struct list_head i_list; + struct list_head i_conn_node; /* The TCM I/O descriptor that is accessed via container_of() */ struct se_cmd se_cmd; /* Sense buffer that will be mapped into outgoing status */ #define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2) unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN]; - struct scatterlist *t_mem_sg; - u32 t_mem_sg_nents; - u32 padding; u8 pad_bytes[4]; @@ -500,8 +494,6 @@ struct iscsi_conn { u8 network_transport; enum iscsi_timer_flags_table nopin_timer_flags; enum iscsi_timer_flags_table nopin_response_timer_flags; - u8 tx_immediate_queue; - u8 tx_response_queue; /* Used to know what thread encountered a transport failure */ u8 which_thread; /* connection id assigned by the Initiator */ diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c index 8c049512951..848fee76894 100644 --- a/drivers/target/iscsi/iscsi_target_datain_values.c +++ b/drivers/target/iscsi/iscsi_target_datain_values.c @@ -37,7 +37,7 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void) " struct iscsi_datain_req\n"); return NULL; } - INIT_LIST_HEAD(&dr->dr_list); + INIT_LIST_HEAD(&dr->cmd_datain_node); return dr; } @@ -45,14 +45,14 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void) void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr) { spin_lock(&cmd->datain_lock); - list_add_tail(&dr->dr_list, &cmd->datain_list); + list_add_tail(&dr->cmd_datain_node, &cmd->datain_list); spin_unlock(&cmd->datain_lock); } void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr) { spin_lock(&cmd->datain_lock); - list_del(&dr->dr_list); + list_del(&dr->cmd_datain_node); spin_unlock(&cmd->datain_lock); kmem_cache_free(lio_dr_cache, dr); @@ -63,8 +63,8 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd) struct iscsi_datain_req *dr, *dr_tmp; spin_lock(&cmd->datain_lock); - list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) { - list_del(&dr->dr_list); + list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) { + list_del(&dr->cmd_datain_node); kmem_cache_free(lio_dr_cache, dr); } spin_unlock(&cmd->datain_lock); @@ -72,17 +72,14 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd) struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd) { - struct iscsi_datain_req *dr; - if (list_empty(&cmd->datain_list)) { pr_err("cmd->datain_list is empty for ITT:" " 0x%08x\n", cmd->init_task_tag); return NULL; } - list_for_each_entry(dr, &cmd->datain_list, dr_list) - break; - return dr; + return list_first_entry(&cmd->datain_list, struct iscsi_datain_req, + cmd_datain_node); } /* @@ -113,7 +110,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes( read_data_done = (!dr->recovery) ? 
cmd->read_data_done : dr->read_data_done; - read_data_left = (cmd->data_length - read_data_done); + read_data_left = (cmd->se_cmd.data_length - read_data_done); if (!read_data_left) { pr_err("ITT: 0x%08x read_data_left is zero!\n", cmd->init_task_tag); @@ -212,7 +209,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes( seq_send_order = (!dr->recovery) ? cmd->seq_send_order : dr->seq_send_order; - read_data_left = (cmd->data_length - read_data_done); + read_data_left = (cmd->se_cmd.data_length - read_data_done); if (!read_data_left) { pr_err("ITT: 0x%08x read_data_left is zero!\n", cmd->init_task_tag); @@ -231,8 +228,8 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes( offset = (seq->offset + seq->next_burst_len); if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= - cmd->data_length) { - datain->length = (cmd->data_length - offset); + cmd->se_cmd.data_length) { + datain->length = (cmd->se_cmd.data_length - offset); datain->offset = offset; datain->flags |= ISCSI_FLAG_CMD_FINAL; @@ -264,7 +261,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes( } } - if ((read_data_done + datain->length) == cmd->data_length) + if ((read_data_done + datain->length) == cmd->se_cmd.data_length) datain->flags |= ISCSI_FLAG_DATA_STATUS; datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++; @@ -333,7 +330,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no( read_data_done = (!dr->recovery) ? cmd->read_data_done : dr->read_data_done; - read_data_left = (cmd->data_length - read_data_done); + read_data_left = (cmd->se_cmd.data_length - read_data_done); if (!read_data_left) { pr_err("ITT: 0x%08x read_data_left is zero!\n", cmd->init_task_tag); @@ -344,7 +341,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no( if (!pdu) return dr; - if ((read_data_done + pdu->length) == cmd->data_length) { + if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) { pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS); if (conn->sess->sess_ops->ErrorRecoveryLevel > 0) pdu->flags |= ISCSI_FLAG_DATA_ACK; @@ -433,7 +430,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no( seq_send_order = (!dr->recovery) ? cmd->seq_send_order : dr->seq_send_order; - read_data_left = (cmd->data_length - read_data_done); + read_data_left = (cmd->se_cmd.data_length - read_data_done); if (!read_data_left) { pr_err("ITT: 0x%08x read_data_left is zero!\n", cmd->init_task_tag); @@ -463,7 +460,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no( } else seq->next_burst_len += pdu->length; - if ((read_data_done + pdu->length) == cmd->data_length) + if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) pdu->flags |= ISCSI_FLAG_DATA_STATUS; pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++; diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 1ab0560b092..1a02016ecda 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -48,9 +48,9 @@ void iscsit_set_dataout_sequence_values( if (cmd->unsolicited_data) { cmd->seq_start_offset = cmd->write_data_done; cmd->seq_end_offset = (cmd->write_data_done + - (cmd->data_length > + (cmd->se_cmd.data_length > conn->sess->sess_ops->FirstBurstLength) ? 
- conn->sess->sess_ops->FirstBurstLength : cmd->data_length); + conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length); return; } @@ -59,15 +59,15 @@ void iscsit_set_dataout_sequence_values( if (!cmd->seq_start_offset && !cmd->seq_end_offset) { cmd->seq_start_offset = cmd->write_data_done; - cmd->seq_end_offset = (cmd->data_length > + cmd->seq_end_offset = (cmd->se_cmd.data_length > conn->sess->sess_ops->MaxBurstLength) ? (cmd->write_data_done + - conn->sess->sess_ops->MaxBurstLength) : cmd->data_length; + conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length; } else { cmd->seq_start_offset = cmd->seq_end_offset; cmd->seq_end_offset = ((cmd->seq_end_offset + conn->sess->sess_ops->MaxBurstLength) >= - cmd->data_length) ? cmd->data_length : + cmd->se_cmd.data_length) ? cmd->se_cmd.data_length : (cmd->seq_end_offset + conn->sess->sess_ops->MaxBurstLength); } @@ -182,13 +182,13 @@ static int iscsit_dataout_check_unsolicited_sequence( if (!conn->sess->sess_ops->DataPDUInOrder) goto out; - if ((first_burst_len != cmd->data_length) && + if ((first_burst_len != cmd->se_cmd.data_length) && (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) { pr_err("Unsolicited non-immediate data" " received %u does not equal FirstBurstLength: %u, and" " does not equal ExpXferLen %u.\n", first_burst_len, conn->sess->sess_ops->FirstBurstLength, - cmd->data_length); + cmd->se_cmd.data_length); transport_send_check_condition_and_sense(&cmd->se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return DATAOUT_CANNOT_RECOVER; @@ -201,10 +201,10 @@ static int iscsit_dataout_check_unsolicited_sequence( conn->sess->sess_ops->FirstBurstLength); return DATAOUT_CANNOT_RECOVER; } - if (first_burst_len == cmd->data_length) { + if (first_burst_len == cmd->se_cmd.data_length) { pr_err("Command ITT: 0x%08x reached" " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol" - " error.\n", cmd->init_task_tag, cmd->data_length); + " error.\n", cmd->init_task_tag, cmd->se_cmd.data_length); return DATAOUT_CANNOT_RECOVER; } } @@ -294,7 +294,7 @@ static int iscsit_dataout_check_sequence( if ((next_burst_len < conn->sess->sess_ops->MaxBurstLength) && ((cmd->write_data_done + payload_length) < - cmd->data_length)) { + cmd->se_cmd.data_length)) { pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL" " before end of DataOUT sequence, protocol" " error.\n", cmd->init_task_tag); @@ -319,7 +319,7 @@ static int iscsit_dataout_check_sequence( return DATAOUT_CANNOT_RECOVER; } if ((cmd->write_data_done + payload_length) == - cmd->data_length) { + cmd->se_cmd.data_length) { pr_err("Command ITT: 0x%08x reached" " last DataOUT PDU in sequence but ISCSI_FLAG_" "CMD_FINAL is not set, protocol error.\n", @@ -640,9 +640,12 @@ static int iscsit_dataout_post_crc_passed( cmd->write_data_done += payload_length; - return (cmd->write_data_done == cmd->data_length) ? - DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ? - DATAOUT_SEND_R2T : DATAOUT_NORMAL; + if (cmd->write_data_done == cmd->se_cmd.data_length) + return DATAOUT_SEND_TO_TRANSPORT; + else if (send_r2t) + return DATAOUT_SEND_R2T; + else + return DATAOUT_NORMAL; } static int iscsit_dataout_post_crc_failed( diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 006f605edb0..ecdd46deedd 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -279,11 +279,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no( * seq->first_datasn and seq->last_datasn have not been set. 
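 * The walk below classifies every sequence against the SNACK BegRun:
 * wholly before it (its data counts toward read_data_done), containing
 * it (recovery restarts from here), or after it (reset and resent). A
 * hedged sketch of that three-way split, mirroring the comparisons in
 * the branches below (the enum and helper are illustrative only):
 */

enum seq_class { SEQ_PRE_BEGRUN, SEQ_HAS_BEGRUN, SEQ_POST_BEGRUN };

static enum seq_class classify_seq(u32 first_sn, u32 last_sn, u32 begrun)
{
	if (first_sn < begrun && last_sn < begrun)
		return SEQ_PRE_BEGRUN;
	if (first_sn <= begrun && last_sn >= begrun)
		return SEQ_HAS_BEGRUN;
	return SEQ_POST_BEGRUN;
}

/*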
*/ if (!seq->sent) { -#if 0 pr_err("Ignoring non-sent sequence 0x%08x ->" " 0x%08x\n\n", seq->first_datasn, seq->last_datasn); -#endif continue; } @@ -294,11 +292,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no( */ if ((seq->first_datasn < begrun) && (seq->last_datasn < begrun)) { -#if 0 pr_err("Pre BegRun sequence 0x%08x ->" " 0x%08x\n", seq->first_datasn, seq->last_datasn); -#endif + read_data_done += cmd->seq_list[i].xfer_len; seq->next_burst_len = seq->pdu_send_order = 0; continue; @@ -309,11 +306,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no( */ if ((seq->first_datasn <= begrun) && (seq->last_datasn >= begrun)) { -#if 0 pr_err("Found sequence begrun: 0x%08x in" " 0x%08x -> 0x%08x\n", begrun, seq->first_datasn, seq->last_datasn); -#endif + seq_send_order = seq->seq_send_order; data_sn = seq->first_datasn; seq->next_burst_len = seq->pdu_send_order = 0; @@ -369,10 +365,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no( */ if ((seq->first_datasn > begrun) || (seq->last_datasn > begrun)) { -#if 0 pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n", seq->first_datasn, seq->last_datasn); -#endif + seq->next_burst_len = seq->pdu_send_order = 0; continue; } @@ -526,7 +521,7 @@ int iscsit_handle_status_snack( found_cmd = 0; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->stat_sn == begrun) { found_cmd = 1; break; @@ -987,7 +982,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) return 0; iscsit_set_dataout_sequence_values(cmd); - iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0); + iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false); } return 0; } @@ -1121,8 +1116,8 @@ static int iscsit_set_dataout_timeout_values( if (cmd->unsolicited_data) { *offset = 0; *length = (conn->sess->sess_ops->FirstBurstLength > - cmd->data_length) ? - cmd->data_length : + cmd->se_cmd.data_length) ? 
+ cmd->se_cmd.data_length : conn->sess->sess_ops->FirstBurstLength; return 0; } @@ -1193,8 +1188,8 @@ static void iscsit_handle_dataout_timeout(unsigned long data) if (conn->sess->sess_ops->DataPDUInOrder) { pdu_offset = cmd->write_data_done; if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength - - cmd->next_burst_len)) > cmd->data_length) - pdu_length = (cmd->data_length - + cmd->next_burst_len)) > cmd->se_cmd.data_length) + pdu_length = (cmd->se_cmd.data_length - cmd->write_data_done); else pdu_length = (conn->sess->sess_ops->MaxBurstLength - diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 1af1f21af21..65aac14fd83 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -138,9 +138,9 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) spin_lock(&cr->conn_recovery_cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, - &cr->conn_recovery_cmd_list, i_list) { + &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd); @@ -160,9 +160,9 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) spin_lock(&cr->conn_recovery_cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, - &cr->conn_recovery_cmd_list, i_list) { + &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd); @@ -220,7 +220,7 @@ int iscsit_remove_cmd_from_connection_recovery( } cr = cmd->cr; - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); return --cr->cmd_count; } @@ -234,7 +234,7 @@ void iscsit_discard_cr_cmds_by_expstatsn( spin_lock(&cr->conn_recovery_cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, - &cr->conn_recovery_cmd_list, i_list) { + &cr->conn_recovery_cmd_list, i_conn_node) { if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) && (cmd->deferred_i_state != ISTATE_REMOVE)) || @@ -297,11 +297,11 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) mutex_unlock(&sess->cmdsn_mutex); spin_lock_bh(&conn->cmd_lock); - list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { + list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) continue; - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd); @@ -339,14 +339,14 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) /* * Only perform connection recovery on ISCSI_OP_SCSI_CMD or * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call - * list_del(&cmd->i_list); to release the command to the + * list_del(&cmd->i_conn_node); to release the command to the * session pool and remove it from the connection's list. * * Also stop the DataOUT timer, which will be restarted after * sending the TMR response. 
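 * In short: only ISCSI_OP_SCSI_CMD and ISCSI_OP_NOOP_OUT can be moved
 * onto the connection recovery list; every other opcode is unhooked
 * and freed here. A minimal sketch of the opcode filter (the helper
 * name is illustrative; the constants come from iscsi_proto.h):
 */

static bool cmd_is_recoverable(u8 iscsi_opcode)
{
	return iscsi_opcode == ISCSI_OP_SCSI_CMD ||
	       iscsi_opcode == ISCSI_OP_NOOP_OUT;
}

/*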
*/ spin_lock_bh(&conn->cmd_lock); - list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { + list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) && (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) { @@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); @@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) */ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) { - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); @@ -397,7 +397,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) cmd->sess = conn->sess; - list_del(&cmd->i_list); + list_del(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_all_datain_reqs(cmd); @@ -407,7 +407,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) * Add the struct iscsi_cmd to the connection recovery cmd list */ spin_lock(&cr->conn_recovery_cmd_lock); - list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list); + list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list); spin_unlock(&cr->conn_recovery_cmd_lock); spin_lock_bh(&conn->cmd_lock); diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index eb05c9d751e..ed5241e7f12 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -803,14 +803,6 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt value = simple_strtoul(value_ptr, &tmpptr, 0); -/* #warning FIXME: Fix this */ -#if 0 - if (strspn(endptr, WHITE_SPACE) != strlen(endptr)) { - pr_err("Illegal value \"%s\" for \"%s\".\n", - value, param->name); - return -1; - } -#endif if (IS_TYPERANGE_0_TO_2(param)) { if ((value < 0) || (value > 2)) { pr_err("Illegal value for \"%s\", must be" @@ -1045,13 +1037,6 @@ static char *iscsi_check_valuelist_for_support( tmp2 = strchr(acceptor_values, ','); if (tmp2) *tmp2 = '\0'; - if (!acceptor_values || !proposer_values) { - if (tmp1) - *tmp1 = ','; - if (tmp2) - *tmp2 = ','; - return NULL; - } if (!strcmp(acceptor_values, proposer_values)) { if (tmp2) *tmp2 = ','; @@ -1061,8 +1046,6 @@ static char *iscsi_check_valuelist_for_support( *tmp2++ = ','; acceptor_values = tmp2; - if (!acceptor_values) - break; } while (acceptor_values); if (tmp1) *tmp1++ = ','; diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c index fc694082bfc..85a306e067b 100644 --- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c +++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c @@ -24,11 +24,13 @@ #include "iscsi_target_core.h" #include "iscsi_target_util.h" +#include "iscsi_target_tpg.h" #include "iscsi_target_seq_pdu_list.h" #define OFFLOAD_BUF_SIZE 32768 -void iscsit_dump_seq_list(struct iscsi_cmd *cmd) +#ifdef DEBUG +static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) { int i; struct iscsi_seq *seq; @@ -46,7 +48,7 @@ void iscsit_dump_seq_list(struct iscsi_cmd *cmd) } } -void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) +static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) { int i; struct 
iscsi_pdu *pdu; @@ -61,6 +63,10 @@ void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) pdu->length, pdu->pdu_send_order, pdu->seq_no); } } +#else +static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {} +static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {} +#endif static void iscsit_ordered_seq_lists( struct iscsi_cmd *cmd, @@ -135,11 +141,11 @@ redo: seq_count++; continue; } - array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); + array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL); if (!array) { pr_err("Unable to allocate memory" " for random array.\n"); - return -1; + return -ENOMEM; } iscsit_create_random_array(array, seq_count); @@ -155,11 +161,11 @@ redo: } if (seq_count) { - array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); + array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL); if (!array) { pr_err("Unable to allocate memory for" " random array.\n"); - return -1; + return -ENOMEM; } iscsit_create_random_array(array, seq_count); @@ -187,10 +193,10 @@ static int iscsit_randomize_seq_lists( if (!seq_count) return 0; - array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); + array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL); if (!array) { pr_err("Unable to allocate memory for random array.\n"); - return -1; + return -ENOMEM; } iscsit_create_random_array(array, seq_count); @@ -221,11 +227,10 @@ static void iscsit_determine_counts_for_list( if ((bl->type == PDULIST_UNSOLICITED) || (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) - unsolicited_data_length = (cmd->data_length > - conn->sess->sess_ops->FirstBurstLength) ? - conn->sess->sess_ops->FirstBurstLength : cmd->data_length; + unsolicited_data_length = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); - while (offset < cmd->data_length) { + while (offset < cmd->se_cmd.data_length) { *pdu_count += 1; if (check_immediate) { @@ -239,10 +244,10 @@ static void iscsit_determine_counts_for_list( } if (unsolicited_data_length > 0) { if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) - >= cmd->data_length) { + >= cmd->se_cmd.data_length) { unsolicited_data_length -= - (cmd->data_length - offset); - offset += (cmd->data_length - offset); + (cmd->se_cmd.data_length - offset); + offset += (cmd->se_cmd.data_length - offset); continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) @@ -263,8 +268,8 @@ static void iscsit_determine_counts_for_list( continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= - cmd->data_length) { - offset += (cmd->data_length - offset); + cmd->se_cmd.data_length) { + offset += (cmd->se_cmd.data_length - offset); continue; } if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= @@ -283,10 +288,10 @@ static void iscsit_determine_counts_for_list( /* - * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No - * and DataPDUInOrder=No. + * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No + * or DataPDUInOrder=No. */ -static int iscsit_build_pdu_and_seq_list( +static int iscsit_do_build_pdu_and_seq_lists( struct iscsi_cmd *cmd, struct iscsi_build_list *bl) { @@ -306,11 +311,10 @@ static int iscsit_build_pdu_and_seq_list( if ((bl->type == PDULIST_UNSOLICITED) || (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) - unsolicited_data_length = (cmd->data_length > - conn->sess->sess_ops->FirstBurstLength) ? 
- conn->sess->sess_ops->FirstBurstLength : cmd->data_length; + unsolicited_data_length = min(cmd->se_cmd.data_length, + conn->sess->sess_ops->FirstBurstLength); - while (offset < cmd->data_length) { + while (offset < cmd->se_cmd.data_length) { pdu_count++; if (!datapduinorder) { pdu[i].offset = offset; @@ -346,21 +350,21 @@ static int iscsit_build_pdu_and_seq_list( if (unsolicited_data_length > 0) { if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= - cmd->data_length) { + cmd->se_cmd.data_length) { if (!datapduinorder) { pdu[i].type = PDUTYPE_UNSOLICITED; pdu[i].length = - (cmd->data_length - offset); + (cmd->se_cmd.data_length - offset); } if (!datasequenceinorder) { seq[seq_no].type = SEQTYPE_UNSOLICITED; seq[seq_no].pdu_count = pdu_count; seq[seq_no].xfer_len = (burstlength + - (cmd->data_length - offset)); + (cmd->se_cmd.data_length - offset)); } unsolicited_data_length -= - (cmd->data_length - offset); - offset += (cmd->data_length - offset); + (cmd->se_cmd.data_length - offset); + offset += (cmd->se_cmd.data_length - offset); continue; } if ((offset + @@ -402,18 +406,18 @@ static int iscsit_build_pdu_and_seq_list( continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= - cmd->data_length) { + cmd->se_cmd.data_length) { if (!datapduinorder) { pdu[i].type = PDUTYPE_NORMAL; - pdu[i].length = (cmd->data_length - offset); + pdu[i].length = (cmd->se_cmd.data_length - offset); } if (!datasequenceinorder) { seq[seq_no].type = SEQTYPE_NORMAL; seq[seq_no].pdu_count = pdu_count; seq[seq_no].xfer_len = (burstlength + - (cmd->data_length - offset)); + (cmd->se_cmd.data_length - offset)); } - offset += (cmd->data_length - offset); + offset += (cmd->se_cmd.data_length - offset); continue; } if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= @@ -464,9 +468,8 @@ static int iscsit_build_pdu_and_seq_list( } else iscsit_ordered_seq_lists(cmd, bl->type); } -#if 0 + iscsit_dump_seq_list(cmd); -#endif } if (!datapduinorder) { if (bl->data_direction & ISCSI_PDU_WRITE) { @@ -484,50 +487,86 @@ static int iscsit_build_pdu_and_seq_list( } else iscsit_ordered_pdu_lists(cmd, bl->type); } -#if 0 + iscsit_dump_pdu_list(cmd); -#endif } return 0; } -/* - * Only called while DataSequenceInOrder=No or DataPDUInOrder=No. 
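/*
 * A standalone sketch (numbers hypothetical, not from this patch) of the
 * clamp that replaces the open-coded ternaries above: unsolicited data may
 * not exceed the negotiated FirstBurstLength, so the new code computes
 * min(cmd->se_cmd.data_length, conn->sess->sess_ops->FirstBurstLength).
 */
#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int data_length = 131072;	/* hypothetical expected transfer length */
	unsigned int first_burst = 65536;	/* hypothetical FirstBurstLength */

	printf("unsolicited_data_length = %u\n",
	       min_u32(data_length, first_burst));
	return 0;
}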
- */ -int iscsit_do_build_list( +int iscsit_build_pdu_and_seq_lists( struct iscsi_cmd *cmd, - struct iscsi_build_list *bl) + u32 immediate_data_length) { + struct iscsi_build_list bl; u32 pdu_count = 0, seq_count = 1; struct iscsi_conn *conn = cmd->conn; struct iscsi_pdu *pdu = NULL; struct iscsi_seq *seq = NULL; - iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count); + struct iscsi_session *sess = conn->sess; + struct iscsi_node_attrib *na; + + /* + * Do nothing if no OOO shenanigans + */ + if (sess->sess_ops->DataSequenceInOrder && + sess->sess_ops->DataPDUInOrder) + return 0; + + if (cmd->data_direction == DMA_NONE) + return 0; + + na = iscsit_tpg_get_node_attrib(sess); + memset(&bl, 0, sizeof(struct iscsi_build_list)); + + if (cmd->data_direction == DMA_FROM_DEVICE) { + bl.data_direction = ISCSI_PDU_READ; + bl.type = PDULIST_NORMAL; + if (na->random_datain_pdu_offsets) + bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS; + if (na->random_datain_seq_offsets) + bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS; + } else { + bl.data_direction = ISCSI_PDU_WRITE; + bl.immediate_data_length = immediate_data_length; + if (na->random_r2t_offsets) + bl.randomize |= RANDOM_R2T_OFFSETS; + + if (!cmd->immediate_data && !cmd->unsolicited_data) + bl.type = PDULIST_NORMAL; + else if (cmd->immediate_data && !cmd->unsolicited_data) + bl.type = PDULIST_IMMEDIATE; + else if (!cmd->immediate_data && cmd->unsolicited_data) + bl.type = PDULIST_UNSOLICITED; + else if (cmd->immediate_data && cmd->unsolicited_data) + bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED; + } + + iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count); if (!conn->sess->sess_ops->DataSequenceInOrder) { - seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC); + seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC); if (!seq) { pr_err("Unable to allocate struct iscsi_seq list\n"); - return -1; + return -ENOMEM; } cmd->seq_list = seq; cmd->seq_count = seq_count; } if (!conn->sess->sess_ops->DataPDUInOrder) { - pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC); + pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC); if (!pdu) { pr_err("Unable to allocate struct iscsi_pdu list.\n"); kfree(seq); - return -1; + return -ENOMEM; } cmd->pdu_list = pdu; cmd->pdu_count = pdu_count; } - return iscsit_build_pdu_and_seq_list(cmd, bl); + return iscsit_do_build_pdu_and_seq_lists(cmd, &bl); } struct iscsi_pdu *iscsit_get_pdu_holder( @@ -572,13 +611,12 @@ redo: pdu = &cmd->pdu_list[cmd->pdu_start]; for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) { -#if 0 pr_debug("pdu[i].seq_no: %d, pdu[i].pdu" "_send_order: %d, pdu[i].offset: %d," " pdu[i].length: %d\n", pdu[i].seq_no, pdu[i].pdu_send_order, pdu[i].offset, pdu[i].length); -#endif + if (pdu[i].pdu_send_order == cmd->pdu_send_order) { cmd->pdu_send_order++; return &pdu[i]; @@ -601,11 +639,11 @@ redo: pr_err("struct iscsi_seq is NULL!\n"); return NULL; } -#if 0 + pr_debug("seq->pdu_start: %d, seq->pdu_count: %d," " seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count, seq->seq_no); -#endif + pdu = &cmd->pdu_list[seq->pdu_start]; if (seq->pdu_send_order == seq->pdu_count) { @@ -645,12 +683,11 @@ struct iscsi_seq *iscsit_get_seq_holder( } for (i = 0; i < cmd->seq_count; i++) { -#if 0 pr_debug("seq_list[i].orig_offset: %d, seq_list[i]." 
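/*
 * A standalone sketch of the bl.type selection that the new
 * iscsit_build_pdu_and_seq_lists() entry point performs from the
 * cmd->immediate_data and cmd->unsolicited_data flags (logic mirrored from
 * the code above; the numeric enum values here are illustrative only, the
 * real PDULIST_* constants live in iscsi_target_seq_pdu_list.h).
 */
#include <stdio.h>

enum pdulist_type {
	PDULIST_NORMAL,
	PDULIST_IMMEDIATE,
	PDULIST_UNSOLICITED,
	PDULIST_IMMEDIATE_AND_UNSOLICITED,
};

static const char *const type_names[] = {
	"PDULIST_NORMAL",
	"PDULIST_IMMEDIATE",
	"PDULIST_UNSOLICITED",
	"PDULIST_IMMEDIATE_AND_UNSOLICITED",
};

int main(void)
{
	int imm, unsol;

	for (imm = 0; imm <= 1; imm++)
		for (unsol = 0; unsol <= 1; unsol++) {
			enum pdulist_type t;

			if (imm && unsol)
				t = PDULIST_IMMEDIATE_AND_UNSOLICITED;
			else if (imm)
				t = PDULIST_IMMEDIATE;
			else if (unsol)
				t = PDULIST_UNSOLICITED;
			else
				t = PDULIST_NORMAL;

			printf("immediate=%d unsolicited=%d -> %s\n",
			       imm, unsol, type_names[t]);
		}
	return 0;
}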
"xfer_len: %d, seq_list[i].seq_no %u\n", cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len, cmd->seq_list[i].seq_no); -#endif + if ((cmd->seq_list[i].orig_offset + cmd->seq_list[i].xfer_len) >= (offset + length)) diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h index 0d52a10e306..d5b153751a8 100644 --- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h +++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h @@ -78,7 +78,7 @@ struct iscsi_seq { u32 xfer_len; } ____cacheline_aligned; -extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *); +extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32); extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32); extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *); extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32); diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index e01da9d2b37..f4e640b51fd 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -78,10 +78,7 @@ int iscsit_tmr_task_warm_reset( { struct iscsi_session *sess = conn->sess; struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); -#if 0 - struct iscsi_init_task_mgt_cmnd *hdr = - (struct iscsi_init_task_mgt_cmnd *) buf; -#endif + if (!na->tmr_warm_reset) { pr_err("TMR Opcode TARGET_WARM_RESET authorization" " failed for Initiator Node: %s\n", @@ -216,7 +213,7 @@ static int iscsit_task_reassign_complete_nop_out( iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess); spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); cmd->i_state = ISTATE_SEND_NOPIN; @@ -272,9 +269,9 @@ static int iscsit_task_reassign_complete_write( offset = cmd->next_burst_len = cmd->write_data_done; if ((conn->sess->sess_ops->FirstBurstLength - offset) >= - cmd->data_length) { + cmd->se_cmd.data_length) { no_build_r2ts = 1; - length = (cmd->data_length - offset); + length = (cmd->se_cmd.data_length - offset); } else length = (conn->sess->sess_ops->FirstBurstLength - offset); @@ -292,7 +289,7 @@ static int iscsit_task_reassign_complete_write( /* * iscsit_build_r2ts_for_cmd() can handle the rest from here. 
*/ - return iscsit_build_r2ts_for_cmd(cmd, conn, 2); + return iscsit_build_r2ts_for_cmd(cmd, conn, true); } static int iscsit_task_reassign_complete_read( @@ -385,7 +382,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd( iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess); spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 4eba86d2bd8..b42cdeb153d 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -163,7 +163,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) } cmd->conn = conn; - INIT_LIST_HEAD(&cmd->i_list); + INIT_LIST_HEAD(&cmd->i_conn_node); INIT_LIST_HEAD(&cmd->datain_list); INIT_LIST_HEAD(&cmd->cmd_r2t_list); init_completion(&cmd->reject_comp); @@ -176,174 +176,6 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) return cmd; } -/* - * Called from iscsi_handle_scsi_cmd() - */ -struct iscsi_cmd *iscsit_allocate_se_cmd( - struct iscsi_conn *conn, - u32 data_length, - int data_direction, - int iscsi_task_attr) -{ - struct iscsi_cmd *cmd; - struct se_cmd *se_cmd; - int sam_task_attr; - - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); - if (!cmd) - return NULL; - - cmd->data_direction = data_direction; - cmd->data_length = data_length; - /* - * Figure out the SAM Task Attribute for the incoming SCSI CDB - */ - if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || - (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) - sam_task_attr = MSG_SIMPLE_TAG; - else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) - sam_task_attr = MSG_ORDERED_TAG; - else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) - sam_task_attr = MSG_HEAD_TAG; - else if (iscsi_task_attr == ISCSI_ATTR_ACA) - sam_task_attr = MSG_ACA_TAG; - else { - pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" - " MSG_SIMPLE_TAG\n", iscsi_task_attr); - sam_task_attr = MSG_SIMPLE_TAG; - } - - se_cmd = &cmd->se_cmd; - /* - * Initialize struct se_cmd descriptor from target_core_mod infrastructure - */ - transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops, - conn->sess->se_sess, data_length, data_direction, - sam_task_attr, &cmd->sense_buffer[0]); - return cmd; -} - -struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( - struct iscsi_conn *conn, - u8 function) -{ - struct iscsi_cmd *cmd; - struct se_cmd *se_cmd; - int rc; - u8 tcm_function; - - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); - if (!cmd) - return NULL; - - cmd->data_direction = DMA_NONE; - - cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL); - if (!cmd->tmr_req) { - pr_err("Unable to allocate memory for" - " Task Management command!\n"); - goto out; - } - /* - * TASK_REASSIGN for ERL=2 / connection stays inside of - * LIO-Target $FABRIC_MOD - */ - if (function == ISCSI_TM_FUNC_TASK_REASSIGN) - return cmd; - - se_cmd = &cmd->se_cmd; - /* - * Initialize struct se_cmd descriptor from target_core_mod infrastructure - */ - transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops, - conn->sess->se_sess, 0, DMA_NONE, - MSG_SIMPLE_TAG, &cmd->sense_buffer[0]); - - switch (function) { - case ISCSI_TM_FUNC_ABORT_TASK: - tcm_function = TMR_ABORT_TASK; - break; - case ISCSI_TM_FUNC_ABORT_TASK_SET: - tcm_function = TMR_ABORT_TASK_SET; - break; - case ISCSI_TM_FUNC_CLEAR_ACA: - 
tcm_function = TMR_CLEAR_ACA; - break; - case ISCSI_TM_FUNC_CLEAR_TASK_SET: - tcm_function = TMR_CLEAR_TASK_SET; - break; - case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: - tcm_function = TMR_LUN_RESET; - break; - case ISCSI_TM_FUNC_TARGET_WARM_RESET: - tcm_function = TMR_TARGET_WARM_RESET; - break; - case ISCSI_TM_FUNC_TARGET_COLD_RESET: - tcm_function = TMR_TARGET_COLD_RESET; - break; - default: - pr_err("Unknown iSCSI TMR Function:" - " 0x%02x\n", function); - goto out; - } - - rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL); - if (rc < 0) - goto out; - - cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req; - - return cmd; -out: - iscsit_release_cmd(cmd); - return NULL; -} - -int iscsit_decide_list_to_build( - struct iscsi_cmd *cmd, - u32 immediate_data_length) -{ - struct iscsi_build_list bl; - struct iscsi_conn *conn = cmd->conn; - struct iscsi_session *sess = conn->sess; - struct iscsi_node_attrib *na; - - if (sess->sess_ops->DataSequenceInOrder && - sess->sess_ops->DataPDUInOrder) - return 0; - - if (cmd->data_direction == DMA_NONE) - return 0; - - na = iscsit_tpg_get_node_attrib(sess); - memset(&bl, 0, sizeof(struct iscsi_build_list)); - - if (cmd->data_direction == DMA_FROM_DEVICE) { - bl.data_direction = ISCSI_PDU_READ; - bl.type = PDULIST_NORMAL; - if (na->random_datain_pdu_offsets) - bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS; - if (na->random_datain_seq_offsets) - bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS; - } else { - bl.data_direction = ISCSI_PDU_WRITE; - bl.immediate_data_length = immediate_data_length; - if (na->random_r2t_offsets) - bl.randomize |= RANDOM_R2T_OFFSETS; - - if (!cmd->immediate_data && !cmd->unsolicited_data) - bl.type = PDULIST_NORMAL; - else if (cmd->immediate_data && !cmd->unsolicited_data) - bl.type = PDULIST_IMMEDIATE; - else if (!cmd->immediate_data && cmd->unsolicited_data) - bl.type = PDULIST_UNSOLICITED; - else if (cmd->immediate_data && cmd->unsolicited_data) - bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED; - } - - return iscsit_do_build_list(cmd, &bl); -} - struct iscsi_seq *iscsit_get_seq_holder_for_datain( struct iscsi_cmd *cmd, u32 seq_send_order) @@ -502,14 +334,14 @@ int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf) if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) return 0; - if (((cmd->first_burst_len + payload_length) != cmd->data_length) && + if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) && ((cmd->first_burst_len + payload_length) != conn->sess->sess_ops->FirstBurstLength)) { pr_err("Unsolicited non-immediate data received %u" " does not equal FirstBurstLength: %u, and does" " not equal ExpXferLen %u.\n", (cmd->first_burst_len + payload_length), - conn->sess->sess_ops->FirstBurstLength, cmd->data_length); + conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length); transport_send_check_condition_and_sense(se_cmd, TCM_INCORRECT_AMOUNT_OF_DATA, 0); return -1; @@ -524,7 +356,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt( struct iscsi_cmd *cmd; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->init_task_tag == init_task_tag) { spin_unlock_bh(&conn->cmd_lock); return cmd; @@ -545,7 +377,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( struct iscsi_cmd *cmd; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->init_task_tag == init_task_tag) { 
spin_unlock_bh(&conn->cmd_lock); return cmd; @@ -568,7 +400,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_ttt( struct iscsi_cmd *cmd = NULL; spin_lock_bh(&conn->cmd_lock); - list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { + list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { if (cmd->targ_xfer_tag == targ_xfer_tag) { spin_unlock_bh(&conn->cmd_lock); return cmd; @@ -596,7 +428,7 @@ int iscsit_find_cmd_for_recovery( spin_lock(&sess->cr_i_lock); list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) { spin_lock(&cr->conn_recovery_cmd_lock); - list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) { + list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) { if (cmd->init_task_tag == init_task_tag) { spin_unlock(&cr->conn_recovery_cmd_lock); spin_unlock(&sess->cr_i_lock); @@ -616,7 +448,7 @@ int iscsit_find_cmd_for_recovery( spin_lock(&sess->cr_a_lock); list_for_each_entry(cr, &sess->cr_active_list, cr_list) { spin_lock(&cr->conn_recovery_cmd_lock); - list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) { + list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) { if (cmd->init_task_tag == init_task_tag) { spin_unlock(&cr->conn_recovery_cmd_lock); spin_unlock(&sess->cr_a_lock); @@ -813,7 +645,6 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn) void iscsit_release_cmd(struct iscsi_cmd *cmd) { struct iscsi_conn *conn = cmd->conn; - int i; iscsit_free_r2ts_from_list(cmd); iscsit_free_all_datain_reqs(cmd); @@ -824,11 +655,6 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) kfree(cmd->tmr_req); kfree(cmd->iov_data); - for (i = 0; i < cmd->t_mem_sg_nents; i++) - __free_page(sg_page(&cmd->t_mem_sg[i])); - - kfree(cmd->t_mem_sg); - if (conn) { iscsit_remove_cmd_from_immediate_queue(cmd, conn); iscsit_remove_cmd_from_response_queue(cmd, conn); @@ -1038,7 +864,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response) spin_unlock_bh(&conn->sess->ttt_lock); spin_lock_bh(&conn->cmd_lock); - list_add_tail(&cmd->i_list, &conn->conn_cmd_list); + list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); if (want_response) diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 835bf7de028..e1c729b8a1c 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -9,9 +9,6 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *); extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); -extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int); -extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8); -extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32); extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index a9b4eeefe9f..38dfac2b0a1 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -213,7 +213,7 @@ static void tcm_loop_submission_work(struct work_struct *work) * associated read buffers, go ahead and do that here for type * SCF_SCSI_CONTROL_SG_IO_CDB. 
Also note that this is currently * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB - * by target core in transport_generic_allocate_tasks() -> + * by target core in target_setup_cmd_from_cdb() -> * transport_generic_cmd_sequencer(). */ if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB && @@ -227,7 +227,7 @@ static void tcm_loop_submission_work(struct work_struct *work) } } - ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); + ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd); if (ret == -ENOMEM) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); diff --git a/drivers/target/sbp/Kconfig b/drivers/target/sbp/Kconfig new file mode 100644 index 00000000000..132da544eaf --- /dev/null +++ b/drivers/target/sbp/Kconfig @@ -0,0 +1,11 @@ +config SBP_TARGET + tristate "FireWire SBP-2 fabric module" + depends on FIREWIRE && EXPERIMENTAL + help + Say Y or M here to enable SCSI target functionality over FireWire. + This enables you to expose SCSI devices to other nodes on the FireWire + bus, for example hard disks. Similar to FireWire Target Disk mode on + many Apple computers. + + To compile this driver as a module, say M here: The module will be + called sbp-target. diff --git a/drivers/target/sbp/Makefile b/drivers/target/sbp/Makefile new file mode 100644 index 00000000000..27747ad054c --- /dev/null +++ b/drivers/target/sbp/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SBP_TARGET) += sbp_target.o diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c new file mode 100644 index 00000000000..37c609898f8 --- /dev/null +++ b/drivers/target/sbp/sbp_target.c @@ -0,0 +1,2621 @@ +/* + * SBP2 target driver (SCSI over IEEE1394 in target mode) + * + * Copyright (C) 2011 Chris Boot <bootc@bootc.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#define KMSG_COMPONENT "sbp_target" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/configfs.h> +#include <linux/ctype.h> +#include <linux/firewire.h> +#include <linux/firewire-constants.h> +#include <scsi/scsi.h> +#include <scsi/scsi_tcq.h> +#include <target/target_core_base.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> +#include <target/target_core_fabric_configfs.h> +#include <target/target_core_configfs.h> +#include <target/configfs_macros.h> +#include <asm/unaligned.h> + +#include "sbp_target.h" + +/* Local pointer to allocated TCM configfs fabric module */ +static struct target_fabric_configfs *sbp_fabric_configfs; + +/* FireWire address region for management and command block address handlers */ +static const struct fw_address_region sbp_register_region = { + .start = CSR_REGISTER_BASE + 0x10000, + .end = 0x1000000000000ULL, +}; + +static const u32 sbp_unit_directory_template[] = { + 0x1200609e, /* unit_specifier_id: NCITS/T10 */ + 0x13010483, /* unit_sw_version: 1155D Rev 4 */ + 0x3800609e, /* command_set_specifier_id: NCITS/T10 */ + 0x390104d8, /* command_set: SPC-2 */ + 0x3b000000, /* command_set_revision: 0 */ + 0x3c000001, /* firmware_revision: 1 */ +}; + +#define SESSION_MAINTENANCE_INTERVAL HZ + +static atomic_t login_id = ATOMIC_INIT(0); + +static void session_maintenance_work(struct work_struct *); +static int sbp_run_transaction(struct fw_card *, int, int, int, int, + unsigned long long, void *, size_t); + +static int read_peer_guid(u64 *guid, const struct sbp_management_request *req) +{ + int ret; + __be32 high, low; + + ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, + req->node_addr, req->generation, req->speed, + (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4, + &high, sizeof(high)); + if (ret != RCODE_COMPLETE) + return ret; + + ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, + req->node_addr, req->generation, req->speed, + (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4, + &low, sizeof(low)); + if (ret != RCODE_COMPLETE) + return ret; + + *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low); + + return RCODE_COMPLETE; +} + +static struct sbp_session *sbp_session_find_by_guid( + struct sbp_tpg *tpg, u64 guid) +{ + struct se_session *se_sess; + struct sbp_session *sess, *found = NULL; + + spin_lock_bh(&tpg->se_tpg.session_lock); + list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { + sess = se_sess->fabric_sess_ptr; + if (sess->guid == guid) + found = sess; + } + spin_unlock_bh(&tpg->se_tpg.session_lock); + + return found; +} + +static struct sbp_login_descriptor *sbp_login_find_by_lun( + struct sbp_session *session, struct se_lun *lun) +{ + struct sbp_login_descriptor *login, *found = NULL; + + spin_lock_bh(&session->lock); + list_for_each_entry(login, &session->login_list, link) { + if (login->lun == lun) + found = login; + } + spin_unlock_bh(&session->lock); + + return found; +} + +static int sbp_login_count_all_by_lun( + struct sbp_tpg *tpg, + struct se_lun *lun, + int exclusive) +{ + struct se_session *se_sess; + struct sbp_session *sess; + struct sbp_login_descriptor *login; + int count = 0; + + spin_lock_bh(&tpg->se_tpg.session_lock); + list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { + sess = se_sess->fabric_sess_ptr; + + spin_lock_bh(&sess->lock); + list_for_each_entry(login, &sess->login_list, link) 
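/*
 * Each quadlet in sbp_unit_directory_template[] above is an IEEE 1212
 * config-ROM directory entry: an 8-bit key in the top byte and a 24-bit
 * immediate value below it. A standalone decode of the table, values
 * copied verbatim from the driver:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	static const uint32_t tmpl[] = {
		0x1200609e,	/* unit_specifier_id: NCITS/T10 */
		0x13010483,	/* unit_sw_version: 1155D Rev 4 */
		0x3800609e,	/* command_set_specifier_id: NCITS/T10 */
		0x390104d8,	/* command_set: SPC-2 */
		0x3b000000,	/* command_set_revision: 0 */
		0x3c000001,	/* firmware_revision: 1 */
	};
	unsigned int i;

	for (i = 0; i < sizeof(tmpl) / sizeof(tmpl[0]); i++)
		printf("key 0x%02x value 0x%06x\n",
		       tmpl[i] >> 24, tmpl[i] & 0xffffff);
	return 0;
}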
{ + if (login->lun != lun) + continue; + + if (!exclusive || login->exclusive) + count++; + } + spin_unlock_bh(&sess->lock); + } + spin_unlock_bh(&tpg->se_tpg.session_lock); + + return count; +} + +static struct sbp_login_descriptor *sbp_login_find_by_id( + struct sbp_tpg *tpg, int login_id) +{ + struct se_session *se_sess; + struct sbp_session *sess; + struct sbp_login_descriptor *login, *found = NULL; + + spin_lock_bh(&tpg->se_tpg.session_lock); + list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { + sess = se_sess->fabric_sess_ptr; + + spin_lock_bh(&sess->lock); + list_for_each_entry(login, &sess->login_list, link) { + if (login->login_id == login_id) + found = login; + } + spin_unlock_bh(&sess->lock); + } + spin_unlock_bh(&tpg->se_tpg.session_lock); + + return found; +} + +static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun) +{ + struct se_portal_group *se_tpg = &tpg->se_tpg; + struct se_lun *se_lun; + + if (lun >= TRANSPORT_MAX_LUNS_PER_TPG) + return ERR_PTR(-EINVAL); + + spin_lock(&se_tpg->tpg_lun_lock); + se_lun = se_tpg->tpg_lun_list[lun]; + + if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) + se_lun = ERR_PTR(-ENODEV); + + spin_unlock(&se_tpg->tpg_lun_lock); + + return se_lun; +} + +static struct sbp_session *sbp_session_create( + struct sbp_tpg *tpg, + u64 guid) +{ + struct sbp_session *sess; + int ret; + char guid_str[17]; + struct se_node_acl *se_nacl; + + sess = kmalloc(sizeof(*sess), GFP_KERNEL); + if (!sess) { + pr_err("failed to allocate session descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + sess->se_sess = transport_init_session(); + if (IS_ERR(sess->se_sess)) { + pr_err("failed to init se_session\n"); + + ret = PTR_ERR(sess->se_sess); + kfree(sess); + return ERR_PTR(ret); + } + + snprintf(guid_str, sizeof(guid_str), "%016llx", guid); + + se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str); + if (!se_nacl) { + pr_warn("Node ACL not found for %s\n", guid_str); + + transport_free_session(sess->se_sess); + kfree(sess); + + return ERR_PTR(-EPERM); + } + + sess->se_sess->se_node_acl = se_nacl; + + spin_lock_init(&sess->lock); + INIT_LIST_HEAD(&sess->login_list); + INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work); + + sess->guid = guid; + + transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess); + + return sess; +} + +static void sbp_session_release(struct sbp_session *sess, bool cancel_work) +{ + spin_lock_bh(&sess->lock); + if (!list_empty(&sess->login_list)) { + spin_unlock_bh(&sess->lock); + return; + } + spin_unlock_bh(&sess->lock); + + if (cancel_work) + cancel_delayed_work_sync(&sess->maint_work); + + transport_deregister_session_configfs(sess->se_sess); + transport_deregister_session(sess->se_sess); + + if (sess->card) + fw_card_put(sess->card); + + kfree(sess); +} + +static void sbp_target_agent_unregister(struct sbp_target_agent *); + +static void sbp_login_release(struct sbp_login_descriptor *login, + bool cancel_work) +{ + struct sbp_session *sess = login->sess; + + /* FIXME: abort/wait on tasks */ + + sbp_target_agent_unregister(login->tgt_agt); + + if (sess) { + spin_lock_bh(&sess->lock); + list_del(&login->link); + spin_unlock_bh(&sess->lock); + + sbp_session_release(sess, cancel_work); + } + + kfree(login); +} + +static struct sbp_target_agent *sbp_target_agent_register( + struct sbp_login_descriptor *); + +static void sbp_management_request_login( + struct sbp_management_agent *agent, struct sbp_management_request *req, + int *status_data_size) +{ + struct sbp_tport 
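/*
 * sbp_get_lun_from_tpg() and sbp_session_create() report failure through
 * the kernel's ERR_PTR convention, which the login handler later unpacks
 * with PTR_ERR(). A minimal userspace re-creation of those linux/err.h
 * helpers (stand-ins for illustration, not the kernel definitions):
 */
#include <stdio.h>
#include <errno.h>

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* the top 4095 addresses encode -MAX_ERRNO..-1 */
	return (unsigned long)ptr >= (unsigned long)-4095;
}

int main(void)
{
	void *lun = ERR_PTR(-ENODEV);	/* e.g. LUN not in ACTIVE state */

	if (IS_ERR(lun))
		printf("LUN lookup failed: %ld\n", PTR_ERR(lun));
	return 0;
}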
*tport = agent->tport; + struct sbp_tpg *tpg = tport->tpg; + struct se_lun *se_lun; + int ret; + u64 guid; + struct sbp_session *sess; + struct sbp_login_descriptor *login; + struct sbp_login_response_block *response; + int login_response_len; + + se_lun = sbp_get_lun_from_tpg(tpg, + LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); + if (IS_ERR(se_lun)) { + pr_notice("login to unknown LUN: %d\n", + LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP)); + return; + } + + ret = read_peer_guid(&guid, req); + if (ret != RCODE_COMPLETE) { + pr_warn("failed to read peer GUID: %d\n", ret); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + return; + } + + pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n", + se_lun->unpacked_lun, guid); + + sess = sbp_session_find_by_guid(tpg, guid); + if (sess) { + login = sbp_login_find_by_lun(sess, se_lun); + if (login) { + pr_notice("initiator already logged-in\n"); + + /* + * SBP-2 R4 says we should return access denied, but + * that can confuse initiators. Instead we need to + * treat this like a reconnect, but send the login + * response block like a fresh login. + * + * This is required particularly in the case of Apple + * devices booting off the FireWire target, where + * the firmware has an active login to the target. When + * the OS takes control of the session it issues its own + * LOGIN rather than a RECONNECT. To avoid the machine + * waiting until the reconnect_hold expires, we can skip + * the ACCESS_DENIED errors to speed things up. + */ + + goto already_logged_in; + } + } + + /* + * check exclusive bit in login request + * reject with access_denied if any logins present + */ + if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) && + sbp_login_count_all_by_lun(tpg, se_lun, 0)) { + pr_warn("refusing exclusive login with other active logins\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + /* + * check exclusive bit in any existing login descriptor + * reject with access_denied if any exclusive logins present + */ + if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) { + pr_warn("refusing login while another exclusive login present\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + /* + * check we haven't exceeded the number of allowed logins + * reject with resources_unavailable if we have + */ + if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >= + tport->max_logins_per_lun) { + pr_warn("max number of logins reached\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); + return; + } + + if (!sess) { + sess = sbp_session_create(tpg, guid); + if (IS_ERR(sess)) { + switch (PTR_ERR(sess)) { + case -EPERM: + ret = SBP_STATUS_ACCESS_DENIED; + break; + default: + ret = SBP_STATUS_RESOURCES_UNAVAIL; + break; + } + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP( + STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(ret)); + return; + } + + sess->node_id = req->node_addr; + sess->card = fw_card_get(req->card); + sess->generation = req->generation; + sess->speed = req->speed; + + 
schedule_delayed_work(&sess->maint_work, + SESSION_MAINTENANCE_INTERVAL); + } + + /* only take the latest reconnect_hold into account */ + sess->reconnect_hold = min( + 1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)), + tport->max_reconnect_timeout) - 1; + + login = kmalloc(sizeof(*login), GFP_KERNEL); + if (!login) { + pr_err("failed to allocate login descriptor\n"); + + sbp_session_release(sess, true); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); + return; + } + + login->sess = sess; + login->lun = se_lun; + login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); + login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); + login->login_id = atomic_inc_return(&login_id); + + login->tgt_agt = sbp_target_agent_register(login); + if (IS_ERR(login->tgt_agt)) { + ret = PTR_ERR(login->tgt_agt); + pr_err("failed to map command block handler: %d\n", ret); + + sbp_session_release(sess, true); + kfree(login); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); + return; + } + + spin_lock_bh(&sess->lock); + list_add_tail(&login->link, &sess->login_list); + spin_unlock_bh(&sess->lock); + +already_logged_in: + response = kzalloc(sizeof(*response), GFP_KERNEL); + if (!response) { + pr_err("failed to allocate login response block\n"); + + sbp_login_release(login, true); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); + return; + } + + login_response_len = clamp_val( + LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)), + 12, sizeof(*response)); + response->misc = cpu_to_be32( + ((login_response_len & 0xffff) << 16) | + (login->login_id & 0xffff)); + response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff); + addr_to_sbp2_pointer(login->tgt_agt->handler.offset, + &response->command_block_agent); + + ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST, + sess->node_id, sess->generation, sess->speed, + sbp2_pointer_to_addr(&req->orb.ptr2), response, + login_response_len); + if (ret != RCODE_COMPLETE) { + pr_debug("failed to write login response block: %x\n", ret); + + kfree(response); + sbp_login_release(login, true); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + return; + } + + kfree(response); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); +} + +static void sbp_management_request_query_logins( + struct sbp_management_agent *agent, struct sbp_management_request *req, + int *status_data_size) +{ + pr_notice("QUERY LOGINS not implemented\n"); + /* FIXME: implement */ + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); +} + +static void sbp_management_request_reconnect( + struct sbp_management_agent *agent, struct sbp_management_request *req, + int *status_data_size) +{ + struct sbp_tport *tport = agent->tport; + struct sbp_tpg *tpg = tport->tpg; + int ret; + u64 guid; + struct sbp_login_descriptor *login; + + ret = read_peer_guid(&guid, req); + if (ret != RCODE_COMPLETE) { + pr_warn("failed to read peer GUID: %d\n", ret); + + req->status.status = cpu_to_be32( + 
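/*
 * The login response above packs the clamped response length and the login
 * ID into one quadlet before byte-swapping it. A standalone sketch of that
 * packing with hypothetical values:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int login_response_len = 16;	/* clamped to 12..sizeof(*response) */
	int login_id = 7;		/* hypothetical atomic counter value */
	uint32_t misc = ((login_response_len & 0xffff) << 16) |
			(login_id & 0xffff);

	printf("login response misc = 0x%08x\n", misc);
	return 0;
}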
STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + return; + } + + pr_notice("mgt_agent RECONNECT from %016llx\n", guid); + + login = sbp_login_find_by_id(tpg, + RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc))); + + if (!login) { + pr_err("mgt_agent RECONNECT unknown login ID\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + if (login->sess->guid != guid) { + pr_err("mgt_agent RECONNECT login GUID doesn't match\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + spin_lock_bh(&login->sess->lock); + if (login->sess->card) + fw_card_put(login->sess->card); + + /* update the node details */ + login->sess->generation = req->generation; + login->sess->node_id = req->node_addr; + login->sess->card = fw_card_get(req->card); + login->sess->speed = req->speed; + spin_unlock_bh(&login->sess->lock); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); +} + +static void sbp_management_request_logout( + struct sbp_management_agent *agent, struct sbp_management_request *req, + int *status_data_size) +{ + struct sbp_tport *tport = agent->tport; + struct sbp_tpg *tpg = tport->tpg; + int login_id; + struct sbp_login_descriptor *login; + + login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); + + login = sbp_login_find_by_id(tpg, login_id); + if (!login) { + pr_warn("cannot find login: %d\n", login_id); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN)); + return; + } + + pr_info("mgt_agent LOGOUT from LUN %d session %d\n", + login->lun->unpacked_lun, login->login_id); + + if (req->node_addr != login->sess->node_id) { + pr_warn("logout from different node ID\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); + return; + } + + sbp_login_release(login, true); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); +} + +static void session_check_for_reset(struct sbp_session *sess) +{ + bool card_valid = false; + + spin_lock_bh(&sess->lock); + + if (sess->card) { + spin_lock_irq(&sess->card->lock); + card_valid = (sess->card->local_node != NULL); + spin_unlock_irq(&sess->card->lock); + + if (!card_valid) { + fw_card_put(sess->card); + sess->card = NULL; + } + } + + if (!card_valid || (sess->generation != sess->card->generation)) { + pr_info("Waiting for reconnect from node: %016llx\n", + sess->guid); + + sess->node_id = -1; + sess->reconnect_expires = get_jiffies_64() + + ((sess->reconnect_hold + 1) * HZ); + } + + spin_unlock_bh(&sess->lock); +} + +static void session_reconnect_expired(struct sbp_session *sess) +{ + struct sbp_login_descriptor *login, *temp; + LIST_HEAD(login_list); + + pr_info("Reconnect timer expired for node: %016llx\n", sess->guid); + + spin_lock_bh(&sess->lock); + list_for_each_entry_safe(login, temp, &sess->login_list, link) { + login->sess = NULL; + list_del(&login->link); + list_add_tail(&login->link, &login_list); + } + spin_unlock_bh(&sess->lock); + + list_for_each_entry_safe(login, temp, &login_list, link) { + list_del(&login->link); + 
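/*
 * Worked numbers (hypothetical) for the reconnect_hold clamp in
 * sbp_management_request_login() and the expiry window computed in
 * session_check_for_reset(): the initiator requests 2^n seconds, the
 * target caps it at tport->max_reconnect_timeout, and the stored hold is
 * one less than the seconds the reconnect timer actually runs.
 */
#include <stdio.h>

int main(void)
{
	unsigned int orb_reconnect = 4;			/* hypothetical LOGIN_ORB_RECONNECT field */
	unsigned int requested = 1u << orb_reconnect;	/* initiator asks for 16 s */
	unsigned int max_hold = 32;			/* hypothetical tport->max_reconnect_timeout */
	unsigned int hold = (requested < max_hold ? requested : max_hold) - 1;

	printf("reconnect_hold = %u; reconnect window after a bus reset = %u s\n",
	       hold, hold + 1);
	return 0;
}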
sbp_login_release(login, false); + } + + sbp_session_release(sess, false); +} + +static void session_maintenance_work(struct work_struct *work) +{ + struct sbp_session *sess = container_of(work, struct sbp_session, + maint_work.work); + + /* could be called while tearing down the session */ + spin_lock_bh(&sess->lock); + if (list_empty(&sess->login_list)) { + spin_unlock_bh(&sess->lock); + return; + } + spin_unlock_bh(&sess->lock); + + if (sess->node_id != -1) { + /* check for bus reset and make node_id invalid */ + session_check_for_reset(sess); + + schedule_delayed_work(&sess->maint_work, + SESSION_MAINTENANCE_INTERVAL); + } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) { + /* still waiting for reconnect */ + schedule_delayed_work(&sess->maint_work, + SESSION_MAINTENANCE_INTERVAL); + } else { + /* reconnect timeout has expired */ + session_reconnect_expired(sess); + } +} + +static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data, + struct sbp_target_agent *agent) +{ + __be32 state; + + switch (tcode) { + case TCODE_READ_QUADLET_REQUEST: + pr_debug("tgt_agent AGENT_STATE READ\n"); + + spin_lock_bh(&agent->lock); + state = cpu_to_be32(agent->state); + spin_unlock_bh(&agent->lock); + memcpy(data, &state, sizeof(state)); + + return RCODE_COMPLETE; + + case TCODE_WRITE_QUADLET_REQUEST: + /* ignored */ + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data, + struct sbp_target_agent *agent) +{ + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + pr_debug("tgt_agent AGENT_RESET\n"); + spin_lock_bh(&agent->lock); + agent->state = AGENT_STATE_RESET; + spin_unlock_bh(&agent->lock); + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data, + struct sbp_target_agent *agent) +{ + struct sbp2_pointer *ptr = data; + + switch (tcode) { + case TCODE_WRITE_BLOCK_REQUEST: + spin_lock_bh(&agent->lock); + if (agent->state != AGENT_STATE_SUSPENDED && + agent->state != AGENT_STATE_RESET) { + spin_unlock_bh(&agent->lock); + pr_notice("Ignoring ORB_POINTER write while active.\n"); + return RCODE_CONFLICT_ERROR; + } + agent->state = AGENT_STATE_ACTIVE; + spin_unlock_bh(&agent->lock); + + agent->orb_pointer = sbp2_pointer_to_addr(ptr); + agent->doorbell = false; + + pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n", + agent->orb_pointer); + + queue_work(system_unbound_wq, &agent->work); + + return RCODE_COMPLETE; + + case TCODE_READ_BLOCK_REQUEST: + pr_debug("tgt_agent ORB_POINTER READ\n"); + spin_lock_bh(&agent->lock); + addr_to_sbp2_pointer(agent->orb_pointer, ptr); + spin_unlock_bh(&agent->lock); + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data, + struct sbp_target_agent *agent) +{ + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + spin_lock_bh(&agent->lock); + if (agent->state != AGENT_STATE_SUSPENDED) { + spin_unlock_bh(&agent->lock); + pr_debug("Ignoring DOORBELL while active.\n"); + return RCODE_CONFLICT_ERROR; + } + agent->state = AGENT_STATE_ACTIVE; + spin_unlock_bh(&agent->lock); + + agent->doorbell = true; + + pr_debug("tgt_agent DOORBELL\n"); + + queue_work(system_unbound_wq, &agent->work); + + return RCODE_COMPLETE; + + case TCODE_READ_QUADLET_REQUEST: + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static int 
tgt_agent_rw_unsolicited_status_enable(struct fw_card *card, + int tcode, void *data, struct sbp_target_agent *agent) +{ + switch (tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n"); + /* ignored as we don't send unsolicited status */ + return RCODE_COMPLETE; + + case TCODE_READ_QUADLET_REQUEST: + return RCODE_COMPLETE; + + default: + return RCODE_TYPE_ERROR; + } +} + +static void tgt_agent_rw(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + unsigned long long offset, void *data, size_t length, + void *callback_data) +{ + struct sbp_target_agent *agent = callback_data; + struct sbp_session *sess = agent->login->sess; + int sess_gen, sess_node, rcode; + + spin_lock_bh(&sess->lock); + sess_gen = sess->generation; + sess_node = sess->node_id; + spin_unlock_bh(&sess->lock); + + if (generation != sess_gen) { + pr_notice("ignoring request with wrong generation\n"); + rcode = RCODE_TYPE_ERROR; + goto out; + } + + if (source != sess_node) { + pr_notice("ignoring request from foreign node (%x != %x)\n", + source, sess_node); + rcode = RCODE_TYPE_ERROR; + goto out; + } + + /* turn offset into the offset from the start of the block */ + offset -= agent->handler.offset; + + if (offset == 0x00 && length == 4) { + /* AGENT_STATE */ + rcode = tgt_agent_rw_agent_state(card, tcode, data, agent); + } else if (offset == 0x04 && length == 4) { + /* AGENT_RESET */ + rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent); + } else if (offset == 0x08 && length == 8) { + /* ORB_POINTER */ + rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent); + } else if (offset == 0x10 && length == 4) { + /* DOORBELL */ + rcode = tgt_agent_rw_doorbell(card, tcode, data, agent); + } else if (offset == 0x14 && length == 4) { + /* UNSOLICITED_STATUS_ENABLE */ + rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode, + data, agent); + } else { + rcode = RCODE_ADDRESS_ERROR; + } + +out: + fw_send_response(card, request, rcode); +} + +static void sbp_handle_command(struct sbp_target_request *); +static int sbp_send_status(struct sbp_target_request *); +static void sbp_free_request(struct sbp_target_request *); + +static void tgt_agent_process_work(struct work_struct *work) +{ + struct sbp_target_request *req = + container_of(work, struct sbp_target_request, work); + + pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n", + req->orb_pointer, + sbp2_pointer_to_addr(&req->orb.next_orb), + sbp2_pointer_to_addr(&req->orb.data_descriptor), + be32_to_cpu(req->orb.misc)); + + if (req->orb_pointer >> 32) + pr_debug("ORB with high bits set\n"); + + switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) { + case 0:/* Format specified by this standard */ + sbp_handle_command(req); + return; + case 1: /* Reserved for future standardization */ + case 2: /* Vendor-dependent */ + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP( + STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS( + SBP_STATUS_REQ_TYPE_NOTSUPP)); + sbp_send_status(req); + sbp_free_request(req); + return; + case 3: /* Dummy ORB */ + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP( + STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS( + SBP_STATUS_DUMMY_ORB_COMPLETE)); + sbp_send_status(req); + sbp_free_request(req); + return; + default: + BUG(); + } +} + +/* used to double-check we haven't been issued an 
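/*
 * The offset/length dispatch in tgt_agent_rw() above implies this SBP-2
 * command block agent register file, matching the 0x20-byte window the
 * driver registers for agent->handler. A standalone dump of that layout:
 */
#include <stdio.h>

static const struct {
	unsigned int offset;
	unsigned int length;
	const char *name;
} tgt_agent_regs[] = {
	{ 0x00, 4, "AGENT_STATE" },
	{ 0x04, 4, "AGENT_RESET" },
	{ 0x08, 8, "ORB_POINTER" },
	{ 0x10, 4, "DOORBELL" },
	{ 0x14, 4, "UNSOLICITED_STATUS_ENABLE" },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(tgt_agent_regs) / sizeof(tgt_agent_regs[0]); i++)
		printf("+0x%02x (%u bytes): %s\n", tgt_agent_regs[i].offset,
		       tgt_agent_regs[i].length, tgt_agent_regs[i].name);
	return 0;
}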
AGENT_RESET */ +static inline bool tgt_agent_check_active(struct sbp_target_agent *agent) +{ + bool active; + + spin_lock_bh(&agent->lock); + active = (agent->state == AGENT_STATE_ACTIVE); + spin_unlock_bh(&agent->lock); + + return active; +} + +static void tgt_agent_fetch_work(struct work_struct *work) +{ + struct sbp_target_agent *agent = + container_of(work, struct sbp_target_agent, work); + struct sbp_session *sess = agent->login->sess; + struct sbp_target_request *req; + int ret; + bool doorbell = agent->doorbell; + u64 next_orb = agent->orb_pointer; + + while (next_orb && tgt_agent_check_active(agent)) { + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + spin_lock_bh(&agent->lock); + agent->state = AGENT_STATE_DEAD; + spin_unlock_bh(&agent->lock); + return; + } + + req->login = agent->login; + req->orb_pointer = next_orb; + + req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH( + req->orb_pointer >> 32)); + req->status.orb_low = cpu_to_be32( + req->orb_pointer & 0xfffffffc); + + /* read in the ORB */ + ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST, + sess->node_id, sess->generation, sess->speed, + req->orb_pointer, &req->orb, sizeof(req->orb)); + if (ret != RCODE_COMPLETE) { + pr_debug("tgt_orb fetch failed: %x\n", ret); + req->status.status |= cpu_to_be32( + STATUS_BLOCK_SRC( + STATUS_SRC_ORB_FINISHED) | + STATUS_BLOCK_RESP( + STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(1) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS( + SBP_STATUS_UNSPECIFIED_ERROR)); + spin_lock_bh(&agent->lock); + agent->state = AGENT_STATE_DEAD; + spin_unlock_bh(&agent->lock); + + sbp_send_status(req); + sbp_free_request(req); + return; + } + + /* check the next_ORB field */ + if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) { + next_orb = 0; + req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( + STATUS_SRC_ORB_FINISHED)); + } else { + next_orb = sbp2_pointer_to_addr(&req->orb.next_orb); + req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( + STATUS_SRC_ORB_CONTINUING)); + } + + if (tgt_agent_check_active(agent) && !doorbell) { + INIT_WORK(&req->work, tgt_agent_process_work); + queue_work(system_unbound_wq, &req->work); + } else { + /* don't process this request, just check next_ORB */ + sbp_free_request(req); + } + + spin_lock_bh(&agent->lock); + doorbell = agent->doorbell = false; + + /* check if we should carry on processing */ + if (next_orb) + agent->orb_pointer = next_orb; + else + agent->state = AGENT_STATE_SUSPENDED; + + spin_unlock_bh(&agent->lock); + }; +} + +static struct sbp_target_agent *sbp_target_agent_register( + struct sbp_login_descriptor *login) +{ + struct sbp_target_agent *agent; + int ret; + + agent = kmalloc(sizeof(*agent), GFP_KERNEL); + if (!agent) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&agent->lock); + + agent->handler.length = 0x20; + agent->handler.address_callback = tgt_agent_rw; + agent->handler.callback_data = agent; + + agent->login = login; + agent->state = AGENT_STATE_RESET; + INIT_WORK(&agent->work, tgt_agent_fetch_work); + agent->orb_pointer = 0; + agent->doorbell = false; + + ret = fw_core_add_address_handler(&agent->handler, + &sbp_register_region); + if (ret < 0) { + kfree(agent); + return ERR_PTR(ret); + } + + return agent; +} + +static void sbp_target_agent_unregister(struct sbp_target_agent *agent) +{ + fw_core_remove_address_handler(&agent->handler); + cancel_work_sync(&agent->work); + kfree(agent); +} + +/* + * Simple wrapper around fw_run_transaction that retries the transaction several + * times in case 
of failure, with an exponential backoff. + */ +static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id, + int generation, int speed, unsigned long long offset, + void *payload, size_t length) +{ + int attempt, ret, delay; + + for (attempt = 1; attempt <= 5; attempt++) { + ret = fw_run_transaction(card, tcode, destination_id, + generation, speed, offset, payload, length); + + switch (ret) { + case RCODE_COMPLETE: + case RCODE_TYPE_ERROR: + case RCODE_ADDRESS_ERROR: + case RCODE_GENERATION: + return ret; + + default: + delay = 5 * attempt * attempt; + usleep_range(delay, delay * 2); + } + } + + return ret; +} + +/* + * Wrapper around sbp_run_transaction that gets the card, destination, + * generation and speed out of the request's session. + */ +static int sbp_run_request_transaction(struct sbp_target_request *req, + int tcode, unsigned long long offset, void *payload, + size_t length) +{ + struct sbp_login_descriptor *login = req->login; + struct sbp_session *sess = login->sess; + struct fw_card *card; + int node_id, generation, speed, ret; + + spin_lock_bh(&sess->lock); + card = fw_card_get(sess->card); + node_id = sess->node_id; + generation = sess->generation; + speed = sess->speed; + spin_unlock_bh(&sess->lock); + + ret = sbp_run_transaction(card, tcode, node_id, generation, speed, + offset, payload, length); + + fw_card_put(card); + + return ret; +} + +static int sbp_fetch_command(struct sbp_target_request *req) +{ + int ret, cmd_len, copy_len; + + cmd_len = scsi_command_size(req->orb.command_block); + + req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL); + if (!req->cmd_buf) + return -ENOMEM; + + memcpy(req->cmd_buf, req->orb.command_block, + min_t(int, cmd_len, sizeof(req->orb.command_block))); + + if (cmd_len > sizeof(req->orb.command_block)) { + pr_debug("sbp_fetch_command: filling in long command\n"); + copy_len = cmd_len - sizeof(req->orb.command_block); + + ret = sbp_run_request_transaction(req, + TCODE_READ_BLOCK_REQUEST, + req->orb_pointer + sizeof(req->orb), + req->cmd_buf + sizeof(req->orb.command_block), + copy_len); + if (ret != RCODE_COMPLETE) + return -EIO; + } + + return 0; +} + +static int sbp_fetch_page_table(struct sbp_target_request *req) +{ + int pg_tbl_sz, ret; + struct sbp_page_table_entry *pg_tbl; + + if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc))) + return 0; + + pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) * + sizeof(struct sbp_page_table_entry); + + pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL); + if (!pg_tbl) + return -ENOMEM; + + ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST, + sbp2_pointer_to_addr(&req->orb.data_descriptor), + pg_tbl, pg_tbl_sz); + if (ret != RCODE_COMPLETE) { + kfree(pg_tbl); + return -EIO; + } + + req->pg_tbl = pg_tbl; + return 0; +} + +static void sbp_calc_data_length_direction(struct sbp_target_request *req, + u32 *data_len, enum dma_data_direction *data_dir) +{ + int data_size, direction, idx; + + data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); + direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc)); + + if (!data_size) { + *data_len = 0; + *data_dir = DMA_NONE; + return; + } + + *data_dir = direction ? 
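/*
 * The retry schedule in sbp_run_transaction() above: after each failed
 * attempt the driver sleeps 5 * attempt^2 microseconds (so the backoff
 * actually grows quadratically, not exponentially), with usleep_range()
 * allowed to stretch each sleep up to double. Printed here for reference:
 */
#include <stdio.h>

int main(void)
{
	int attempt;

	for (attempt = 1; attempt <= 5; attempt++) {
		int delay = 5 * attempt * attempt;

		/* matches: usleep_range(delay, delay * 2) */
		printf("attempt %d: sleep %d..%d us before retrying\n",
		       attempt, delay, delay * 2);
	}
	return 0;
}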
DMA_FROM_DEVICE : DMA_TO_DEVICE; + + if (req->pg_tbl) { + *data_len = 0; + for (idx = 0; idx < data_size; idx++) { + *data_len += be16_to_cpu( + req->pg_tbl[idx].segment_length); + } + } else { + *data_len = data_size; + } +} + +static void sbp_handle_command(struct sbp_target_request *req) +{ + struct sbp_login_descriptor *login = req->login; + struct sbp_session *sess = login->sess; + int ret, unpacked_lun; + u32 data_length; + enum dma_data_direction data_dir; + + ret = sbp_fetch_command(req); + if (ret) { + pr_debug("sbp_handle_command: fetch command failed: %d\n", ret); + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + sbp_send_status(req); + sbp_free_request(req); + return; + } + + ret = sbp_fetch_page_table(req); + if (ret) { + pr_debug("sbp_handle_command: fetch page table failed: %d\n", + ret); + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + sbp_send_status(req); + sbp_free_request(req); + return; + } + + unpacked_lun = req->login->lun->unpacked_lun; + sbp_calc_data_length_direction(req, &data_length, &data_dir); + + pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n", + req->orb_pointer, unpacked_lun, data_length, data_dir); + + target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, + req->sense_buf, unpacked_lun, data_length, + MSG_SIMPLE_TAG, data_dir, 0); +} + +/* + * DMA_TO_DEVICE = read from initiator (SCSI WRITE) + * DMA_FROM_DEVICE = write to initiator (SCSI READ) + */ +static int sbp_rw_data(struct sbp_target_request *req) +{ + struct sbp_session *sess = req->login->sess; + int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id, + generation, num_pte, length, tfr_length, + rcode = RCODE_COMPLETE; + struct sbp_page_table_entry *pte; + unsigned long long offset; + struct fw_card *card; + struct sg_mapping_iter iter; + + if (req->se_cmd.data_direction == DMA_FROM_DEVICE) { + tcode = TCODE_WRITE_BLOCK_REQUEST; + sg_miter_flags = SG_MITER_FROM_SG; + } else { + tcode = TCODE_READ_BLOCK_REQUEST; + sg_miter_flags = SG_MITER_TO_SG; + } + + max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc)); + speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc)); + + pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc)); + if (pg_size) { + pr_err("sbp_run_transaction: page size ignored\n"); + pg_size = 0x100 << pg_size; + } + + spin_lock_bh(&sess->lock); + card = fw_card_get(sess->card); + node_id = sess->node_id; + generation = sess->generation; + spin_unlock_bh(&sess->lock); + + if (req->pg_tbl) { + pte = req->pg_tbl; + num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); + + offset = 0; + length = 0; + } else { + pte = NULL; + num_pte = 0; + + offset = sbp2_pointer_to_addr(&req->orb.data_descriptor); + length = req->se_cmd.data_length; + } + + sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents, + sg_miter_flags); + + while (length || num_pte) { + if (!length) { + offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 | + be32_to_cpu(pte->segment_base_lo); + length = be16_to_cpu(pte->segment_length); + + pte++; + num_pte--; + } + + sg_miter_next(&iter); + + tfr_length = min3(length, max_payload, (int)iter.length); + + /* FIXME: take page_size into account */ + + rcode = sbp_run_transaction(card, tcode, node_id, 
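/*
 * In sbp_rw_data() each FireWire transaction moves
 * min3(remaining segment length, max_payload, current sg chunk) bytes,
 * where max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(misc). A standalone
 * sketch with hypothetical numbers:
 */
#include <stdio.h>

static int min3i(int a, int b, int c)
{
	int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	int max_payload_field = 9;			/* hypothetical ORB max_payload field */
	int max_payload = 4 << max_payload_field;	/* 2048 bytes per transaction */
	int segment_length = 4096;			/* hypothetical page-table segment */
	int sg_entry_length = 4096;			/* hypothetical sg_miter chunk */

	printf("transfer length = %d bytes\n",
	       min3i(segment_length, max_payload, sg_entry_length));
	return 0;
}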
+ generation, speed, + offset, iter.addr, tfr_length); + + if (rcode != RCODE_COMPLETE) + break; + + length -= tfr_length; + offset += tfr_length; + iter.consumed = tfr_length; + } + + sg_miter_stop(&iter); + fw_card_put(card); + + if (rcode == RCODE_COMPLETE) { + WARN_ON(length != 0); + return 0; + } else { + return -EIO; + } +} + +static int sbp_send_status(struct sbp_target_request *req) +{ + int ret, length; + struct sbp_login_descriptor *login = req->login; + + length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4; + + ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST, + login->status_fifo_addr, &req->status, length); + if (ret != RCODE_COMPLETE) { + pr_debug("sbp_send_status: write failed: 0x%x\n", ret); + return -EIO; + } + + pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n", + req->orb_pointer); + + return 0; +} + +static void sbp_sense_mangle(struct sbp_target_request *req) +{ + struct se_cmd *se_cmd = &req->se_cmd; + u8 *sense = req->sense_buf; + u8 *status = req->status.data; + + WARN_ON(se_cmd->scsi_sense_length < 18); + + switch (sense[0] & 0x7f) { /* sfmt */ + case 0x70: /* current, fixed */ + status[0] = 0 << 6; + break; + case 0x71: /* deferred, fixed */ + status[0] = 1 << 6; + break; + case 0x72: /* current, descriptor */ + case 0x73: /* deferred, descriptor */ + default: + /* + * TODO: SBP-3 specifies what we should do with descriptor + * format sense data + */ + pr_err("sbp_send_sense: unknown sense format: 0x%x\n", + sense[0]); + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED)); + return; + } + + status[0] |= se_cmd->scsi_status & 0x3f;/* status */ + status[1] = + (sense[0] & 0x80) | /* valid */ + ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */ + (sense[2] & 0x0f); /* sense_key */ + status[2] = se_cmd->scsi_asc; /* sense_code */ + status[3] = se_cmd->scsi_ascq; /* sense_qualifier */ + + /* information */ + status[4] = sense[3]; + status[5] = sense[4]; + status[6] = sense[5]; + status[7] = sense[6]; + + /* CDB-dependent */ + status[8] = sense[8]; + status[9] = sense[9]; + status[10] = sense[10]; + status[11] = sense[11]; + + /* fru */ + status[12] = sense[14]; + + /* sense_key-dependent */ + status[13] = sense[15]; + status[14] = sense[16]; + status[15] = sense[17]; + + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(5) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); +} + +static int sbp_send_sense(struct sbp_target_request *req) +{ + struct se_cmd *se_cmd = &req->se_cmd; + + if (se_cmd->scsi_sense_length) { + sbp_sense_mangle(req); + } else { + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); + } + + return sbp_send_status(req); +} + +static void sbp_free_request(struct sbp_target_request *req) +{ + kfree(req->pg_tbl); + kfree(req->cmd_buf); + kfree(req); +} + +static void sbp_mgt_agent_process(struct work_struct *work) +{ + struct sbp_management_agent *agent = + container_of(work, struct sbp_management_agent, work); + struct sbp_management_request *req = agent->request; + int ret; + int status_data_len = 0; + + /* fetch the ORB from the initiator */ + ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST, + req->node_addr, req->generation, req->speed, + 
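/*
 * sbp_sense_mangle() repacks fixed-format SCSI sense data into the SBP
 * status block. A standalone run-through of the first four status bytes
 * for a hypothetical CHECK CONDITION (sense key ILLEGAL REQUEST,
 * ASC/ASCQ 0x24/0x00, INVALID FIELD IN CDB), following the bit shuffling
 * in the function above:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t sense0 = 0x70;		/* current error, fixed format -> sfmt 0 */
	uint8_t sense2 = 0x05;		/* sense key: ILLEGAL REQUEST */
	uint8_t scsi_status = 0x02;	/* CHECK CONDITION */
	uint8_t asc = 0x24, ascq = 0x00;
	uint8_t status0, status1;

	status0 = (0 << 6) | (scsi_status & 0x3f);	/* sfmt | status */
	status1 = (sense0 & 0x80) |			/* valid */
		  ((sense2 & 0xe0) >> 1) |		/* mark, eom, ili */
		  (sense2 & 0x0f);			/* sense_key */

	printf("status data[0..3] = %02x %02x %02x %02x\n",
	       status0, status1, asc, ascq);
	return 0;
}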
agent->orb_offset, &req->orb, sizeof(req->orb)); + if (ret != RCODE_COMPLETE) { + pr_debug("mgt_orb fetch failed: %x\n", ret); + goto out; + } + + pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n", + sbp2_pointer_to_addr(&req->orb.ptr1), + sbp2_pointer_to_addr(&req->orb.ptr2), + be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length), + sbp2_pointer_to_addr(&req->orb.status_fifo)); + + if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) || + ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) { + pr_err("mgt_orb bad request\n"); + goto out; + } + + switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) { + case MANAGEMENT_ORB_FUNCTION_LOGIN: + sbp_management_request_login(agent, req, &status_data_len); + break; + + case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS: + sbp_management_request_query_logins(agent, req, + &status_data_len); + break; + + case MANAGEMENT_ORB_FUNCTION_RECONNECT: + sbp_management_request_reconnect(agent, req, &status_data_len); + break; + + case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD: + pr_notice("SET PASSWORD not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + case MANAGEMENT_ORB_FUNCTION_LOGOUT: + sbp_management_request_logout(agent, req, &status_data_len); + break; + + case MANAGEMENT_ORB_FUNCTION_ABORT_TASK: + pr_notice("ABORT TASK not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET: + pr_notice("ABORT TASK SET not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET: + pr_notice("LOGICAL UNIT RESET not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + case MANAGEMENT_ORB_FUNCTION_TARGET_RESET: + pr_notice("TARGET RESET not implemented\n"); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + + default: + pr_notice("unknown management function 0x%x\n", + MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))); + + req->status.status = cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); + + break; + } + + req->status.status |= cpu_to_be32( + STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */ + STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) | + STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32)); + req->status.orb_low = cpu_to_be32(agent->orb_offset); + + /* write the status block back to the initiator */ + ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST, + req->node_addr, req->generation, req->speed, + sbp2_pointer_to_addr(&req->orb.status_fifo), + &req->status, 8 + status_data_len); + if (ret != RCODE_COMPLETE) { + pr_debug("mgt_orb status write failed: %x\n", ret); + goto out; + } + +out: + fw_card_put(req->card); + kfree(req); + + spin_lock_bh(&agent->lock); + agent->state = MANAGEMENT_AGENT_STATE_IDLE; + spin_unlock_bh(&agent->lock); +} + +static void sbp_mgt_agent_rw(struct fw_card *card, + struct fw_request *request, int tcode, int 
destination, int source, + int generation, unsigned long long offset, void *data, size_t length, + void *callback_data) +{ + struct sbp_management_agent *agent = callback_data; + struct sbp2_pointer *ptr = data; + int rcode = RCODE_ADDRESS_ERROR; + + if (!agent->tport->enable) + goto out; + + if ((offset != agent->handler.offset) || (length != 8)) + goto out; + + if (tcode == TCODE_WRITE_BLOCK_REQUEST) { + struct sbp_management_request *req; + int prev_state; + + spin_lock_bh(&agent->lock); + prev_state = agent->state; + agent->state = MANAGEMENT_AGENT_STATE_BUSY; + spin_unlock_bh(&agent->lock); + + if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) { + pr_notice("ignoring management request while busy\n"); + rcode = RCODE_CONFLICT_ERROR; + goto out; + } + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) { + rcode = RCODE_CONFLICT_ERROR; + goto out; + } + + req->card = fw_card_get(card); + req->generation = generation; + req->node_addr = source; + req->speed = fw_get_request_speed(request); + + agent->orb_offset = sbp2_pointer_to_addr(ptr); + agent->request = req; + + queue_work(system_unbound_wq, &agent->work); + rcode = RCODE_COMPLETE; + } else if (tcode == TCODE_READ_BLOCK_REQUEST) { + addr_to_sbp2_pointer(agent->orb_offset, ptr); + rcode = RCODE_COMPLETE; + } else { + rcode = RCODE_TYPE_ERROR; + } + +out: + fw_send_response(card, request, rcode); +} + +static struct sbp_management_agent *sbp_management_agent_register( + struct sbp_tport *tport) +{ + int ret; + struct sbp_management_agent *agent; + + agent = kmalloc(sizeof(*agent), GFP_KERNEL); + if (!agent) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&agent->lock); + agent->tport = tport; + agent->handler.length = 0x08; + agent->handler.address_callback = sbp_mgt_agent_rw; + agent->handler.callback_data = agent; + agent->state = MANAGEMENT_AGENT_STATE_IDLE; + INIT_WORK(&agent->work, sbp_mgt_agent_process); + agent->orb_offset = 0; + agent->request = NULL; + + ret = fw_core_add_address_handler(&agent->handler, + &sbp_register_region); + if (ret < 0) { + kfree(agent); + return ERR_PTR(ret); + } + + return agent; +} + +static void sbp_management_agent_unregister(struct sbp_management_agent *agent) +{ + fw_core_remove_address_handler(&agent->handler); + cancel_work_sync(&agent->work); + kfree(agent); +} + +static int sbp_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int sbp_check_false(struct se_portal_group *se_tpg) +{ + return 0; +} + +static char *sbp_get_fabric_name(void) +{ + return "sbp"; +} + +static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + + return &tport->tport_name[0]; +} + +static u16 sbp_get_tag(struct se_portal_group *se_tpg) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + return tpg->tport_tpgt; +} + +static u32 sbp_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg) +{ + struct sbp_nacl *nacl; + + nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL); + if (!nacl) { + pr_err("Unable to allocate struct sbp_nacl\n"); + return NULL; + } + + return &nacl->se_node_acl; +} + +static void sbp_release_fabric_acl( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl) +{ + struct sbp_nacl *nacl = + container_of(se_nacl, struct sbp_nacl, se_node_acl); + kfree(nacl); +} + +static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + 
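/*
 * Illustrative sketch, not part of this patch: the STATUS_BLOCK_* helpers
 * used throughout the agent code above each shift one field into the first
 * quadlet of an SBP-2 status block, so a successful completion would be
 * composed as
 *
 *	req->status.status = cpu_to_be32(
 *		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
 *		STATUS_BLOCK_DEAD(0) |
 *		STATUS_BLOCK_LEN(1) |
 *		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
 *
 * STATUS_BLOCK_LEN(1) describes the minimal two-quadlet (8-byte) block;
 * sbp_send_status() above decodes it as (len + 1) * 4 bytes to write back
 * to the initiator's status FIFO. The macros themselves are defined in
 * sbp_target.h further down.
 */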
return 1; +} + +static void sbp_release_cmd(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + + sbp_free_request(req); +} + +static int sbp_shutdown_session(struct se_session *se_sess) +{ + return 0; +} + +static void sbp_close_session(struct se_session *se_sess) +{ + return; +} + +static u32 sbp_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static int sbp_write_pending(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + int ret; + + ret = sbp_rw_data(req); + if (ret) { + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP( + STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS( + SBP_STATUS_UNSPECIFIED_ERROR)); + sbp_send_status(req); + return ret; + } + + transport_generic_process_write(se_cmd); + + return 0; +} + +static int sbp_write_pending_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static void sbp_set_default_node_attrs(struct se_node_acl *nacl) +{ + return; +} + +static u32 sbp_get_task_tag(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + + /* only used for printk until we do TMRs */ + return (u32)req->orb_pointer; +} + +static int sbp_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +static int sbp_queue_data_in(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + int ret; + + ret = sbp_rw_data(req); + if (ret) { + req->status.status |= cpu_to_be32( + STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | + STATUS_BLOCK_DEAD(0) | + STATUS_BLOCK_LEN(1) | + STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); + sbp_send_status(req); + return ret; + } + + return sbp_send_sense(req); +} + +/* + * Called after command (no data transfer) or after the write (to device) + * operation is completed + */ +static int sbp_queue_status(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + + return sbp_send_sense(req); +} + +static int sbp_queue_tm_rsp(struct se_cmd *se_cmd) +{ + return 0; +} + +static u16 sbp_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length) +{ + return 0; +} + +static u16 sbp_get_fabric_sense_len(void) +{ + return 0; +} + +static int sbp_check_stop_free(struct se_cmd *se_cmd) +{ + struct sbp_target_request *req = container_of(se_cmd, + struct sbp_target_request, se_cmd); + + transport_generic_free_cmd(&req->se_cmd, 0); + return 1; +} + +/* + * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3) + */ +static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + /* + * Return an IEEE 1394 SCSI protocol identifier. + * This is defined in section 7.5.1 Table 362 in spc4r17 + */ + return SCSI_PROTOCOL_SBP; +} + +static u32 sbp_get_pr_transport_id( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + int ret; + + /* + * Set PROTOCOL IDENTIFIER to 3h for SBP + */ + buf[0] = SCSI_PROTOCOL_SBP; + /* + * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI + * over IEEE 1394 + */ + ret = hex2bin(&buf[8], se_nacl->initiatorname, 8); + if (ret < 0) + pr_debug("sbp transport_id: invalid hex string\n"); + + /* + * The IEEE 1394 Transport ID has a fixed length of 24 bytes + */ + return 24; +} + +static 
u32 sbp_get_pr_transport_id_len( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + *format_code = 0; + /* + * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI + * over IEEE 1394 + * + * The SBP Transport ID has a fixed length of 24 bytes + */ + return 24; +} + +/* + * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above + * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. + */ +static char *sbp_parse_pr_out_transport_id( + struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + /* + * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID + * for initiator ports using SCSI over IEEE 1394 + * + * The TransportID for an IEEE 1394 initiator port has a fixed size + * of 24 bytes, and IEEE 1394 does not define an I_T nexus identifier, + * so we set *port_nexus_ptr to NULL. + */ + *port_nexus_ptr = NULL; + *out_tid_len = 24; + + return (char *)&buf[8]; +} + +static int sbp_count_se_tpg_luns(struct se_portal_group *tpg) +{ + int i, count = 0; + + spin_lock(&tpg->tpg_lun_lock); + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + struct se_lun *se_lun = tpg->tpg_lun_list[i]; + + if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE) + continue; + + count++; + } + spin_unlock(&tpg->tpg_lun_lock); + + return count; +} + +static int sbp_update_unit_directory(struct sbp_tport *tport) +{ + int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i; + u32 *data; + + if (tport->unit_directory.data) { + fw_core_remove_descriptor(&tport->unit_directory); + kfree(tport->unit_directory.data); + tport->unit_directory.data = NULL; + } + + if (!tport->enable || !tport->tpg) + return 0; + + num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg); + + /* + * Number of entries in the final unit directory: + * - all of those in the template + * - management_agent + * - unit_characteristics + * - reconnect_timeout + * - unit unique ID + * - one for each LUN + * + * MUST NOT include leaf or sub-directory entries + */ + num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns; + + if (tport->directory_id != -1) + num_entries++; + + /* allocate num_entries + 4 for the header and unique ID leaf */ + data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* directory_length */ + data[idx++] = num_entries << 16; + + /* directory_id */ + if (tport->directory_id != -1) + data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id; + + /* unit directory template */ + memcpy(&data[idx], sbp_unit_directory_template, + sizeof(sbp_unit_directory_template)); + idx += ARRAY_SIZE(sbp_unit_directory_template); + + /* management_agent */ + mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4; + data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff); + + /* unit_characteristics */ + data[idx++] = 0x3a000000 | + (((tport->mgt_orb_timeout * 2) << 8) & 0xff00) | + SBP_ORB_FETCH_SIZE; + + /* reconnect_timeout */ + data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff); + + /* unit unique ID (leaf is just after LUNs) */ + data[idx++] = 0x8d000000 | (num_luns + 1); + + spin_lock(&tport->tpg->se_tpg.tpg_lun_lock); + for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { + struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i]; + struct se_device *dev; + int type; + + if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE) + continue; + + 
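/*
 * Editorial note: tpg_lun_lock is dropped across the body of this loop
 * (see the unlock/lock pair just below), presumably so that
 * dev->transport->get_device_type() is never called under a spinlock.
 */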
spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock); + + dev = se_lun->lun_se_dev; + type = dev->transport->get_device_type(dev); + + /* logical_unit_number */ + data[idx++] = 0x14000000 | + ((type << 16) & 0x1f0000) | + (se_lun->unpacked_lun & 0xffff); + + spin_lock(&tport->tpg->se_tpg.tpg_lun_lock); + } + spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock); + + /* unit unique ID leaf */ + data[idx++] = 2 << 16; + data[idx++] = tport->guid >> 32; + data[idx++] = tport->guid; + + tport->unit_directory.length = idx; + tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24; + tport->unit_directory.data = data; + + ret = fw_core_add_descriptor(&tport->unit_directory); + if (ret < 0) { + kfree(tport->unit_directory.data); + tport->unit_directory.data = NULL; + } + + return ret; +} + +static ssize_t sbp_parse_wwn(const char *name, u64 *wwn, int strict) +{ + const char *cp; + char c, nibble; + int pos = 0, err; + + *wwn = 0; + for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) { + c = *cp; + if (c == '\n' && cp[1] == '\0') + continue; + if (c == '\0') { + err = 2; + if (pos != 16) + goto fail; + return cp - name; + } + err = 3; + if (isdigit(c)) + nibble = c - '0'; + else if (isxdigit(c) && (islower(c) || !strict)) + nibble = tolower(c) - 'a' + 10; + else + goto fail; + *wwn = (*wwn << 4) | nibble; + pos++; + } + err = 4; +fail: + printk(KERN_INFO "err %u len %zu pos %u\n", + err, cp - name, pos); + return -1; +} + +static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn) +{ + return snprintf(buf, len, "%016llx", wwn); +} + +static struct se_node_acl *sbp_make_nodeacl( + struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct se_node_acl *se_nacl, *se_nacl_new; + struct sbp_nacl *nacl; + u64 guid = 0; + u32 nexus_depth = 1; + + if (sbp_parse_wwn(name, &guid, 1) < 0) + return ERR_PTR(-EINVAL); + + se_nacl_new = sbp_alloc_fabric_acl(se_tpg); + if (!se_nacl_new) + return ERR_PTR(-ENOMEM); + + /* + * se_nacl_new may be released by core_tpg_add_initiator_node_acl() + * when converting a NodeACL from demo mode -> explicit + */ + se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, + name, nexus_depth); + if (IS_ERR(se_nacl)) { + sbp_release_fabric_acl(se_tpg, se_nacl_new); + return se_nacl; + } + + nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl); + nacl->guid = guid; + sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid); + + return se_nacl; +} + +static void sbp_drop_nodeacl(struct se_node_acl *se_acl) +{ + struct sbp_nacl *nacl = + container_of(se_acl, struct sbp_nacl, se_node_acl); + + core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); + kfree(nacl); +} + +static int sbp_post_link_lun( + struct se_portal_group *se_tpg, + struct se_lun *se_lun) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + + return sbp_update_unit_directory(tpg->tport); +} + +static void sbp_pre_unlink_lun( + struct se_portal_group *se_tpg, + struct se_lun *se_lun) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + int ret; + + if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) + tport->enable = 0; + + ret = sbp_update_unit_directory(tport); + if (ret < 0) + pr_err("unlink LUN: failed to update unit directory\n"); +} + +static struct se_portal_group *sbp_make_tpg( + struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct sbp_tport *tport = + container_of(wwn, struct sbp_tport, tport_wwn); + + struct sbp_tpg *tpg; + unsigned long tpgt; + int ret; + + if 
(strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + return ERR_PTR(-EINVAL); + + if (tport->tpg) { + pr_err("Only one TPG per Unit is possible.\n"); + return ERR_PTR(-EBUSY); + } + + tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct sbp_tpg\n"); + return ERR_PTR(-ENOMEM); + } + + tpg->tport = tport; + tpg->tport_tpgt = tpgt; + tport->tpg = tpg; + + /* default attribute values */ + tport->enable = 0; + tport->directory_id = -1; + tport->mgt_orb_timeout = 15; + tport->max_reconnect_timeout = 5; + tport->max_logins_per_lun = 1; + + tport->mgt_agt = sbp_management_agent_register(tport); + if (IS_ERR(tport->mgt_agt)) { + ret = PTR_ERR(tport->mgt_agt); + kfree(tpg); + return ERR_PTR(ret); + } + + ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn, + &tpg->se_tpg, (void *)tpg, + TRANSPORT_TPG_TYPE_NORMAL); + if (ret < 0) { + sbp_management_agent_unregister(tport->mgt_agt); + kfree(tpg); + return ERR_PTR(ret); + } + + return &tpg->se_tpg; +} + +static void sbp_drop_tpg(struct se_portal_group *se_tpg) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + + core_tpg_deregister(se_tpg); + sbp_management_agent_unregister(tport->mgt_agt); + tport->tpg = NULL; + kfree(tpg); +} + +static struct se_wwn *sbp_make_tport( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct sbp_tport *tport; + u64 guid = 0; + + if (sbp_parse_wwn(name, &guid, 1) < 0) + return ERR_PTR(-EINVAL); + + tport = kzalloc(sizeof(*tport), GFP_KERNEL); + if (!tport) { + pr_err("Unable to allocate struct sbp_tport\n"); + return ERR_PTR(-ENOMEM); + } + + tport->guid = guid; + sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid); + + return &tport->tport_wwn; +} + +static void sbp_drop_tport(struct se_wwn *wwn) +{ + struct sbp_tport *tport = + container_of(wwn, struct sbp_tport, tport_wwn); + + kfree(tport); +} + +static ssize_t sbp_wwn_show_attr_version( + struct target_fabric_configfs *tf, + char *page) +{ + return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION); +} + +TF_WWN_ATTR_RO(sbp, version); + +static struct configfs_attribute *sbp_wwn_attrs[] = { + &sbp_wwn_version.attr, + NULL, +}; + +static ssize_t sbp_tpg_show_directory_id( + struct se_portal_group *se_tpg, + char *page) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + + if (tport->directory_id == -1) + return sprintf(page, "implicit\n"); + else + return sprintf(page, "%06x\n", tport->directory_id); +} + +static ssize_t sbp_tpg_store_directory_id( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + unsigned long val; + + if (tport->enable) { + pr_err("Cannot change the directory_id on an active target.\n"); + return -EBUSY; + } + + if (strstr(page, "implicit") == page) { + tport->directory_id = -1; + } else { + if (kstrtoul(page, 16, &val) < 0) + return -EINVAL; + if (val > 0xffffff) + return -EINVAL; + + tport->directory_id = val; + } + + return count; +} + +static ssize_t sbp_tpg_show_enable( + struct se_portal_group *se_tpg, + char *page) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + return sprintf(page, "%d\n", tport->enable); +} + +static ssize_t sbp_tpg_store_enable( + struct 
se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + unsigned long val; + int ret; + + if (kstrtoul(page, 0, &val) < 0) + return -EINVAL; + if ((val != 0) && (val != 1)) + return -EINVAL; + + if (tport->enable == val) + return count; + + if (val) { + if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) { + pr_err("Cannot enable a target with no LUNs!\n"); + return -EINVAL; + } + } else { + /* XXX: force-shutdown sessions instead? */ + spin_lock_bh(&se_tpg->session_lock); + if (!list_empty(&se_tpg->tpg_sess_list)) { + spin_unlock_bh(&se_tpg->session_lock); + return -EBUSY; + } + spin_unlock_bh(&se_tpg->session_lock); + } + + tport->enable = val; + + ret = sbp_update_unit_directory(tport); + if (ret < 0) { + pr_err("Could not update Config ROM\n"); + return ret; + } + + return count; +} + +TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR); +TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *sbp_tpg_base_attrs[] = { + &sbp_tpg_directory_id.attr, + &sbp_tpg_enable.attr, + NULL, +}; + +static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout( + struct se_portal_group *se_tpg, + char *page) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + return sprintf(page, "%d\n", tport->mgt_orb_timeout); +} + +static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + unsigned long val; + int ret; + + if (kstrtoul(page, 0, &val) < 0) + return -EINVAL; + if ((val < 1) || (val > 127)) + return -EINVAL; + + if (tport->mgt_orb_timeout == val) + return count; + + tport->mgt_orb_timeout = val; + + ret = sbp_update_unit_directory(tport); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout( + struct se_portal_group *se_tpg, + char *page) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + return sprintf(page, "%d\n", tport->max_reconnect_timeout); +} + +static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + unsigned long val; + int ret; + + if (kstrtoul(page, 0, &val) < 0) + return -EINVAL; + if ((val < 1) || (val > 32767)) + return -EINVAL; + + if (tport->max_reconnect_timeout == val) + return count; + + tport->max_reconnect_timeout = val; + + ret = sbp_update_unit_directory(tport); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t sbp_tpg_attrib_show_max_logins_per_lun( + struct se_portal_group *se_tpg, + char *page) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + return sprintf(page, "%d\n", tport->max_logins_per_lun); +} + +static ssize_t sbp_tpg_attrib_store_max_logins_per_lun( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); + struct sbp_tport *tport = tpg->tport; + unsigned long val; + + if (kstrtoul(page, 0, &val) < 0) + return -EINVAL; + if ((val < 1) || (val > 127)) + return -EINVAL; + + /* XXX: also check against current count? 
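(as written, lowering the limit never evicts existing logins; the new value is presumably only consulted when a later LOGIN request arrives)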
*/ + + tport->max_logins_per_lun = val; + + return count; +} + +TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR); +TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR); +TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *sbp_tpg_attrib_attrs[] = { + &sbp_tpg_attrib_mgt_orb_timeout.attr, + &sbp_tpg_attrib_max_reconnect_timeout.attr, + &sbp_tpg_attrib_max_logins_per_lun.attr, + NULL, +}; + +static struct target_core_fabric_ops sbp_ops = { + .get_fabric_name = sbp_get_fabric_name, + .get_fabric_proto_ident = sbp_get_fabric_proto_ident, + .tpg_get_wwn = sbp_get_fabric_wwn, + .tpg_get_tag = sbp_get_tag, + .tpg_get_default_depth = sbp_get_default_depth, + .tpg_get_pr_transport_id = sbp_get_pr_transport_id, + .tpg_get_pr_transport_id_len = sbp_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = sbp_parse_pr_out_transport_id, + .tpg_check_demo_mode = sbp_check_true, + .tpg_check_demo_mode_cache = sbp_check_true, + .tpg_check_demo_mode_write_protect = sbp_check_false, + .tpg_check_prod_mode_write_protect = sbp_check_false, + .tpg_alloc_fabric_acl = sbp_alloc_fabric_acl, + .tpg_release_fabric_acl = sbp_release_fabric_acl, + .tpg_get_inst_index = sbp_tpg_get_inst_index, + .release_cmd = sbp_release_cmd, + .shutdown_session = sbp_shutdown_session, + .close_session = sbp_close_session, + .sess_get_index = sbp_sess_get_index, + .write_pending = sbp_write_pending, + .write_pending_status = sbp_write_pending_status, + .set_default_node_attributes = sbp_set_default_node_attrs, + .get_task_tag = sbp_get_task_tag, + .get_cmd_state = sbp_get_cmd_state, + .queue_data_in = sbp_queue_data_in, + .queue_status = sbp_queue_status, + .queue_tm_rsp = sbp_queue_tm_rsp, + .get_fabric_sense_len = sbp_get_fabric_sense_len, + .set_fabric_sense_len = sbp_set_fabric_sense_len, + .check_stop_free = sbp_check_stop_free, + + .fabric_make_wwn = sbp_make_tport, + .fabric_drop_wwn = sbp_drop_tport, + .fabric_make_tpg = sbp_make_tpg, + .fabric_drop_tpg = sbp_drop_tpg, + .fabric_post_link = sbp_post_link_lun, + .fabric_pre_unlink = sbp_pre_unlink_lun, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_make_nodeacl = sbp_make_nodeacl, + .fabric_drop_nodeacl = sbp_drop_nodeacl, +}; + +static int sbp_register_configfs(void) +{ + struct target_fabric_configfs *fabric; + int ret; + + fabric = target_fabric_configfs_init(THIS_MODULE, "sbp"); + if (!fabric) { + pr_err("target_fabric_configfs_init() failed\n"); + return -ENOMEM; + } + + fabric->tf_ops = sbp_ops; + + /* + * Setup default attribute lists for various fabric->tf_cit_tmpl + */ + TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + + ret = target_fabric_configfs_register(fabric); + if (ret < 0) { + pr_err("target_fabric_configfs_register() failed for SBP\n"); + return ret; + } + + sbp_fabric_configfs = fabric; + + return 0; +}; + +static void sbp_deregister_configfs(void) +{ + if (!sbp_fabric_configfs) + return; + + target_fabric_configfs_deregister(sbp_fabric_configfs); + 
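/* clear the cached pointer so a repeated call remains a harmless no-op */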
sbp_fabric_configfs = NULL; +}; + +static int __init sbp_init(void) +{ + int ret; + + ret = sbp_register_configfs(); + if (ret < 0) + return ret; + + return 0; +}; + +static void sbp_exit(void) +{ + sbp_deregister_configfs(); +}; + +MODULE_DESCRIPTION("FireWire SBP fabric driver"); +MODULE_LICENSE("GPL"); +module_init(sbp_init); +module_exit(sbp_exit); diff --git a/drivers/target/sbp/sbp_target.h b/drivers/target/sbp/sbp_target.h new file mode 100644 index 00000000000..6d0d74a2c54 --- /dev/null +++ b/drivers/target/sbp/sbp_target.h @@ -0,0 +1,251 @@ +#ifndef _SBP_BASE_H +#define _SBP_BASE_H + +#include <linux/firewire.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/workqueue.h> +#include <target/target_core_base.h> + +#define SBP_VERSION "v0.1" +#define SBP_NAMELEN 32 + +#define SBP_ORB_FETCH_SIZE 8 + +#define MANAGEMENT_AGENT_STATE_IDLE 0 +#define MANAGEMENT_AGENT_STATE_BUSY 1 + +#define ORB_NOTIFY(v) (((v) >> 31) & 0x01) +#define ORB_REQUEST_FORMAT(v) (((v) >> 29) & 0x03) + +#define MANAGEMENT_ORB_FUNCTION(v) (((v) >> 16) & 0x0f) + +#define MANAGEMENT_ORB_FUNCTION_LOGIN 0x0 +#define MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS 0x1 +#define MANAGEMENT_ORB_FUNCTION_RECONNECT 0x3 +#define MANAGEMENT_ORB_FUNCTION_SET_PASSWORD 0x4 +#define MANAGEMENT_ORB_FUNCTION_LOGOUT 0x7 +#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK 0xb +#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET 0xc +#define MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET 0xe +#define MANAGEMENT_ORB_FUNCTION_TARGET_RESET 0xf + +#define LOGIN_ORB_EXCLUSIVE(v) (((v) >> 28) & 0x01) +#define LOGIN_ORB_RESERVED(v) (((v) >> 24) & 0x0f) +#define LOGIN_ORB_RECONNECT(v) (((v) >> 20) & 0x0f) +#define LOGIN_ORB_LUN(v) (((v) >> 0) & 0xffff) +#define LOGIN_ORB_PASSWORD_LENGTH(v) (((v) >> 16) & 0xffff) +#define LOGIN_ORB_RESPONSE_LENGTH(v) (((v) >> 0) & 0xffff) + +#define RECONNECT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff) +#define LOGOUT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff) + +#define CMDBLK_ORB_DIRECTION(v) (((v) >> 27) & 0x01) +#define CMDBLK_ORB_SPEED(v) (((v) >> 24) & 0x07) +#define CMDBLK_ORB_MAX_PAYLOAD(v) (((v) >> 20) & 0x0f) +#define CMDBLK_ORB_PG_TBL_PRESENT(v) (((v) >> 19) & 0x01) +#define CMDBLK_ORB_PG_SIZE(v) (((v) >> 16) & 0x07) +#define CMDBLK_ORB_DATA_SIZE(v) (((v) >> 0) & 0xffff) + +#define STATUS_BLOCK_SRC(v) (((v) & 0x03) << 30) +#define STATUS_BLOCK_RESP(v) (((v) & 0x03) << 28) +#define STATUS_BLOCK_DEAD(v) (((v) ? 
1 : 0) << 27) +#define STATUS_BLOCK_LEN(v) (((v) & 0x07) << 24) +#define STATUS_BLOCK_SBP_STATUS(v) (((v) & 0xff) << 16) +#define STATUS_BLOCK_ORB_OFFSET_HIGH(v) (((v) & 0xffff) << 0) + +#define STATUS_SRC_ORB_CONTINUING 0 +#define STATUS_SRC_ORB_FINISHED 1 +#define STATUS_SRC_UNSOLICITED 2 + +#define STATUS_RESP_REQUEST_COMPLETE 0 +#define STATUS_RESP_TRANSPORT_FAILURE 1 +#define STATUS_RESP_ILLEGAL_REQUEST 2 +#define STATUS_RESP_VENDOR_DEPENDENT 3 + +#define SBP_STATUS_OK 0 +#define SBP_STATUS_REQ_TYPE_NOTSUPP 1 +#define SBP_STATUS_SPEED_NOTSUPP 2 +#define SBP_STATUS_PAGE_SIZE_NOTSUPP 3 +#define SBP_STATUS_ACCESS_DENIED 4 +#define SBP_STATUS_LUN_NOTSUPP 5 +#define SBP_STATUS_PAYLOAD_TOO_SMALL 6 +/* 7 is reserved */ +#define SBP_STATUS_RESOURCES_UNAVAIL 8 +#define SBP_STATUS_FUNCTION_REJECTED 9 +#define SBP_STATUS_LOGIN_ID_UNKNOWN 10 +#define SBP_STATUS_DUMMY_ORB_COMPLETE 11 +#define SBP_STATUS_REQUEST_ABORTED 12 +#define SBP_STATUS_UNSPECIFIED_ERROR 0xff + +#define AGENT_STATE_RESET 0 +#define AGENT_STATE_ACTIVE 1 +#define AGENT_STATE_SUSPENDED 2 +#define AGENT_STATE_DEAD 3 + +struct sbp2_pointer { + __be32 high; + __be32 low; +}; + +struct sbp_command_block_orb { + struct sbp2_pointer next_orb; + struct sbp2_pointer data_descriptor; + __be32 misc; + u8 command_block[12]; +}; + +struct sbp_page_table_entry { + __be16 segment_length; + __be16 segment_base_hi; + __be32 segment_base_lo; +}; + +struct sbp_management_orb { + struct sbp2_pointer ptr1; + struct sbp2_pointer ptr2; + __be32 misc; + __be32 length; + struct sbp2_pointer status_fifo; +}; + +struct sbp_status_block { + __be32 status; + __be32 orb_low; + u8 data[24]; +}; + +struct sbp_login_response_block { + __be32 misc; + struct sbp2_pointer command_block_agent; + __be32 reconnect_hold; +}; + +struct sbp_login_descriptor { + struct sbp_session *sess; + struct list_head link; + + struct se_lun *lun; + + u64 status_fifo_addr; + int exclusive; + u16 login_id; + + struct sbp_target_agent *tgt_agt; +}; + +struct sbp_session { + spinlock_t lock; + struct se_session *se_sess; + struct list_head login_list; + struct delayed_work maint_work; + + u64 guid; /* login_owner_EUI_64 */ + int node_id; /* login_owner_ID */ + + struct fw_card *card; + int generation; + int speed; + + int reconnect_hold; + u64 reconnect_expires; +}; + +struct sbp_nacl { + /* Initiator EUI-64 */ + u64 guid; + /* ASCII formatted GUID for SBP Initiator port */ + char iport_name[SBP_NAMELEN]; + /* Returned by sbp_make_nodeacl() */ + struct se_node_acl se_node_acl; +}; + +struct sbp_tpg { + /* Target portal group tag for TCM */ + u16 tport_tpgt; + /* Pointer back to sbp_tport */ + struct sbp_tport *tport; + /* Returned by sbp_make_tpg() */ + struct se_portal_group se_tpg; +}; + +struct sbp_tport { + /* Target Unit Identifier (EUI-64) */ + u64 guid; + /* Target port name */ + char tport_name[SBP_NAMELEN]; + /* Returned by sbp_make_tport() */ + struct se_wwn tport_wwn; + + struct sbp_tpg *tpg; + + /* FireWire unit directory */ + struct fw_descriptor unit_directory; + + /* SBP Management Agent */ + struct sbp_management_agent *mgt_agt; + + /* Parameters */ + int enable; + s32 directory_id; + int mgt_orb_timeout; + int max_reconnect_timeout; + int max_logins_per_lun; +}; + +static inline u64 sbp2_pointer_to_addr(const struct sbp2_pointer *ptr) +{ + return (u64)(be32_to_cpu(ptr->high) & 0x0000ffff) << 32 | + (be32_to_cpu(ptr->low) & 0xfffffffc); +} + +static inline void addr_to_sbp2_pointer(u64 addr, struct sbp2_pointer *ptr) +{ + ptr->high = cpu_to_be32(addr >> 32); + ptr->low 
= cpu_to_be32(addr); +} + +struct sbp_target_agent { + spinlock_t lock; + struct fw_address_handler handler; + struct sbp_login_descriptor *login; + int state; + struct work_struct work; + u64 orb_pointer; + bool doorbell; +}; + +struct sbp_target_request { + struct sbp_login_descriptor *login; + u64 orb_pointer; + struct sbp_command_block_orb orb; + struct sbp_status_block status; + struct work_struct work; + + struct se_cmd se_cmd; + struct sbp_page_table_entry *pg_tbl; + void *cmd_buf; + + unsigned char sense_buf[TRANSPORT_SENSE_BUFFER]; +}; + +struct sbp_management_agent { + spinlock_t lock; + struct sbp_tport *tport; + struct fw_address_handler handler; + int state; + struct work_struct work; + u64 orb_offset; + struct sbp_management_request *request; +}; + +struct sbp_management_request { + struct sbp_management_orb orb; + struct sbp_status_block status; + struct fw_card *card; + int generation; + int node_addr; + int speed; +}; + +#endif diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index c7746a3339d..e624b836469 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -59,26 +59,31 @@ struct t10_alua_lu_gp *default_lu_gp; * * See spc4r17 section 6.27 */ -int target_emulate_report_target_port_groups(struct se_task *task) +int target_emulate_report_target_port_groups(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct se_port *port; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; unsigned char *buf; - u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first - Target port group descriptor */ + u32 rd_len = 0, off; + int ext_hdr = (cmd->t_task_cdb[1] & 0x20); /* - * Need at least 4 bytes of response data or else we can't - * even fit the return data length. + * Skip over RESERVED area to first Target port group descriptor + * depending on the PARAMETER DATA FORMAT type. */ - if (cmd->data_length < 4) { - pr_warn("REPORT TARGET PORT GROUPS allocation length %u" - " too small\n", cmd->data_length); + if (ext_hdr != 0) + off = 8; + else + off = 4; + + if (cmd->data_length < off) { + pr_warn("REPORT TARGET PORT GROUPS allocation length %u too" + " small for %s header\n", cmd->data_length, + (ext_hdr) ? "extended" : "normal"); + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; return -EINVAL; } - buf = transport_kmap_data_sg(cmd); spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); @@ -159,15 +164,34 @@ int target_emulate_report_target_port_groups(struct se_task *task) /* * Set the RETURN DATA LENGTH set in the header of the DataIN Payload */ - buf[0] = ((rd_len >> 24) & 0xff); - buf[1] = ((rd_len >> 16) & 0xff); - buf[2] = ((rd_len >> 8) & 0xff); - buf[3] = (rd_len & 0xff); + put_unaligned_be32(rd_len, &buf[0]); + /* + * Fill in the Extended header parameter data format if requested + */ + if (ext_hdr != 0) { + buf[4] = 0x10; + /* + * Set the implicit transition time (in seconds) for the application + * client to use as a base for its transition timeout value. + * + * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN + * this CDB was received upon to determine this value individually + * for this ALUA target port group. 
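	 *
	 * For illustration (not normative), with a large enough allocation
	 * length and a 10-second recommendation configured, the start of the
	 * returned parameter data would look like:
	 *
	 *	buf[0..3] = RETURN DATA LENGTH (put_unaligned_be32 above)
	 *	buf[4]    = 0x10  (extended header parameter data format)
	 *	buf[5]    = 0x0a  (implicit transition time, in seconds)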
+ */ + port = cmd->se_lun->lun_sep; + tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; + if (tg_pt_gp_mem) { + spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; + if (tg_pt_gp) + buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs; + spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); + } + } transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } @@ -176,9 +200,8 @@ int target_emulate_report_target_port_groups(struct se_task *task) * * See spc4r17 section 6.35 */ -int target_emulate_set_target_port_groups(struct se_task *task) +int target_emulate_set_target_port_groups(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct se_port *port, *l_port = cmd->se_lun->lun_sep; @@ -351,8 +374,7 @@ int target_emulate_set_target_port_groups(struct se_task *task) out: transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } @@ -391,7 +413,7 @@ static inline int core_alua_state_standby( case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: case MAINTENANCE_IN: - switch (cdb[1]) { + switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: @@ -433,7 +455,7 @@ static inline int core_alua_state_unavailable( case INQUIRY: case REPORT_LUNS: case MAINTENANCE_IN: - switch (cdb[1]) { + switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: @@ -473,7 +495,7 @@ static inline int core_alua_state_transition( case INQUIRY: case REPORT_LUNS: case MAINTENANCE_IN: - switch (cdb[1]) { + switch (cdb[1] & 0x1f) { case MI_REPORT_TARGET_PGS: return 0; default: @@ -1359,6 +1381,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( */ tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; + tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS; if (def_group) { spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); @@ -1855,6 +1878,37 @@ ssize_t core_alua_store_trans_delay_msecs( return count; } +ssize_t core_alua_show_implict_trans_secs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs); +} + +ssize_t core_alua_store_implict_trans_secs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + unsigned long tmp; + int ret; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + pr_err("Unable to extract implict_trans_secs\n"); + return -EINVAL; + } + if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { + pr_err("Passed implict_trans_secs: %lu, exceeds" + " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp, + ALUA_MAX_IMPLICT_TRANS_SECS); + return -EINVAL; + } + tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp; + + return count; +} + ssize_t core_alua_show_preferred_bit( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index c5b4ecd3e74..f920c170d47 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -52,6 +52,12 @@ #define ALUA_DEFAULT_TRANS_DELAY_MSECS 0 #define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */ /* + * Used for the recommended application client implicit transition timeout + * in seconds, returned by the REPORT_TARGET_PORT_GROUPS with extended header. 
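 * A value of 0 (the default below) leaves byte 5 of the extended header
 * zeroed, telling the application client that no recommendation is made.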
+ */ +#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0 +#define ALUA_MAX_IMPLICT_TRANS_SECS 255 +/* * Used by core_alua_update_tpg_primary_metadata() and * core_alua_update_tpg_secondary_metadata() */ @@ -66,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; -extern int target_emulate_report_target_port_groups(struct se_task *); -extern int target_emulate_set_target_port_groups(struct se_task *); +extern int target_emulate_report_target_port_groups(struct se_cmd *); +extern int target_emulate_set_target_port_groups(struct se_cmd *); extern int core_alua_check_nonop_delay(struct se_cmd *); extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, struct se_device *, struct se_port *, @@ -107,6 +113,10 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *, char *); extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, const char *, size_t); +extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *, + char *); +extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *, + const char *, size_t); extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, char *); extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *, diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 30a67707036..9888693a18f 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -432,6 +432,7 @@ static int target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; + u32 max_sectors; int have_tp = 0; /* @@ -456,7 +457,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set MAXIMUM TRANSFER LENGTH */ - put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]); + max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, + dev->se_sub_dev->se_dev_attrib.hw_max_sectors); + put_unaligned_be32(max_sectors, &buf[8]); /* * Set OPTIMAL TRANSFER LENGTH @@ -598,9 +601,8 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) return 0; } -int target_emulate_inquiry(struct se_task *task) +int target_emulate_inquiry(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; unsigned char *buf, *map_buf; @@ -664,16 +666,13 @@ out: } transport_kunmap_data_sg(cmd); - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } -int target_emulate_readcapacity(struct se_task *task) +int target_emulate_readcapacity(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned char *buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); @@ -697,14 +696,12 @@ int target_emulate_readcapacity(struct se_task *task) transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } -int target_emulate_readcapacity_16(struct se_task *task) +int target_emulate_readcapacity_16(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned char *buf; unsigned long long blocks = dev->transport->get_blocks(dev); @@ -732,8 +729,7 @@ int target_emulate_readcapacity_16(struct se_task 
*task) transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } @@ -872,9 +868,8 @@ target_modesense_dpofua(unsigned char *buf, int type) } } -int target_emulate_modesense(struct se_task *task) +int target_emulate_modesense(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; char *cdb = cmd->t_task_cdb; unsigned char *rbuf; @@ -947,14 +942,12 @@ int target_emulate_modesense(struct se_task *task) memcpy(rbuf, buf, offset); transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } -int target_emulate_request_sense(struct se_task *task) +int target_emulate_request_sense(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; unsigned char *cdb = cmd->t_task_cdb; unsigned char *buf; u8 ua_asc = 0, ua_ascq = 0; @@ -1008,8 +1001,7 @@ int target_emulate_request_sense(struct se_task *task) end: transport_kunmap_data_sg(cmd); - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } @@ -1017,9 +1009,8 @@ end: * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. * Note this is not used for TCM/pSCSI passthrough */ -int target_emulate_unmap(struct se_task *task) +int target_emulate_unmap(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned char *buf, *ptr = NULL; unsigned char *cdb = &cmd->t_task_cdb[0]; @@ -1066,10 +1057,8 @@ int target_emulate_unmap(struct se_task *task) err: transport_kunmap_data_sg(cmd); - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } @@ -1077,9 +1066,8 @@ err: * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. * Note this is not used for TCM/pSCSI passthrough */ -int target_emulate_write_same(struct se_task *task) +int target_emulate_write_same(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; sector_t range; sector_t lba = cmd->t_task_lba; @@ -1118,79 +1106,25 @@ int target_emulate_write_same(struct se_task *task) return ret; } - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } -int target_emulate_synchronize_cache(struct se_task *task) +int target_emulate_synchronize_cache(struct se_cmd *cmd) { - struct se_device *dev = task->task_se_cmd->se_dev; - struct se_cmd *cmd = task->task_se_cmd; - - if (!dev->transport->do_sync_cache) { + if (!cmd->se_dev->transport->do_sync_cache) { pr_err("SYNCHRONIZE_CACHE emulation not supported" - " for: %s\n", dev->transport->name); + " for: %s\n", cmd->se_dev->transport->name); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; return -ENOSYS; } - dev->transport->do_sync_cache(task); + cmd->se_dev->transport->do_sync_cache(cmd); return 0; } -int target_emulate_noop(struct se_task *task) +int target_emulate_noop(struct se_cmd *cmd) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, GOOD); return 0; } - -/* - * Write a CDB into @cdb that is based on the one the intiator sent us, - * but updated to only cover the sectors that the current task handles. 
- */ -void target_get_task_cdb(struct se_task *task, unsigned char *cdb) -{ - struct se_cmd *cmd = task->task_se_cmd; - unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb); - - memcpy(cdb, cmd->t_task_cdb, cdb_len); - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - unsigned long long lba = task->task_lba; - u32 sectors = task->task_sectors; - - switch (cdb_len) { - case 6: - /* 21-bit LBA and 8-bit sectors */ - cdb[1] = (lba >> 16) & 0x1f; - cdb[2] = (lba >> 8) & 0xff; - cdb[3] = lba & 0xff; - cdb[4] = sectors & 0xff; - break; - case 10: - /* 32-bit LBA and 16-bit sectors */ - put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be16(sectors, &cdb[7]); - break; - case 12: - /* 32-bit LBA and 32-bit sectors */ - put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be32(sectors, &cdb[6]); - break; - case 16: - /* 64-bit LBA and 32-bit sectors */ - put_unaligned_be64(lba, &cdb[2]); - put_unaligned_be32(sectors, &cdb[10]); - break; - case 32: - /* 64-bit LBA and 32-bit sectors, extended CDB */ - put_unaligned_be64(lba, &cdb[12]); - put_unaligned_be32(sectors, &cdb[28]); - break; - default: - BUG(); - } - } -} -EXPORT_SYMBOL(target_get_task_cdb); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index cbb66537d23..801efa89204 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -683,9 +683,6 @@ SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB_RO(hw_max_sectors); SE_DEV_ATTR_RO(hw_max_sectors); -DEF_DEV_ATTRIB(max_sectors); -SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR); - DEF_DEV_ATTRIB(fabric_max_sectors); SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR); @@ -727,7 +724,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_hw_block_size.attr, &target_core_dev_attrib_block_size.attr, &target_core_dev_attrib_hw_max_sectors.attr, - &target_core_dev_attrib_max_sectors.attr, &target_core_dev_attrib_fabric_max_sectors.attr, &target_core_dev_attrib_optimal_sectors.attr, &target_core_dev_attrib_hw_queue_depth.attr, @@ -2451,6 +2447,26 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); /* + * implict_trans_secs + */ +static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + char *page) +{ + return core_alua_show_implict_trans_secs(tg_pt_gp, page); +} + +static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( + struct t10_alua_tg_pt_gp *tg_pt_gp, + const char *page, + size_t count) +{ + return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); +} + +SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); + +/* * preferred */ @@ -2574,6 +2590,7 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { &target_core_alua_tg_pt_gp_alua_write_metadata.attr, &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, + &target_core_alua_tg_pt_gp_implict_trans_secs.attr, &target_core_alua_tg_pt_gp_preferred.attr, &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, &target_core_alua_tg_pt_gp_members.attr, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index aa626774638..5ad972856a8 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -643,9 +643,8 @@ void core_dev_unexport( lun->lun_se_dev = NULL; } -int target_report_luns(struct se_task *se_task) +int 
target_report_luns(struct se_cmd *se_cmd) { - struct se_cmd *se_cmd = se_task->task_se_cmd; struct se_dev_entry *deve; struct se_session *se_sess = se_cmd->se_sess; unsigned char *buf; @@ -696,8 +695,7 @@ done: buf[3] = (lun_count & 0xff); transport_kunmap_data_sg(se_cmd); - se_task->task_scsi_status = GOOD; - transport_complete_task(se_task, 1); + target_complete_cmd(se_cmd, GOOD); return 0; } @@ -878,15 +876,12 @@ void se_dev_set_default_attribs( dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; /* - * max_sectors is based on subsystem plugin dependent requirements. + * Align max_hw_sectors down to PAGE_SIZE I/O transfers */ - dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; - /* - * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() - */ - limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, + limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors, limits->logical_block_size); - dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; + dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; + /* * Set fabric_max_sectors, which is reported in block limits * VPD page (B0h). @@ -1170,64 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) return 0; } -int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) -{ - int force = 0; /* Force setting for VDEVS */ - - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - pr_err("dev[%p]: Unable to change SE Device" - " max_sectors while dev_export_obj: %d count exists\n", - dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -EINVAL; - } - if (!max_sectors) { - pr_err("dev[%p]: Illegal ZERO value for" - " max_sectors\n", dev); - return -EINVAL; - } - if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { - pr_err("dev[%p]: Passed max_sectors: %u less than" - " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, - DA_STATUS_MAX_SECTORS_MIN); - return -EINVAL; - } - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { - pr_err("dev[%p]: Passed max_sectors: %u" - " greater than TCM/SE_Device max_sectors:" - " %u\n", dev, max_sectors, - dev->se_sub_dev->se_dev_attrib.hw_max_sectors); - return -EINVAL; - } - } else { - if (!force && (max_sectors > - dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { - pr_err("dev[%p]: Passed max_sectors: %u" - " greater than TCM/SE_Device max_sectors" - ": %u, use force=1 to override.\n", dev, - max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); - return -EINVAL; - } - if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { - pr_err("dev[%p]: Passed max_sectors: %u" - " greater than DA_STATUS_MAX_SECTORS_MAX:" - " %u\n", dev, max_sectors, - DA_STATUS_MAX_SECTORS_MAX); - return -EINVAL; - } - } - /* - * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() - */ - max_sectors = se_dev_align_max_sectors(max_sectors, - dev->se_sub_dev->se_dev_attrib.block_size); - - dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; - pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", - dev, max_sectors); - return 0; -} - int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { @@ -1341,7 +1278,6 @@ struct se_lun *core_dev_add_lun( u32 lun) { struct se_lun *lun_p; - u32 lun_access = 0; 
int rc; if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { @@ -1354,12 +1290,8 @@ struct se_lun *core_dev_add_lun( if (IS_ERR(lun_p)) return lun_p; - if (dev->dev_flags & DF_READ_ONLY) - lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; - else - lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; - - rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev); + rc = core_tpg_post_addlun(tpg, lun_p, + TRANSPORT_LUNFLAGS_READ_WRITE, dev); if (rc < 0) return ERR_PTR(rc); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 7ed58e2df79..686dba189f8 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -133,15 +133,10 @@ static struct se_device *fd_create_virtdevice( ret = PTR_ERR(dev_p); goto fail; } -#if 0 - if (di->no_create_file) - flags = O_RDWR | O_LARGEFILE; - else - flags = O_RDWR | O_CREAT | O_LARGEFILE; -#else + + /* O_DIRECT too? */ flags = O_RDWR | O_CREAT | O_LARGEFILE; -#endif -/* flags |= O_DIRECT; */ + /* * If fd_buffered_io=1 has not been set explicitly (the default), * use O_SYNC to force FILEIO writes to disk. @@ -169,6 +164,7 @@ static struct se_device *fd_create_virtdevice( inode = file->f_mapping->host; if (S_ISBLK(inode->i_mode)) { struct request_queue *q; + unsigned long long dev_size; /* * Setup the local scope queue_limits from struct request_queue->limits * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. @@ -183,13 +179,12 @@ static struct se_device *fd_create_virtdevice( * one (1) logical sector from underlying struct block_device */ fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); - fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - + dev_size = (i_size_read(file->f_mapping->host) - fd_dev->fd_block_size); pr_debug("FILEIO: Using size: %llu bytes from struct" " block_device blocks: %llu logical_block_size: %d\n", - fd_dev->fd_dev_size, - div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), + dev_size, div_u64(dev_size, fd_dev->fd_block_size), fd_dev->fd_block_size); } else { if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { @@ -249,53 +244,33 @@ static void fd_free_device(void *p) kfree(fd_dev); } -static inline struct fd_request *FILE_REQ(struct se_task *task) +static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents) { - return container_of(task, struct fd_request, fd_task); -} - - -static struct se_task * -fd_alloc_task(unsigned char *cdb) -{ - struct fd_request *fd_req; - - fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); - if (!fd_req) { - pr_err("Unable to allocate struct fd_request\n"); - return NULL; - } - - return &fd_req->fd_task; -} - -static int fd_do_readv(struct se_task *task) -{ - struct fd_request *req = FILE_REQ(task); - struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; + struct se_device *se_dev = cmd->se_dev; struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; - struct scatterlist *sg = task->task_sg; + struct scatterlist *sg; struct iovec *iov; mm_segment_t old_fs; - loff_t pos = (task->task_lba * + loff_t pos = (cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size); int ret = 0, i; - iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); + iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); if (!iov) { pr_err("Unable to allocate fd_do_readv iov[]\n"); return -ENOMEM; } - for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + for_each_sg(sgl, sg, sgl_nents, i) { iov[i].iov_len = sg->length; iov[i].iov_base = sg_virt(sg); } old_fs = 
get_fs(); set_fs(get_ds()); - ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); + ret = vfs_readv(fd, &iov[0], sgl_nents, &pos); set_fs(old_fs); kfree(iov); @@ -305,10 +280,10 @@ static int fd_do_readv(struct se_task *task) * block_device. */ if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { - if (ret < 0 || ret != task->task_size) { + if (ret < 0 || ret != cmd->data_length) { pr_err("vfs_readv() returned %d," " expecting %d for S_ISBLK\n", ret, - (int)task->task_size); + (int)cmd->data_length); return (ret < 0 ? ret : -EINVAL); } } else { @@ -322,38 +297,38 @@ static int fd_do_readv(struct se_task *task) return 1; } -static int fd_do_writev(struct se_task *task) +static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents) { - struct fd_request *req = FILE_REQ(task); - struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; + struct se_device *se_dev = cmd->se_dev; struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; - struct scatterlist *sg = task->task_sg; + struct scatterlist *sg; struct iovec *iov; mm_segment_t old_fs; - loff_t pos = (task->task_lba * + loff_t pos = (cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size); int ret, i = 0; - iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); + iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); if (!iov) { pr_err("Unable to allocate fd_do_writev iov[]\n"); return -ENOMEM; } - for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + for_each_sg(sgl, sg, sgl_nents, i) { iov[i].iov_len = sg->length; iov[i].iov_base = sg_virt(sg); } old_fs = get_fs(); set_fs(get_ds()); - ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); + ret = vfs_writev(fd, &iov[0], sgl_nents, &pos); set_fs(old_fs); kfree(iov); - if (ret < 0 || ret != task->task_size) { + if (ret < 0 || ret != cmd->data_length) { pr_err("vfs_writev() returned %d\n", ret); return (ret < 0 ? ret : -EINVAL); } @@ -361,9 +336,8 @@ static int fd_do_writev(struct se_task *task) return 1; } -static void fd_emulate_sync_cache(struct se_task *task) +static void fd_emulate_sync_cache(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; int immed = (cmd->t_task_cdb[1] & 0x2); @@ -375,7 +349,7 @@ static void fd_emulate_sync_cache(struct se_task *task) * for this SYNCHRONIZE_CACHE op */ if (immed) - transport_complete_sync_cache(cmd, 1); + target_complete_cmd(cmd, SAM_STAT_GOOD); /* * Determine if we will be flushing the entire device. @@ -395,33 +369,37 @@ static void fd_emulate_sync_cache(struct se_task *task) if (ret != 0) pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); - if (!immed) - transport_complete_sync_cache(cmd, ret == 0); + if (immed) + return; + + if (ret) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); + } else { + target_complete_cmd(cmd, SAM_STAT_GOOD); + } } -/* - * WRITE Force Unit Access (FUA) emulation on a per struct se_task - * LBA range basis.. 
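
fd_do_readv() and fd_do_writev() above build one struct iovec per scatterlist entry and push the whole transfer through a single vectored call at the byte offset derived from t_task_lba. A self-contained userspace analog, with pwritev() standing in for vfs_writev() plus the explicit position, and an arbitrary demo file path:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        char a[512], b[512];
        struct iovec iov[2] = {
            { .iov_base = a, .iov_len = sizeof(a) },
            { .iov_base = b, .iov_len = sizeof(b) },
        };
        off_t pos = 8 * 512; /* t_task_lba * block_size */
        int fd = open("demo.img", O_RDWR | O_CREAT, 0600);

        if (fd < 0)
            return 1;
        memset(a, 0xaa, sizeof(a));
        memset(b, 0xbb, sizeof(b));
        /* one vectored call covers both segments, as in the driver */
        if (pwritev(fd, iov, 2, pos) != (ssize_t)(sizeof(a) + sizeof(b)))
            perror("pwritev");
        close(fd);
        return 0;
    }
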
- */ -static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) +static void fd_emulate_write_fua(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; - loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; - loff_t end = start + task->task_size; + loff_t start = cmd->t_task_lba * + dev->se_sub_dev->se_dev_attrib.block_size; + loff_t end = start + cmd->data_length; int ret; pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", - task->task_lba, task->task_size); + cmd->t_task_lba, cmd->data_length); ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); if (ret != 0) pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); } -static int fd_do_task(struct se_task *task) +static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; int ret = 0; @@ -429,10 +407,10 @@ static int fd_do_task(struct se_task *task) * Call vectorized fileio functions to map struct scatterlist * physical memory addresses to struct iovec virtual memory. */ - if (task->task_data_direction == DMA_FROM_DEVICE) { - ret = fd_do_readv(task); + if (data_direction == DMA_FROM_DEVICE) { + ret = fd_do_readv(cmd, sgl, sgl_nents); } else { - ret = fd_do_writev(task); + ret = fd_do_writev(cmd, sgl, sgl_nents); if (ret > 0 && dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && @@ -443,7 +421,7 @@ static int fd_do_task(struct se_task *task) * and return some sense data to let the initiator * know the FUA WRITE cache sync failed..? */ - fd_emulate_write_fua(cmd, task); + fd_emulate_write_fua(cmd); } } @@ -452,24 +430,11 @@ static int fd_do_task(struct se_task *task) cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return ret; } - if (ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (ret) + target_complete_cmd(cmd, SAM_STAT_GOOD); return 0; } -/* fd_free_task(): (Part of se_subsystem_api_t template) - * - * - */ -static void fd_free_task(struct se_task *task) -{ - struct fd_request *req = FILE_REQ(task); - - kfree(req); -} - enum { Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err }; @@ -605,10 +570,20 @@ static u32 fd_get_device_type(struct se_device *dev) static sector_t fd_get_blocks(struct se_device *dev) { struct fd_dev *fd_dev = dev->dev_ptr; - unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, - dev->se_sub_dev->se_dev_attrib.block_size); + struct file *f = fd_dev->fd_file; + struct inode *i = f->f_mapping->host; + unsigned long long dev_size; + /* + * When using a file that references an underlying struct block_device, + * ensure dev_size is always based on the current inode size in order + * to handle underlying block_device resize operations. 
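
The fd_get_blocks() comment above motivates recomputing the size on every call, as the code just below does. A userspace sketch of the same idea, with lseek(SEEK_END) standing in for i_size_read() and a hard-coded 512-byte block size:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : "demo.img";
        int fd = open(path, O_RDONLY);
        off_t size;

        if (fd < 0)
            return 1;
        /* query the size on every call so a live resize is visible */
        size = lseek(fd, 0, SEEK_END);
        printf("%s: %lld blocks\n", path, (long long)(size / 512));
        close(fd);
        return 0;
    }
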
+ */ + if (S_ISBLK(i->i_mode)) + dev_size = (i_size_read(i) - fd_dev->fd_block_size); + else + dev_size = fd_dev->fd_dev_size; - return blocks_long; + return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); } static struct se_subsystem_api fileio_template = { @@ -622,10 +597,8 @@ static struct se_subsystem_api fileio_template = { .allocate_virtdevice = fd_allocate_virtdevice, .create_virtdevice = fd_create_virtdevice, .free_device = fd_free_device, - .alloc_task = fd_alloc_task, - .do_task = fd_do_task, + .execute_cmd = fd_execute_cmd, .do_sync_cache = fd_emulate_sync_cache, - .free_task = fd_free_task, .check_configfs_dev_params = fd_check_configfs_dev_params, .set_configfs_dev_params = fd_set_configfs_dev_params, .show_configfs_dev_params = fd_show_configfs_dev_params, diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 59e6e73106c..fbd59ef7d8b 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -12,10 +12,6 @@ #define RRF_EMULATE_CDB 0x01 #define RRF_GOT_LBA 0x02 -struct fd_request { - struct se_task fd_task; -}; - #define FBDF_HAS_PATH 0x01 #define FBDF_HAS_SIZE 0x02 #define FDBD_USE_BUFFERED_IO 0x04 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 2ec299e8a73..fd47950727b 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -189,26 +189,6 @@ static void iblock_free_device(void *p) kfree(ib_dev); } -static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) -{ - return container_of(task, struct iblock_req, ib_task); -} - -static struct se_task * -iblock_alloc_task(unsigned char *cdb) -{ - struct iblock_req *ib_req; - - ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); - if (!ib_req) { - pr_err("Unable to allocate memory for struct iblock_req\n"); - return NULL; - } - - atomic_set(&ib_req->pending, 1); - return &ib_req->ib_task; -} - static unsigned long long iblock_emulate_read_cap_with_block_size( struct se_device *dev, struct block_device *bd, @@ -295,8 +275,16 @@ static void iblock_end_io_flush(struct bio *bio, int err) if (err) pr_err("IBLOCK: cache flush failed: %d\n", err); - if (cmd) - transport_complete_sync_cache(cmd, err == 0); + if (cmd) { + if (err) { + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); + } else { + target_complete_cmd(cmd, SAM_STAT_GOOD); + } + } + bio_put(bio); } @@ -304,9 +292,8 @@ static void iblock_end_io_flush(struct bio *bio, int err) * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must * always flush the whole cache. */ -static void iblock_emulate_sync_cache(struct se_task *task) +static void iblock_emulate_sync_cache(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; int immed = (cmd->t_task_cdb[1] & 0x2); struct bio *bio; @@ -316,7 +303,7 @@ static void iblock_emulate_sync_cache(struct se_task *task) * for this SYNCHRONIZE_CACHE op. 
*/ if (immed) - transport_complete_sync_cache(cmd, 1); + target_complete_cmd(cmd, SAM_STAT_GOOD); bio = bio_alloc(GFP_KERNEL, 0); bio->bi_end_io = iblock_end_io_flush; @@ -335,11 +322,6 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); } -static void iblock_free_task(struct se_task *task) -{ - kfree(IBLOCK_REQ(task)); -} - enum { Opt_udev_path, Opt_force, Opt_err }; @@ -448,19 +430,35 @@ static ssize_t iblock_show_configfs_dev_params( return bl; } +static void iblock_complete_cmd(struct se_cmd *cmd) +{ + struct iblock_req *ibr = cmd->priv; + u8 status; + + if (!atomic_dec_and_test(&ibr->pending)) + return; + + if (atomic_read(&ibr->ib_bio_err_cnt)) + status = SAM_STAT_CHECK_CONDITION; + else + status = SAM_STAT_GOOD; + + target_complete_cmd(cmd, status); + kfree(ibr); +} + static void iblock_bio_destructor(struct bio *bio) { - struct se_task *task = bio->bi_private; - struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; + struct se_cmd *cmd = bio->bi_private; + struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; bio_free(bio, ib_dev->ibd_bio_set); } static struct bio * -iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) +iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) { - struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; - struct iblock_req *ib_req = IBLOCK_REQ(task); + struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; struct bio *bio; /* @@ -476,19 +474,11 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) return NULL; } - pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:" - " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set); - pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size); - bio->bi_bdev = ib_dev->ibd_bd; - bio->bi_private = task; + bio->bi_private = cmd; bio->bi_destructor = iblock_bio_destructor; bio->bi_end_io = &iblock_bio_done; bio->bi_sector = lba; - atomic_inc(&ib_req->pending); - - pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); - pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending)); return bio; } @@ -503,20 +493,21 @@ static void iblock_submit_bios(struct bio_list *list, int rw) blk_finish_plug(&plug); } -static int iblock_do_task(struct se_task *task) +static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; - struct iblock_req *ibr = IBLOCK_REQ(task); + struct iblock_req *ibr; struct bio *bio; struct bio_list list; struct scatterlist *sg; - u32 i, sg_num = task->task_sg_nents; + u32 sg_num = sgl_nents; sector_t block_lba; unsigned bio_cnt; int rw; + int i; - if (task->task_data_direction == DMA_TO_DEVICE) { + if (data_direction == DMA_TO_DEVICE) { /* * Force data to disk if we pretend to not have a volatile * write cache, or the initiator set the Force Unit Access bit. @@ -532,17 +523,17 @@ static int iblock_do_task(struct se_task *task) } /* - * Do starting conversion up from non 512-byte blocksize with - * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. + * Convert the blocksize advertised to the initiator to the 512 byte + * units unconditionally used by the Linux block layer. 
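
The comment above is implemented by the if/else chain that follows; a compact userspace rendering of the same shift table (the function name is illustrative):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t to_512_byte_units(uint64_t lba, uint32_t block_size)
    {
        switch (block_size) {
        case 4096: return lba << 3;
        case 2048: return lba << 2;
        case 1024: return lba << 1;
        case 512:  return lba;
        default:   return UINT64_MAX; /* unsupported, as in the hunk */
        }
    }

    int main(void)
    {
        /* LBA 100 on a 4k-block device is sector 800 to the block layer */
        assert(to_512_byte_units(100, 4096) == 800);
        return 0;
    }
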
*/ if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) - block_lba = (task->task_lba << 3); + block_lba = (cmd->t_task_lba << 3); else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) - block_lba = (task->task_lba << 2); + block_lba = (cmd->t_task_lba << 2); else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) - block_lba = (task->task_lba << 1); + block_lba = (cmd->t_task_lba << 1); else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) - block_lba = task->task_lba; + block_lba = cmd->t_task_lba; else { pr_err("Unsupported SCSI -> BLOCK LBA conversion:" " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); @@ -550,17 +541,22 @@ return -ENOSYS; } - bio = iblock_get_bio(task, block_lba, sg_num); - if (!bio) { - cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return -ENOMEM; - } + ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); + if (!ibr) + goto fail; + cmd->priv = ibr; + + bio = iblock_get_bio(cmd, block_lba, sgl_nents); + if (!bio) + goto fail_free_ibr; bio_list_init(&list); bio_list_add(&list, bio); + + atomic_set(&ibr->pending, 2); bio_cnt = 1; - for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + for_each_sg(sgl, sg, sgl_nents, i) { /* * XXX: if the length the device accepts is shorter than the * length of the S/G list entry this will cause an @@ -573,9 +569,11 @@ bio_cnt = 0; } - bio = iblock_get_bio(task, block_lba, sg_num); + bio = iblock_get_bio(cmd, block_lba, sg_num); if (!bio) - goto fail; + goto fail_put_bios; + + atomic_inc(&ibr->pending); bio_list_add(&list, bio); bio_cnt++; } @@ -586,17 +584,16 @@ } iblock_submit_bios(&list, rw); - - if (atomic_dec_and_test(&ibr->pending)) { - transport_complete_task(task, - !atomic_read(&ibr->ib_bio_err_cnt)); - } + iblock_complete_cmd(cmd); return 0; -fail: +fail_put_bios: while ((bio = bio_list_pop(&list))) bio_put(bio); +fail_free_ibr: + kfree(ibr); cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +fail: return -ENOMEM; } @@ -621,8 +618,8 @@ static sector_t iblock_get_blocks(struct se_device *dev) static void iblock_bio_done(struct bio *bio, int err) { - struct se_task *task = bio->bi_private; - struct iblock_req *ibr = IBLOCK_REQ(task); + struct se_cmd *cmd = bio->bi_private; + struct iblock_req *ibr = cmd->priv; /* * Set -EIO if !BIO_UPTODATE and the passed is still err=0 @@ -642,14 +639,7 @@ bio_put(bio); - if (!atomic_dec_and_test(&ibr->pending)) - return; - - pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", - task, bio, task->task_lba, - (unsigned long long)bio->bi_sector, err); - - transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt)); + iblock_complete_cmd(cmd); } static struct se_subsystem_api iblock_template = { @@ -663,11 +653,9 @@ .allocate_virtdevice = iblock_allocate_virtdevice, .create_virtdevice = iblock_create_virtdevice, .free_device = iblock_free_device, - .alloc_task = iblock_alloc_task, - .do_task = iblock_do_task, + .execute_cmd = iblock_execute_cmd, .do_discard = iblock_do_discard, .do_sync_cache = iblock_emulate_sync_cache, - .free_task = iblock_free_task, .check_configfs_dev_params = iblock_check_configfs_dev_params, .set_configfs_dev_params = iblock_set_configfs_dev_params, .show_configfs_dev_params = iblock_show_configfs_dev_params, diff --git
a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index e929370b6fd..66cf7b9e205 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -7,7 +7,6 @@ #define IBLOCK_LBA_SHIFT 9 struct iblock_req { - struct se_task ib_task; atomic_t pending; atomic_t ib_bio_err_cnt; } ____cacheline_aligned; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 21c05638f15..165e8242968 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -5,15 +5,15 @@ extern struct t10_alua_lu_gp *default_lu_gp; /* target_core_cdb.c */ -int target_emulate_inquiry(struct se_task *task); -int target_emulate_readcapacity(struct se_task *task); -int target_emulate_readcapacity_16(struct se_task *task); -int target_emulate_modesense(struct se_task *task); -int target_emulate_request_sense(struct se_task *task); -int target_emulate_unmap(struct se_task *task); -int target_emulate_write_same(struct se_task *task); -int target_emulate_synchronize_cache(struct se_task *task); -int target_emulate_noop(struct se_task *task); +int target_emulate_inquiry(struct se_cmd *cmd); +int target_emulate_readcapacity(struct se_cmd *cmd); +int target_emulate_readcapacity_16(struct se_cmd *cmd); +int target_emulate_modesense(struct se_cmd *cmd); +int target_emulate_request_sense(struct se_cmd *cmd); +int target_emulate_unmap(struct se_cmd *cmd); +int target_emulate_write_same(struct se_cmd *cmd); +int target_emulate_synchronize_cache(struct se_cmd *cmd); +int target_emulate_noop(struct se_cmd *cmd); /* target_core_device.c */ struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); @@ -28,7 +28,7 @@ int core_dev_export(struct se_device *, struct se_portal_group *, struct se_lun *); void core_dev_unexport(struct se_device *, struct se_portal_group *, struct se_lun *); -int target_report_luns(struct se_task *); +int target_report_luns(struct se_cmd *); void se_release_device_for_hba(struct se_device *); void se_release_vpd_for_dev(struct se_device *); int se_free_virtual_device(struct se_device *, struct se_hba *); @@ -104,8 +104,7 @@ void release_se_kmem_caches(void); u32 scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); void transport_cmd_finish_abort(struct se_cmd *, int); -void __transport_remove_task_from_execute_queue(struct se_task *, - struct se_device *); +void __target_remove_from_execute_list(struct se_cmd *); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, @@ -114,7 +113,7 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); -bool target_stop_task(struct se_task *task, unsigned long *flags); +bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); int transport_clear_lun_from_sessions(struct se_lun *); void transport_send_task_abort(struct se_cmd *); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 86f0c3b5d50..85564998500 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -193,9 +193,8 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd) return 0; } -int 
target_scsi2_reservation_release(struct se_task *task) +int target_scsi2_reservation_release(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; @@ -220,6 +219,9 @@ int target_scsi2_reservation_release(struct se_task *task) if (dev->dev_reserved_node_acl != sess->se_node_acl) goto out_unlock; + if (dev->dev_res_bin_isid != sess->sess_bin_isid) + goto out_unlock; + dev->dev_reserved_node_acl = NULL; dev->dev_flags &= ~DF_SPC2_RESERVATIONS; if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) { @@ -234,16 +236,13 @@ out_unlock: spin_unlock(&dev->dev_reservation_lock); out: - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } -int target_scsi2_reservation_reserve(struct se_task *task) +int target_scsi2_reservation_reserve(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; @@ -304,10 +303,8 @@ int target_scsi2_reservation_reserve(struct se_task *task) out_unlock: spin_unlock(&dev->dev_reservation_lock); out: - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } @@ -500,11 +497,10 @@ static int core_scsi3_pr_seq_non_holder( * statement. */ if (!ret && !other_cdb) { -#if 0 pr_debug("Allowing explicit CDB: 0x%02x for %s" " reservation holder\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); -#endif + return ret; } /* @@ -532,14 +528,14 @@ static int core_scsi3_pr_seq_non_holder( * as we expect registered non-reservation holding * nexuses to issue CDBs. */ -#if 0 + if (!registered_nexus) { pr_debug("Allowing implicit CDB: 0x%02x" " for %s reservation on unregistered" " nexus\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); } -#endif + return 0; } } else if ((reg_only) || (all_reg)) { @@ -548,11 +544,11 @@ static int core_scsi3_pr_seq_non_holder( * For PR_*_REG_ONLY and PR_*_ALL_REG reservations, * allow commands from registered nexuses. */ -#if 0 + pr_debug("Allowing implicit CDB: 0x%02x for %s" " reservation\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); -#endif + return 0; } } @@ -1666,12 +1662,12 @@ static int core_scsi3_decode_spec_i_port( ret = -EINVAL; goto out; } -#if 0 + pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" " tid_len: %d for %s + %s\n", dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, tpdl, tid_len, i_str, iport_ptr); -#endif + if (tid_len > tpdl) { pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:" " %u for Transport ID: %s\n", tid_len, ptr); @@ -1714,12 +1710,12 @@ static int core_scsi3_decode_spec_i_port( ret = -EINVAL; goto out; } -#if 0 + pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" " dest_se_deve mapped_lun: %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, dest_se_deve->mapped_lun); -#endif + /* * Skip any TransportIDs that already have a registration for * this target port.
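
The new ISID comparison added to target_scsi2_reservation_release() above means a RELEASE only succeeds when both the initiator ACL and the session ISID match the reservation holder. A toy model of the combined check, with simplified stand-ins for the se_* structures and the dev_reserved_node_acl/dev_res_bin_isid fields:

    #include <assert.h>
    #include <stdbool.h>

    struct resv {
        const void *holder_acl;         /* dev_reserved_node_acl */
        unsigned long long holder_isid; /* dev_res_bin_isid */
    };

    static bool may_release(const struct resv *r, const void *acl,
                            unsigned long long isid)
    {
        return r->holder_acl == acl && r->holder_isid == isid;
    }

    int main(void)
    {
        int acl_a, acl_b;
        struct resv r = { .holder_acl = &acl_a, .holder_isid = 0x10 };

        assert(may_release(&r, &acl_a, 0x10));
        assert(!may_release(&r, &acl_a, 0x20)); /* same ACL, other ISID */
        assert(!may_release(&r, &acl_b, 0x10));
        return 0;
    }
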
@@ -3473,10 +3469,10 @@ static int core_scsi3_emulate_pro_register_and_move( buf = transport_kmap_data_sg(cmd); proto_ident = (buf[24] & 0x0f); -#if 0 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" " 0x%02x\n", proto_ident); -#endif + if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" " proto_ident: 0x%02x does not match ident: 0x%02x" @@ -3575,11 +3571,11 @@ after_iport_check: ret = -EINVAL; goto out; } -#if 0 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" " %s from TransportID\n", dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname); -#endif + /* * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET * PORT IDENTIFIER. @@ -3603,12 +3599,12 @@ after_iport_check: ret = -EINVAL; goto out; } -#if 0 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" " ACL for dest_se_deve->mapped_lun: %u\n", dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, dest_se_deve->mapped_lun); -#endif + /* * A persistent reservation needs to already exist in order to * successfully complete the REGISTER_AND_MOVE service action.. @@ -3799,9 +3795,8 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) /* * See spc4r17 section 6.14 Table 170 */ -int target_scsi3_emulate_pr_out(struct se_task *task) +int target_scsi3_emulate_pr_out(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; unsigned char *cdb = &cmd->t_task_cdb[0]; unsigned char *buf; u64 res_key, sa_res_key; @@ -3941,10 +3936,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) } out: - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } @@ -4299,9 +4292,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) return 0; } -int target_scsi3_emulate_pr_in(struct se_task *task) +int target_scsi3_emulate_pr_in(struct se_cmd *cmd) { - struct se_cmd *cmd = task->task_se_cmd; int ret; /* @@ -4342,10 +4334,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task) break; } - if (!ret) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + if (!ret) + target_complete_cmd(cmd, GOOD); return ret; } diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index 7a233feb7e9..af6c460d886 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache; extern int core_pr_dump_initiator_port(struct t10_pr_registration *, char *, u32); -extern int target_scsi2_reservation_release(struct se_task *task); -extern int target_scsi2_reservation_reserve(struct se_task *task); +extern int target_scsi2_reservation_release(struct se_cmd *); +extern int target_scsi2_reservation_reserve(struct se_cmd *); extern int core_scsi3_alloc_aptpl_registration( struct t10_reservation *, u64, unsigned char *, unsigned char *, u32, @@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, extern void core_scsi3_free_all_registrations(struct se_device *); extern unsigned char *core_scsi3_pr_dump_type(int); -extern int target_scsi3_emulate_pr_in(struct se_task *task); -extern int target_scsi3_emulate_pr_out(struct se_task *task); +extern int target_scsi3_emulate_pr_in(struct se_cmd *); +extern int target_scsi3_emulate_pr_out(struct se_cmd *); extern int core_setup_reservations(struct se_device *, int); #endif /* TARGET_CORE_PR_H */ diff --git
a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 94c905fcbce..4ce2cf642fc 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -663,22 +663,12 @@ static void pscsi_free_device(void *p) kfree(pdv); } -static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) +static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg) { - return container_of(task, struct pscsi_plugin_task, pscsi_task); -} - - -/* pscsi_transport_complete(): - * - * - */ -static int pscsi_transport_complete(struct se_task *task) -{ - struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; + struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; struct scsi_device *sd = pdv->pdv_sd; int result; - struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_plugin_task *pt = cmd->priv; unsigned char *cdb = &pt->pscsi_cdb[0]; result = pt->pscsi_result; @@ -688,12 +678,11 @@ static int pscsi_transport_complete(struct se_task *task) */ if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && (status_byte(result) << 1) == SAM_STAT_GOOD) { - if (!task->task_se_cmd->se_deve) + if (!cmd->se_deve) goto after_mode_sense; - if (task->task_se_cmd->se_deve->lun_flags & - TRANSPORT_LUNFLAGS_READ_ONLY) { - unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd); + if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { + unsigned char *buf = transport_kmap_data_sg(cmd); if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -703,7 +692,7 @@ static int pscsi_transport_complete(struct se_task *task) buf[2] |= 0x80; } - transport_kunmap_data_sg(task->task_se_cmd); + transport_kunmap_data_sg(cmd); } } after_mode_sense: @@ -722,7 +711,6 @@ after_mode_sense: if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && (status_byte(result) << 1) == SAM_STAT_GOOD) { unsigned char *buf; - struct scatterlist *sg = task->task_sg; u16 bdl; u32 blocksize; @@ -757,35 +745,6 @@ after_mode_select: return 0; } -static struct se_task * -pscsi_alloc_task(unsigned char *cdb) -{ - struct pscsi_plugin_task *pt; - - /* - * Dynamically alloc cdb space, since it may be larger than - * TCM_MAX_COMMAND_SIZE - */ - pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL); - if (!pt) { - pr_err("Unable to allocate struct pscsi_plugin_task\n"); - return NULL; - } - - return &pt->pscsi_task; -} - -static void pscsi_free_task(struct se_task *task) -{ - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - - /* - * We do not release the bio(s) here associated with this task, as - * this is handled by bio_put() and pscsi_bi_endio(). 
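
pscsi_transport_complete() above now recovers its per-command state from cmd->priv, and pscsi_execute_cmd() below allocates it at submit time. A stripped-down sketch of that submit/complete handoff, with invented type and function names:

    #include <assert.h>
    #include <stdlib.h>

    struct cmd { void *priv; };
    struct backend_state { int result; };

    /* submit: allocate the backend's private state and park it */
    static int submit(struct cmd *c)
    {
        struct backend_state *st = calloc(1, sizeof(*st));

        if (!st)
            return -1;
        c->priv = st;
        return 0;
    }

    /* complete: retrieve the parked state, consume it, free it */
    static void complete(struct cmd *c)
    {
        struct backend_state *st = c->priv;

        /* ...consume st->result here... */
        free(st);
        c->priv = NULL;
    }

    int main(void)
    {
        struct cmd c = { 0 };

        assert(submit(&c) == 0);
        complete(&c);
        return 0;
    }
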
- */ - kfree(pt); -} - enum { Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id, Opt_scsi_lun_id, Opt_err @@ -958,26 +917,25 @@ static inline struct bio *pscsi_get_bio(int sg_num) return bio; } -static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, +static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction, struct bio **hbio) { - struct se_cmd *cmd = task->task_se_cmd; - struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; - u32 task_sg_num = task->task_sg_nents; + struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; struct bio *bio = NULL, *tbio = NULL; struct page *page; struct scatterlist *sg; - u32 data_len = task->task_size, i, len, bytes, off; - int nr_pages = (task->task_size + task_sg[0].offset + + u32 data_len = cmd->data_length, i, len, bytes, off; + int nr_pages = (cmd->data_length + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; int nr_vecs = 0, rc; - int rw = (task->task_data_direction == DMA_TO_DEVICE); + int rw = (data_direction == DMA_TO_DEVICE); *hbio = NULL; pr_debug("PSCSI: nr_pages: %d\n", nr_pages); - for_each_sg(task_sg, sg, task_sg_num, i) { + for_each_sg(sgl, sg, sgl_nents, i) { page = sg_page(sg); off = sg->offset; len = sg->length; @@ -1009,7 +967,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, * Set *hbio pointer to handle the case: * nr_pages > BIO_MAX_PAGES, where additional * bios need to be added to complete a given - * struct se_task + * command. */ if (!*hbio) *hbio = tbio = bio; @@ -1049,7 +1007,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, } } - return task->task_sg_nents; + return sgl_nents; fail: while (*hbio) { bio = *hbio; @@ -1061,52 +1019,61 @@ fail: return -ENOMEM; } -static int pscsi_do_task(struct se_task *task) +static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct se_cmd *cmd = task->task_se_cmd; - struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; - struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; + struct pscsi_plugin_task *pt; struct request *req; struct bio *hbio; int ret; - target_get_task_cdb(task, pt->pscsi_cdb); + /* + * Dynamically alloc cdb space, since it may be larger than + * TCM_MAX_COMMAND_SIZE + */ + pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL); + if (!pt) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; + } + cmd->priv = pt; + + memcpy(pt->pscsi_cdb, cmd->t_task_cdb, + scsi_command_size(cmd->t_task_cdb)); - if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { + if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { req = blk_get_request(pdv->pdv_sd->request_queue, - (task->task_data_direction == DMA_TO_DEVICE), + (data_direction == DMA_TO_DEVICE), GFP_KERNEL); if (!req || IS_ERR(req)) { pr_err("PSCSI: blk_get_request() failed: %ld\n", req ? 
IS_ERR(req) : -ENOMEM); cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return -ENODEV; + goto fail; } } else { - BUG_ON(!task->task_size); + BUG_ON(!cmd->data_length); - /* - * Setup the main struct request for the task->task_sg[] payload - */ - ret = pscsi_map_sg(task, task->task_sg, &hbio); + ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio); if (ret < 0) { cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return ret; + goto fail; } req = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL); if (IS_ERR(req)) { pr_err("pSCSI: blk_make_request() failed\n"); - goto fail; + goto fail_free_bio; } } req->cmd_type = REQ_TYPE_BLOCK_PC; req->end_io = pscsi_req_done; - req->end_io_data = task; + req->end_io_data = cmd; req->cmd_len = scsi_command_size(pt->pscsi_cdb); req->cmd = &pt->pscsi_cdb[0]; req->sense = &pt->pscsi_sense[0]; @@ -1118,12 +1085,12 @@ static int pscsi_do_task(struct se_task *task) req->retries = PS_RETRY; blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, - (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), + (cmd->sam_task_attr == MSG_HEAD_TAG), pscsi_req_done); return 0; -fail: +fail_free_bio: while (hbio) { struct bio *bio = hbio; hbio = hbio->bi_next; @@ -1131,16 +1098,14 @@ fail: bio_endio(bio, 0); /* XXX: should be error */ } cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +fail: + kfree(pt); return -ENOMEM; } -/* pscsi_get_sense_buffer(): - * - * - */ -static unsigned char *pscsi_get_sense_buffer(struct se_task *task) +static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd) { - struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct pscsi_plugin_task *pt = cmd->priv; return pt->pscsi_sense; } @@ -1180,48 +1145,36 @@ static sector_t pscsi_get_blocks(struct se_device *dev) return 0; } -/* pscsi_handle_SAM_STATUS_failures(): - * - * - */ -static inline void pscsi_process_SAM_status( - struct se_task *task, - struct pscsi_plugin_task *pt) +static void pscsi_req_done(struct request *req, int uptodate) { - task->task_scsi_status = status_byte(pt->pscsi_result); - if (task->task_scsi_status) { - task->task_scsi_status <<= 1; - pr_debug("PSCSI Status Byte exception at task: %p CDB:" - " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], + struct se_cmd *cmd = req->end_io_data; + struct pscsi_plugin_task *pt = cmd->priv; + + pt->pscsi_result = req->errors; + pt->pscsi_resid = req->resid_len; + + cmd->scsi_status = status_byte(pt->pscsi_result) << 1; + if (cmd->scsi_status) { + pr_debug("PSCSI Status Byte exception at cmd: %p CDB:" + " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], pt->pscsi_result); } switch (host_byte(pt->pscsi_result)) { case DID_OK: - transport_complete_task(task, (!task->task_scsi_status)); + target_complete_cmd(cmd, cmd->scsi_status); break; default: - pr_debug("PSCSI Host Byte exception at task: %p CDB:" - " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], + pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" + " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], pt->pscsi_result); - task->task_scsi_status = SAM_STAT_CHECK_CONDITION; - task->task_se_cmd->scsi_sense_reason = - TCM_UNSUPPORTED_SCSI_OPCODE; - transport_complete_task(task, 0); + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); break; } -} -static void pscsi_req_done(struct request *req, int uptodate) -{ - struct se_task *task = req->end_io_data; - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - - pt->pscsi_result = req->errors; - 
pt->pscsi_resid = req->resid_len; - - pscsi_process_SAM_status(task, pt); __blk_put_request(req->q, req); + kfree(pt); } static struct se_subsystem_api pscsi_template = { @@ -1235,9 +1188,7 @@ static struct se_subsystem_api pscsi_template = { .create_virtdevice = pscsi_create_virtdevice, .free_device = pscsi_free_device, .transport_complete = pscsi_transport_complete, - .alloc_task = pscsi_alloc_task, - .do_task = pscsi_do_task, - .free_task = pscsi_free_task, + .execute_cmd = pscsi_execute_cmd, .check_configfs_dev_params = pscsi_check_configfs_dev_params, .set_configfs_dev_params = pscsi_set_configfs_dev_params, .show_configfs_dev_params = pscsi_show_configfs_dev_params, diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index 43f1c419e8e..bc1e5e11eca 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -22,7 +22,6 @@ #include <linux/kobject.h> struct pscsi_plugin_task { - struct se_task pscsi_task; unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; int pscsi_direction; int pscsi_result; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 8b68f7b8263..d0ceb873c0e 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -64,9 +64,6 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); - pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" - " MaxSectors: %u\n", hba->hba_id, - rd_host->rd_host_id, RD_MAX_SECTORS); return 0; } @@ -199,10 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) return 0; } -static void *rd_allocate_virtdevice( - struct se_hba *hba, - const char *name, - int rd_direct) +static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name) { struct rd_dev *rd_dev; struct rd_host *rd_host = hba->hba_ptr; @@ -214,25 +208,12 @@ static void *rd_allocate_virtdevice( } rd_dev->rd_host = rd_host; - rd_dev->rd_direct = rd_direct; return rd_dev; } -static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) -{ - return rd_allocate_virtdevice(hba, name, 0); -} - -/* rd_create_virtdevice(): - * - * - */ -static struct se_device *rd_create_virtdevice( - struct se_hba *hba, - struct se_subsystem_dev *se_dev, - void *p, - int rd_direct) +static struct se_device *rd_create_virtdevice(struct se_hba *hba, + struct se_subsystem_dev *se_dev, void *p) { struct se_device *dev; struct se_dev_limits dev_limits; @@ -247,13 +228,12 @@ static struct se_device *rd_create_virtdevice( if (ret < 0) goto fail; - snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP"); - snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? 
RD_DR_VERSION : - RD_MCP_VERSION); + snprintf(prod, 16, "RAMDISK-MCP"); + snprintf(rev, 4, "%s", RD_MCP_VERSION); dev_limits.limits.logical_block_size = RD_BLOCKSIZE; - dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS; - dev_limits.limits.max_sectors = RD_MAX_SECTORS; + dev_limits.limits.max_hw_sectors = UINT_MAX; + dev_limits.limits.max_sectors = UINT_MAX; dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; @@ -264,12 +244,10 @@ static struct se_device *rd_create_virtdevice( goto fail; rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; - rd_dev->rd_queue_depth = dev->queue_depth; - pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" + pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of" " %u pages in %u tables, %lu total bytes\n", - rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : - "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, + rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count, rd_dev->sg_table_count, (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); @@ -280,18 +258,6 @@ fail: return ERR_PTR(ret); } -static struct se_device *rd_MEMCPY_create_virtdevice( - struct se_hba *hba, - struct se_subsystem_dev *se_dev, - void *p) -{ - return rd_create_virtdevice(hba, se_dev, p, 0); -} - -/* rd_free_device(): (Part of se_subsystem_api_t template) - * - * - */ static void rd_free_device(void *p) { struct rd_dev *rd_dev = p; @@ -300,29 +266,6 @@ static void rd_free_device(void *p) kfree(rd_dev); } -static inline struct rd_request *RD_REQ(struct se_task *task) -{ - return container_of(task, struct rd_request, rd_task); -} - -static struct se_task * -rd_alloc_task(unsigned char *cdb) -{ - struct rd_request *rd_req; - - rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); - if (!rd_req) { - pr_err("Unable to allocate struct rd_request\n"); - return NULL; - } - - return &rd_req->rd_task; -} - -/* rd_get_sg_table(): - * - * - */ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) { u32 i; @@ -341,31 +284,41 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return NULL; } -static int rd_MEMCPY(struct rd_request *req, u32 read_rd) +static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, + u32 sgl_nents, enum dma_data_direction data_direction) { - struct se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; + struct se_device *se_dev = cmd->se_dev; + struct rd_dev *dev = se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *rd_sg; struct sg_mapping_iter m; - u32 rd_offset = req->rd_offset; + u32 rd_offset; + u32 rd_size; + u32 rd_page; u32 src_len; + u64 tmp; + + tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size; + rd_offset = do_div(tmp, PAGE_SIZE); + rd_page = tmp; + rd_size = cmd->data_length; - table = rd_get_sg_table(dev, req->rd_page); + table = rd_get_sg_table(dev, rd_page); if (!table) return -EINVAL; - rd_sg = &table->sg_table[req->rd_page - table->page_start_offset]; + rd_sg = &table->sg_table[rd_page - table->page_start_offset]; pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", - dev->rd_dev_id, read_rd ? "Read" : "Write", - task->task_lba, req->rd_size, req->rd_page, - rd_offset); + dev->rd_dev_id, + data_direction == DMA_FROM_DEVICE ? "Read" : "Write", + cmd->t_task_lba, rd_size, rd_page, rd_offset); src_len = PAGE_SIZE - rd_offset; - sg_miter_start(&m, task->task_sg, task->task_sg_nents, - read_rd ? 
SG_MITER_TO_SG : SG_MITER_FROM_SG); - while (req->rd_size) { + sg_miter_start(&m, sgl, sgl_nents, + data_direction == DMA_FROM_DEVICE ? + SG_MITER_TO_SG : SG_MITER_FROM_SG); + while (rd_size) { u32 len; void *rd_addr; @@ -375,13 +328,13 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd) rd_addr = sg_virt(rd_sg) + rd_offset; - if (read_rd) + if (data_direction == DMA_FROM_DEVICE) memcpy(m.addr, rd_addr, len); else memcpy(rd_addr, m.addr, len); - req->rd_size -= len; - if (!req->rd_size) + rd_size -= len; + if (!rd_size) continue; src_len -= len; @@ -391,15 +344,15 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd) } /* rd page completed, next one please */ - req->rd_page++; + rd_page++; rd_offset = 0; src_len = PAGE_SIZE; - if (req->rd_page <= table->page_end_offset) { + if (rd_page <= table->page_end_offset) { rd_sg++; continue; } - table = rd_get_sg_table(dev, req->rd_page); + table = rd_get_sg_table(dev, rd_page); if (!table) { sg_miter_stop(&m); return -EINVAL; @@ -409,43 +362,11 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd) rd_sg = table->sg_table; } sg_miter_stop(&m); - return 0; -} -/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template) - * - * - */ -static int rd_MEMCPY_do_task(struct se_task *task) -{ - struct se_device *dev = task->task_se_cmd->se_dev; - struct rd_request *req = RD_REQ(task); - u64 tmp; - int ret; - - tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; - req->rd_offset = do_div(tmp, PAGE_SIZE); - req->rd_page = tmp; - req->rd_size = task->task_size; - - ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE); - if (ret != 0) - return ret; - - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + target_complete_cmd(cmd, SAM_STAT_GOOD); return 0; } -/* rd_free_task(): (Part of se_subsystem_api_t template) - * - * - */ -static void rd_free_task(struct se_task *task) -{ - kfree(RD_REQ(task)); -} - enum { Opt_rd_pages, Opt_err }; @@ -512,9 +433,8 @@ static ssize_t rd_show_configfs_dev_params( char *b) { struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; - ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n", - rd_dev->rd_dev_id, (rd_dev->rd_direct) ? 
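
rd_execute_cmd() above derives the starting ramdisk page and the intra-page offset from t_task_lba via do_div(). The same arithmetic in plain userspace C, assuming a 4k PAGE_SIZE:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        uint64_t lba = 9, block_size = 512;
        uint64_t byte_off = lba * block_size;      /* 4608 */
        uint32_t rd_offset = byte_off % PAGE_SIZE; /* do_div remainder */
        uint64_t rd_page = byte_off / PAGE_SIZE;   /* do_div quotient */

        assert(rd_page == 1 && rd_offset == 512);
        return 0;
    }
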
- "rd_direct" : "rd_mcp"); + ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", + rd_dev->rd_dev_id); bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" " SG_table_count: %u\n", rd_dev->rd_page_count, PAGE_SIZE, rd_dev->sg_table_count); @@ -545,12 +465,10 @@ static struct se_subsystem_api rd_mcp_template = { .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, .attach_hba = rd_attach_hba, .detach_hba = rd_detach_hba, - .allocate_virtdevice = rd_MEMCPY_allocate_virtdevice, - .create_virtdevice = rd_MEMCPY_create_virtdevice, + .allocate_virtdevice = rd_allocate_virtdevice, + .create_virtdevice = rd_create_virtdevice, .free_device = rd_free_device, - .alloc_task = rd_alloc_task, - .do_task = rd_MEMCPY_do_task, - .free_task = rd_free_task, + .execute_cmd = rd_execute_cmd, .check_configfs_dev_params = rd_check_configfs_dev_params, .set_configfs_dev_params = rd_set_configfs_dev_params, .show_configfs_dev_params = rd_show_configfs_dev_params, diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 784e56a0410..21458125fe5 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -2,7 +2,6 @@ #define TARGET_CORE_RD_H #define RD_HBA_VERSION "v4.0" -#define RD_DR_VERSION "4.0" #define RD_MCP_VERSION "4.0" /* Largest piece of memory kmalloc can allocate */ @@ -10,28 +9,11 @@ #define RD_DEVICE_QUEUE_DEPTH 32 #define RD_MAX_DEVICE_QUEUE_DEPTH 128 #define RD_BLOCKSIZE 512 -#define RD_MAX_SECTORS 1024 /* Used in target_core_init_configfs() for virtual LUN 0 access */ int __init rd_module_init(void); void rd_module_exit(void); -#define RRF_EMULATE_CDB 0x01 -#define RRF_GOT_LBA 0x02 - -struct rd_request { - struct se_task rd_task; - - /* Offset from start of page */ - u32 rd_offset; - /* Starting page in Ramdisk for request */ - u32 rd_page; - /* Total number of pages needed for request */ - u32 rd_page_count; - /* Scatterlist count */ - u32 rd_size; -} ____cacheline_aligned; - struct rd_dev_sg_table { u32 page_start_offset; u32 page_end_offset; @@ -42,7 +24,6 @@ struct rd_dev_sg_table { #define RDF_HAS_PAGE_COUNT 0x01 struct rd_dev { - int rd_direct; u32 rd_flags; /* Unique Ramdisk Device ID in Ramdisk HBA */ u32 rd_dev_id; @@ -50,7 +31,6 @@ struct rd_dev { u32 rd_page_count; /* Number of SG tables in sg_table_array */ u32 sg_table_count; - u32 rd_queue_depth; /* Array of rd_dev_sg_table_t containing scatterlists */ struct rd_dev_sg_table *sg_table_array; /* Ramdisk HBA device is connected to */ diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index f015839aef8..84caf1bed9a 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -244,7 +244,7 @@ static void core_tmr_drain_tmr_list( } } -static void core_tmr_drain_task_list( +static void core_tmr_drain_state_list( struct se_device *dev, struct se_cmd *prout_cmd, struct se_node_acl *tmr_nacl, @@ -252,12 +252,13 @@ static void core_tmr_drain_task_list( struct list_head *preempt_and_abort_list) { LIST_HEAD(drain_task_list); - struct se_cmd *cmd; - struct se_task *task, *task_tmp; + struct se_cmd *cmd, *next; unsigned long flags; int fe_count; + /* - * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. + * Complete outstanding commands with TASK_ABORTED SAM status. + * * This is following sam4r17, section 5.6 Aborting commands, Table 38 * for TMR LUN_RESET: * @@ -278,56 +279,43 @@ static void core_tmr_drain_task_list( * in the Control Mode Page. 
*/ spin_lock_irqsave(&dev->execute_task_lock, flags); - list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, - t_state_list) { - if (!task->task_se_cmd) { - pr_err("task->task_se_cmd is NULL!\n"); - continue; - } - cmd = task->task_se_cmd; - + list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) { /* * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. */ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) continue; + /* * Not aborting PROUT PREEMPT_AND_ABORT CDB.. */ if (prout_cmd == cmd) continue; - list_move_tail(&task->t_state_list, &drain_task_list); - task->t_state_active = false; - /* - * Remove from task execute list before processing drain_task_list - */ - if (!list_empty(&task->t_execute_list)) - __transport_remove_task_from_execute_queue(task, dev); + list_move_tail(&cmd->state_list, &drain_task_list); + cmd->state_active = false; + + if (!list_empty(&cmd->execute_list)) + __target_remove_from_execute_list(cmd); } spin_unlock_irqrestore(&dev->execute_task_lock, flags); while (!list_empty(&drain_task_list)) { - task = list_entry(drain_task_list.next, struct se_task, t_state_list); - list_del(&task->t_state_list); - cmd = task->task_se_cmd; + cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); + list_del(&cmd->state_list); - pr_debug("LUN_RESET: %s cmd: %p task: %p" + pr_debug("LUN_RESET: %s cmd: %p" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" "cdb: 0x%02x\n", - (preempt_and_abort_list) ? "Preempt" : "", cmd, task, + (preempt_and_abort_list) ? "Preempt" : "", cmd, cmd->se_tfo->get_task_tag(cmd), 0, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->t_task_cdb[0]); pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" - " t_task_cdbs: %d t_task_cdbs_left: %d" - " t_task_cdbs_sent: %d -- CMD_T_ACTIVE: %d" + " -- CMD_T_ACTIVE: %d" " CMD_T_STOP: %d CMD_T_SENT: %d\n", cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, - cmd->t_task_list_num, - atomic_read(&cmd->t_task_cdbs_left), - atomic_read(&cmd->t_task_cdbs_sent), (cmd->transport_state & CMD_T_ACTIVE) != 0, (cmd->transport_state & CMD_T_STOP) != 0, (cmd->transport_state & CMD_T_SENT) != 0); @@ -343,20 +331,13 @@ static void core_tmr_drain_task_list( cancel_work_sync(&cmd->work); spin_lock_irqsave(&cmd->t_state_lock, flags); - target_stop_task(task, &flags); + target_stop_cmd(cmd, &flags); - if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" - " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&cmd->t_task_cdbs_ex_left)); - continue; - } fe_count = atomic_read(&cmd->t_fe_count); if (!(cmd->transport_state & CMD_T_ACTIVE)) { pr_debug("LUN_RESET: got CMD_T_ACTIVE for" - " task: %p, t_fe_count: %d dev: %p\n", task, + " cdb: %p, t_fe_count: %d dev: %p\n", cmd, fe_count, dev); cmd->transport_state |= CMD_T_ABORTED; spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -364,8 +345,8 @@ static void core_tmr_drain_task_list( core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); continue; } - pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for task: %p," - " t_fe_count: %d dev: %p\n", task, fe_count, dev); + pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p," + " t_fe_count: %d dev: %p\n", cmd, fe_count, dev); cmd->transport_state |= CMD_T_ABORTED; spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -384,13 +365,11 @@ static void core_tmr_drain_cmd_list( struct se_queue_obj *qobj = &dev->dev_queue_obj; struct se_cmd *cmd, *tcmd; 
unsigned long flags; + /* - * Release all commands remaining in the struct se_device cmd queue. + * Release all commands remaining in the per-device command queue. * - * This follows the same logic as above for the struct se_device - * struct se_task state list, where commands are returned with - * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD - * reference, otherwise the struct se_cmd is released. + * This follows the same logic as above for the state list. */ spin_lock_irqsave(&qobj->cmd_queue_lock, flags); list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { @@ -466,7 +445,7 @@ int core_tmr_lun_reset( dev->transport->name, tas); core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); - core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas, + core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, preempt_and_abort_list); core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, preempt_and_abort_list); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 70c3ffb981e..8bd58e28418 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -60,7 +60,6 @@ static void core_clear_initiator_node_from_tpg( int i; struct se_dev_entry *deve; struct se_lun *lun; - struct se_lun_acl *acl, *acl_tmp; spin_lock_irq(&nacl->device_list_lock); for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { @@ -81,28 +80,7 @@ static void core_clear_initiator_node_from_tpg( core_update_device_list_for_node(lun, NULL, deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); - spin_lock(&lun->lun_acl_lock); - list_for_each_entry_safe(acl, acl_tmp, - &lun->lun_acl_list, lacl_list) { - if (!strcmp(acl->initiatorname, nacl->initiatorname) && - (acl->mapped_lun == deve->mapped_lun)) - break; - } - - if (!acl) { - pr_err("Unable to locate struct se_lun_acl for %s," - " mapped_lun: %u\n", nacl->initiatorname, - deve->mapped_lun); - spin_unlock(&lun->lun_acl_lock); - spin_lock_irq(&nacl->device_list_lock); - continue; - } - - list_del(&acl->lacl_list); - spin_unlock(&lun->lun_acl_lock); - spin_lock_irq(&nacl->device_list_lock); - kfree(acl); } spin_unlock_irq(&nacl->device_list_lock); } @@ -175,10 +153,7 @@ void core_tpg_add_node_to_devs( * demo_mode_write_protect is ON, or READ_ONLY; */ if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { - if (dev->dev_flags & DF_READ_ONLY) - lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; - else - lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; + lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; } else { /* * Allow only optical drives to issue R/W in default RO diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 443704f84fd..b05fdc0c05d 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -72,7 +72,6 @@ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *); static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev); -static void transport_free_dev_tasks(struct se_cmd *cmd); static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd); @@ -331,9 +330,9 @@ void target_get_session(struct se_session *se_sess) } EXPORT_SYMBOL(target_get_session); -int target_put_session(struct se_session *se_sess) +void target_put_session(struct se_session *se_sess) { - return kref_put(&se_sess->sess_kref, 
target_release_session); + kref_put(&se_sess->sess_kref, target_release_session); } EXPORT_SYMBOL(target_put_session); @@ -444,31 +443,23 @@ EXPORT_SYMBOL(transport_deregister_session); /* * Called with cmd->t_state_lock held. */ -static void transport_all_task_dev_remove_state(struct se_cmd *cmd) +static void target_remove_from_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - struct se_task *task; unsigned long flags; if (!dev) return; - list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (task->task_flags & TF_ACTIVE) - continue; - - spin_lock_irqsave(&dev->execute_task_lock, flags); - if (task->t_state_active) { - pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", - cmd->se_tfo->get_task_tag(cmd), dev, task); + if (cmd->transport_state & CMD_T_BUSY) + return; - list_del(&task->t_state_list); - atomic_dec(&cmd->t_task_cdbs_ex_left); - task->t_state_active = false; - } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); + spin_lock_irqsave(&dev->execute_task_lock, flags); + if (cmd->state_active) { + list_del(&cmd->state_list); + cmd->state_active = false; } - + spin_unlock_irqrestore(&dev->execute_task_lock, flags); } /* transport_cmd_check_stop(): @@ -497,7 +488,7 @@ static int transport_cmd_check_stop( cmd->transport_state &= ~CMD_T_ACTIVE; if (transport_off == 2) - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&cmd->transport_lun_stop_comp); @@ -513,7 +504,7 @@ static int transport_cmd_check_stop( cmd->se_tfo->get_task_tag(cmd)); if (transport_off == 2) - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); /* * Clear struct se_cmd->se_lun before the transport_off == 2 handoff @@ -529,7 +520,7 @@ static int transport_cmd_check_stop( if (transport_off) { cmd->transport_state &= ~CMD_T_ACTIVE; if (transport_off == 2) { - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); /* * Clear struct se_cmd->se_lun before the transport_off == 2 * handoff to fabric module. 
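
target_put_session() above becomes void because no caller ever consumed kref_put()'s return value. A minimal refcount analog using C11 atomics in place of struct kref:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct session {
        atomic_int ref;
    };

    static void session_release(struct session *s)
    {
        free(s);
    }

    /* Return nothing: callers only care that the put happened, not
     * whether it was the final one. */
    static void put_session(struct session *s)
    {
        if (atomic_fetch_sub(&s->ref, 1) == 1)
            session_release(s);
    }

    int main(void)
    {
        struct session *s = malloc(sizeof(*s));

        atomic_init(&s->ref, 2);
        put_session(s); /* still held */
        put_session(s); /* last put frees */
        return 0;
    }
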
@@ -577,7 +568,7 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->transport_state & CMD_T_DEV_ACTIVE) { cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); } spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -669,29 +660,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd) spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); } -/* - * Completion function used by TCM subsystem plugins (such as FILEIO) - * for queueing up response from struct se_subsystem_api->do_task() - */ -void transport_complete_sync_cache(struct se_cmd *cmd, int good) -{ - struct se_task *task = list_entry(cmd->t_task_list.next, - struct se_task, t_list); - - if (good) { - cmd->scsi_status = SAM_STAT_GOOD; - task->task_scsi_status = GOOD; - } else { - task->task_scsi_status = SAM_STAT_CHECK_CONDITION; - task->task_se_cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - - } - - transport_complete_task(task, good); -} -EXPORT_SYMBOL(transport_complete_sync_cache); - static void target_complete_failure_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); @@ -699,40 +667,32 @@ static void target_complete_failure_work(struct work_struct *work) transport_generic_request_failure(cmd); } -/* transport_complete_task(): - * - * Called from interrupt and non interrupt context depending - * on the transport plugin. - */ -void transport_complete_task(struct se_task *task, int success) +void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) { - struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; + int success = scsi_status == GOOD; unsigned long flags; + cmd->scsi_status = scsi_status; + + spin_lock_irqsave(&cmd->t_state_lock, flags); - task->task_flags &= ~TF_ACTIVE; + cmd->transport_state &= ~CMD_T_BUSY; - /* - * See if any sense data exists, if so set the TASK_SENSE flag. - * Also check for any other post completion work that needs to be - * done by the plugins. - */ if (dev && dev->transport->transport_complete) { - if (dev->transport->transport_complete(task) != 0) { + if (dev->transport->transport_complete(cmd, + cmd->t_data_sg) != 0) { cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; - task->task_flags |= TF_HAS_SENSE; success = 1; } } /* - * See if we are waiting for outstanding struct se_task - * to complete for an exception condition + * See if we are waiting to complete for an exception condition. */ - if (task->task_flags & TF_REQUEST_STOP) { + if (cmd->transport_state & CMD_T_REQUEST_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&task->task_stop_comp); + complete(&cmd->task_stop_comp); return; } @@ -740,15 +700,6 @@ void transport_complete_task(struct se_task *task, int success) cmd->transport_state |= CMD_T_FAILED; /* - * Decrement the outstanding t_task_cdbs_left count. The last - * struct se_task from struct se_cmd will complete itself into the - * device queue depending upon int success. - */ - if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } - /* * Check for case where an explicit ABORT_TASK has been received * and transport_wait_for_tasks() will be waiting for completion..
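
target_complete_cmd() above collapses the old per-task completion into one status-driven completion per command. A toy model of that flow, with the work-queue dispatch reduced to a comment:

    #include <assert.h>
    #include <stdbool.h>

    enum { SAM_STAT_GOOD = 0x00, SAM_STAT_CHECK_CONDITION = 0x02 };

    struct cmd {
        unsigned char scsi_status;
        bool failed;
    };

    /* One completion per command: success is derived from the single
     * SAM status instead of counting per-task completions. */
    static void complete_cmd(struct cmd *c, unsigned char status)
    {
        c->scsi_status = status;
        c->failed = (status != SAM_STAT_GOOD);
        /* ...then queue either the normal or the failure work item... */
    }

    int main(void)
    {
        struct cmd c;

        complete_cmd(&c, SAM_STAT_CHECK_CONDITION);
        assert(c.failed && c.scsi_status == SAM_STAT_CHECK_CONDITION);
        return 0;
    }
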
*/ @@ -770,157 +721,77 @@ void transport_complete_task(struct se_task *task, int success) queue_work(target_completion_wq, &cmd->work); } -EXPORT_SYMBOL(transport_complete_task); - -/* - * Called by transport_add_tasks_from_cmd() once a struct se_cmd's - * struct se_task list are ready to be added to the active execution list - * struct se_device +EXPORT_SYMBOL(target_complete_cmd); - * Called with se_dev_t->execute_task_lock called. - */ -static inline int transport_add_task_check_sam_attr( - struct se_task *task, - struct se_task *task_prev, - struct se_device *dev) +static void target_add_to_state_list(struct se_cmd *cmd) { - /* - * No SAM Task attribute emulation enabled, add to tail of - * execution queue - */ - if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) { - list_add_tail(&task->t_execute_list, &dev->execute_task_list); - return 0; - } - /* - * HEAD_OF_QUEUE attribute for received CDB, which means - * the first task that is associated with a struct se_cmd goes to - * head of the struct se_device->execute_task_list, and task_prev - * after that for each subsequent task - */ - if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) { - list_add(&task->t_execute_list, - (task_prev != NULL) ? - &task_prev->t_execute_list : - &dev->execute_task_list); - - pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" - " in execution queue\n", - task->task_se_cmd->t_task_cdb[0]); - return 1; + struct se_device *dev = cmd->se_dev; + unsigned long flags; + + spin_lock_irqsave(&dev->execute_task_lock, flags); + if (!cmd->state_active) { + list_add_tail(&cmd->state_list, &dev->state_list); + cmd->state_active = true; } - /* - * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been - * transitioned from Dermant -> Active state, and are added to the end - * of the struct se_device->execute_task_list - */ - list_add_tail(&task->t_execute_list, &dev->execute_task_list); - return 0; + spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -/* __transport_add_task_to_execute_queue(): - * - * Called with se_dev_t->execute_task_lock called. - */ -static void __transport_add_task_to_execute_queue( - struct se_task *task, - struct se_task *task_prev, - struct se_device *dev) +static void __target_add_to_execute_list(struct se_cmd *cmd) { - int head_of_queue; - - head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); - atomic_inc(&dev->execute_tasks); + struct se_device *dev = cmd->se_dev; + bool head_of_queue = false; - if (task->t_state_active) + if (!list_empty(&cmd->execute_list)) return; - /* - * Determine if this task needs to go to HEAD_OF_QUEUE for the - * state list as well. Running with SAM Task Attribute emulation - * will always return head_of_queue == 0 here - */ - if (head_of_queue) - list_add(&task->t_state_list, (task_prev) ? 
- &task_prev->t_state_list : - &dev->state_task_list); - else - list_add_tail(&task->t_state_list, &dev->state_task_list); - task->t_state_active = true; + if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED && + cmd->sam_task_attr == MSG_HEAD_TAG) + head_of_queue = true; - pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", - task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), - task, dev); -} + if (head_of_queue) + list_add(&cmd->execute_list, &dev->execute_list); + else + list_add_tail(&cmd->execute_list, &dev->execute_list); -static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - struct se_task *task; - unsigned long flags; + atomic_inc(&dev->execute_tasks); - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_for_each_entry(task, &cmd->t_task_list, t_list) { - spin_lock(&dev->execute_task_lock); - if (!task->t_state_active) { - list_add_tail(&task->t_state_list, - &dev->state_task_list); - task->t_state_active = true; - - pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", - task->task_se_cmd->se_tfo->get_task_tag( - task->task_se_cmd), task, dev); - } - spin_unlock(&dev->execute_task_lock); - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); -} + if (cmd->state_active) + return; -static void __transport_add_tasks_from_cmd(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - struct se_task *task, *task_prev = NULL; + if (head_of_queue) + list_add(&cmd->state_list, &dev->state_list); + else + list_add_tail(&cmd->state_list, &dev->state_list); - list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (!list_empty(&task->t_execute_list)) - continue; - /* - * __transport_add_task_to_execute_queue() handles the - * SAM Task Attribute emulation if enabled - */ - __transport_add_task_to_execute_queue(task, task_prev, dev); - task_prev = task; - } + cmd->state_active = true; } -static void transport_add_tasks_from_cmd(struct se_cmd *cmd) +static void target_add_to_execute_list(struct se_cmd *cmd) { unsigned long flags; struct se_device *dev = cmd->se_dev; spin_lock_irqsave(&dev->execute_task_lock, flags); - __transport_add_tasks_from_cmd(cmd); + __target_add_to_execute_list(cmd); spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -void __transport_remove_task_from_execute_queue(struct se_task *task, - struct se_device *dev) +void __target_remove_from_execute_list(struct se_cmd *cmd) { - list_del_init(&task->t_execute_list); - atomic_dec(&dev->execute_tasks); + list_del_init(&cmd->execute_list); + atomic_dec(&cmd->se_dev->execute_tasks); } -static void transport_remove_task_from_execute_queue( - struct se_task *task, - struct se_device *dev) +static void target_remove_from_execute_list(struct se_cmd *cmd) { + struct se_device *dev = cmd->se_dev; unsigned long flags; - if (WARN_ON(list_empty(&task->t_execute_list))) + if (WARN_ON(list_empty(&cmd->execute_list))) return; spin_lock_irqsave(&dev->execute_task_lock, flags); - __transport_remove_task_from_execute_queue(task, dev); + __target_remove_from_execute_list(cmd); spin_unlock_irqrestore(&dev->execute_task_lock, flags); } @@ -999,8 +870,9 @@ void transport_dump_dev_state( *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d", atomic_read(&dev->execute_tasks), dev->queue_depth); - *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", - dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); + *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", + dev->se_sub_dev->se_dev_attrib.block_size, + 
dev->se_sub_dev->se_dev_attrib.hw_max_sectors); *bl += sprintf(b + *bl, " "); } @@ -1344,9 +1216,9 @@ struct se_device *transport_add_device_to_core_hba( INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); INIT_LIST_HEAD(&dev->dev_tmr_list); - INIT_LIST_HEAD(&dev->execute_task_list); + INIT_LIST_HEAD(&dev->execute_list); INIT_LIST_HEAD(&dev->delayed_cmd_list); - INIT_LIST_HEAD(&dev->state_task_list); + INIT_LIST_HEAD(&dev->state_list); INIT_LIST_HEAD(&dev->qf_cmd_list); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); @@ -1457,6 +1329,7 @@ static inline void transport_generic_prepare_cdb( case VERIFY_16: /* SBC - VRProtect */ case WRITE_VERIFY: /* SBC - VRProtect */ case WRITE_VERIFY_12: /* SBC - VRProtect */ + case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ break; default: cdb[1] &= 0x1f; /* clear logical unit number */ @@ -1464,29 +1337,6 @@ static inline void transport_generic_prepare_cdb( } } -static struct se_task * -transport_generic_get_task(struct se_cmd *cmd, - enum dma_data_direction data_direction) -{ - struct se_task *task; - struct se_device *dev = cmd->se_dev; - - task = dev->transport->alloc_task(cmd->t_task_cdb); - if (!task) { - pr_err("Unable to allocate struct se_task\n"); - return NULL; - } - - INIT_LIST_HEAD(&task->t_list); - INIT_LIST_HEAD(&task->t_execute_list); - INIT_LIST_HEAD(&task->t_state_list); - init_completion(&task->task_stop_comp); - task->task_se_cmd = cmd; - task->task_data_direction = data_direction; - - return task; -} - static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); /* @@ -1507,11 +1357,13 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->se_queue_node); INIT_LIST_HEAD(&cmd->se_cmd_list); - INIT_LIST_HEAD(&cmd->t_task_list); + INIT_LIST_HEAD(&cmd->execute_list); + INIT_LIST_HEAD(&cmd->state_list); init_completion(&cmd->transport_lun_fe_stop_comp); init_completion(&cmd->transport_lun_stop_comp); init_completion(&cmd->t_transport_stop_comp); init_completion(&cmd->cmd_wait_comp); + init_completion(&cmd->task_stop_comp); spin_lock_init(&cmd->t_state_lock); cmd->transport_state = CMD_T_DEV_ACTIVE; @@ -1521,6 +1373,8 @@ void transport_init_se_cmd( cmd->data_direction = data_direction; cmd->sam_task_attr = task_attr; cmd->sense_buffer = sense_buffer; + + cmd->state_active = false; } EXPORT_SYMBOL(transport_init_se_cmd); @@ -1550,11 +1404,11 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) return 0; } -/* transport_generic_allocate_tasks(): +/* target_setup_cmd_from_cdb(): * * Called from fabric RX Thread. */ -int transport_generic_allocate_tasks( +int target_setup_cmd_from_cdb( struct se_cmd *cmd, unsigned char *cdb) { @@ -1620,7 +1474,7 @@ int transport_generic_allocate_tasks( spin_unlock(&cmd->se_lun->lun_sep_lock); return 0; } -EXPORT_SYMBOL(transport_generic_allocate_tasks); +EXPORT_SYMBOL(target_setup_cmd_from_cdb); /* * Used by fabric module frontends to queue tasks directly. @@ -1701,6 +1555,8 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, */ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length, data_dir, task_attr, sense); + if (flags & TARGET_SCF_UNKNOWN_SIZE) + se_cmd->unknown_data_length = 1; /* * Obtain struct se_cmd->cmd_kref reference and add new cmd to * se_sess->sess_cmd_list. 
A second kref_get here is necessary @@ -1726,11 +1582,18 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, * Sanitize CDBs via transport_generic_cmd_sequencer() and * allocate the necessary tasks to complete the received CDB+data */ - rc = transport_generic_allocate_tasks(se_cmd, cdb); + rc = target_setup_cmd_from_cdb(se_cmd, cdb); if (rc != 0) { transport_generic_request_failure(se_cmd); return; } + + /* + * Check if we need to delay processing because of ALUA + * Active/NonOptimized primary access state.. + */ + core_alua_check_nonop_delay(se_cmd); + /* * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend * for immediate execution of READs, otherwise wait for @@ -1872,72 +1735,30 @@ int transport_generic_handle_tmr( EXPORT_SYMBOL(transport_generic_handle_tmr); /* - * If the task is active, request it to be stopped and sleep until it + * If the cmd is active, request it to be stopped and sleep until it * has completed. */ -bool target_stop_task(struct se_task *task, unsigned long *flags) +bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) { - struct se_cmd *cmd = task->task_se_cmd; bool was_active = false; - if (task->task_flags & TF_ACTIVE) { - task->task_flags |= TF_REQUEST_STOP; + if (cmd->transport_state & CMD_T_BUSY) { + cmd->transport_state |= CMD_T_REQUEST_STOP; spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - pr_debug("Task %p waiting to complete\n", task); - wait_for_completion(&task->task_stop_comp); - pr_debug("Task %p stopped successfully\n", task); + pr_debug("cmd %p waiting to complete\n", cmd); + wait_for_completion(&cmd->task_stop_comp); + pr_debug("cmd %p stopped successfully\n", cmd); spin_lock_irqsave(&cmd->t_state_lock, *flags); - atomic_dec(&cmd->t_task_cdbs_left); - task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); + cmd->transport_state &= ~CMD_T_REQUEST_STOP; + cmd->transport_state &= ~CMD_T_BUSY; was_active = true; } return was_active; } -static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) -{ - struct se_task *task, *task_tmp; - unsigned long flags; - int ret = 0; - - pr_debug("ITT[0x%08x] - Stopping tasks\n", - cmd->se_tfo->get_task_tag(cmd)); - - /* - * No tasks remain in the execution queue - */ - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_for_each_entry_safe(task, task_tmp, - &cmd->t_task_list, t_list) { - pr_debug("Processing task %p\n", task); - /* - * If the struct se_task has not been sent and is not active, - * remove the struct se_task from the execution queue. - */ - if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) { - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - transport_remove_task_from_execute_queue(task, - cmd->se_dev); - - pr_debug("Task %p removed from execute queue\n", task); - spin_lock_irqsave(&cmd->t_state_lock, flags); - continue; - } - - if (!target_stop_task(task, &flags)) { - pr_debug("Task %p - did nothing\n", task); - ret++; - } - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - return ret; -} - /* * Handle SAM-esque emulation for generic transport request failures. 
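target_stop_cmd() in the hunk above implements a small handshake: flag the command, drop the state lock, sleep on a completion, then clear CMD_T_REQUEST_STOP and CMD_T_BUSY once the completer signals. A sketch modelled with a condition variable, which, like the kernel pattern, releases the lock while waiting and retakes it before returning (all names are stand-ins):

```c
#include <pthread.h>
#include <stdbool.h>

enum { CMD_T_BUSY = 1 << 0, CMD_T_REQUEST_STOP = 1 << 1 };

struct cmd {
	pthread_mutex_t state_lock;	/* models cmd->t_state_lock */
	pthread_cond_t stop_done;	/* models cmd->task_stop_comp */
	bool stopped;
	unsigned int transport_state;
};

/* Models target_stop_cmd(): returns true if the command was in flight and
 * had to be waited for. The caller holds state_lock on entry and exit,
 * matching the flags-passing convention in the hunk above. */
static bool stop_cmd(struct cmd *c)
{
	if (!(c->transport_state & CMD_T_BUSY))
		return false;

	c->transport_state |= CMD_T_REQUEST_STOP;

	/* Like wait_for_completion() after the unlock: the condvar drops
	 * state_lock while sleeping. */
	while (!c->stopped)
		pthread_cond_wait(&c->stop_done, &c->state_lock);

	c->transport_state &= ~(CMD_T_REQUEST_STOP | CMD_T_BUSY);
	return true;
}
```

The completing side (compare target_complete_cmd() earlier) would set stopped and signal stop_done under the same lock.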
*/ @@ -1951,13 +1772,7 @@ void transport_generic_request_failure(struct se_cmd *cmd) pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->scsi_sense_reason); - pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" - " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" - " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", - cmd->t_task_list_num, - atomic_read(&cmd->t_task_cdbs_left), - atomic_read(&cmd->t_task_cdbs_sent), - atomic_read(&cmd->t_task_cdbs_ex_left), + pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", (cmd->transport_state & CMD_T_ACTIVE) != 0, (cmd->transport_state & CMD_T_STOP) != 0, (cmd->transport_state & CMD_T_SENT) != 0); @@ -2156,7 +1971,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) * Called from fabric module context in transport_generic_new_cmd() and * transport_generic_process_write() */ -static int transport_execute_tasks(struct se_cmd *cmd) +static void transport_execute_tasks(struct se_cmd *cmd) { int add_tasks; struct se_device *se_dev = cmd->se_dev; @@ -2170,71 +1985,52 @@ static int transport_execute_tasks(struct se_cmd *cmd) * attribute for the tasks of the received struct se_cmd CDB */ add_tasks = transport_execute_task_attr(cmd); - if (!add_tasks) - goto execute_tasks; - /* - * __transport_execute_tasks() -> __transport_add_tasks_from_cmd() - * adds associated se_tasks while holding dev->execute_task_lock - * before I/O dispath to avoid a double spinlock access. - */ - __transport_execute_tasks(se_dev, cmd); - return 0; + if (add_tasks) { + __transport_execute_tasks(se_dev, cmd); + return; + } } - -execute_tasks: __transport_execute_tasks(se_dev, NULL); - return 0; } -/* - * Called to check struct se_device tcq depth window, and once open pull struct se_task - * from struct se_device->execute_task_list and - * - * Called from transport_processing_thread() - */ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd) { int error; struct se_cmd *cmd = NULL; - struct se_task *task = NULL; unsigned long flags; check_depth: spin_lock_irq(&dev->execute_task_lock); if (new_cmd != NULL) - __transport_add_tasks_from_cmd(new_cmd); + __target_add_to_execute_list(new_cmd); - if (list_empty(&dev->execute_task_list)) { + if (list_empty(&dev->execute_list)) { spin_unlock_irq(&dev->execute_task_lock); return 0; } - task = list_first_entry(&dev->execute_task_list, - struct se_task, t_execute_list); - __transport_remove_task_from_execute_queue(task, dev); + cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list); + __target_remove_from_execute_list(cmd); spin_unlock_irq(&dev->execute_task_lock); - cmd = task->task_se_cmd; spin_lock_irqsave(&cmd->t_state_lock, flags); - task->task_flags |= (TF_ACTIVE | TF_SENT); - atomic_inc(&cmd->t_task_cdbs_sent); - - if (atomic_read(&cmd->t_task_cdbs_sent) == - cmd->t_task_list_num) - cmd->transport_state |= CMD_T_SENT; + cmd->transport_state |= CMD_T_BUSY; + cmd->transport_state |= CMD_T_SENT; spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (cmd->execute_task) - error = cmd->execute_task(task); - else - error = dev->transport->do_task(task); + if (cmd->execute_cmd) + error = cmd->execute_cmd(cmd); + else { + error = dev->transport->execute_cmd(cmd, cmd->t_data_sg, + cmd->t_data_nents, cmd->data_direction); + } + if (error != 0) { spin_lock_irqsave(&cmd->t_state_lock, flags); - task->task_flags &= ~TF_ACTIVE; + cmd->transport_state &= ~CMD_T_BUSY; cmd->transport_state &= ~CMD_T_SENT; 
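__transport_execute_tasks() now pops whole commands rather than tasks off the device execute list, marks them busy and sent in one step, and prefers a per-command execute_cmd() handler over the backend's entry point. A toy version of that dispatch loop, single-threaded by assumption, with the backend and failure paths stubbed and all locking elided:

```c
#include <stddef.h>

enum { CMD_T_BUSY = 1 << 0, CMD_T_SENT = 1 << 1 };

struct cmd {
	struct cmd *next;		   /* models the execute_list linkage */
	unsigned int transport_state;
	int (*execute_cmd)(struct cmd *);  /* optional per-command handler */
};

struct device {
	struct cmd *execute_head;
};

static int backend_execute(struct cmd *c) { (void)c; return 0; }
static void request_failure(struct cmd *c) { (void)c; }

/* Models __transport_execute_tasks(): take one whole command at a time off
 * the head of the execute list; on error, strip the BUSY/SENT marks again
 * and fail the command. */
static void execute_pending(struct device *dev)
{
	struct cmd *c;

	while ((c = dev->execute_head) != NULL) {
		dev->execute_head = c->next;

		c->transport_state |= CMD_T_BUSY | CMD_T_SENT;

		int error = c->execute_cmd ? c->execute_cmd(c)
					   : backend_execute(c);
		if (error) {
			c->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
			request_failure(c);
		}
	}
}
```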
spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_stop_tasks_for_cmd(cmd); transport_generic_request_failure(cmd); } @@ -2392,12 +2188,12 @@ static inline u32 transport_get_size( } else /* bytes */ return sectors; } -#if 0 + pr_debug("Returning block_size: %u, sectors: %u == %u for" - " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, - dev->se_sub_dev->se_dev_attrib.block_size * sectors, - dev->transport->name); -#endif + " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, + sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors, + dev->transport->name); + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; } @@ -2462,7 +2258,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) { unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; struct se_device *dev = cmd->se_dev; - struct se_task *task = NULL, *task_tmp; unsigned long flags; u32 offset = 0; @@ -2477,44 +2272,37 @@ static int transport_get_sense_data(struct se_cmd *cmd) return 0; } - list_for_each_entry_safe(task, task_tmp, - &cmd->t_task_list, t_list) { - if (!(task->task_flags & TF_HAS_SENSE)) - continue; - - if (!dev->transport->get_sense_buffer) { - pr_err("dev->transport->get_sense_buffer" - " is NULL\n"); - continue; - } - - sense_buffer = dev->transport->get_sense_buffer(task); - if (!sense_buffer) { - pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate" - " sense buffer for task with sense\n", - cmd->se_tfo->get_task_tag(cmd), task); - continue; - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) + goto out; - offset = cmd->se_tfo->set_fabric_sense_len(cmd, - TRANSPORT_SENSE_BUFFER); + if (!dev->transport->get_sense_buffer) { + pr_err("dev->transport->get_sense_buffer is NULL\n"); + goto out; + } - memcpy(&buffer[offset], sense_buffer, - TRANSPORT_SENSE_BUFFER); - cmd->scsi_status = task->task_scsi_status; - /* Automatically padded */ - cmd->scsi_sense_length = - (TRANSPORT_SENSE_BUFFER + offset); - - pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" - " and sense\n", - dev->se_hba->hba_id, dev->transport->name, - cmd->scsi_status); - return 0; + sense_buffer = dev->transport->get_sense_buffer(cmd); + if (!sense_buffer) { + pr_err("ITT 0x%08x cmd %p: Unable to locate" + " sense buffer for task with sense\n", + cmd->se_tfo->get_task_tag(cmd), cmd); + goto out; } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); + + memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); + + /* Automatically padded */ + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; + + pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n", + dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); + return 0; + +out: + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return -1; } @@ -2581,7 +2369,7 @@ static int target_check_write_same_discard(unsigned char *flags, struct se_devic * Generic Command Sequencer that should work for most DAS transport * drivers. * - * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD + * Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD * RX Thread. * * FIXME: Need to support other SCSI OPCODES where as well. @@ -2615,11 +2403,10 @@ static int transport_generic_cmd_sequencer( * by the ALUA primary or secondary access state.. 
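The straightened-out sense path in transport_get_sense_data() asks the fabric for a header offset once, copies a single per-command sense buffer, and reports the combined length. The arithmetic looks like the following sketch, with an illustrative buffer size and a stubbed fabric_sense_offset() in place of se_tfo->set_fabric_sense_len():

```c
#include <string.h>

/* Illustrative size only; the real TRANSPORT_SENSE_BUFFER differs. */
#define TRANSPORT_SENSE_BUFFER 96

struct cmd {
	unsigned char sense_buffer[128 + TRANSPORT_SENSE_BUFFER];
	unsigned int scsi_sense_length;
};

/* Stand-in for se_tfo->set_fabric_sense_len(), which tells the core how
 * many bytes of fabric header to leave in front of the sense data. */
static unsigned int fabric_sense_offset(struct cmd *c)
{
	(void)c;
	return 2;
}

static void copy_sense(struct cmd *c, const unsigned char *backend_sense)
{
	unsigned int offset = fabric_sense_offset(c);

	memcpy(&c->sense_buffer[offset], backend_sense, TRANSPORT_SENSE_BUFFER);

	/* The length reported back includes the fabric header room. */
	c->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
}
```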
*/ if (ret > 0) { -#if 0 pr_debug("[%s]: ALUA TG Port not available," " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", cmd->se_tfo->get_fabric_name(), alua_ascq); -#endif + transport_set_sense_codes(cmd, 0x04, alua_ascq); cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; @@ -2695,6 +2482,7 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_10: + case WRITE_VERIFY: sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; @@ -2796,7 +2584,7 @@ static int transport_generic_cmd_sequencer( if (target_check_write_same_discard(&cdb[10], dev) < 0) goto out_unsupported_cdb; if (!passthrough) - cmd->execute_task = target_emulate_write_same; + cmd->execute_cmd = target_emulate_write_same; break; default: pr_err("VARIABLE_LENGTH_CMD service action" @@ -2810,9 +2598,9 @@ static int transport_generic_cmd_sequencer( /* * Check for emulated MI_REPORT_TARGET_PGS. */ - if (cdb[1] == MI_REPORT_TARGET_PGS && + if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS && su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { - cmd->execute_task = + cmd->execute_cmd = target_emulate_report_target_port_groups; } size = (cdb[6] << 24) | (cdb[7] << 16) | @@ -2835,13 +2623,13 @@ static int transport_generic_cmd_sequencer( size = cdb[4]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_modesense; + cmd->execute_cmd = target_emulate_modesense; break; case MODE_SENSE_10: size = (cdb[7] << 8) + cdb[8]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_modesense; + cmd->execute_cmd = target_emulate_modesense; break; case GPCMD_READ_BUFFER_CAPACITY: case GPCMD_SEND_OPC: @@ -2863,13 +2651,13 @@ static int transport_generic_cmd_sequencer( break; case PERSISTENT_RESERVE_IN: if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) - cmd->execute_task = target_scsi3_emulate_pr_in; + cmd->execute_cmd = target_scsi3_emulate_pr_in; size = (cdb[7] << 8) + cdb[8]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case PERSISTENT_RESERVE_OUT: if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) - cmd->execute_task = target_scsi3_emulate_pr_out; + cmd->execute_cmd = target_scsi3_emulate_pr_out; size = (cdb[7] << 8) + cdb[8]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; @@ -2890,7 +2678,7 @@ static int transport_generic_cmd_sequencer( */ if (cdb[1] == MO_SET_TARGET_PGS && su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { - cmd->execute_task = + cmd->execute_cmd = target_emulate_set_target_port_groups; } @@ -2912,7 +2700,7 @@ static int transport_generic_cmd_sequencer( cmd->sam_task_attr = MSG_HEAD_TAG; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_inquiry; + cmd->execute_cmd = target_emulate_inquiry; break; case READ_BUFFER: size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; @@ -2922,7 +2710,7 @@ static int transport_generic_cmd_sequencer( size = READ_CAP_LEN; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_readcapacity; + cmd->execute_cmd = target_emulate_readcapacity; break; case READ_MEDIA_SERIAL_NUMBER: case SECURITY_PROTOCOL_IN: @@ -2934,7 +2722,7 @@ static int transport_generic_cmd_sequencer( switch (cmd->t_task_cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: if (!passthrough) - cmd->execute_task = + cmd->execute_cmd = target_emulate_readcapacity_16; 
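The MAINTENANCE_IN change above stops comparing the whole of CDB byte 1 against MI_REPORT_TARGET_PGS and masks it to the low five bits first, since SPC defines the service action as bits 4:0 of that byte (the related transport_generic_prepare_cdb hunk preserves the upper bits for the RTPG parameter data format). A standalone check showing why the mask matters; the 0x0a value is the usual MAINTENANCE_IN service action code for REPORT TARGET PORT GROUPS:

```c
#include <stdio.h>

#define MI_REPORT_TARGET_PGS 0x0a	/* MAINTENANCE_IN service action */

/* SPC keeps the MAINTENANCE_IN service action in bits 4:0 of CDB byte 1;
 * the bits above it carry other fields, so they must be masked off. */
static int is_report_target_pgs(const unsigned char *cdb)
{
	return (cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS;
}

int main(void)
{
	/* Byte 1 = 0x2a: RTPG service action plus a nonzero upper field.
	 * The old test (cdb[1] == MI_REPORT_TARGET_PGS) rejects this CDB;
	 * the masked test accepts it. */
	unsigned char cdb[12] = { 0xa3 /* MAINTENANCE_IN */, 0x2a };

	printf("%d\n", is_report_target_pgs(cdb));	/* prints 1 */
	return 0;
}
```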
break; default: @@ -2977,7 +2765,7 @@ static int transport_generic_cmd_sequencer( size = cdb[4]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_request_sense; + cmd->execute_cmd = target_emulate_request_sense; break; case READ_ELEMENT_STATUS: size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; @@ -3006,7 +2794,7 @@ static int transport_generic_cmd_sequencer( * emulation disabled. */ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) - cmd->execute_task = target_scsi2_reservation_reserve; + cmd->execute_cmd = target_scsi2_reservation_reserve; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case RELEASE: @@ -3021,7 +2809,7 @@ static int transport_generic_cmd_sequencer( size = cmd->data_length; if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) - cmd->execute_task = target_scsi2_reservation_release; + cmd->execute_cmd = target_scsi2_reservation_release; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case SYNCHRONIZE_CACHE: @@ -3053,13 +2841,13 @@ static int transport_generic_cmd_sequencer( if (transport_cmd_get_valid_sectors(cmd) < 0) goto out_invalid_cdb_field; } - cmd->execute_task = target_emulate_synchronize_cache; + cmd->execute_cmd = target_emulate_synchronize_cache; break; case UNMAP: size = get_unaligned_be16(&cdb[7]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (!passthrough) - cmd->execute_task = target_emulate_unmap; + cmd->execute_cmd = target_emulate_unmap; break; case WRITE_SAME_16: sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); @@ -3079,7 +2867,7 @@ static int transport_generic_cmd_sequencer( if (target_check_write_same_discard(&cdb[1], dev) < 0) goto out_unsupported_cdb; if (!passthrough) - cmd->execute_task = target_emulate_write_same; + cmd->execute_cmd = target_emulate_write_same; break; case WRITE_SAME: sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); @@ -3102,7 +2890,7 @@ static int transport_generic_cmd_sequencer( if (target_check_write_same_discard(&cdb[1], dev) < 0) goto out_unsupported_cdb; if (!passthrough) - cmd->execute_task = target_emulate_write_same; + cmd->execute_cmd = target_emulate_write_same; break; case ALLOW_MEDIUM_REMOVAL: case ERASE: @@ -3115,7 +2903,7 @@ static int transport_generic_cmd_sequencer( case WRITE_FILEMARKS: cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; if (!passthrough) - cmd->execute_task = target_emulate_noop; + cmd->execute_cmd = target_emulate_noop; break; case GPCMD_CLOSE_TRACK: case INITIALIZE_ELEMENT_STATUS: @@ -3125,7 +2913,7 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case REPORT_LUNS: - cmd->execute_task = target_report_luns; + cmd->execute_cmd = target_report_luns; size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; /* * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS @@ -3135,6 +2923,42 @@ static int transport_generic_cmd_sequencer( cmd->sam_task_attr = MSG_HEAD_TAG; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; + case GET_EVENT_STATUS_NOTIFICATION: + size = (cdb[7] << 8) | cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; + case ATA_16: + /* Only support ATA passthrough to pSCSI backends.. */ + if (!passthrough) + goto out_unsupported_cdb; + + /* T_LENGTH */ + switch (cdb[2] & 0x3) { + case 0x0: + sectors = 0; + break; + case 0x1: + sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4]; + break; + case 0x2: + sectors = (((cdb[1] & 0x1) ? 
cdb[5] : 0) << 8) | cdb[6]; + break; + case 0x3: + pr_err("T_LENGTH=0x3 not supported for ATA_16\n"); + goto out_invalid_cdb_field; + } + + /* BYTE_BLOCK */ + if (cdb[2] & 0x4) { + /* BLOCK T_TYPE: 512 or sector */ + size = sectors * ((cdb[2] & 0x10) ? + dev->se_sub_dev->se_dev_attrib.block_size : 512); + } else { + /* BYTE */ + size = sectors; + } + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; default: pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" " 0x%02x, sending CHECK_CONDITION.\n", @@ -3142,6 +2966,9 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; } + if (cmd->unknown_data_length) + cmd->data_length = size; + if (size != cmd->data_length) { pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" " %u does not match SCSI CDB Length: %u for SAM Opcode:" @@ -3177,15 +3004,25 @@ static int transport_generic_cmd_sequencer( cmd->data_length = size; } - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB && - sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n", - cdb[0], sectors); - goto out_invalid_cdb_field; + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" + " big sectors %u exceeds fabric_max_sectors:" + " %u\n", cdb[0], sectors, + su_dev->se_dev_attrib.fabric_max_sectors); + goto out_invalid_cdb_field; + } + if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { + printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" + " big sectors %u exceeds backend hw_max_sectors:" + " %u\n", cdb[0], sectors, + su_dev->se_dev_attrib.hw_max_sectors); + goto out_invalid_cdb_field; + } } /* reject any command that we don't have a handler for */ - if (!(passthrough || cmd->execute_task || + if (!(passthrough || cmd->execute_cmd || (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) goto out_unsupported_cdb; @@ -3250,7 +3087,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) cmd_p->t_task_cdb[0], cmd_p->sam_task_attr, cmd_p->se_ordered_id); - transport_add_tasks_from_cmd(cmd_p); + target_add_to_execute_list(cmd_p); new_active_tasks++; spin_lock(&dev->delayed_cmd_lock); @@ -3346,10 +3183,6 @@ static void target_complete_ok_work(struct work_struct *work) if (transport_get_sense_data(cmd) < 0) reason = TCM_NON_EXISTENT_LUN; - /* - * Only set when an struct se_task->task_scsi_status returned - * a non GOOD status. 
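The new ATA_16 case above derives the expected transfer size from the T_LENGTH, BYTE_BLOCK and T_TYPE fields of the passthrough CDB. The same decoding as a standalone function, with the block size passed in rather than read from se_dev_attrib:

```c
#include <stdio.h>

/* Models the ATA_16 sizing logic above: T_LENGTH in cdb[2] bits 1:0, the
 * extended flag in cdb[1] bit 0, BYTE_BLOCK in cdb[2] bit 2, and the
 * 512-vs-logical-block choice in cdb[2] bit 4. Returns -1 for the
 * unsupported T_LENGTH=0x3 encoding. */
static long ata16_transfer_size(const unsigned char *cdb,
				unsigned int block_size)
{
	unsigned int ext = cdb[1] & 0x1;
	unsigned long sectors;

	switch (cdb[2] & 0x3) {
	case 0x0: sectors = 0; break;
	case 0x1: sectors = ((ext ? cdb[3] : 0) << 8) | cdb[4]; break;
	case 0x2: sectors = ((ext ? cdb[5] : 0) << 8) | cdb[6]; break;
	default:  return -1;		/* T_LENGTH=0x3 unsupported */
	}

	if (cdb[2] & 0x4)		/* BYTE_BLOCK: count is in blocks */
		return sectors * ((cdb[2] & 0x10) ? block_size : 512);
	return sectors;			/* otherwise the count is in bytes */
}

int main(void)
{
	/* T_LENGTH=0x2, BYTE_BLOCK=1, 512-byte units, count of 8 in cdb[6]. */
	unsigned char cdb[16] = { 0x85 /* ATA_16 */, 0x00, 0x06, 0, 0, 0, 8 };

	printf("%ld\n", ata16_transfer_size(cdb, 4096));	/* prints 4096 */
	return 0;
}
```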
- */ if (cmd->scsi_status) { ret = transport_send_check_condition_and_sense( cmd, reason, 1); @@ -3424,33 +3257,6 @@ queue_full: transport_handle_queue_full(cmd, cmd->se_dev); } -static void transport_free_dev_tasks(struct se_cmd *cmd) -{ - struct se_task *task, *task_tmp; - unsigned long flags; - LIST_HEAD(dispose_list); - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_for_each_entry_safe(task, task_tmp, - &cmd->t_task_list, t_list) { - if (!(task->task_flags & TF_ACTIVE)) - list_move_tail(&task->t_list, &dispose_list); - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - while (!list_empty(&dispose_list)) { - task = list_first_entry(&dispose_list, struct se_task, t_list); - - if (task->task_sg != cmd->t_data_sg && - task->task_sg != cmd->t_bidi_data_sg) - kfree(task->task_sg); - - list_del(&task->t_list); - - cmd->se_dev->transport->free_task(task); - } -} - static inline void transport_free_sgl(struct scatterlist *sgl, int nents) { struct scatterlist *sg; @@ -3511,7 +3317,6 @@ static void transport_release_cmd(struct se_cmd *cmd) static void transport_put_cmd(struct se_cmd *cmd) { unsigned long flags; - int free_tasks = 0; spin_lock_irqsave(&cmd->t_state_lock, flags); if (atomic_read(&cmd->t_fe_count)) { @@ -3519,21 +3324,12 @@ static void transport_put_cmd(struct se_cmd *cmd) goto out_busy; } - if (atomic_read(&cmd->t_se_count)) { - if (!atomic_dec_and_test(&cmd->t_se_count)) - goto out_busy; - } - if (cmd->transport_state & CMD_T_DEV_ACTIVE) { cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - transport_all_task_dev_remove_state(cmd); - free_tasks = 1; + target_remove_from_state_list(cmd); } spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (free_tasks != 0) - transport_free_dev_tasks(cmd); - transport_free_pages(cmd); transport_release_cmd(cmd); return; @@ -3683,245 +3479,14 @@ out: return -ENOMEM; } -/* Reduce sectors if they are too long for the device */ -static inline sector_t transport_limit_task_sectors( - struct se_device *dev, - unsigned long long lba, - sector_t sectors) -{ - sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); - - if (dev->transport->get_device_type(dev) == TYPE_DISK) - if ((lba + sectors) > transport_dev_end_lba(dev)) - sectors = ((transport_dev_end_lba(dev) - lba) + 1); - - return sectors; -} - - -/* - * This function can be used by HW target mode drivers to create a linked - * scatterlist from all contiguously allocated struct se_task->task_sg[]. - * This is intended to be called during the completion path by TCM Core - * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. - */ -void transport_do_task_sg_chain(struct se_cmd *cmd) -{ - struct scatterlist *sg_first = NULL; - struct scatterlist *sg_prev = NULL; - int sg_prev_nents = 0; - struct scatterlist *sg; - struct se_task *task; - u32 chained_nents = 0; - int i; - - BUG_ON(!cmd->se_tfo->task_sg_chaining); - - /* - * Walk the struct se_task list and setup scatterlist chains - * for each contiguously allocated struct se_task->task_sg[]. - */ - list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (!task->task_sg) - continue; - - if (!sg_first) { - sg_first = task->task_sg; - chained_nents = task->task_sg_nents; - } else { - sg_chain(sg_prev, sg_prev_nents, task->task_sg); - chained_nents += task->task_sg_nents; - } - /* - * For the padded tasks, use the extra SGL vector allocated - * in transport_allocate_data_tasks() for the sg_prev_nents - * offset into sg_chain() above. 
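With the se_task layer gone, transport_put_cmd() above shrinks to a frontend-reference check, a state-list removal and the page/descriptor teardown. Roughly the following, ignoring locking and the out_busy path, with all helpers stubbed:

```c
#include <stdbool.h>

struct cmd {
	int t_fe_count;		/* outstanding frontend references */
	bool dev_active;	/* models CMD_T_DEV_ACTIVE */
};

/* Stubs for the state-list, page-freeing and release helpers. */
static void remove_from_state_list(struct cmd *c) { (void)c; }
static void free_data_pages(struct cmd *c) { (void)c; }
static void release_cmd(struct cmd *c) { (void)c; }

/* Models the slimmed-down transport_put_cmd(): no task list left to
 * dispose of, just the state-list entry, the data pages and the
 * descriptor itself. */
static void put_cmd(struct cmd *c)
{
	if (c->t_fe_count && --c->t_fe_count)
		return;			/* a frontend still holds it */

	if (c->dev_active) {
		c->dev_active = false;
		remove_from_state_list(c);
	}

	free_data_pages(c);
	release_cmd(c);
}
```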
- * - * We do not need the padding for the last task (or a single - * task), but in that case we will never use the sg_prev_nents - * value below which would be incorrect. - */ - sg_prev_nents = (task->task_sg_nents + 1); - sg_prev = task->task_sg; - } - /* - * Setup the starting pointer and total t_tasks_sg_linked_no including - * padding SGs for linking and to mark the end. - */ - cmd->t_tasks_sg_chained = sg_first; - cmd->t_tasks_sg_chained_no = chained_nents; - - pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" - " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, - cmd->t_tasks_sg_chained_no); - - for_each_sg(cmd->t_tasks_sg_chained, sg, - cmd->t_tasks_sg_chained_no, i) { - - pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", - i, sg, sg_page(sg), sg->length, sg->offset); - if (sg_is_chain(sg)) - pr_debug("SG: %p sg_is_chain=1\n", sg); - if (sg_is_last(sg)) - pr_debug("SG: %p sg_is_last=1\n", sg); - } -} -EXPORT_SYMBOL(transport_do_task_sg_chain); - -/* - * Break up cmd into chunks transport can handle - */ -static int -transport_allocate_data_tasks(struct se_cmd *cmd, - enum dma_data_direction data_direction, - struct scatterlist *cmd_sg, unsigned int sgl_nents) -{ - struct se_device *dev = cmd->se_dev; - int task_count, i; - unsigned long long lba; - sector_t sectors, dev_max_sectors; - u32 sector_size; - - if (transport_cmd_get_valid_sectors(cmd) < 0) - return -EINVAL; - - dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; - sector_size = dev->se_sub_dev->se_dev_attrib.block_size; - - WARN_ON(cmd->data_length % sector_size); - - lba = cmd->t_task_lba; - sectors = DIV_ROUND_UP(cmd->data_length, sector_size); - task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); - - /* - * If we need just a single task reuse the SG list in the command - * and avoid a lot of work. - */ - if (task_count == 1) { - struct se_task *task; - unsigned long flags; - - task = transport_generic_get_task(cmd, data_direction); - if (!task) - return -ENOMEM; - - task->task_sg = cmd_sg; - task->task_sg_nents = sgl_nents; - - task->task_lba = lba; - task->task_sectors = sectors; - task->task_size = task->task_sectors * sector_size; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task_list); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - return task_count; - } - - for (i = 0; i < task_count; i++) { - struct se_task *task; - unsigned int task_size, task_sg_nents_padded; - struct scatterlist *sg; - unsigned long flags; - int count; - - task = transport_generic_get_task(cmd, data_direction); - if (!task) - return -ENOMEM; - - task->task_lba = lba; - task->task_sectors = min(sectors, dev_max_sectors); - task->task_size = task->task_sectors * sector_size; - - /* - * This now assumes that passed sg_ents are in PAGE_SIZE chunks - * in order to calculate the number per task SGL entries - */ - task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); - /* - * Check if the fabric module driver is requesting that all - * struct se_task->task_sg[] be chained together.. If so, - * then allocate an extra padding SG entry for linking and - * marking the end of the chained SGL for every task except - * the last one for (task_count > 1) operation, or skipping - * the extra padding for the (task_count == 1) case. 
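For reference, the splitting arithmetic that the removed transport_allocate_data_tasks() performed per command, and which the new single-command path replaces with one up-front hw_max_sectors check, reduces to the following (a self-contained model, not the kernel code):

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Reproduces the carving done by the removed transport_allocate_data_tasks():
 * each task covered at most dev_max_sectors, with lba and the remaining
 * sector count advancing per slice. */
static void show_task_split(unsigned long long lba, unsigned int data_length,
			    unsigned int sector_size,
			    unsigned int dev_max_sectors)
{
	unsigned long long sectors = DIV_ROUND_UP(data_length, sector_size);
	unsigned long long task_count = DIV_ROUND_UP(sectors, dev_max_sectors);

	for (unsigned long long i = 0; i < task_count; i++) {
		unsigned long long task_sectors =
			sectors < dev_max_sectors ? sectors : dev_max_sectors;

		printf("task %llu: lba=%llu sectors=%llu size=%llu\n",
		       i, lba, task_sectors, task_sectors * sector_size);
		lba += task_sectors;
		sectors -= task_sectors;
	}
}

int main(void)
{
	/* A 1 MiB write at LBA 0 with 512-byte blocks and a 1024-sector
	 * device limit splits into two tasks of 1024 sectors each. */
	show_task_split(0, 1 << 20, 512, 1024);
	return 0;
}
```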
- */ - if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { - task_sg_nents_padded = (task->task_sg_nents + 1); - } else - task_sg_nents_padded = task->task_sg_nents; - - task->task_sg = kmalloc(sizeof(struct scatterlist) * - task_sg_nents_padded, GFP_KERNEL); - if (!task->task_sg) { - cmd->se_dev->transport->free_task(task); - return -ENOMEM; - } - - sg_init_table(task->task_sg, task_sg_nents_padded); - - task_size = task->task_size; - - /* Build new sgl, only up to task_size */ - for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { - if (cmd_sg->length > task_size) - break; - - *sg = *cmd_sg; - task_size -= cmd_sg->length; - cmd_sg = sg_next(cmd_sg); - } - - lba += task->task_sectors; - sectors -= task->task_sectors; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task_list); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - } - - return task_count; -} - -static int -transport_allocate_control_task(struct se_cmd *cmd) -{ - struct se_task *task; - unsigned long flags; - - /* Workaround for handling zero-length control CDBs */ - if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && - !cmd->data_length) - return 0; - - task = transport_generic_get_task(cmd, cmd->data_direction); - if (!task) - return -ENOMEM; - - task->task_sg = cmd->t_data_sg; - task->task_size = cmd->data_length; - task->task_sg_nents = cmd->t_data_nents; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task_list); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - /* Success! Return number of tasks allocated */ - return 1; -} - /* - * Allocate any required ressources to execute the command, and either place - * it on the execution queue if possible. For writes we might not have the - * payload yet, thus notify the fabric via a call to ->write_pending instead. + * Allocate any required resources to execute the command. For writes we + * might not have the payload yet, so notify the fabric via a call to + * ->write_pending instead. Otherwise place it on the execution queue. */ int transport_generic_new_cmd(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - int task_cdbs, task_cdbs_bidi = 0; - int set_counts = 1; int ret = 0; /* @@ -3936,35 +3501,9 @@ int transport_generic_new_cmd(struct se_cmd *cmd) goto out_fail; } - /* - * For BIDI command set up the read tasks first. 
- */ - if (cmd->t_bidi_data_sg && - dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { - BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); - - task_cdbs_bidi = transport_allocate_data_tasks(cmd, - DMA_FROM_DEVICE, cmd->t_bidi_data_sg, - cmd->t_bidi_data_nents); - if (task_cdbs_bidi <= 0) - goto out_fail; - - atomic_inc(&cmd->t_fe_count); - atomic_inc(&cmd->t_se_count); - set_counts = 0; - } - - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - task_cdbs = transport_allocate_data_tasks(cmd, - cmd->data_direction, cmd->t_data_sg, - cmd->t_data_nents); - } else { - task_cdbs = transport_allocate_control_task(cmd); - } - - if (task_cdbs < 0) - goto out_fail; - else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { + /* Workaround for handling zero-length control CDBs */ + if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && + !cmd->data_length) { spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_COMPLETE; cmd->transport_state |= CMD_T_ACTIVE; @@ -3982,29 +3521,31 @@ int transport_generic_new_cmd(struct se_cmd *cmd) return 0; } - if (set_counts) { - atomic_inc(&cmd->t_fe_count); - atomic_inc(&cmd->t_se_count); + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib; + + if (transport_cmd_get_valid_sectors(cmd) < 0) + return -EINVAL; + + BUG_ON(cmd->data_length % attr->block_size); + BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) > + attr->hw_max_sectors); } - cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); - atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num); - atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num); + atomic_inc(&cmd->t_fe_count); /* - * For WRITEs, let the fabric know its buffer is ready.. - * This WRITE struct se_cmd (and all of its associated struct se_task's) - * will be added to the struct se_device execution queue after its WRITE - * data has arrived. (ie: It gets handled by the transport processing - * thread a second time) + * For WRITEs, let the fabric know its buffer is ready. + * + * The command will be added to the execution queue after its write + * data has arrived. */ if (cmd->data_direction == DMA_TO_DEVICE) { - transport_add_tasks_to_state_queue(cmd); + target_add_to_state_list(cmd); return transport_generic_write_pending(cmd); } /* - * Everything else but a WRITE, add the struct se_cmd's struct se_task's - * to the execution queue. + * Everything else but a WRITE, add the command to the execution queue. */ transport_execute_tasks(cmd); return 0; @@ -4091,8 +3632,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) if (cmd->se_lun) transport_lun_remove_cmd(cmd); - transport_free_dev_tasks(cmd); - transport_put_cmd(cmd); } } @@ -4233,7 +3772,8 @@ EXPORT_SYMBOL(target_wait_for_sess_cmds); static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) { unsigned long flags; - int ret; + int ret = 0; + /* * If the frontend has already requested this struct se_cmd to * be stopped, we can safely ignore this struct se_cmd. @@ -4253,10 +3793,21 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); - ret = transport_stop_tasks_for_cmd(cmd); + // XXX: audit task_flags checks. 
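The open-coded replacement for transport_stop_tasks_for_cmd() in transport_lun_wait_for_tasks() makes a two-way decision: a command the backend already owns (both CMD_T_BUSY and CMD_T_SENT set) is stopped and waited for, while one still sitting in the queue is simply unlinked. As a sketch, with stop_cmd() and remove_from_execute_list() as stand-ins for target_stop_cmd() and target_remove_from_execute_list():

```c
#include <stdbool.h>

enum { CMD_T_BUSY = 1 << 0, CMD_T_SENT = 1 << 1 };

struct cmd {
	unsigned int transport_state;
};

/* Stubs for the stop handshake and the execute-list removal. */
static bool stop_cmd(struct cmd *c) { (void)c; return true; }
static void remove_from_execute_list(struct cmd *c) { (void)c; }

/* Models the new logic above. Returns nonzero when the stop request found
 * nothing to stop, mirroring the ret++ in the hunk. */
static int quiesce_cmd(struct cmd *c)
{
	int ret = 0;

	if ((c->transport_state & CMD_T_BUSY) &&
	    (c->transport_state & CMD_T_SENT)) {
		if (!stop_cmd(c))
			ret++;
	} else {
		remove_from_execute_list(c);
	}

	return ret;
}
```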
+ spin_lock_irqsave(&cmd->t_state_lock, flags); + if ((cmd->transport_state & CMD_T_BUSY) && + (cmd->transport_state & CMD_T_SENT)) { + if (!target_stop_cmd(cmd, &flags)) + ret++; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + } else { + spin_unlock_irqrestore(&cmd->t_state_lock, + flags); + target_remove_from_execute_list(cmd); + } - pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" - " %d\n", cmd, cmd->t_task_list_num, ret); + pr_debug("ConfigFS: cmd: %p stop tasks ret:" + " %d\n", cmd, ret); if (!ret) { pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", cmd->se_tfo->get_task_tag(cmd)); @@ -4328,10 +3879,9 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) goto check_cond; } cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - transport_free_dev_tasks(cmd); /* * The Storage engine stopped this struct se_cmd before it was * send to the fabric frontend for delivery back to the @@ -4444,7 +3994,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) wait_for_completion(&cmd->transport_lun_fe_stop_comp); spin_lock_irqsave(&cmd->t_state_lock, flags); - transport_all_task_dev_remove_state(cmd); + target_remove_from_state_list(cmd); /* * At this point, the frontend who was the originator of this * struct se_cmd, now owns the structure and can be released through @@ -4710,12 +4260,12 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) return 1; -#if 0 + pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" " status for CDB: 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); -#endif + cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; cmd->se_tfo->queue_status(cmd); ret = 1; @@ -4748,11 +4298,11 @@ void transport_send_task_abort(struct se_cmd *cmd) } } cmd->scsi_status = SAM_STAT_TASK_ABORTED; -#if 0 + pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," " ITT: 0x%08x\n", cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); -#endif + cmd->se_tfo->queue_status(cmd); } @@ -4865,7 +4415,7 @@ get_cmd: } out: - WARN_ON(!list_empty(&dev->state_task_list)); + WARN_ON(!list_empty(&dev->state_list)); WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list)); dev->process_thread = NULL; return 0; diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index a375f257aab..f03fb9730f5 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -215,20 +215,10 @@ int ft_write_pending(struct se_cmd *se_cmd) */ if ((ep->xid <= lport->lro_xid) && (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { - if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - /* - * cmd may have been broken up into multiple - * tasks. Link their sgs together so we can - * operate on them all at once. 
- */ - transport_do_task_sg_chain(se_cmd); - cmd->sg = se_cmd->t_tasks_sg_chained; - cmd->sg_cnt = - se_cmd->t_tasks_sg_chained_no; - } - if (cmd->sg && lport->tt.ddp_target(lport, ep->xid, - cmd->sg, - cmd->sg_cnt)) + if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) && + lport->tt.ddp_target(lport, ep->xid, + se_cmd->t_data_sg, + se_cmd->t_data_nents)) cmd->was_ddp_setup = 1; } } diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 2948dc94461..9501844fae2 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -576,9 +576,6 @@ int ft_register_configfs(void) } fabric->tf_ops = ft_fabric_ops; - /* Allowing support for task_sg_chaining */ - fabric->tf_ops.task_sg_chaining = 1; - /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index dc7c0db26e2..071a505f98f 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -228,7 +228,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) "payload, Frame will be dropped if" "'Sequence Initiative' bit in f_ctl is" "not set\n", __func__, ep->xid, f_ctl, - cmd->sg, cmd->sg_cnt); + se_cmd->t_data_sg, se_cmd->t_data_nents); /* * Invalidate HW DDP context if it was setup for respective * command. Invalidation of HW DDP context is requited in both |
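Finally, the tcm_fc hunks drop the chained-SGL detour: with exactly one scatterlist per command, ft_write_pending() can hand se_cmd->t_data_sg straight to the lport's DDP setup hook. A hedged model of the new call shape, with ddp_target() as a stub for lport->tt.ddp_target() and simplified se_cmd fields:

```c
#include <stdbool.h>

struct scatterlist;			/* opaque in this sketch */

struct se_cmd_model {			/* simplified se_cmd fields */
	struct scatterlist *t_data_sg;
	unsigned int t_data_nents;
	bool is_data_sg_io;		/* models SCF_SCSI_DATA_SG_IO_CDB */
};

/* Stand-in for lport->tt.ddp_target(): programs the HBA to place incoming
 * write data directly into the given scatterlist; nonzero means the DDP
 * context was set up, matching the was_ddp_setup = 1 assignment above. */
static int ddp_target(unsigned int xid, struct scatterlist *sg,
		      unsigned int nents)
{
	(void)xid; (void)sg; (void)nents;
	return 1;
}

/* Models the rewritten ft_write_pending() fast path: no per-task SGLs to
 * stitch together any more, just the command's own table. */
static bool setup_write_ddp(struct se_cmd_model *cmd, unsigned int xid)
{
	if (!cmd->is_data_sg_io)
		return false;
	return ddp_target(xid, cmd->t_data_sg, cmd->t_data_nents) != 0;
}
```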