Diffstat (limited to 'drivers/target')
33 files changed, 3953 insertions, 5425 deletions
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index 57dcbc2d711..abe8ecbcdf0 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -3,9 +3,3 @@ config LOOPBACK_TARGET
 	help
 	  Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
 	  fabric loopback module.
-
-config LOOPBACK_TARGET_CDB_DEBUG
-	bool "TCM loopback fabric module CDB debug code"
-	depends on LOOPBACK_TARGET
-	help
-	  Say Y here to enable the TCM loopback fabric module CDB debug code
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 70c2e7fa666..aa2d6799723 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,6 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_tcq.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_transport.h>
@@ -80,7 +79,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 		set_host_byte(sc, DID_ERROR);
 		return NULL;
 	}
@@ -118,17 +117,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		T_TASK(se_cmd)->t_tasks_bidi = 1;
+		se_cmd->t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
-	if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
+	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 		set_host_byte(sc, DID_NO_CONNECT);
 		return NULL;
 	}
-	transport_device_setup_cmd(se_cmd);
 
 	return se_cmd;
 }
@@ -143,17 +141,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
-	void *mem_ptr, *mem_bidi_ptr = NULL;
-	u32 sg_no_bidi = 0;
+	struct scatterlist *sgl_bidi = NULL;
+	u32 sgl_bidi_count = 0;
 	int ret;
 	/*
 	 * Allocate the necessary tasks to complete the received CDB+data
 	 */
-	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
-	if (ret == -1) {
+	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+	if (ret == -ENOMEM) {
 		/* Out of Resources */
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
-	} else if (ret == -2) {
+	} else if (ret == -EINVAL) {
 		/*
 		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
 		 */
@@ -165,35 +163,21 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 		 */
 		return PYX_TRANSPORT_USE_SENSE_REASON;
 	}
+
 	/*
-	 * Setup the struct scatterlist memory from the received
-	 * struct scsi_cmnd.
+	 * For BIDI commands, pass in the extra READ buffer
+	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (scsi_sg_count(sc)) {
-		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
-		mem_ptr = (void *)scsi_sglist(sc);
-		/*
-		 * For BIDI commands, pass in the extra READ buffer
-		 * to transport_generic_map_mem_to_cmd() below..
-		 */
-		if (T_TASK(se_cmd)->t_tasks_bidi) {
-			struct scsi_data_buffer *sdb = scsi_in(sc);
+	if (se_cmd->t_tasks_bidi) {
+		struct scsi_data_buffer *sdb = scsi_in(sc);
 
-			mem_bidi_ptr = (void *)sdb->table.sgl;
-			sg_no_bidi = sdb->table.nents;
-		}
-	} else {
-		/*
-		 * Used for DMA_NONE
-		 */
-		mem_ptr = NULL;
+		sgl_bidi = sdb->table.sgl;
+		sgl_bidi_count = sdb->table.nents;
 	}
-	/*
-	 * Map the SG memory into struct se_mem->page linked list using the same
-	 * physical memory at sg->page_link.
-	 */
-	ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
-			scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+
+	/* Tell the core about our preallocated memory */
+	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
 	if (ret < 0)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
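Two core API changes thread through the tcm_loop_new_cmd_map() hunks above: BIDI buffers are now passed as real struct scatterlist pointers rather than void *, and transport_generic_allocate_tasks() reports failure with standard negative errno values instead of the old -1/-2 magic numbers. A minimal sketch of the new calling convention follows; my_fabric_new_cmd_map() and my_fabric_get_cdb() are hypothetical stand-ins for a fabric's own callback (tcm_loop reads the CDB from sc->cmnd):

/*
 * Hedged sketch of the errno convention, not code from this commit.
 */
static int my_fabric_new_cmd_map(struct se_cmd *se_cmd)
{
	unsigned char *cdb = my_fabric_get_cdb(se_cmd);	/* hypothetical helper */
	int ret;

	ret = transport_generic_allocate_tasks(se_cmd, cdb);
	if (ret == -ENOMEM)		/* out of resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	if (ret == -EINVAL)		/* e.g. SAM_STAT_RESERVATION_CONFLICT */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	return 0;
}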
@@ -216,13 +200,10 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
 	 * Release the struct se_cmd, which will make a callback to release
 	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
 	 */
-	transport_generic_free_cmd(se_cmd, 0, 1, 0);
+	transport_generic_free_cmd(se_cmd, 0, 0);
 }
 
-/*
- * Called from struct target_core_fabric_ops->release_cmd_to_pool()
- */
-static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
+static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
 {
 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 				struct tcm_loop_cmd, tl_se_cmd);
@@ -300,7 +281,7 @@ static int tcm_loop_queuecommand(
 	struct tcm_loop_hba *tl_hba;
 	struct tcm_loop_tpg *tl_tpg;
 
-	TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
 		" scsi_buf_len: %u\n", sc->device->host->host_no,
 		sc->device->id, sc->device->channel, sc->device->lun,
 		sc->cmnd[0], scsi_bufflen(sc));
@@ -350,7 +331,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	 */
 	tl_nexus = tl_hba->tl_nexus;
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to perform device reset without"
+		pr_err("Unable to perform device reset without"
 				" active I_T Nexus\n");
 		return FAILED;
 	}
@@ -363,13 +344,13 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+		pr_err("Unable to allocate memory for tl_cmd\n");
 		return FAILED;
 	}
 
 	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 	if (!tl_tmr) {
-		printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+		pr_err("Unable to allocate memory for tl_tmr\n");
 		goto release;
 	}
 	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
@@ -384,14 +365,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 	/*
 	 * Allocate the LUN_RESET TMR
 	 */
-	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
 				TMR_LUN_RESET);
 	if (IS_ERR(se_cmd->se_tmr_req))
 		goto release;
 	/*
 	 * Locate the underlying TCM struct se_lun from sc->device->lun
 	 */
-	if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
 		goto release;
 	/*
 	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
@@ -407,7 +388,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 		SUCCESS : FAILED;
 release:
 	if (se_cmd)
-		transport_generic_free_cmd(se_cmd, 1, 1, 0);
+		transport_generic_free_cmd(se_cmd, 1, 0);
 	else
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 	kfree(tl_tmr);
@@ -454,7 +435,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 	sh = scsi_host_alloc(&tcm_loop_driver_template,
 			sizeof(struct tcm_loop_hba));
 	if (!sh) {
-		printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+		pr_err("Unable to allocate struct scsi_host\n");
 		return -ENODEV;
 	}
 	tl_hba->sh = sh;
@@ -473,7 +454,7 @@ static int tcm_loop_driver_probe(struct device *dev)
 
 	error = scsi_add_host(sh, &tl_hba->dev);
 	if (error) {
-		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+		pr_err("%s: scsi_add_host failed\n", __func__);
 		scsi_host_put(sh);
 		return -ENODEV;
 	}
@@ -514,7 +495,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
 
 	ret = device_register(&tl_hba->dev);
 	if (ret) {
-		printk(KERN_ERR "device_register() failed for"
+		pr_err("device_register() failed for"
 				" tl_hba->dev: %d\n", ret);
 		return -ENODEV;
 	}
@@ -532,24 +513,24 @@ static int tcm_loop_alloc_core_bus(void)
 
 	tcm_loop_primary = root_device_register("tcm_loop_0");
 	if (IS_ERR(tcm_loop_primary)) {
-		printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+		pr_err("Unable to allocate tcm_loop_primary\n");
 		return PTR_ERR(tcm_loop_primary);
 	}
 
 	ret = bus_register(&tcm_loop_lld_bus);
 	if (ret) {
-		printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 		goto dev_unreg;
 	}
 
 	ret = driver_register(&tcm_loop_driverfs);
 	if (ret) {
-		printk(KERN_ERR "driver_register() failed for"
+		pr_err("driver_register() failed for"
 				"tcm_loop_driverfs\n");
 		goto bus_unreg;
 	}
 
-	printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+	pr_debug("Initialized TCM Loop Core Bus\n");
 	return ret;
 
 bus_unreg:
@@ -565,7 +546,7 @@ static void tcm_loop_release_core_bus(void)
 	bus_unregister(&tcm_loop_lld_bus);
 	root_device_unregister(tcm_loop_primary);
 
-	printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+	pr_debug("Releasing TCM Loop Core BUS\n");
 }
 
 static char *tcm_loop_get_fabric_name(void)
@@ -593,7 +574,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 	case SCSI_PROTOCOL_ISCSI:
 		return iscsi_get_fabric_proto_ident(se_tpg);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -649,7 +630,7 @@ static u32 tcm_loop_get_pr_transport_id(
 		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
 					format_code, buf);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -679,7 +660,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
 		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
 					format_code);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -713,7 +694,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
 		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
 					port_nexus_ptr);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -762,7 +743,7 @@ static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
 
 	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
 	if (!tl_nacl) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+		pr_err("Unable to allocate struct tcm_loop_nacl\n");
 		return NULL;
 	}
 
@@ -784,16 +765,6 @@ static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
 	return 1;
 }
 
-static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd)
-{
-	/*
-	 * Since TCM_loop is already passing struct scatterlist data from
-	 * struct scsi_cmnd, no more Linux/SCSI failure dependent state need
-	 * to be handled here.
-	 */
-	return;
-}
-
 static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
 {
 	/*
@@ -882,7 +853,7 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 		" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	sc->result = SAM_STAT_GOOD;
@@ -897,14 +868,14 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	if (se_cmd->sense_buffer &&
 	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
 	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
 
-		memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
 				SCSI_SENSE_BUFFERSIZE);
 		sc->result = SAM_STAT_CHECK_CONDITION;
 		set_driver_byte(sc, DRIVER_SENSE);
@@ -972,7 +943,7 @@ static int tcm_loop_port_link(
 	 */
 	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 	return 0;
 }
 
@@ -990,7 +961,7 @@ static void tcm_loop_port_unlink(
 	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 				se_lun->unpacked_lun);
 	if (!sd) {
-		printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+		pr_err("Unable to locate struct scsi_device for %d:%d:"
 			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 		return;
 	}
@@ -1003,7 +974,7 @@ static void tcm_loop_port_unlink(
 	atomic_dec(&tl_tpg->tl_tpg_port_count);
 	smp_mb__after_atomic_dec();
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
 
 /* End items for tcm_loop_port_cit */
@@ -1020,14 +991,14 @@ static int tcm_loop_make_nexus(
 	int ret = -ENOMEM;
 
 	if (tl_tpg->tl_hba->tl_nexus) {
-		printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
 		return -EEXIST;
 	}
 	se_tpg = &tl_tpg->tl_se_tpg;
 
 	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 		return -ENOMEM;
 	}
 	/*
@@ -1054,9 +1025,9 @@ static int tcm_loop_make_nexus(
 	 * transport_register_session()
 	 */
 	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
-			tl_nexus->se_sess, (void *)tl_nexus);
+			tl_nexus->se_sess, tl_nexus);
 	tl_tpg->tl_hba->tl_nexus = tl_nexus;
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		name);
 	return 0;
@@ -1082,13 +1053,13 @@ static int tcm_loop_drop_nexus(
 		return -ENODEV;
 
 	if (atomic_read(&tpg->tl_tpg_port_count)) {
-		printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+		pr_err("Unable to remove TCM_Loop I_T Nexus with"
 			" active TPG port count: %d\n",
 			atomic_read(&tpg->tl_tpg_port_count));
 		return -EPERM;
 	}
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		tl_nexus->se_sess->se_node_acl->initiatorname);
 	/*
@@ -1144,7 +1115,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	 * tcm_loop_make_nexus()
 	 */
 	if (strlen(page) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+		pr_err("Emulated NAA Sas Address: %s, exceeds"
 				" max: %d\n", page, TL_WWN_ADDR_LEN);
 		return -EINVAL;
 	}
@@ -1153,7 +1124,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "naa.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
-			printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+			pr_err("Passed SAS Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1164,7 +1135,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "fc.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
-			printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+			pr_err("Passed FCP Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1175,7 +1146,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 	ptr = strstr(i_port, "iqn.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
-			printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+			pr_err("Passed iSCSI Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1183,7 +1154,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
 		port_ptr = &i_port[0];
 		goto check_newline;
 	}
-	printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+	pr_err("Unable to locate prefix for emulated Initiator Port:"
 		" %s\n", i_port);
 	return -EINVAL;
 	/*
@@ -1223,15 +1194,15 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 
 	tpgt_str = strstr(name, "tpgt_");
 	if (!tpgt_str) {
-		printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+		pr_err("Unable to locate \"tpgt_#\" directory"
 				" group\n");
 		return ERR_PTR(-EINVAL);
 	}
 	tpgt_str += 5; /* Skip ahead of "tpgt_" */
 	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
 
-	if (tpgt > TL_TPGS_PER_HBA) {
-		printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+	if (tpgt >= TL_TPGS_PER_HBA) {
+		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
 				" %u\n", tpgt, TL_TPGS_PER_HBA);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1242,12 +1213,12 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 	 * Register the tl_tpg as a emulated SAS TCM Target Endpoint
 	 */
 	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
-			wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
 			TRANSPORT_TPG_TYPE_NORMAL);
 	if (ret < 0)
 		return ERR_PTR(-ENOMEM);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 
@@ -1274,7 +1245,7 @@ void tcm_loop_drop_naa_tpg(
 	 */
 	core_tpg_deregister(se_tpg);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 }
@@ -1295,7 +1266,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
 
 	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
 	if (!tl_hba) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+		pr_err("Unable to allocate struct tcm_loop_hba\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	/*
@@ -1314,22 +1285,21 @@ struct se_wwn *tcm_loop_make_scsi_hba(
 		goto check_len;
 	}
 	ptr = strstr(name, "iqn.");
-	if (ptr) {
-		tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
-		goto check_len;
+	if (!ptr) {
+		pr_err("Unable to locate prefix for emulated Target "
+				"Port: %s\n", name);
+		ret = -EINVAL;
+		goto out;
 	}
-
-	printk(KERN_ERR "Unable to locate prefix for emulated Target Port:"
-			" %s\n", name);
-	return ERR_PTR(-EINVAL);
+	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
 
 check_len:
 	if (strlen(name) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+		pr_err("Emulated NAA %s Address: %s, exceeds"
 			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
 			TL_WWN_ADDR_LEN);
-		kfree(tl_hba);
-		return ERR_PTR(-EINVAL);
+		ret = -EINVAL;
+		goto out;
 	}
 	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
@@ -1344,7 +1314,7 @@ check_len:
 	sh = tl_hba->sh;
 	tcm_loop_hba_no_cnt++;
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
 		" %s Address: %s at Linux/SCSI Host ID: %d\n",
 		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
@@ -1367,7 +1337,7 @@ void tcm_loop_drop_scsi_hba(
 	 */
 	device_unregister(&tl_hba->dev);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
 		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
 		config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
@@ -1402,9 +1372,9 @@ static int tcm_loop_register_configfs(void)
 	 * Register the top level struct config_item_type with TCM core
 	 */
 	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
-	if (!fabric) {
-		printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
-		return -1;
+	if (IS_ERR(fabric)) {
+		pr_err("tcm_loop_register_configfs() failed!\n");
+		return PTR_ERR(fabric);
 	}
 	/*
 	 * Setup the fabric API of function pointers used by target_core_mod
@@ -1436,19 +1406,11 @@ static int tcm_loop_register_configfs(void)
 					&tcm_loop_tpg_release_fabric_acl;
 	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
 	/*
-	 * Since tcm_loop is mapping physical memory from Linux/SCSI
-	 * struct scatterlist arrays for each struct scsi_cmnd I/O,
-	 * we do not need TCM to allocate a iovec array for
-	 * virtual memory address mappings
-	 */
-	fabric->tf_ops.alloc_cmd_iovecs = NULL;
-	/*
 	 * Used for setting up remaining TCM resources in process context
 	 */
 	fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
 	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
-	fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
-	fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
+	fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
 	fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
 	fabric->tf_ops.close_session = &tcm_loop_close_session;
 	fabric->tf_ops.stop_session = &tcm_loop_stop_session;
@@ -1465,7 +1427,6 @@ static int tcm_loop_register_configfs(void)
 					&tcm_loop_set_default_node_attributes;
 	fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
 	fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
-	fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure;
 	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
 	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
 	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
@@ -1503,7 +1464,7 @@ static int tcm_loop_register_configfs(void)
 	 */
 	ret = target_fabric_configfs_register(fabric);
 	if (ret < 0) {
-		printk(KERN_ERR "target_fabric_configfs_register() for"
+		pr_err("target_fabric_configfs_register() for"
 				" TCM_Loop failed!\n");
 		target_fabric_configfs_free(fabric);
 		return -1;
@@ -1512,7 +1473,7 @@ static int tcm_loop_register_configfs(void)
 	 * Setup our local pointer to *fabric.
 	 */
 	tcm_loop_fabric_configfs = fabric;
-	printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+	pr_debug("TCM_LOOP[0] - Set fabric ->"
 			" tcm_loop_fabric_configfs\n");
 	return 0;
 }
@@ -1524,7 +1485,7 @@ static void tcm_loop_deregister_configfs(void)
 	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
 	tcm_loop_fabric_configfs = NULL;
 
-	printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+	pr_debug("TCM_LOOP[0] - Cleared"
 			" tcm_loop_fabric_configfs\n");
 }
 
@@ -1537,7 +1498,7 @@ static int __init tcm_loop_fabric_init(void)
 				__alignof__(struct tcm_loop_cmd),
 				0, NULL);
 	if (!tcm_loop_cmd_cache) {
-		printk(KERN_ERR "kmem_cache_create() for"
+		pr_debug("kmem_cache_create() for"
 			" tcm_loop_cmd_cache failed\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7e9f7ab4554..6b76c7a22bb 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,12 +16,6 @@
  */
 #define TL_SCSI_MAX_CMD_LEN		32
 
-#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
-# define TL_CDB_DEBUG(x...)		printk(KERN_INFO x)
-#else
-# define TL_CDB_DEBUG(x...)
-#endif
-
 struct tcm_loop_cmd {
 	/* State of Linux/SCSI CDB+Data descriptor */
 	u32 sc_cmd_state;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47abb42d9c3..98c98a3a025 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -46,6 +46,14 @@ static int core_alua_set_tg_pt_secondary_state(
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 	struct se_port *port, int explict, int offline);
 
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
 /*
  * REPORT_TARGET_PORT_GROUPS
  *
@@ -53,16 +61,18 @@ static int core_alua_set_tg_pt_secondary_state(
  */
 int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
 				    Target port group descriptor */
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+	buf = transport_kmap_first_data_page(cmd);
+
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		/*
 		 * PREF: Preferred target port bit, determine if this
@@ -124,7 +134,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 		}
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	}
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 	/*
 	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 	 */
@@ -133,6 +143,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 	buf[2] = ((rd_len >> 8) & 0xff);
 	buf[3] = (rd_len & 0xff);
 
+	transport_kunmap_first_data_page(cmd);
+
 	return 0;
 }
 
@@ -143,45 +155,53 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
  */
 int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_device *dev = SE_DEV(cmd);
-	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
-	struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
-	struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+	struct se_device *dev = cmd->se_dev;
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
 	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
-	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+	unsigned char *buf;
+	unsigned char *ptr;
 	u32 len = 4; /* Skip over RESERVED area in header */
 	int alua_access_state, primary = 0, rc;
 	u16 tg_pt_id, rtpi;
 
-	if (!(l_port))
+	if (!l_port)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	buf = transport_kmap_first_data_page(cmd);
+
 	/*
 	 * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
 	 * for the local tg_pt_gp.
 	 */
 	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
-	if (!(l_tg_pt_gp_mem)) {
-		printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	if (!l_tg_pt_gp_mem) {
+		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		goto out;
 	}
 	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
-	if (!(l_tg_pt_gp)) {
+	if (!l_tg_pt_gp) {
 		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		goto out;
 	}
 	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
 	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
-	if (!(rc)) {
-		printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+	if (!rc) {
+		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 				" while TPGS_EXPLICT_ALUA is disabled\n");
-		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		goto out;
 	}
 
+	ptr = &buf[4]; /* Skip over RESERVED area in header */
+
 	while (len < cmd->data_length) {
 		alua_access_state = (ptr[0] & 0x0f);
 		/*
@@ -201,7 +221,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 			 * REQUEST, and the additional sense code set to INVALID
 			 * FIELD IN PARAMETER LIST.
 			 */
-			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
 		}
 		rc = -1;
 		/*
@@ -224,11 +245,11 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 			 * Locate the matching target port group ID from
 			 * the global tg_pt_gp list
 			 */
-			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 			list_for_each_entry(tg_pt_gp,
-					&T10_ALUA(su_dev)->tg_pt_gps_list,
+					&su_dev->t10_alua.tg_pt_gps_list,
 					tg_pt_gp_list) {
-				if (!(tg_pt_gp->tg_pt_gp_valid_id))
+				if (!tg_pt_gp->tg_pt_gp_valid_id)
 					continue;
 
 				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
@@ -236,24 +257,26 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 
 				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 				smp_mb__after_atomic_inc();
-				spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
 				rc = core_alua_do_port_transition(tg_pt_gp,
 						dev, l_port, nacl,
 						alua_access_state, 1);
 
-				spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 				smp_mb__after_atomic_dec();
 				break;
 			}
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * If not matching target port group ID can be located
 			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
 			 */
-			if (rc != 0)
-				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			if (rc != 0) {
+				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+				goto out;
+			}
 		} else {
 			/*
 			 * Extact the RELATIVE TARGET PORT IDENTIFIER to identify
@@ -287,14 +310,19 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
 			 * be located, throw an exception with ASCQ:
 			 * INVALID_PARAMETER_LIST
 			 */
-			if (rc != 0)
-				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			if (rc != 0) {
+				rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+				goto out;
+			}
 		}
 
 		ptr += 4;
 		len += 4;
 	}
 
+out:
+	transport_kunmap_first_data_page(cmd);
+
 	return 0;
 }
 
@@ -464,13 +492,13 @@ static int core_alua_state_check(
 	unsigned char *cdb,
 	u8 *alua_ascq)
 {
-	struct se_lun *lun = SE_LUN(cmd);
+	struct se_lun *lun = cmd->se_lun;
 	struct se_port *port = lun->lun_sep;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	int out_alua_state, nonop_delay_msecs;
 
-	if (!(port))
+	if (!port)
 		return 0;
 	/*
 	 * First, check for a struct se_port specific secondary ALUA target port
@@ -478,7 +506,7 @@ static int core_alua_state_check(
 	 */
 	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
 		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-		printk(KERN_INFO "ALUA: Got secondary offline status for local"
+		pr_debug("ALUA: Got secondary offline status for local"
 				" target port\n");
 		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
 		return 1;
@@ -520,9 +548,9 @@ static int core_alua_state_check(
 	 */
 	case ALUA_ACCESS_STATE_OFFLINE:
 	default:
-		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+		pr_err("Unknown ALUA access state: 0x%02x\n",
 				out_alua_state);
-		return -1;
+		return -EINVAL;
 	}
 
 	return 0;
@@ -552,8 +580,8 @@ static int core_alua_check_transition(int state, int *primary)
 		*primary = 0;
 		break;
 	default:
-		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
-		return -1;
+		pr_err("Unknown ALUA access state: 0x%02x\n", state);
+		return -EINVAL;
 	}
 
 	return 0;
@@ -610,7 +638,7 @@ int core_alua_check_nonop_delay(
 	 * The ALUA Active/NonOptimized access state delay can be disabled
 	 * in via configfs with a value of zero
 	 */
-	if (!(cmd->alua_nonop_delay))
+	if (!cmd->alua_nonop_delay)
 		return 0;
 	/*
 	 * struct se_cmd->alua_nonop_delay gets set by a target port group
@@ -639,7 +667,7 @@ static int core_alua_write_tpg_metadata(
 
 	file = filp_open(path, flags, 0600);
 	if (IS_ERR(file) || !file || !file->f_dentry) {
-		printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+		pr_err("filp_open(%s) for ALUA metadata failed\n",
 			path);
 		return -ENODEV;
 	}
@@ -653,7 +681,7 @@ static int core_alua_write_tpg_metadata(
 	set_fs(old_fs);
 
 	if (ret < 0) {
-		printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+		pr_err("Error writing ALUA metadata file: %s\n", path);
 		filp_close(file, NULL);
 		return -EIO;
 	}
@@ -750,7 +778,7 @@ static int core_alua_do_transition_tg_pt(
 			 * se_deve->se_lun_acl pointer may be NULL for a
 			 * entry created without explict Node+MappedLUN ACLs
 			 */
-			if (!(lacl))
+			if (!lacl)
 				continue;
 
 			if (explict &&
@@ -792,7 +820,7 @@ static int core_alua_do_transition_tg_pt(
 	 */
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
 
-	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" from primary access state %s to %s\n", (explict) ? "explict" :
 		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
 		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
@@ -823,8 +851,8 @@ int core_alua_do_port_transition(
 		return -EINVAL;
 
 	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
-	if (!(md_buf)) {
-		printk("Unable to allocate buf for ALUA metadata\n");
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
 		return -ENOMEM;
 	}
 
@@ -839,7 +867,7 @@ int core_alua_do_port_transition(
 	 * we only do transition on the passed *l_tp_pt_gp, and not
	 * on all of the matching target port groups IDs in default_lu_gp.
 	 */
-	if (!(lu_gp->lu_gp_id)) {
+	if (!lu_gp->lu_gp_id) {
 		/*
 		 * core_alua_do_transition_tg_pt() will always return
 		 * success.
@@ -866,12 +894,12 @@ int core_alua_do_port_transition(
 		smp_mb__after_atomic_inc();
 		spin_unlock(&lu_gp->lu_gp_lock);
 
-		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 		list_for_each_entry(tg_pt_gp,
-				&T10_ALUA(su_dev)->tg_pt_gps_list,
+				&su_dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {
 
-			if (!(tg_pt_gp->tg_pt_gp_valid_id))
+			if (!tg_pt_gp->tg_pt_gp_valid_id)
 				continue;
 			/*
 			 * If the target behavior port asymmetric access state
@@ -893,7 +921,7 @@ int core_alua_do_port_transition(
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			/*
 			 * core_alua_do_transition_tg_pt() will always return
 			 * success.
@@ -901,11 +929,11 @@ int core_alua_do_port_transition(
 			core_alua_do_transition_tg_pt(tg_pt_gp, port,
 					nacl, md_buf, new_state, explict);
 
-			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
 		}
-		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
 		spin_lock(&lu_gp->lu_gp_lock);
 		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -913,7 +941,7 @@ int core_alua_do_port_transition(
 	}
 	spin_unlock(&lu_gp->lu_gp_lock);
 
-	printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
 		" Group IDs: %hu %s transition to primary state: %s\n",
 		config_item_name(&lu_gp->lu_gp_group.cg_item),
 		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
@@ -942,11 +970,11 @@ static int core_alua_update_tpg_secondary_metadata(
 
 	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
 	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
-			TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
 
-	if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
 		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
-				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
 
 	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
 			"alua_tg_pt_status=0x%02x\n",
@@ -954,7 +982,7 @@ static int core_alua_update_tpg_secondary_metadata(
 			port->sep_tg_pt_secondary_stat);
 
 	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
-			TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
 			port->sep_lun->unpacked_lun);
 
 	return core_alua_write_tpg_metadata(path, md_buf, len);
@@ -973,11 +1001,11 @@ static int core_alua_set_tg_pt_secondary_state(
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if (!(tg_pt_gp)) {
+	if (!tg_pt_gp) {
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		printk(KERN_ERR "Unable to complete secondary state"
+		pr_err("Unable to complete secondary state"
 				" transition\n");
-		return -1;
+		return -EINVAL;
 	}
 	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
 	/*
@@ -994,7 +1022,7 @@ static int core_alua_set_tg_pt_secondary_state(
 		ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
 		ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
 
-	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" to secondary access state: %s\n", (explict) ? "explict" :
 		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
 		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
@@ -1012,10 +1040,10 @@ static int core_alua_set_tg_pt_secondary_state(
 	 */
 	if (port->sep_tg_pt_secondary_write_md) {
 		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
-		if (!(md_buf)) {
-			printk(KERN_ERR "Unable to allocate md_buf for"
+		if (!md_buf) {
+			pr_err("Unable to allocate md_buf for"
 				" secondary ALUA access metadata\n");
-			return -1;
+			return -ENOMEM;
 		}
 		mutex_lock(&port->sep_tg_pt_md_mutex);
 		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
@@ -1034,19 +1062,19 @@ core_alua_allocate_lu_gp(const char *name, int def_group)
 	struct t10_alua_lu_gp *lu_gp;
 
 	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
-	if (!(lu_gp)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+	if (!lu_gp) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
 	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
 	spin_lock_init(&lu_gp->lu_gp_lock);
 	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
 
 	if (def_group) {
-		lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+		lu_gp->lu_gp_id = alua_lu_gps_counter++;
 		lu_gp->lu_gp_valid_id = 1;
-		se_global->alua_lu_gps_count++;
+		alua_lu_gps_count++;
 	}
 
 	return lu_gp;
@@ -1060,41 +1088,41 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
 	 * The lu_gp->lu_gp_id may only be set once..
 	 */
 	if (lu_gp->lu_gp_valid_id) {
-		printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+		pr_warn("ALUA LU Group already has a valid ID,"
 			" ignoring request\n");
-		return -1;
+		return -EINVAL;
 	}
 
-	spin_lock(&se_global->lu_gps_lock);
-	if (se_global->alua_lu_gps_count == 0x0000ffff) {
-		printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+	spin_lock(&lu_gps_lock);
+	if (alua_lu_gps_count == 0x0000ffff) {
+		pr_err("Maximum ALUA alua_lu_gps_count:"
 				" 0x0000ffff reached\n");
-		spin_unlock(&se_global->lu_gps_lock);
+		spin_unlock(&lu_gps_lock);
 		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
-		return -1;
+		return -ENOSPC;
 	}
 again:
 	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
-				se_global->alua_lu_gps_counter++;
+				alua_lu_gps_counter++;
 
-	list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
 		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
-			if (!(lu_gp_id))
+			if (!lu_gp_id)
 				goto again;
 
-			printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+			pr_warn("ALUA Logical Unit Group ID: %hu"
 				" already exists, ignoring request\n",
 				lu_gp_id);
-			spin_unlock(&se_global->lu_gps_lock);
-			return -1;
+			spin_unlock(&lu_gps_lock);
+			return -EINVAL;
 		}
 	}
 
 	lu_gp->lu_gp_id = lu_gp_id_tmp;
 	lu_gp->lu_gp_valid_id = 1;
-	list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
-	se_global->alua_lu_gps_count++;
-	spin_unlock(&se_global->lu_gps_lock);
+	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
+	alua_lu_gps_count++;
+	spin_unlock(&lu_gps_lock);
 
 	return 0;
 }
@@ -1105,8 +1133,8 @@ core_alua_allocate_lu_gp_mem(struct se_device *dev)
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 
 	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
-	if (!(lu_gp_mem)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+	if (!lu_gp_mem) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
@@ -1130,11 +1158,11 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 	 * no associations can be made while we are releasing
 	 * struct t10_alua_lu_gp.
 	 */
-	spin_lock(&se_global->lu_gps_lock);
+	spin_lock(&lu_gps_lock);
 	atomic_set(&lu_gp->lu_gp_shutdown, 1);
-	list_del(&lu_gp->lu_gp_list);
-	se_global->alua_lu_gps_count--;
-	spin_unlock(&se_global->lu_gps_lock);
+	list_del(&lu_gp->lu_gp_node);
+	alua_lu_gps_count--;
+	spin_unlock(&lu_gps_lock);
 	/*
 	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
 	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
@@ -1165,9 +1193,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 		 * we want to re-assocate a given lu_gp_mem with default_lu_gp.
 		 */
 		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
-		if (lu_gp != se_global->default_lu_gp)
+		if (lu_gp != default_lu_gp)
 			__core_alua_attach_lu_gp_mem(lu_gp_mem,
-					se_global->default_lu_gp);
+					default_lu_gp);
 		else
 			lu_gp_mem->lu_gp = NULL;
 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
@@ -1182,7 +1210,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 void core_alua_free_lu_gp_mem(struct se_device *dev)
 {
 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua *alua = &su_dev->t10_alua;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 
@@ -1190,7 +1218,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
 		return;
 
 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
-	if (!(lu_gp_mem))
+	if (!lu_gp_mem)
 		return;
 
 	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
@@ -1198,7 +1226,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
 
 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = lu_gp_mem->lu_gp;
-	if ((lu_gp)) {
+	if (lu_gp) {
 		spin_lock(&lu_gp->lu_gp_lock);
 		if (lu_gp_mem->lu_gp_assoc) {
 			list_del(&lu_gp_mem->lu_gp_mem_list);
@@ -1218,27 +1246,27 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
 	struct t10_alua_lu_gp *lu_gp;
 	struct config_item *ci;
 
-	spin_lock(&se_global->lu_gps_lock);
-	list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
-		if (!(lu_gp->lu_gp_valid_id))
+	spin_lock(&lu_gps_lock);
+	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
+		if (!lu_gp->lu_gp_valid_id)
 			continue;
 		ci = &lu_gp->lu_gp_group.cg_item;
-		if (!(strcmp(config_item_name(ci), name))) {
+		if (!strcmp(config_item_name(ci), name)) {
 			atomic_inc(&lu_gp->lu_gp_ref_cnt);
-			spin_unlock(&se_global->lu_gps_lock);
+			spin_unlock(&lu_gps_lock);
 			return lu_gp;
 		}
 	}
-	spin_unlock(&se_global->lu_gps_lock);
+	spin_unlock(&lu_gps_lock);
 
 	return NULL;
 }
 
 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
 {
-	spin_lock(&se_global->lu_gps_lock);
+	spin_lock(&lu_gps_lock);
 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
-	spin_unlock(&se_global->lu_gps_lock);
+	spin_unlock(&lu_gps_lock);
 }
 
 /*
@@ -1279,8 +1307,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 
 	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
-	if (!(tg_pt_gp)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+	if (!tg_pt_gp) {
+		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
 		return NULL;
 	}
 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
@@ -1304,14 +1332,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
 	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
 
 	if (def_group) {
-		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 		tg_pt_gp->tg_pt_gp_id =
-				T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+				su_dev->t10_alua.alua_tg_pt_gps_counter++;
 		tg_pt_gp->tg_pt_gp_valid_id = 1;
-		T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+		su_dev->t10_alua.alua_tg_pt_gps_count++;
 		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-			      &T10_ALUA(su_dev)->tg_pt_gps_list);
-		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			      &su_dev->t10_alua.tg_pt_gps_list);
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 	}
 
 	return tg_pt_gp;
@@ -1328,42 +1356,42 @@ int core_alua_set_tg_pt_gp_id(
 	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
 	 */
 	if (tg_pt_gp->tg_pt_gp_valid_id) {
-		printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
-		return -1;
+		return -EINVAL;
 	}
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-	if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
-		printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
 			" 0x0000ffff reached\n");
-		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
-		return -1;
+		return -ENOSPC;
 	}
again:
 	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
-			T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+			su_dev->t10_alua.alua_tg_pt_gps_counter++;
 
-	list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
-			if (!(tg_pt_gp_id))
+			if (!tg_pt_gp_id)
 				goto again;
 
-			printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+			pr_err("ALUA Target Port Group ID: %hu already"
 				" exists, ignoring request\n", tg_pt_gp_id);
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-			return -1;
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			return -EINVAL;
 		}
 	}
 
 	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
 	tg_pt_gp->tg_pt_gp_valid_id = 1;
 	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-			&T10_ALUA(su_dev)->tg_pt_gps_list);
-	T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			&su_dev->t10_alua.tg_pt_gps_list);
+	su_dev->t10_alua.alua_tg_pt_gps_count++;
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
 	return 0;
 }
@@ -1375,8 +1403,8 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 
 	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
 				GFP_KERNEL);
-	if (!(tg_pt_gp_mem)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+	if (!tg_pt_gp_mem) {
+		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1403,10 +1431,10 @@ void core_alua_free_tg_pt_gp(
 	 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
 	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
 	 */
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 	list_del(&tg_pt_gp->tg_pt_gp_list);
-	T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	su_dev->t10_alua.alua_tg_pt_gps_counter--;
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
 	 * core_alua_get_tg_pt_gp_by_name() in
@@ -1438,9 +1466,9 @@ void core_alua_free_tg_pt_gp(
 		 * default_tg_pt_gp.
 		 */
 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+		if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
 			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-					T10_ALUA(su_dev)->default_tg_pt_gp);
+					su_dev->t10_alua.default_tg_pt_gp);
 		} else
 			tg_pt_gp_mem->tg_pt_gp = NULL;
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1455,7 +1483,7 @@ void core_alua_free_tg_pt_gp(
 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 {
 	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
-	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua *alua = &su_dev->t10_alua;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 
@@ -1463,7 +1491,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 		return;
 
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem))
+	if (!tg_pt_gp_mem)
 		return;
 
 	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
@@ -1471,7 +1499,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if ((tg_pt_gp)) {
+	if (tg_pt_gp) {
 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
 			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1493,19 +1521,19 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct config_item *ci;
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
-		if (!(tg_pt_gp->tg_pt_gp_valid_id))
+		if (!tg_pt_gp->tg_pt_gp_valid_id)
 			continue;
 		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
-		if (!(strcmp(config_item_name(ci), name))) {
+		if (!strcmp(config_item_name(ci), name)) {
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			return tg_pt_gp;
 		}
 	}
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
 	return NULL;
 }
@@ -1515,9 +1543,9 @@ static void core_alua_put_tg_pt_gp_from_name(
 {
 	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 }
 
 /*
@@ -1555,7 +1583,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
 {
 	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
 	struct config_item *tg_pt_ci;
-	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua *alua = &su_dev->t10_alua;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	ssize_t len = 0;
@@ -1564,12 +1592,12 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
 		return len;
 
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem))
		return len;
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if ((tg_pt_gp)) {
+	if (tg_pt_gp) {
 		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
 		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
@@ -1605,16 +1633,16 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	tpg = port->sep_tpg;
 	lun = port->sep_lun;
 
-	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
-		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
-			" %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
-			TPG_TFO(tpg)->tpg_get_tag(tpg),
+	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+		pr_warn("SPC3_ALUA_EMULATED not enabled for"
+			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg),
 			config_item_name(&lun->lun_group.cg_item));
 		return -EINVAL;
 	}
 
 	if (count > TG_PT_GROUP_NAME_BUF) {
-		printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+		pr_err("ALUA Target Port Group alias too large!\n");
 		return -EINVAL;
 	}
 	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
@@ -1631,31 +1659,31 @@ ssize_t core_alua_store_tg_pt_gp_info(
 		 */
 		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
					strstrip(buf));
-		if (!(tg_pt_gp_new))
+		if (!tg_pt_gp_new)
 			return -ENODEV;
 	}
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem)) {
+	if (!tg_pt_gp_mem) {
 		if (tg_pt_gp_new)
 			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
-		printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+		pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
 		return -EINVAL;
 	}
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if ((tg_pt_gp)) {
+	if (tg_pt_gp) {
 		/*
 		 * Clearing an existing tg_pt_gp association, and replacing
 		 * with the default_tg_pt_gp.
 		 */
-		if (!(tg_pt_gp_new)) {
-			printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+		if (!tg_pt_gp_new) {
+			pr_debug("Target_Core_ConfigFS: Moving"
				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
				" alua/%s, ID: %hu back to"
				" default_tg_pt_gp\n",
-				TPG_TFO(tpg)->tpg_get_wwn(tpg),
-				TPG_TFO(tpg)->tpg_get_tag(tpg),
+				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+				tpg->se_tpg_tfo->tpg_get_tag(tpg),
				config_item_name(&lun->lun_group.cg_item),
				config_item_name(
					&tg_pt_gp->tg_pt_gp_group.cg_item),
@@ -1663,7 +1691,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 
 			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
 			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-					T10_ALUA(su_dev)->default_tg_pt_gp);
+					su_dev->t10_alua.default_tg_pt_gp);
 			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
 			return count;
@@ -1679,10 +1707,10 @@ ssize_t core_alua_store_tg_pt_gp_info(
 	 */
 	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
 		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
-		"Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
-		TPG_TFO(tpg)->tpg_get_tag(tpg),
+		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg),
 		config_item_name(&lun->lun_group.cg_item),
 		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
 		tg_pt_gp_new->tg_pt_gp_id);
@@ -1716,11 +1744,11 @@ ssize_t core_alua_store_access_type(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_access_type\n");
+		pr_err("Unable to extract alua_access_type\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
-		printk(KERN_ERR "Illegal value for alua_access_type:"
+		pr_err("Illegal value for alua_access_type:"
 				" %lu\n", tmp);
 		return -EINVAL;
 	}
@@ -1754,11 +1782,11 @@ ssize_t core_alua_store_nonop_delay_msecs(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+		pr_err("Unable to extract nonop_delay_msecs\n");
 		return -EINVAL;
 	}
 	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
-		printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
 			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
 			ALUA_MAX_NONOP_DELAY_MSECS);
 		return -EINVAL;
@@ -1785,11 +1813,11 @@ ssize_t core_alua_store_trans_delay_msecs(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+		pr_err("Unable to extract trans_delay_msecs\n");
 		return -EINVAL;
 	}
 	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
-		printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+		pr_err("Passed trans_delay_msecs: %lu, exceeds"
 			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
 			ALUA_MAX_TRANS_DELAY_MSECS);
 		return -EINVAL;
@@ -1816,11 +1844,11 @@ ssize_t core_alua_store_preferred_bit(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+		pr_err("Unable to extract preferred ALUA value\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
-		printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
 		return -EINVAL;
 	}
 	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
@@ -1830,7 +1858,7 @@ ssize_t core_alua_store_preferred_bit(
 
 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
 {
-	if (!(lun->lun_sep))
+	if (!lun->lun_sep)
 		return -ENODEV;
 
 	return sprintf(page, "%d\n",
@@ -1846,22 +1874,22 @@ ssize_t core_alua_store_offline_bit(
 	unsigned long tmp;
 	int ret;
 
-	if (!(lun->lun_sep))
+	if (!lun->lun_sep)
 		return -ENODEV;
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+		pr_err("Unable to extract alua_tg_pt_offline value\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
-		printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
 				tmp);
 		return -EINVAL;
 	}
 	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem)) {
-		printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+	if (!tg_pt_gp_mem) {
+		pr_err("Unable to locate *tg_pt_gp_mem\n");
 		return -EINVAL;
 	}
 
@@ -1890,13 +1918,13 @@ ssize_t core_alua_store_secondary_status(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+		pr_err("Unable to extract alua_tg_pt_status\n");
 		return -EINVAL;
 	}
 	if ((tmp != ALUA_STATUS_NONE) &&
 	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
 	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
-		printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
 				tmp);
 		return -EINVAL;
 	}
@@ -1923,11 +1951,11 @@ ssize_t core_alua_store_secondary_write_metadata(
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+		pr_err("Unable to extract alua_tg_pt_write_md\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
-		printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+		pr_err("Illegal value for alua_tg_pt_write_md:"
 				" %lu\n", tmp);
 		return -EINVAL;
 	}
@@ -1939,7 +1967,7 @@ ssize_t core_alua_store_secondary_write_metadata(
 int core_setup_alua(struct se_device *dev, int force_pt)
 {
 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua *alua = &su_dev->t10_alua;
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 	/*
 	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
@@ -1947,44 +1975,44 @@ int core_setup_alua(struct se_device *dev, int force_pt)
 	 * cause a problem because libata and some SATA RAID HBAs appear
 	 * under Linux/SCSI, but emulate SCSI logic themselves.
 	 */
-	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
-	    !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
 		alua->alua_type = SPC_ALUA_PASSTHROUGH;
 		alua->alua_state_check = &core_alua_state_check_nop;
-		printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
-			" emulation\n", TRANSPORT(dev)->name);
+		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+			" emulation\n", dev->transport->name);
 		return 0;
 	}
 	/*
 	 * If SPC-3 or above is reported by real or emulated struct se_device,
 	 * use emulated ALUA.
 	 */
-	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
-		printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
-			" device\n", TRANSPORT(dev)->name);
+	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
+		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
+			" device\n", dev->transport->name);
 		/*
 		 * Associate this struct se_device with the default ALUA
 		 * LUN Group.
 		 */
 		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
-		if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
-			return -1;
+		if (IS_ERR(lu_gp_mem))
+			return PTR_ERR(lu_gp_mem);
 
 		alua->alua_type = SPC3_ALUA_EMULATED;
 		alua->alua_state_check = &core_alua_state_check;
 		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 		__core_alua_attach_lu_gp_mem(lu_gp_mem,
-				se_global->default_lu_gp);
+				default_lu_gp);
 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
 
-		printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+		pr_debug("%s: Adding to default ALUA LU Group:"
 			" core/alua/lu_gps/default_lu_gp\n",
-			TRANSPORT(dev)->name);
+			dev->transport->name);
 	} else {
 		alua->alua_type = SPC2_ALUA_DISABLED;
 		alua->alua_state_check = &core_alua_state_check_nop;
-		printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
-			" device\n", TRANSPORT(dev)->name);
+		pr_debug("%s: Disabling ALUA Emulation for SPC-2"
+			" device\n", dev->transport->name);
 	}
 
 	return 0;
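The ALUA changes above stop reading the old T_TASK(cmd)->t_task_buf kernel buffer and instead map the first page of the command's data scatterlist with transport_kmap_first_data_page(). That is why every early return in core_emulate_set_target_port_groups() is rewritten as goto out: the mapping taken at the top must be released on every exit path. A hedged sketch of that discipline; my_emulate_op() is a hypothetical handler, not a function from this commit:

static int my_emulate_op(struct se_cmd *cmd)
{
	unsigned char *buf;
	int rc = 0;

	buf = transport_kmap_first_data_page(cmd);	/* kmap of first data SG page */

	if (cmd->data_length < 4) {
		rc = PYX_TRANSPORT_INVALID_CDB_FIELD;
		goto out;			/* never return with the page mapped */
	}
	buf[0] = 0;				/* ...fill in the response payload... */
out:
	transport_kunmap_first_data_page(cmd);
	return rc;
}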
*/ +#include <linux/kernel.h> #include <asm/unaligned.h> #include <scsi/scsi.h> @@ -64,20 +65,22 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf) static int target_emulate_inquiry_std(struct se_cmd *cmd) { - struct se_lun *lun = SE_LUN(cmd); - struct se_device *dev = SE_DEV(cmd); - unsigned char *buf = cmd->t_task->t_task_buf; + struct se_lun *lun = cmd->se_lun; + struct se_device *dev = cmd->se_dev; + unsigned char *buf; /* * Make sure we at least have 6 bytes of INQUIRY response * payload going back for EVPD=0 */ if (cmd->data_length < 6) { - printk(KERN_ERR "SCSI Inquiry payload length: %u" + pr_err("SCSI Inquiry payload length: %u" " too small for EVPD=0\n", cmd->data_length); - return -1; + return -EINVAL; } + buf = transport_kmap_first_data_page(cmd); + buf[0] = dev->transport->get_device_type(dev); if (buf[0] == TYPE_TAPE) buf[1] = 0x80; @@ -86,12 +89,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd) /* * Enable SCCS and TPGS fields for Emulated ALUA */ - if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED) + if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) target_fill_alua_data(lun->lun_sep, buf); if (cmd->data_length < 8) { buf[4] = 1; /* Set additional length to 1 */ - return 0; + goto out; } buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ @@ -102,40 +105,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd) */ if (cmd->data_length < 36) { buf[4] = 3; /* Set additional length to 3 */ - return 0; + goto out; } snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); snprintf((unsigned char *)&buf[16], 16, "%s", - &DEV_T10_WWN(dev)->model[0]); + &dev->se_sub_dev->t10_wwn.model[0]); snprintf((unsigned char *)&buf[32], 4, "%s", - &DEV_T10_WWN(dev)->revision[0]); + &dev->se_sub_dev->t10_wwn.revision[0]); buf[4] = 31; /* Set additional length to 31 */ - return 0; -} - -/* supported vital product data pages */ -static int -target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) -{ - buf[1] = 0x00; - if (cmd->data_length < 8) - return 0; - - buf[4] = 0x0; - /* - * Only report the INQUIRY EVPD=1 pages after a valid NAA - * Registered Extended LUN WWN has been set via ConfigFS - * during device creation/restart. 
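
As an aside for readers following target_emulate_inquiry_std() above: the 36-byte standard INQUIRY payload it builds has a fixed layout, with the peripheral device type in byte 0, ADDITIONAL LENGTH in byte 4, the Sync and CmdQue flags in byte 7, and the vendor/model/revision strings at offsets 8, 16 and 32. A minimal userspace sketch of that layout, with placeholder identity strings rather than anything taken from this patch:

#include <stdio.h>
#include <string.h>

/* Fill a 36-byte standard INQUIRY response (SPC-3 layout). */
static void fill_std_inquiry(unsigned char *buf, unsigned char dev_type)
{
	memset(buf, 0, 36);
	buf[0] = dev_type;			/* PERIPHERAL DEVICE TYPE */
	buf[4] = 31;				/* ADDITIONAL LENGTH: bytes 5..35 */
	buf[7] = 0x32;				/* Sync=1 and CmdQue=1 */
	memcpy(&buf[8], "VENDOR  ", 8);		/* T10 VENDOR IDENTIFICATION */
	memcpy(&buf[16], "PLACEHOLDER MODL", 16); /* PRODUCT IDENTIFICATION */
	memcpy(&buf[32], "0001", 4);		/* PRODUCT REVISION LEVEL */
}

int main(void)
{
	unsigned char buf[36];
	int i;

	fill_std_inquiry(buf, 0x00);		/* 0x00: direct-access device */
	for (i = 0; i < 36; i++)
		printf("%02x%c", buf[i], (i + 1) % 12 ? ' ' : '\n');
	return 0;
}
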
- */ - if (SE_DEV(cmd)->se_sub_dev->su_dev_flags & - SDF_EMULATED_VPD_UNIT_SERIAL) { - buf[3] = 3; - buf[5] = 0x80; - buf[6] = 0x83; - buf[7] = 0x86; - } +out: + transport_kunmap_first_data_page(cmd); return 0; } @@ -143,16 +124,15 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; u16 len = 0; - buf[1] = 0x80; if (dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL) { u32 unit_serial_len; unit_serial_len = - strlen(&DEV_T10_WWN(dev)->unit_serial[0]); + strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); unit_serial_len++; /* For NULL Terminator */ if (((len + 4) + unit_serial_len) > cmd->data_length) { @@ -162,7 +142,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) return 0; } len += sprintf((unsigned char *)&buf[4], "%s", - &DEV_T10_WWN(dev)->unit_serial[0]); + &dev->se_sub_dev->t10_wwn.unit_serial[0]); len++; /* Extra Byte for NULL Terminator */ buf[3] = len; } @@ -176,21 +156,18 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = SE_DEV(cmd); - struct se_lun *lun = SE_LUN(cmd); + struct se_device *dev = cmd->se_dev; + struct se_lun *lun = cmd->se_lun; struct se_port *port = NULL; struct se_portal_group *tpg = NULL; struct t10_alua_lu_gp_member *lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; - unsigned char binary, binary_new; - unsigned char *prod = &DEV_T10_WWN(dev)->model[0]; + unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0]; u32 prod_len; u32 unit_serial_len, off = 0; - int i; u16 len = 0, id_len; - buf[1] = 0x83; off = 4; /* @@ -210,11 +187,11 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) /* CODE SET == Binary */ buf[off++] = 0x1; - /* Set ASSOICATION == addressed logical unit: 0)b */ + /* Set ASSOCIATION == addressed logical unit: 0)b */ buf[off] = 0x00; /* Identifier/Designator type == NAA identifier */ - buf[off++] = 0x3; + buf[off++] |= 0x3; off++; /* Identifier/Designator length */ @@ -237,16 +214,9 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) * VENDOR_SPECIFIC_IDENTIFIER and * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION */ - binary = transport_asciihex_to_binaryhex( - &DEV_T10_WWN(dev)->unit_serial[0]); - buf[off++] |= (binary & 0xf0) >> 4; - for (i = 0; i < 24; i += 2) { - binary_new = transport_asciihex_to_binaryhex( - &DEV_T10_WWN(dev)->unit_serial[i+2]); - buf[off] = (binary & 0x0f) << 4; - buf[off++] |= (binary_new & 0xf0) >> 4; - binary = binary_new; - } + buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); + hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); + len = 20; off = (len + 4); @@ -263,7 +233,7 @@ check_t10_vend_desc: if (dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL) { unit_serial_len = - strlen(&DEV_T10_WWN(dev)->unit_serial[0]); + strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); unit_serial_len++; /* For NULL Terminator */ if ((len + (id_len + 4) + @@ -274,7 +244,7 @@ check_t10_vend_desc: } id_len += sprintf((unsigned char *)&buf[off+12], "%s:%s", prod, - &DEV_T10_WWN(dev)->unit_serial[0]); + &dev->se_sub_dev->t10_wwn.unit_serial[0]); } buf[off] = 0x2; /* ASCII */ buf[off+1] = 0x1; /* T10 Vendor ID */ @@ -312,10 +282,10 @@ check_port: goto check_tpgi; } buf[off] = - (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); + 
(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); buf[off++] |= 0x1; /* CODE SET == Binary */ buf[off] = 0x80; /* Set PIV=1 */ - /* Set ASSOICATION == target port: 01b */ + /* Set ASSOCIATION == target port: 01b */ buf[off] |= 0x10; /* DESIGNATOR TYPE == Relative target port identifer */ buf[off++] |= 0x4; @@ -335,7 +305,7 @@ check_port: * section 7.5.1 Table 362 */ check_tpgi: - if (T10_ALUA(dev->se_sub_dev)->alua_type != + if (dev->se_sub_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) goto check_scsi_name; @@ -349,7 +319,7 @@ check_tpgi: spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; - if (!(tg_pt_gp)) { + if (!tg_pt_gp) { spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); goto check_lu_gp; } @@ -357,10 +327,10 @@ check_tpgi: spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); buf[off] = - (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); + (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); buf[off++] |= 0x1; /* CODE SET == Binary */ buf[off] = 0x80; /* Set PIV=1 */ - /* Set ASSOICATION == target port: 01b */ + /* Set ASSOCIATION == target port: 01b */ buf[off] |= 0x10; /* DESIGNATOR TYPE == Target port group identifier */ buf[off++] |= 0x5; @@ -380,12 +350,12 @@ check_lu_gp: goto check_scsi_name; } lu_gp_mem = dev->dev_alua_lu_gp_mem; - if (!(lu_gp_mem)) + if (!lu_gp_mem) goto check_scsi_name; spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; - if (!(lu_gp)) { + if (!lu_gp) { spin_unlock(&lu_gp_mem->lu_gp_mem_lock); goto check_scsi_name; } @@ -409,7 +379,7 @@ check_lu_gp: * section 7.5.1 Table 362 */ check_scsi_name: - scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg)); + scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg)); /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ scsi_name_len += 10; /* Check for 4-byte padding */ @@ -424,10 +394,10 @@ check_scsi_name: goto set_len; } buf[off] = - (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); + (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); buf[off++] |= 0x3; /* CODE SET == UTF-8 */ buf[off] = 0x80; /* Set PIV=1 */ - /* Set ASSOICATION == target port: 01b */ + /* Set ASSOCIATION == target port: 01b */ buf[off] |= 0x10; /* DESIGNATOR TYPE == SCSI name string */ buf[off++] |= 0x8; @@ -438,9 +408,9 @@ check_scsi_name: * Target Port, this means "<iSCSI name>,t,0x<TPGT> in * UTF-8 encoding. */ - tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); + tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x", - TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt); + tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt); scsi_name_len += 1 /* Include NULL terminator */; /* * The null-terminated, null-padded (see 4.4.2) SCSI @@ -471,13 +441,12 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) if (cmd->data_length < 60) return 0; - buf[1] = 0x86; buf[2] = 0x3c; /* Set HEADSUP, ORDSUP, SIMPSUP */ buf[5] = 0x07; /* If WriteCache emulation is enabled, set V_SUP */ - if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0) + if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) buf[6] = 0x01; return 0; } @@ -486,7 +455,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; int have_tp = 0; /* @@ -494,27 +463,29 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) * emulate_tpu=1 or emulate_tpws=1 we will be expect a * different page length for Thin Provisioning. 
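
The hunk continuing below fills the Block Limits VPD page (0xb0); its multi-byte fields are big-endian on the wire, which is what the put_unaligned_be32() calls are for. A self-contained sketch of the same encoding, with made-up limits; note that after this patch the PAGE CODE byte is written once by the common INQUIRY dispatch rather than by each handler:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's put_unaligned_be32(). */
static void put_be32(uint32_t val, unsigned char *p)
{
	p[0] = val >> 24;
	p[1] = val >> 16;
	p[2] = val >> 8;
	p[3] = val;
}

/* Encode the Block Limits VPD (0xb0) fields shown in the hunk. */
static void fill_block_limits(unsigned char *buf, int have_tp,
			      uint32_t max_sectors, uint32_t optimal_sectors)
{
	buf[1] = 0xb0;				/* PAGE CODE */
	buf[3] = have_tp ? 0x3c : 0x10;		/* PAGE LENGTH */
	buf[4] = 0x01;				/* WSNZ bit */
	put_be32(max_sectors, &buf[8]);		/* MAXIMUM TRANSFER LENGTH */
	put_be32(optimal_sectors, &buf[12]);	/* OPTIMAL TRANSFER LENGTH */
}

int main(void)
{
	unsigned char buf[64] = { 0 };

	fill_block_limits(buf, 1, 1024, 128);	/* made-up limits */
	printf("bytes 8..11: %02x %02x %02x %02x\n",
	       buf[8], buf[9], buf[10], buf[11]);
	return 0;
}
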
*/ - if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) have_tp = 1; if (cmd->data_length < (0x10 + 4)) { - printk(KERN_INFO "Received data_length: %u" + pr_debug("Received data_length: %u" " too small for EVPD 0xb0\n", cmd->data_length); - return -1; + return -EINVAL; } if (have_tp && cmd->data_length < (0x3c + 4)) { - printk(KERN_INFO "Received data_length: %u" + pr_debug("Received data_length: %u" " too small for TPE=1 EVPD 0xb0\n", cmd->data_length); have_tp = 0; } buf[0] = dev->transport->get_device_type(dev); - buf[1] = 0xb0; buf[3] = have_tp ? 0x3c : 0x10; + /* Set WSNZ to 1 */ + buf[4] = 0x01; + /* * Set OPTIMAL TRANSFER LENGTH GRANULARITY */ @@ -523,12 +494,12 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set MAXIMUM TRANSFER LENGTH */ - put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]); + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]); /* * Set OPTIMAL TRANSFER LENGTH */ - put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]); + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]); /* * Exit now if we don't support TP or the initiator sent a too @@ -540,35 +511,51 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set MAXIMUM UNMAP LBA COUNT */ - put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]); + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]); /* * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT */ - put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count, + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count, &buf[24]); /* * Set OPTIMAL UNMAP GRANULARITY */ - put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]); + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]); /* * UNMAP GRANULARITY ALIGNMENT */ - put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment, + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment, &buf[32]); - if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0) + if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0) buf[32] |= 0x80; /* Set the UGAVALID bit */ return 0; } +/* Block Device Characteristics VPD page */ +static int +target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + + buf[0] = dev->transport->get_device_type(dev); + buf[3] = 0x3c; + + if (cmd->data_length >= 5 && + dev->se_sub_dev->se_dev_attrib.is_nonrot) + buf[5] = 1; + + return 0; +} + /* Thin Provisioning VPD */ static int target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; /* * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: @@ -579,7 +566,6 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) * defined in table 162. */ buf[0] = dev->transport->get_device_type(dev); - buf[1] = 0xb2; /* * Set Hardcoded length mentioned above for DP=0 @@ -602,7 +588,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) * the UNMAP command (see 5.25). A TPU bit set to zero indicates * that the device server does not support the UNMAP command. 
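
A few hunks below, the open-coded switch on the requested VPD page is replaced by a static evpd_handlers[] table that serves double duty: the page-0x00 handler lists the supported page codes straight out of the table, and target_emulate_inquiry() walks the same table to dispatch. The shape of that pattern, reduced to standalone C with two dummy handlers:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Dummy handlers standing in for the kernel's per-page emulation. */
static int emulate_page_80(unsigned char *buf) { buf[4] = 'S'; return 0; }
static int emulate_page_83(unsigned char *buf) { buf[4] = 'D'; return 0; }

/* One table drives both the supported-pages listing and the dispatch. */
static const struct {
	uint8_t page;
	int (*emulate)(unsigned char *);
} handlers[] = {
	{ .page = 0x80, .emulate = emulate_page_80 },
	{ .page = 0x83, .emulate = emulate_page_83 },
};

static int dispatch(uint8_t page, unsigned char *buf)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
		if (handlers[i].page != page)
			continue;
		buf[1] = page;		/* PAGE CODE set once, centrally */
		return handlers[i].emulate(buf);
	}
	return -1;			/* unknown VPD page */
}

int main(void)
{
	unsigned char buf[16] = { 0 };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(handlers); i++)
		printf("supported page 0x%02x\n", handlers[i].page);
	if (dispatch(0x83, buf) == 0)
		printf("dispatched to page 0x%02x handler\n", buf[1]);
	return 0;
}
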
*/ - if (DEV_ATTRIB(dev)->emulate_tpu != 0) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0) buf[5] = 0x80; /* @@ -611,18 +597,59 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) * A TPWS bit set to zero indicates that the device server does not * support the use of the WRITE SAME (16) command to unmap LBAs. */ - if (DEV_ATTRIB(dev)->emulate_tpws != 0) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0) buf[5] |= 0x40; return 0; } static int +target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf); + +static struct { + uint8_t page; + int (*emulate)(struct se_cmd *, unsigned char *); +} evpd_handlers[] = { + { .page = 0x00, .emulate = target_emulate_evpd_00 }, + { .page = 0x80, .emulate = target_emulate_evpd_80 }, + { .page = 0x83, .emulate = target_emulate_evpd_83 }, + { .page = 0x86, .emulate = target_emulate_evpd_86 }, + { .page = 0xb0, .emulate = target_emulate_evpd_b0 }, + { .page = 0xb1, .emulate = target_emulate_evpd_b1 }, + { .page = 0xb2, .emulate = target_emulate_evpd_b2 }, +}; + +/* supported vital product data pages */ +static int +target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) +{ + int p; + + if (cmd->data_length < 8) + return 0; + /* + * Only report the INQUIRY EVPD=1 pages after a valid NAA + * Registered Extended LUN WWN has been set via ConfigFS + * during device creation/restart. + */ + if (cmd->se_dev->se_sub_dev->su_dev_flags & + SDF_EMULATED_VPD_UNIT_SERIAL) { + buf[3] = ARRAY_SIZE(evpd_handlers); + for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers), + cmd->data_length - 4); ++p) + buf[p + 4] = evpd_handlers[p].page; + } + + return 0; +} + +static int target_emulate_inquiry(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); - unsigned char *buf = cmd->t_task->t_task_buf; - unsigned char *cdb = cmd->t_task->t_task_cdb; + struct se_device *dev = cmd->se_dev; + unsigned char *buf; + unsigned char *cdb = cmd->t_task_cdb; + int p, ret; if (!(cdb[1] & 0x1)) return target_emulate_inquiry_std(cmd); @@ -635,38 +662,33 @@ target_emulate_inquiry(struct se_cmd *cmd) * payload length left for the next outgoing EVPD metadata */ if (cmd->data_length < 4) { - printk(KERN_ERR "SCSI Inquiry payload length: %u" + pr_err("SCSI Inquiry payload length: %u" " too small for EVPD=1\n", cmd->data_length); - return -1; + return -EINVAL; } + + buf = transport_kmap_first_data_page(cmd); + buf[0] = dev->transport->get_device_type(dev); - switch (cdb[2]) { - case 0x00: - return target_emulate_evpd_00(cmd, buf); - case 0x80: - return target_emulate_evpd_80(cmd, buf); - case 0x83: - return target_emulate_evpd_83(cmd, buf); - case 0x86: - return target_emulate_evpd_86(cmd, buf); - case 0xb0: - return target_emulate_evpd_b0(cmd, buf); - case 0xb2: - return target_emulate_evpd_b2(cmd, buf); - default: - printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); - return -1; - } + for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) + if (cdb[2] == evpd_handlers[p].page) { + buf[1] = cdb[2]; + ret = evpd_handlers[p].emulate(cmd, buf); + transport_kunmap_first_data_page(cmd); + return ret; + } - return 0; + transport_kunmap_first_data_page(cmd); + pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); + return -EINVAL; } static int target_emulate_readcapacity(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); - unsigned char *buf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + unsigned char *buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); u32 blocks; @@ -675,30 +697,36 @@ target_emulate_readcapacity(struct 
se_cmd *cmd) else blocks = (u32)blocks_long; + buf = transport_kmap_first_data_page(cmd); + buf[0] = (blocks >> 24) & 0xff; buf[1] = (blocks >> 16) & 0xff; buf[2] = (blocks >> 8) & 0xff; buf[3] = blocks & 0xff; - buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; - buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; - buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; - buf[7] = DEV_ATTRIB(dev)->block_size & 0xff; + buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; + buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; + buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; + buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; /* * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 */ - if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) put_unaligned_be32(0xFFFFFFFF, &buf[0]); + transport_kunmap_first_data_page(cmd); + return 0; } static int target_emulate_readcapacity_16(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); - unsigned char *buf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + unsigned char *buf; unsigned long long blocks = dev->transport->get_blocks(dev); + buf = transport_kmap_first_data_page(cmd); + buf[0] = (blocks >> 56) & 0xff; buf[1] = (blocks >> 48) & 0xff; buf[2] = (blocks >> 40) & 0xff; @@ -707,17 +735,19 @@ target_emulate_readcapacity_16(struct se_cmd *cmd) buf[5] = (blocks >> 16) & 0xff; buf[6] = (blocks >> 8) & 0xff; buf[7] = blocks & 0xff; - buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; - buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; - buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; - buf[11] = DEV_ATTRIB(dev)->block_size & 0xff; + buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; + buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; + buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; + buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; /* * Set Thin Provisioning Enable bit following sbc3r22 in section * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. */ - if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) buf[14] = 0x80; + transport_kunmap_first_data_page(cmd); + return 0; } @@ -737,6 +767,35 @@ target_modesense_control(struct se_device *dev, unsigned char *p) p[1] = 0x0a; p[2] = 2; /* + * From spc4r23, 7.4.7 Control mode page + * + * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies + * restrictions on the algorithm used for reordering commands + * having the SIMPLE task attribute (see SAM-4). + * + * Table 368 -- QUEUE ALGORITHM MODIFIER field + * Code Description + * 0h Restricted reordering + * 1h Unrestricted reordering allowed + * 2h to 7h Reserved + * 8h to Fh Vendor specific + * + * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that + * the device server shall order the processing sequence of commands + * having the SIMPLE task attribute such that data integrity is maintained + * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol + * requests is halted at any time, the final value of all data observable + * on the medium shall be the same as if all the commands had been processed + * with the ORDERED task attribute). 
+ * + * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the + * device server may reorder the processing sequence of commands having the + * SIMPLE task attribute in any manner. Any data integrity exposures related to + * command sequence order shall be explicitly handled by the application client + * through the selection of appropriate commands and task attributes. + */ + p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10; + /* * From spc4r17, section 7.4.6 Control mode Page * * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b @@ -765,8 +824,8 @@ target_modesense_control(struct se_device *dev, unsigned char *p) * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless * to the number of commands completed with one of those status codes. */ - p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 : - (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; + p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 : + (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; /* * From spc4r17, section 7.4.6 Control mode Page * @@ -779,7 +838,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p) * which the command was received shall be completed with TASK ABORTED * status (see SAM-4). */ - p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00; + p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00; p[8] = 0xff; p[9] = 0xff; p[11] = 30; @@ -792,7 +851,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p) { p[0] = 0x08; p[1] = 0x12; - if (DEV_ATTRIB(dev)->emulate_write_cache > 0) + if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) p[2] = 0x04; /* Write Cache Enable */ p[12] = 0x20; /* Disabled Read Ahead */ @@ -830,9 +889,9 @@ target_modesense_dpofua(unsigned char *buf, int type) static int target_emulate_modesense(struct se_cmd *cmd, int ten) { - struct se_device *dev = SE_DEV(cmd); - char *cdb = cmd->t_task->t_task_cdb; - unsigned char *rbuf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + char *cdb = cmd->t_task_cdb; + unsigned char *rbuf; int type = dev->transport->get_device_type(dev); int offset = (ten) ? 
8 : 4; int length = 0; @@ -856,7 +915,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) length += target_modesense_control(dev, &buf[offset+length]); break; default: - printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n", + pr_err("Got Unknown Mode Page: 0x%02x\n", cdb[2] & 0x3f); return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; } @@ -867,13 +926,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) buf[0] = (offset >> 8) & 0xff; buf[1] = offset & 0xff; - if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || + if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || (cmd->se_deve && (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) target_modesense_write_protect(&buf[3], type); - if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && - (DEV_ATTRIB(dev)->emulate_fua_write > 0)) + if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && + (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) target_modesense_dpofua(&buf[3], type); if ((offset + 2) > cmd->data_length) @@ -883,19 +942,22 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) offset -= 1; buf[0] = offset & 0xff; - if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || + if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || (cmd->se_deve && (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) target_modesense_write_protect(&buf[2], type); - if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && - (DEV_ATTRIB(dev)->emulate_fua_write > 0)) + if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && + (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) target_modesense_dpofua(&buf[2], type); if ((offset + 1) > cmd->data_length) offset = cmd->data_length; } + + rbuf = transport_kmap_first_data_page(cmd); memcpy(rbuf, buf, offset); + transport_kunmap_first_data_page(cmd); return 0; } @@ -903,16 +965,20 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) static int target_emulate_request_sense(struct se_cmd *cmd) { - unsigned char *cdb = cmd->t_task->t_task_cdb; - unsigned char *buf = cmd->t_task->t_task_buf; + unsigned char *cdb = cmd->t_task_cdb; + unsigned char *buf; u8 ua_asc = 0, ua_ascq = 0; + int err = 0; if (cdb[1] & 0x01) { - printk(KERN_ERR "REQUEST_SENSE description emulation not" + pr_err("REQUEST_SENSE description emulation not" " supported\n"); return PYX_TRANSPORT_INVALID_CDB_FIELD; } - if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) { + + buf = transport_kmap_first_data_page(cmd); + + if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { /* * CURRENT ERROR, UNIT ATTENTION */ @@ -924,7 +990,8 @@ target_emulate_request_sense(struct se_cmd *cmd) */ if (cmd->data_length <= 18) { buf[7] = 0x00; - return 0; + err = -EINVAL; + goto end; } /* * The Additional Sense Code (ASC) from the UNIT ATTENTION @@ -944,7 +1011,8 @@ target_emulate_request_sense(struct se_cmd *cmd) */ if (cmd->data_length <= 18) { buf[7] = 0x00; - return 0; + err = -EINVAL; + goto end; } /* * NO ADDITIONAL SENSE INFORMATION @@ -953,6 +1021,9 @@ target_emulate_request_sense(struct se_cmd *cmd) buf[7] = 0x0A; } +end: + transport_kunmap_first_data_page(cmd); + return 0; } @@ -963,13 +1034,13 @@ target_emulate_request_sense(struct se_cmd *cmd) static int target_emulate_unmap(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); - struct se_device *dev = SE_DEV(cmd); - unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL; - unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = 
cmd->se_dev; + unsigned char *buf, *ptr = NULL; + unsigned char *cdb = &cmd->t_task_cdb[0]; sector_t lba; unsigned int size = cmd->data_length, range; - int ret, offset; + int ret = 0, offset; unsigned short dl, bd_dl; /* First UNMAP block descriptor starts at 8 byte offset */ @@ -977,21 +1048,24 @@ target_emulate_unmap(struct se_task *task) size -= 8; dl = get_unaligned_be16(&cdb[0]); bd_dl = get_unaligned_be16(&cdb[2]); + + buf = transport_kmap_first_data_page(cmd); + ptr = &buf[offset]; - printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" + pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); while (size) { lba = get_unaligned_be64(&ptr[0]); range = get_unaligned_be32(&ptr[8]); - printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n", + pr_debug("UNMAP: Using lba: %llu and range: %u\n", (unsigned long long)lba, range); ret = dev->transport->do_discard(dev, lba, range); if (ret < 0) { - printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", + pr_err("blkdev_issue_discard() failed: %d\n", ret); - return -1; + goto err; } ptr += 16; @@ -1000,7 +1074,10 @@ target_emulate_unmap(struct se_task *task) task->task_scsi_status = GOOD; transport_complete_task(task, 1); - return 0; +err: + transport_kunmap_first_data_page(cmd); + + return ret; } /* @@ -1008,23 +1085,36 @@ * Note this is not used for TCM/pSCSI passthrough */ static int -target_emulate_write_same(struct se_task *task) +target_emulate_write_same(struct se_task *task, int write_same32) { - struct se_cmd *cmd = TASK_CMD(task); - struct se_device *dev = SE_DEV(cmd); - sector_t lba = cmd->t_task->t_task_lba; - unsigned int range; + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = cmd->se_dev; + sector_t range; + sector_t lba = cmd->t_task_lba; + unsigned int num_blocks; int ret; + /* + * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explicit + * range when non zero is supplied, otherwise calculate the remaining + * range based on ->get_blocks() - starting LBA. 
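
The comment above describes the range calculation in the hunk that follows: WRITE_SAME(16) carries its NUMBER OF LOGICAL BLOCKS as a big-endian 32-bit value at CDB byte 10, WRITE_SAME(32) at byte 28, and a value of zero is treated as "through the last LBA of the device". A standalone sketch of that calculation, with get_be32() standing in for the kernel's get_unaligned_be32():

#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Number of blocks to act on, per the WRITE_SAME handling above. */
static uint64_t write_same_range(const unsigned char *cdb, int write_same32,
				 uint64_t lba, uint64_t dev_blocks)
{
	uint32_t num_blocks;

	if (write_same32)
		num_blocks = get_be32(&cdb[28]);	/* WRITE_SAME_32 */
	else
		num_blocks = get_be32(&cdb[10]);	/* WRITE_SAME_16 */

	return num_blocks ? num_blocks : dev_blocks - lba;
}

int main(void)
{
	unsigned char cdb[32] = { 0 };	/* NUMBER OF LOGICAL BLOCKS == 0 */

	printf("range: %llu blocks\n", (unsigned long long)
	       write_same_range(cdb, 0, 100, 1000));	/* prints 900 */
	return 0;
}
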
+ */ + if (write_same32) + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); + else + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); - range = (cmd->data_length / DEV_ATTRIB(dev)->block_size); + if (num_blocks != 0) + range = num_blocks; + else + range = (dev->transport->get_blocks(dev) - lba); - printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n", - (unsigned long long)lba, range); + pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", + (unsigned long long)lba, (unsigned long long)range); ret = dev->transport->do_discard(dev, lba, range); if (ret < 0) { - printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); - return -1; + pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); + return ret; } task->task_scsi_status = GOOD; @@ -1035,12 +1125,12 @@ target_emulate_write_same(struct se_task *task) int transport_emulate_control_cdb(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); - struct se_device *dev = SE_DEV(cmd); + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = cmd->se_dev; unsigned short service_action; int ret = 0; - switch (cmd->t_task->t_task_cdb[0]) { + switch (cmd->t_task_cdb[0]) { case INQUIRY: ret = target_emulate_inquiry(cmd); break; @@ -1054,13 +1144,13 @@ transport_emulate_control_cdb(struct se_task *task) ret = target_emulate_modesense(cmd, 1); break; case SERVICE_ACTION_IN: - switch (cmd->t_task->t_task_cdb[1] & 0x1f) { + switch (cmd->t_task_cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: ret = target_emulate_readcapacity_16(cmd); break; default: - printk(KERN_ERR "Unsupported SA: 0x%02x\n", - cmd->t_task->t_task_cdb[1] & 0x1f); + pr_err("Unsupported SA: 0x%02x\n", + cmd->t_task_cdb[1] & 0x1f); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } break; @@ -1069,7 +1159,7 @@ transport_emulate_control_cdb(struct se_task *task) break; case UNMAP: if (!dev->transport->do_discard) { - printk(KERN_ERR "UNMAP emulation not supported for: %s\n", + pr_err("UNMAP emulation not supported for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } @@ -1077,27 +1167,27 @@ transport_emulate_control_cdb(struct se_task *task) break; case WRITE_SAME_16: if (!dev->transport->do_discard) { - printk(KERN_ERR "WRITE_SAME_16 emulation not supported" + pr_err("WRITE_SAME_16 emulation not supported" " for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } - ret = target_emulate_write_same(task); + ret = target_emulate_write_same(task, 0); break; case VARIABLE_LENGTH_CMD: service_action = - get_unaligned_be16(&cmd->t_task->t_task_cdb[8]); + get_unaligned_be16(&cmd->t_task_cdb[8]); switch (service_action) { case WRITE_SAME_32: if (!dev->transport->do_discard) { - printk(KERN_ERR "WRITE_SAME_32 SA emulation not" + pr_err("WRITE_SAME_32 SA emulation not" " supported for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } - ret = target_emulate_write_same(task); + ret = target_emulate_write_same(task, 1); break; default: - printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:" + pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" " 0x%02x\n", service_action); break; } @@ -1105,8 +1195,7 @@ transport_emulate_control_cdb(struct se_task *task) case SYNCHRONIZE_CACHE: case 0x91: /* SYNCHRONIZE_CACHE_16: */ if (!dev->transport->do_sync_cache) { - printk(KERN_ERR - "SYNCHRONIZE_CACHE emulation not supported" + pr_err("SYNCHRONIZE_CACHE emulation not supported" " for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } @@ -1123,8 +1212,8 @@ transport_emulate_control_cdb(struct 
se_task *task) case WRITE_FILEMARKS: break; default: - printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", - cmd->t_task->t_task_cdb[0], dev->transport->name); + pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n", + cmd->t_task_cdb[0], dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 25c1f49a7d8..b2575d8568c 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -37,6 +37,7 @@ #include <linux/parser.h> #include <linux/syscalls.h> #include <linux/configfs.h> +#include <linux/spinlock.h> #include <target/target_core_base.h> #include <target/target_core_device.h> @@ -52,6 +53,8 @@ #include "target_core_rd.h" #include "target_core_stat.h" +extern struct t10_alua_lu_gp *default_lu_gp; + static struct list_head g_tf_list; static struct mutex g_tf_lock; @@ -61,6 +64,13 @@ struct target_core_configfs_attribute { ssize_t (*store)(void *, const char *, size_t); }; +static struct config_group target_core_hbagroup; +static struct config_group alua_group; +static struct config_group alua_lu_gps_group; + +static DEFINE_SPINLOCK(se_device_lock); +static LIST_HEAD(se_dev_list); + static inline struct se_hba * item_to_hba(struct config_item *item) { @@ -94,12 +104,12 @@ static struct target_fabric_configfs *target_core_get_fabric( { struct target_fabric_configfs *tf; - if (!(name)) + if (!name) return NULL; mutex_lock(&g_tf_lock); list_for_each_entry(tf, &g_tf_list, tf_list) { - if (!(strcmp(tf->tf_name, name))) { + if (!strcmp(tf->tf_name, name)) { atomic_inc(&tf->tf_access_cnt); mutex_unlock(&g_tf_lock); return tf; @@ -120,7 +130,7 @@ static struct config_group *target_core_register_fabric( struct target_fabric_configfs *tf; int ret; - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:" + pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" " %s\n", group, name); /* * Ensure that TCM subsystem plugins are loaded at this point for @@ -140,7 +150,7 @@ static struct config_group *target_core_register_fabric( * registered, but simply provids auto loading logic for modules with * mkdir(2) system calls with known TCM fabric modules. 
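
The comment above refers to the mkdir(2)-driven autoloading in the code that follows: creating a group with a recognized fabric name triggers request_module() for the matching fabric module before the lookup proceeds. Stripped of the configfs plumbing, the mapping is a simple prefix match (module names as in the hunks below):

#include <stdio.h>
#include <string.h>

/* Map a fabric directory name to the module to request_module(). */
static const char *fabric_module_for(const char *name)
{
	if (!strncmp(name, "iscsi", 5))
		return "iscsi_target_mod";
	if (!strncmp(name, "loopback", 8))
		return "tcm_loop";
	return NULL;	/* no autoload; fabric must already be registered */
}

int main(void)
{
	const char *mod = fabric_module_for("iscsi");

	printf("would request_module(\"%s\")\n", mod ? mod : "<none>");
	return 0;
}
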
*/ - if (!(strncmp(name, "iscsi", 5))) { + if (!strncmp(name, "iscsi", 5)) { /* * Automatically load the LIO Target fabric module when the * following is called: @@ -149,11 +159,11 @@ static struct config_group *target_core_register_fabric( */ ret = request_module("iscsi_target_mod"); if (ret < 0) { - printk(KERN_ERR "request_module() failed for" + pr_err("request_module() failed for" " iscsi_target_mod.ko: %d\n", ret); return ERR_PTR(-EINVAL); } - } else if (!(strncmp(name, "loopback", 8))) { + } else if (!strncmp(name, "loopback", 8)) { /* * Automatically load the tcm_loop fabric module when the * following is called: @@ -162,25 +172,25 @@ static struct config_group *target_core_register_fabric( */ ret = request_module("tcm_loop"); if (ret < 0) { - printk(KERN_ERR "request_module() failed for" + pr_err("request_module() failed for" " tcm_loop.ko: %d\n", ret); return ERR_PTR(-EINVAL); } } tf = target_core_get_fabric(name); - if (!(tf)) { - printk(KERN_ERR "target_core_get_fabric() failed for %s\n", + if (!tf) { + pr_err("target_core_get_fabric() failed for %s\n", name); return ERR_PTR(-EINVAL); } - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:" + pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" " %s\n", tf->tf_name); /* * On a successful target_core_get_fabric() look, the returned * struct target_fabric_configfs *tf will contain a usage reference. */ - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", + pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", &TF_CIT_TMPL(tf)->tfc_wwn_cit); tf->tf_group.default_groups = tf->tf_default_groups; @@ -192,14 +202,14 @@ static struct config_group *target_core_register_fabric( config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", &TF_CIT_TMPL(tf)->tfc_discovery_cit); - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" + pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" " %s\n", tf->tf_group.cg_item.ci_name); /* * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() */ tf->tf_ops.tf_subsys = tf->tf_subsys; tf->tf_fabric = &tf->tf_group.cg_item; - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" + pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" " for %s\n", name); return &tf->tf_group; @@ -218,18 +228,18 @@ static void target_core_deregister_fabric( struct config_item *df_item; int i; - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" " tf list\n", config_item_name(item)); - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:" + pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" " %s\n", tf->tf_name); atomic_dec(&tf->tf_access_cnt); - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing" + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing" " tf->tf_fabric for %s\n", tf->tf_name); tf->tf_fabric = NULL; - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci" + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" " %s\n", config_item_name(item)); tf_group = &tf->tf_group; @@ -296,23 +306,19 @@ struct target_fabric_configfs *target_fabric_configfs_init( { struct target_fabric_configfs *tf; - if (!(fabric_mod)) { - printk(KERN_ERR "Missing struct module *fabric_mod pointer\n"); - return NULL; - } if (!(name)) { - printk(KERN_ERR "Unable to locate passed fabric name\n"); - return NULL; + pr_err("Unable to locate passed fabric name\n"); + 
return ERR_PTR(-EINVAL); } if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { - printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" + pr_err("Passed name: %s exceeds TARGET_FABRIC" "_NAME_SIZE\n", name); - return NULL; + return ERR_PTR(-EINVAL); } tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); - if (!(tf)) - return NULL; + if (!tf) + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&tf->tf_list); atomic_set(&tf->tf_access_cnt, 0); @@ -330,9 +336,9 @@ struct target_fabric_configfs *target_fabric_configfs_init( list_add_tail(&tf->tf_list, &g_tf_list); mutex_unlock(&g_tf_lock); - printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" + pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" ">>>>>>>>>>>>>>\n"); - printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for" + pr_debug("Initialized struct target_fabric_configfs: %p for" " %s\n", tf, tf->tf_name); return tf; } @@ -361,140 +367,132 @@ static int target_fabric_tf_ops_check( { struct target_core_fabric_ops *tfo = &tf->tf_ops; - if (!(tfo->get_fabric_name)) { - printk(KERN_ERR "Missing tfo->get_fabric_name()\n"); - return -EINVAL; - } - if (!(tfo->get_fabric_proto_ident)) { - printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n"); - return -EINVAL; - } - if (!(tfo->tpg_get_wwn)) { - printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n"); + if (!tfo->get_fabric_name) { + pr_err("Missing tfo->get_fabric_name()\n"); return -EINVAL; } - if (!(tfo->tpg_get_tag)) { - printk(KERN_ERR "Missing tfo->tpg_get_tag()\n"); + if (!tfo->get_fabric_proto_ident) { + pr_err("Missing tfo->get_fabric_proto_ident()\n"); return -EINVAL; } - if (!(tfo->tpg_get_default_depth)) { - printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n"); + if (!tfo->tpg_get_wwn) { + pr_err("Missing tfo->tpg_get_wwn()\n"); return -EINVAL; } - if (!(tfo->tpg_get_pr_transport_id)) { - printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n"); + if (!tfo->tpg_get_tag) { + pr_err("Missing tfo->tpg_get_tag()\n"); return -EINVAL; } - if (!(tfo->tpg_get_pr_transport_id_len)) { - printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n"); + if (!tfo->tpg_get_default_depth) { + pr_err("Missing tfo->tpg_get_default_depth()\n"); return -EINVAL; } - if (!(tfo->tpg_check_demo_mode)) { - printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n"); + if (!tfo->tpg_get_pr_transport_id) { + pr_err("Missing tfo->tpg_get_pr_transport_id()\n"); return -EINVAL; } - if (!(tfo->tpg_check_demo_mode_cache)) { - printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n"); + if (!tfo->tpg_get_pr_transport_id_len) { + pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n"); return -EINVAL; } - if (!(tfo->tpg_check_demo_mode_write_protect)) { - printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n"); + if (!tfo->tpg_check_demo_mode) { + pr_err("Missing tfo->tpg_check_demo_mode()\n"); return -EINVAL; } - if (!(tfo->tpg_check_prod_mode_write_protect)) { - printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n"); + if (!tfo->tpg_check_demo_mode_cache) { + pr_err("Missing tfo->tpg_check_demo_mode_cache()\n"); return -EINVAL; } - if (!(tfo->tpg_alloc_fabric_acl)) { - printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n"); + if (!tfo->tpg_check_demo_mode_write_protect) { + pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n"); return -EINVAL; } - if (!(tfo->tpg_release_fabric_acl)) { - printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n"); + if (!tfo->tpg_check_prod_mode_write_protect) { + pr_err("Missing 
tfo->tpg_check_prod_mode_write_protect()\n"); return -EINVAL; } - if (!(tfo->tpg_get_inst_index)) { - printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n"); + if (!tfo->tpg_alloc_fabric_acl) { + pr_err("Missing tfo->tpg_alloc_fabric_acl()\n"); return -EINVAL; } - if (!(tfo->release_cmd_to_pool)) { - printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n"); + if (!tfo->tpg_release_fabric_acl) { + pr_err("Missing tfo->tpg_release_fabric_acl()\n"); return -EINVAL; } - if (!(tfo->release_cmd_direct)) { - printk(KERN_ERR "Missing tfo->release_cmd_direct()\n"); + if (!tfo->tpg_get_inst_index) { + pr_err("Missing tfo->tpg_get_inst_index()\n"); return -EINVAL; } - if (!(tfo->shutdown_session)) { - printk(KERN_ERR "Missing tfo->shutdown_session()\n"); + if (!tfo->release_cmd) { + pr_err("Missing tfo->release_cmd()\n"); return -EINVAL; } - if (!(tfo->close_session)) { - printk(KERN_ERR "Missing tfo->close_session()\n"); + if (!tfo->shutdown_session) { + pr_err("Missing tfo->shutdown_session()\n"); return -EINVAL; } - if (!(tfo->stop_session)) { - printk(KERN_ERR "Missing tfo->stop_session()\n"); + if (!tfo->close_session) { + pr_err("Missing tfo->close_session()\n"); return -EINVAL; } - if (!(tfo->fall_back_to_erl0)) { - printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n"); + if (!tfo->stop_session) { + pr_err("Missing tfo->stop_session()\n"); return -EINVAL; } - if (!(tfo->sess_logged_in)) { - printk(KERN_ERR "Missing tfo->sess_logged_in()\n"); + if (!tfo->fall_back_to_erl0) { + pr_err("Missing tfo->fall_back_to_erl0()\n"); return -EINVAL; } - if (!(tfo->sess_get_index)) { - printk(KERN_ERR "Missing tfo->sess_get_index()\n"); + if (!tfo->sess_logged_in) { + pr_err("Missing tfo->sess_logged_in()\n"); return -EINVAL; } - if (!(tfo->write_pending)) { - printk(KERN_ERR "Missing tfo->write_pending()\n"); + if (!tfo->sess_get_index) { + pr_err("Missing tfo->sess_get_index()\n"); return -EINVAL; } - if (!(tfo->write_pending_status)) { - printk(KERN_ERR "Missing tfo->write_pending_status()\n"); + if (!tfo->write_pending) { + pr_err("Missing tfo->write_pending()\n"); return -EINVAL; } - if (!(tfo->set_default_node_attributes)) { - printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n"); + if (!tfo->write_pending_status) { + pr_err("Missing tfo->write_pending_status()\n"); return -EINVAL; } - if (!(tfo->get_task_tag)) { - printk(KERN_ERR "Missing tfo->get_task_tag()\n"); + if (!tfo->set_default_node_attributes) { + pr_err("Missing tfo->set_default_node_attributes()\n"); return -EINVAL; } - if (!(tfo->get_cmd_state)) { - printk(KERN_ERR "Missing tfo->get_cmd_state()\n"); + if (!tfo->get_task_tag) { + pr_err("Missing tfo->get_task_tag()\n"); return -EINVAL; } - if (!(tfo->new_cmd_failure)) { - printk(KERN_ERR "Missing tfo->new_cmd_failure()\n"); + if (!tfo->get_cmd_state) { + pr_err("Missing tfo->get_cmd_state()\n"); return -EINVAL; } - if (!(tfo->queue_data_in)) { - printk(KERN_ERR "Missing tfo->queue_data_in()\n"); + if (!tfo->queue_data_in) { + pr_err("Missing tfo->queue_data_in()\n"); return -EINVAL; } - if (!(tfo->queue_status)) { - printk(KERN_ERR "Missing tfo->queue_status()\n"); + if (!tfo->queue_status) { + pr_err("Missing tfo->queue_status()\n"); return -EINVAL; } - if (!(tfo->queue_tm_rsp)) { - printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n"); + if (!tfo->queue_tm_rsp) { + pr_err("Missing tfo->queue_tm_rsp()\n"); return -EINVAL; } - if (!(tfo->set_fabric_sense_len)) { - printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n"); + if (!tfo->set_fabric_sense_len) { + pr_err("Missing 
tfo->set_fabric_sense_len()\n"); return -EINVAL; } - if (!(tfo->get_fabric_sense_len)) { - printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n"); + if (!tfo->get_fabric_sense_len) { + pr_err("Missing tfo->get_fabric_sense_len()\n"); return -EINVAL; } - if (!(tfo->is_state_remove)) { - printk(KERN_ERR "Missing tfo->is_state_remove()\n"); + if (!tfo->is_state_remove) { + pr_err("Missing tfo->is_state_remove()\n"); return -EINVAL; } /* @@ -502,20 +500,20 @@ static int target_fabric_tf_ops_check( * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in * target_core_fabric_configfs.c WWN+TPG group context code. */ - if (!(tfo->fabric_make_wwn)) { - printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n"); + if (!tfo->fabric_make_wwn) { + pr_err("Missing tfo->fabric_make_wwn()\n"); return -EINVAL; } - if (!(tfo->fabric_drop_wwn)) { - printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n"); + if (!tfo->fabric_drop_wwn) { + pr_err("Missing tfo->fabric_drop_wwn()\n"); return -EINVAL; } - if (!(tfo->fabric_make_tpg)) { - printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n"); + if (!tfo->fabric_make_tpg) { + pr_err("Missing tfo->fabric_make_tpg()\n"); return -EINVAL; } - if (!(tfo->fabric_drop_tpg)) { - printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n"); + if (!tfo->fabric_drop_tpg) { + pr_err("Missing tfo->fabric_drop_tpg()\n"); return -EINVAL; } @@ -533,22 +531,15 @@ static int target_fabric_tf_ops_check( int target_fabric_configfs_register( struct target_fabric_configfs *tf) { - struct config_group *su_group; int ret; - if (!(tf)) { - printk(KERN_ERR "Unable to locate target_fabric_configfs" + if (!tf) { + pr_err("Unable to locate target_fabric_configfs" " pointer\n"); return -EINVAL; } - if (!(tf->tf_subsys)) { - printk(KERN_ERR "Unable to target struct config_subsystem" - " pointer\n"); - return -EINVAL; - } - su_group = &tf->tf_subsys->su_group; - if (!(su_group)) { - printk(KERN_ERR "Unable to locate target struct config_group" + if (!tf->tf_subsys) { + pr_err("Unable to target struct config_subsystem" " pointer\n"); return -EINVAL; } @@ -556,7 +547,7 @@ int target_fabric_configfs_register( if (ret < 0) return ret; - printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" + pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" ">>>>>>>>>>\n"); return 0; } @@ -565,48 +556,39 @@ EXPORT_SYMBOL(target_fabric_configfs_register); void target_fabric_configfs_deregister( struct target_fabric_configfs *tf) { - struct config_group *su_group; struct configfs_subsystem *su; - if (!(tf)) { - printk(KERN_ERR "Unable to locate passed target_fabric_" + if (!tf) { + pr_err("Unable to locate passed target_fabric_" "configfs\n"); return; } su = tf->tf_subsys; - if (!(su)) { - printk(KERN_ERR "Unable to locate passed tf->tf_subsys" + if (!su) { + pr_err("Unable to locate passed tf->tf_subsys" " pointer\n"); return; } - su_group = &tf->tf_subsys->su_group; - if (!(su_group)) { - printk(KERN_ERR "Unable to locate target struct config_group" - " pointer\n"); - return; - } - - printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" + pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" ">>>>>>>>>>>>\n"); mutex_lock(&g_tf_lock); if (atomic_read(&tf->tf_access_cnt)) { mutex_unlock(&g_tf_lock); - printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n", + pr_err("Non zero tf->tf_access_cnt for fabric %s\n", tf->tf_name); BUG(); } list_del(&tf->tf_list); mutex_unlock(&g_tf_lock); - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" + 
pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" " %s\n", tf->tf_name); tf->tf_module = NULL; tf->tf_subsys = NULL; kfree(tf); - printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" + pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" ">>>>>\n"); - return; } EXPORT_SYMBOL(target_fabric_configfs_deregister); @@ -627,11 +609,12 @@ static ssize_t target_core_dev_show_attr_##_name( \ \ spin_lock(&se_dev->se_dev_lock); \ dev = se_dev->se_dev_ptr; \ - if (!(dev)) { \ + if (!dev) { \ spin_unlock(&se_dev->se_dev_lock); \ return -ENODEV; \ } \ - rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \ + rb = snprintf(page, PAGE_SIZE, "%u\n", \ + (u32)dev->se_sub_dev->se_dev_attrib._name); \ spin_unlock(&se_dev->se_dev_lock); \ \ return rb; \ @@ -650,14 +633,14 @@ static ssize_t target_core_dev_store_attr_##_name( \ \ spin_lock(&se_dev->se_dev_lock); \ dev = se_dev->se_dev_ptr; \ - if (!(dev)) { \ + if (!dev) { \ spin_unlock(&se_dev->se_dev_lock); \ return -ENODEV; \ } \ ret = strict_strtoul(page, 0, &val); \ if (ret < 0) { \ spin_unlock(&se_dev->se_dev_lock); \ - printk(KERN_ERR "strict_strtoul() failed with" \ + pr_err("strict_strtoul() failed with" \ " ret: %d\n", ret); \ return -EINVAL; \ } \ @@ -715,6 +698,12 @@ SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(enforce_pr_isids); SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); +DEF_DEV_ATTRIB(is_nonrot); +SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR); + +DEF_DEV_ATTRIB(emulate_rest_reord); +SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); + DEF_DEV_ATTRIB_RO(hw_block_size); SE_DEV_ATTR_RO(hw_block_size); @@ -763,6 +752,8 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_emulate_tpu.attr, &target_core_dev_attrib_emulate_tpws.attr, &target_core_dev_attrib_enforce_pr_isids.attr, + &target_core_dev_attrib_is_nonrot.attr, + &target_core_dev_attrib_emulate_rest_reord.attr, &target_core_dev_attrib_hw_block_size.attr, &target_core_dev_attrib_block_size.attr, &target_core_dev_attrib_hw_max_sectors.attr, @@ -819,7 +810,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial( struct se_device *dev; dev = se_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; return sprintf(page, "T10 VPD Unit Serial Number: %s\n", @@ -846,13 +837,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( * VPD Unit Serial Number that OS dependent multipath can depend on. */ if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { - printk(KERN_ERR "Underlying SCSI device firmware provided VPD" + pr_err("Underlying SCSI device firmware provided VPD" " Unit Serial, ignoring request\n"); return -EOPNOTSUPP; } if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { - printk(KERN_ERR "Emulated VPD Unit Serial exceeds" + pr_err("Emulated VPD Unit Serial exceeds" " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); return -EOVERFLOW; } @@ -863,9 +854,9 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( * could cause negative effects. 
*/ dev = su_dev->se_dev_ptr; - if ((dev)) { + if (dev) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "Unable to set VPD Unit Serial while" + pr_err("Unable to set VPD Unit Serial while" " active %d $FABRIC_MOD exports exist\n", atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; @@ -883,7 +874,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( "%s", strstrip(buf)); su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; - printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:" + pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" " %s\n", su_dev->t10_wwn.unit_serial); return count; @@ -905,19 +896,19 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier( ssize_t len = 0; dev = se_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; memset(buf, 0, VPD_TMP_BUF_SIZE); spin_lock(&t10_wwn->t10_vpd_lock); list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { - if (!(vpd->protocol_identifier_set)) + if (!vpd->protocol_identifier_set) continue; transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); - if ((len + strlen(buf) >= PAGE_SIZE)) + if (len + strlen(buf) >= PAGE_SIZE) break; len += sprintf(page+len, "%s", buf); @@ -952,7 +943,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \ ssize_t len = 0; \ \ dev = se_dev->se_dev_ptr; \ - if (!(dev)) \ + if (!dev) \ return -ENODEV; \ \ spin_lock(&t10_wwn->t10_vpd_lock); \ @@ -962,19 +953,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \ \ memset(buf, 0, VPD_TMP_BUF_SIZE); \ transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ - if ((len + strlen(buf) >= PAGE_SIZE)) \ + if (len + strlen(buf) >= PAGE_SIZE) \ break; \ len += sprintf(page+len, "%s", buf); \ \ memset(buf, 0, VPD_TMP_BUF_SIZE); \ transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ - if ((len + strlen(buf) >= PAGE_SIZE)) \ + if (len + strlen(buf) >= PAGE_SIZE) \ break; \ len += sprintf(page+len, "%s", buf); \ \ memset(buf, 0, VPD_TMP_BUF_SIZE); \ transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ - if ((len + strlen(buf) >= PAGE_SIZE)) \ + if (len + strlen(buf) >= PAGE_SIZE) \ break; \ len += sprintf(page+len, "%s", buf); \ } \ @@ -984,7 +975,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \ } /* - * VPD page 0x83 Assoication: Logical Unit + * VPD page 0x83 Association: Logical Unit */ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00); @@ -1083,7 +1074,7 @@ static ssize_t target_core_dev_pr_show_spc3_res( spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; - if (!(pr_reg)) { + if (!pr_reg) { *len += sprintf(page + *len, "No SPC-3 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return *len; @@ -1093,7 +1084,7 @@ static ssize_t target_core_dev_pr_show_spc3_res( PR_REG_ISID_ID_LEN); *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n", - TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), + se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), se_nacl->initiatorname, (prf_isid) ? 
&i_buf[0] : ""); spin_unlock(&dev->dev_reservation_lock); @@ -1109,13 +1100,13 @@ static ssize_t target_core_dev_pr_show_spc2_res( spin_lock(&dev->dev_reservation_lock); se_nacl = dev->dev_reserved_node_acl; - if (!(se_nacl)) { + if (!se_nacl) { *len += sprintf(page + *len, "No SPC-2 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return *len; } *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n", - TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), + se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), se_nacl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -1128,10 +1119,10 @@ static ssize_t target_core_dev_pr_show_attr_res_holder( { ssize_t len = 0; - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; - switch (T10_RES(su_dev)->res_type) { + switch (su_dev->t10_pr.res_type) { case SPC3_PERSISTENT_RESERVATIONS: target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, page, &len); @@ -1165,15 +1156,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( ssize_t len = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return len; spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; - if (!(pr_reg)) { + if (!pr_reg) { len = sprintf(page, "No SPC-3 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return len; @@ -1202,13 +1193,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation( struct se_subsystem_dev *su_dev, char *page) { - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; - return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation); + return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation); } SE_DEV_PR_ATTR_RO(res_pr_generation); @@ -1229,15 +1220,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( ssize_t len = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return len; spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; - if (!(pr_reg)) { + if (!pr_reg) { len = sprintf(page, "No SPC-3 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return len; @@ -1245,7 +1236,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( se_nacl = pr_reg->pr_reg_nacl; se_tpg = se_nacl->se_tpg; lun = pr_reg->pr_reg_tg_pt_lun; - tfo = TPG_TFO(se_tpg); + tfo = se_tpg->se_tpg_tfo; len += sprintf(page+len, "SPC-3 Reservation: %s" " Target Node Endpoint: %s\n", tfo->get_fabric_name(), @@ -1276,16 +1267,16 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( ssize_t len = 0; int reg_count = 0, prf_isid; - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return len; len += sprintf(page+len, "SPC-3 PR Registrations:\n"); - spin_lock(&T10_RES(su_dev)->registration_lock); - list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, + spin_lock(&su_dev->t10_pr.registration_lock); + list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, pr_reg_list) { memset(buf, 0, 384); @@ -1299,15 +1290,15 @@ 
static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( &i_buf[0] : "", pr_reg->pr_res_key, pr_reg->pr_res_generation); - if ((len + strlen(buf) >= PAGE_SIZE)) + if (len + strlen(buf) >= PAGE_SIZE) break; len += sprintf(page+len, "%s", buf); reg_count++; } - spin_unlock(&T10_RES(su_dev)->registration_lock); + spin_unlock(&su_dev->t10_pr.registration_lock); - if (!(reg_count)) + if (!reg_count) len += sprintf(page+len, "None\n"); return len; @@ -1327,15 +1318,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type( ssize_t len = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return len; spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; - if (!(pr_reg)) { + if (!pr_reg) { len = sprintf(page, "No SPC-3 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return len; @@ -1358,10 +1349,10 @@ static ssize_t target_core_dev_pr_show_attr_res_type( { ssize_t len = 0; - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; - switch (T10_RES(su_dev)->res_type) { + switch (su_dev->t10_pr.res_type) { case SPC3_PERSISTENT_RESERVATIONS: len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); break; @@ -1389,14 +1380,14 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( struct se_subsystem_dev *su_dev, char *page) { - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; return sprintf(page, "APTPL Bit Status: %s\n", - (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled"); + (su_dev->t10_pr.pr_aptpl_active) ? 
"Activated" : "Disabled"); } SE_DEV_PR_ATTR_RO(res_aptpl_active); @@ -1408,10 +1399,10 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( struct se_subsystem_dev *su_dev, char *page) { - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; return sprintf(page, "Ready to process PR APTPL metadata..\n"); @@ -1460,14 +1451,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( u8 type = 0, scope; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_INFO "Unable to process APTPL metadata while" + pr_debug("Unable to process APTPL metadata while" " active fabric exports exist\n"); return -EINVAL; } @@ -1497,7 +1488,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( goto out; } if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { - printk(KERN_ERR "APTPL metadata initiator_node=" + pr_err("APTPL metadata initiator_node=" " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", PR_APTPL_MAX_IPORT_LEN); ret = -EINVAL; @@ -1511,7 +1502,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( goto out; } if (strlen(isid) >= PR_REG_ISID_LEN) { - printk(KERN_ERR "APTPL metadata initiator_isid" + pr_err("APTPL metadata initiator_isid" "= exceeds PR_REG_ISID_LEN: %d\n", PR_REG_ISID_LEN); ret = -EINVAL; @@ -1526,7 +1517,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( } ret = strict_strtoull(arg_p, 0, &tmp_ll); if (ret < 0) { - printk(KERN_ERR "strict_strtoull() failed for" + pr_err("strict_strtoull() failed for" " sa_res_key=\n"); goto out; } @@ -1572,7 +1563,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( goto out; } if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { - printk(KERN_ERR "APTPL metadata target_node=" + pr_err("APTPL metadata target_node=" " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", PR_APTPL_MAX_TPORT_LEN); ret = -EINVAL; @@ -1596,20 +1587,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( } } - if (!(i_port) || !(t_port) || !(sa_res_key)) { - printk(KERN_ERR "Illegal parameters for APTPL registration\n"); + if (!i_port || !t_port || !sa_res_key) { + pr_err("Illegal parameters for APTPL registration\n"); ret = -EINVAL; goto out; } if (res_holder && !(type)) { - printk(KERN_ERR "Illegal PR type: 0x%02x for reservation" + pr_err("Illegal PR type: 0x%02x for reservation" " holder\n", type); ret = -EINVAL; goto out; } - ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key, + ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key, i_port, isid, mapped_lun, t_port, tpgt, target_lun, res_holder, all_tg_pt, type); out: @@ -1662,7 +1653,7 @@ static ssize_t target_core_show_dev_info(void *p, char *page) int bl = 0; ssize_t read_bytes = 0; - if (!(se_dev->se_dev_ptr)) + if (!se_dev->se_dev_ptr) return -ENODEV; transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); @@ -1688,8 +1679,8 @@ static ssize_t target_core_store_dev_control( struct se_hba *hba = se_dev->se_dev_hba; struct se_subsystem_api *t = hba->transport; - if (!(se_dev->se_dev_su_ptr)) { - printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se" + if (!se_dev->se_dev_su_ptr) { + pr_err("Unable to locate struct se_subsystem_dev>se" 
"_dev_su_ptr\n"); return -EINVAL; } @@ -1725,7 +1716,7 @@ static ssize_t target_core_store_dev_alias( ssize_t read_bytes; if (count > (SE_DEV_ALIAS_LEN-1)) { - printk(KERN_ERR "alias count: %d exceeds" + pr_err("alias count: %d exceeds" " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, SE_DEV_ALIAS_LEN-1); return -EINVAL; @@ -1735,7 +1726,7 @@ static ssize_t target_core_store_dev_alias( read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page); - printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n", + pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&se_dev->se_dev_group.cg_item), se_dev->se_dev_alias); @@ -1771,7 +1762,7 @@ static ssize_t target_core_store_dev_udev_path( ssize_t read_bytes; if (count > (SE_UDEV_PATH_LEN-1)) { - printk(KERN_ERR "udev_path count: %d exceeds" + pr_err("udev_path count: %d exceeds" " SE_UDEV_PATH_LEN-1: %u\n", (int)count, SE_UDEV_PATH_LEN-1); return -EINVAL; @@ -1781,7 +1772,7 @@ static ssize_t target_core_store_dev_udev_path( read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, "%s", page); - printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n", + pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&se_dev->se_dev_group.cg_item), se_dev->se_dev_udev_path); @@ -1809,13 +1800,13 @@ static ssize_t target_core_store_dev_enable( char *ptr; ptr = strstr(page, "1"); - if (!(ptr)) { - printk(KERN_ERR "For dev_enable ops, only valid value" + if (!ptr) { + pr_err("For dev_enable ops, only valid value" " is \"1\"\n"); return -EINVAL; } - if ((se_dev->se_dev_ptr)) { - printk(KERN_ERR "se_dev->se_dev_ptr already set for storage" + if (se_dev->se_dev_ptr) { + pr_err("se_dev->se_dev_ptr already set for storage" " object\n"); return -EEXIST; } @@ -1830,7 +1821,7 @@ static ssize_t target_core_store_dev_enable( return -EINVAL; se_dev->se_dev_ptr = dev; - printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" + pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" " %p\n", se_dev->se_dev_ptr); return count; @@ -1854,22 +1845,22 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page) ssize_t len = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; - if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) + if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) return len; lu_gp_mem = dev->dev_alua_lu_gp_mem; - if (!(lu_gp_mem)) { - printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" + if (!lu_gp_mem) { + pr_err("NULL struct se_device->dev_alua_lu_gp_mem" " pointer\n"); return -EINVAL; } spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; - if ((lu_gp)) { + if (lu_gp) { lu_ci = &lu_gp->lu_gp_group.cg_item; len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", config_item_name(lu_ci), lu_gp->lu_gp_id); @@ -1893,17 +1884,17 @@ static ssize_t target_core_store_alua_lu_gp( int move = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; - if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { - printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", + if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { + pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&su_dev->se_dev_group.cg_item)); return -EINVAL; } if (count > LU_GROUP_NAME_BUF) { - printk(KERN_ERR "ALUA LU Group Alias too large!\n"); + pr_err("ALUA LU Group Alias 
too large!\n"); return -EINVAL; } memset(buf, 0, LU_GROUP_NAME_BUF); @@ -1919,27 +1910,27 @@ static ssize_t target_core_store_alua_lu_gp( * core_alua_get_lu_gp_by_name below(). */ lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); - if (!(lu_gp_new)) + if (!lu_gp_new) return -ENODEV; } lu_gp_mem = dev->dev_alua_lu_gp_mem; - if (!(lu_gp_mem)) { + if (!lu_gp_mem) { if (lu_gp_new) core_alua_put_lu_gp_from_name(lu_gp_new); - printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" + pr_err("NULL struct se_device->dev_alua_lu_gp_mem" " pointer\n"); return -EINVAL; } spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; - if ((lu_gp)) { + if (lu_gp) { /* * Clearing an existing lu_gp association, and replacing * with NULL */ - if (!(lu_gp_new)) { - printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s" + if (!lu_gp_new) { + pr_debug("Target_Core_ConfigFS: Releasing %s/%s" " from ALUA LU Group: core/alua/lu_gps/%s, ID:" " %hu\n", config_item_name(&hba->hba_group.cg_item), @@ -1964,7 +1955,7 @@ static ssize_t target_core_store_alua_lu_gp( __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); spin_unlock(&lu_gp_mem->lu_gp_mem_lock); - printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" + pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" " core/alua/lu_gps/%s, ID: %hu\n", (move) ? "Moving" : "Adding", config_item_name(&hba->hba_group.cg_item), @@ -2008,7 +1999,7 @@ static void target_core_dev_release(struct config_item *item) *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` */ if (se_dev->se_dev_ptr) { - printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" + pr_debug("Target_Core_ConfigFS: Calling se_free_" "virtual_device() for se_dev_ptr: %p\n", se_dev->se_dev_ptr); @@ -2017,14 +2008,14 @@ static void target_core_dev_release(struct config_item *item) /* * Release struct se_subsystem_dev->se_dev_su_ptr.. 
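The alua_lu_gp store handler above follows the usual configfs pattern: copy the page into a bounded scratch buffer, strstrip() the trailing newline, resolve the alias by name, then swap the group membership under the member lock. A minimal sketch of that shape, with my_group_lookup()/my_group_attach() as hypothetical stand-ins for core_alua_get_lu_gp_by_name()/__core_alua_attach_lu_gp_mem():

	/* Illustrative only; struct my_group and the my_group_*() helpers are stand-ins. */
	static ssize_t my_store_group_alias(void *p, const char *page, size_t count)
	{
		char buf[LU_GROUP_NAME_BUF];
		struct my_group *grp;

		if (count > sizeof(buf) - 1)
			return -EINVAL;		/* refuse oversized aliases up front */
		memset(buf, 0, sizeof(buf));
		memcpy(buf, page, count);

		grp = my_group_lookup(strstrip(buf));	/* strstrip() drops the trailing '\n' */
		if (!grp)
			return -ENODEV;

		my_group_attach(grp);		/* swap membership under the member lock */
		return count;
	}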
*/ - printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" + pr_debug("Target_Core_ConfigFS: Calling t->free_" "device() for se_dev_su_ptr: %p\n", se_dev->se_dev_su_ptr); t->free_device(se_dev->se_dev_su_ptr); } - printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" + pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem" "_dev_t: %p\n", se_dev); kfree(se_dev); } @@ -2039,10 +2030,10 @@ static ssize_t target_core_dev_show(struct config_item *item, struct target_core_configfs_attribute *tc_attr = container_of( attr, struct target_core_configfs_attribute, attr); - if (!(tc_attr->show)) + if (!tc_attr->show) return -EINVAL; - return tc_attr->show((void *)se_dev, page); + return tc_attr->show(se_dev, page); } static ssize_t target_core_dev_store(struct config_item *item, @@ -2055,10 +2046,10 @@ static ssize_t target_core_dev_store(struct config_item *item, struct target_core_configfs_attribute *tc_attr = container_of( attr, struct target_core_configfs_attribute, attr); - if (!(tc_attr->store)) + if (!tc_attr->store) return -EINVAL; - return tc_attr->store((void *)se_dev, page, count); + return tc_attr->store(se_dev, page, count); } static struct configfs_item_operations target_core_dev_item_ops = { @@ -2098,7 +2089,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id( struct t10_alua_lu_gp *lu_gp, char *page) { - if (!(lu_gp->lu_gp_valid_id)) + if (!lu_gp->lu_gp_valid_id) return 0; return sprintf(page, "%hu\n", lu_gp->lu_gp_id); @@ -2115,12 +2106,12 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( ret = strict_strtoul(page, 0, &lu_gp_id); if (ret < 0) { - printk(KERN_ERR "strict_strtoul() returned %d for" + pr_err("strict_strtoul() returned %d for" " lu_gp_id\n", ret); return -EINVAL; } if (lu_gp_id > 0x0000ffff) { - printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:" + pr_err("ALUA lu_gp_id: %lu exceeds maximum:" " 0x0000ffff\n", lu_gp_id); return -EINVAL; } @@ -2129,7 +2120,7 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( if (ret < 0) return -EINVAL; - printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit" + pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit" " Group: core/alua/lu_gps/%s to ID: %hu\n", config_item_name(&alua_lu_gp_cg->cg_item), lu_gp->lu_gp_id); @@ -2167,7 +2158,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_members( cur_len++; /* Extra byte for NULL terminator */ if ((cur_len + len) > PAGE_SIZE) { - printk(KERN_WARNING "Ran out of lu_gp_show_attr" + pr_warn("Ran out of lu_gp_show_attr" "_members buffer\n"); break; } @@ -2231,7 +2222,7 @@ static struct config_group *target_core_alua_create_lu_gp( config_group_init_type_name(alua_lu_gp_cg, name, &target_core_alua_lu_gp_cit); - printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit" + pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" " Group: core/alua/lu_gps/%s\n", config_item_name(alua_lu_gp_ci)); @@ -2246,7 +2237,7 @@ static void target_core_alua_drop_lu_gp( struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), struct t10_alua_lu_gp, lu_gp_group); - printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" + pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" " Group: core/alua/lu_gps/%s, ID: %hu\n", config_item_name(item), lu_gp->lu_gp_id); /* @@ -2305,22 +2296,22 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( unsigned long tmp; int new_state, ret; - if (!(tg_pt_gp->tg_pt_gp_valid_id)) { - printk(KERN_ERR "Unable to do implict ALUA on non valid" + if 
(!tg_pt_gp->tg_pt_gp_valid_id) { + pr_err("Unable to do implicit ALUA on invalid" " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk("Unable to extract new ALUA access state from" + pr_err("Unable to extract new ALUA access state from" " %s\n", page); return -EINVAL; } new_state = (int)tmp; if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { - printk(KERN_ERR "Unable to process implict configfs ALUA" + pr_err("Unable to process implicit configfs ALUA" " transition while TPGS_IMPLICT_ALUA is disabled\n"); return -EINVAL; } @@ -2351,8 +2342,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( unsigned long tmp; int new_status, ret; - if (!(tg_pt_gp->tg_pt_gp_valid_id)) { - printk(KERN_ERR "Unable to do set ALUA access status on non" + if (!tg_pt_gp->tg_pt_gp_valid_id) { + pr_err("Unable to set ALUA access status on non" " valid tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; @@ -2360,7 +2351,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract new ALUA access status" + pr_err("Unable to extract new ALUA access status" " from %s\n", page); return -EINVAL; } @@ -2369,7 +2360,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( if ((new_status != ALUA_STATUS_NONE) && (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { - printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n", + pr_err("Illegal ALUA access status: 0x%02x\n", new_status); return -EINVAL; } @@ -2420,12 +2411,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata( ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract alua_write_metadata\n"); + pr_err("Unable to extract alua_write_metadata\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { - printk(KERN_ERR "Illegal value for alua_write_metadata:" + pr_err("Illegal value for alua_write_metadata:" " %lu\n", tmp); return -EINVAL; } @@ -2507,7 +2498,7 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - if (!(tg_pt_gp->tg_pt_gp_valid_id)) + if (!tg_pt_gp->tg_pt_gp_valid_id) return 0; return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); @@ -2524,12 +2515,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( ret = strict_strtoul(page, 0, &tg_pt_gp_id); if (ret < 0) { - printk(KERN_ERR "strict_strtoul() returned %d for" + pr_err("strict_strtoul() returned %d for" " tg_pt_gp_id\n", ret); return -EINVAL; } if (tg_pt_gp_id > 0x0000ffff) { - printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:" + pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" " 0x0000ffff\n", tg_pt_gp_id); return -EINVAL; } @@ -2538,7 +2529,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( if (ret < 0) return -EINVAL; - printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: " + pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: " "core/alua/tg_pt_gps/%s to ID: %hu\n", config_item_name(&alua_tg_pt_gp_cg->cg_item), tg_pt_gp->tg_pt_gp_id); @@ -2572,14 +2563,14 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members( lun = port->sep_lun; cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" - "/%s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_wwn(tpg), - 
TPG_TFO(tpg)->tpg_get_tag(tpg), + "/%s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), config_item_name(&lun->lun_group.cg_item)); cur_len++; /* Extra byte for NULL terminator */ if ((cur_len + len) > PAGE_SIZE) { - printk(KERN_WARNING "Ran out of lu_gp_show_attr" + pr_warn("Ran out of lu_gp_show_attr" "_members buffer\n"); break; } @@ -2645,7 +2636,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp( struct config_item *alua_tg_pt_gp_ci = NULL; tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); - if (!(tg_pt_gp)) + if (!tg_pt_gp) return NULL; alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; @@ -2654,7 +2645,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp( config_group_init_type_name(alua_tg_pt_gp_cg, name, &target_core_alua_tg_pt_gp_cit); - printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port" + pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port" " Group: alua/tg_pt_gps/%s\n", config_item_name(alua_tg_pt_gp_ci)); @@ -2668,7 +2659,7 @@ static void target_core_alua_drop_tg_pt_gp( struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), struct t10_alua_tg_pt_gp, tg_pt_gp_group); - printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" + pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port" " Group: alua/tg_pt_gps/%s, ID: %hu\n", config_item_name(item), tg_pt_gp->tg_pt_gp_id); /* @@ -2759,21 +2750,21 @@ static struct config_group *target_core_make_subdev( se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); if (!se_dev) { - printk(KERN_ERR "Unable to allocate memory for" + pr_err("Unable to allocate memory for" " struct se_subsystem_dev\n"); goto unlock; } - INIT_LIST_HEAD(&se_dev->g_se_dev_list); + INIT_LIST_HEAD(&se_dev->se_dev_node); INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); - INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); - INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); - spin_lock_init(&se_dev->t10_reservation.registration_lock); - spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); + INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); + INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); + spin_lock_init(&se_dev->t10_pr.registration_lock); + spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); spin_lock_init(&se_dev->se_dev_lock); - se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; + se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; se_dev->t10_wwn.t10_sub_dev = se_dev; se_dev->t10_alua.t10_sub_dev = se_dev; se_dev->se_dev_attrib.da_sub_dev = se_dev; @@ -2783,7 +2774,7 @@ static struct config_group *target_core_make_subdev( dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, GFP_KERNEL); - if (!(dev_cg->default_groups)) + if (!dev_cg->default_groups) goto out; /* * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr @@ -2794,14 +2785,14 @@ static struct config_group *target_core_make_subdev( * configfs tree for device object's struct config_group. 
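Both *_show_attr_members() handlers above build each member line in a scratch buffer first, so the running length can be checked against PAGE_SIZE before anything is copied into the sysfs page. A condensed sketch of that bound ('m', 'members', and struct my_member are hypothetical):

	struct my_member *m;
	char buf[256];
	ssize_t len = 0;
	size_t cur_len;

	list_for_each_entry(m, &members, link) {
		cur_len = snprintf(buf, sizeof(buf), "%s\n", m->name);
		cur_len++;			/* extra byte for NUL terminator */
		if (cur_len + len > PAGE_SIZE)
			break;			/* never write past the page */
		len += sprintf(page + len, "%s", buf);
	}
	return len;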
*/ se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); - if (!(se_dev->se_dev_su_ptr)) { - printk(KERN_ERR "Unable to locate subsystem dependent pointer" + if (!se_dev->se_dev_su_ptr) { + pr_err("Unable to locate subsystem dependent pointer" " from allocate_virtdevice()\n"); goto out; } - spin_lock(&se_global->g_device_lock); - list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list); - spin_unlock(&se_global->g_device_lock); + spin_lock(&se_device_lock); + list_add_tail(&se_dev->se_dev_node, &se_dev_list); + spin_unlock(&se_device_lock); config_group_init_type_name(&se_dev->se_dev_group, name, &target_core_dev_cit); @@ -2826,14 +2817,14 @@ static struct config_group *target_core_make_subdev( * Add core/$HBA/$DEV/alua/default_tg_pt_gp */ tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); - if (!(tg_pt_gp)) + if (!tg_pt_gp) goto out; - tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; + tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(tg_pt_gp_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->" + if (!tg_pt_gp_cg->default_groups) { + pr_err("Unable to allocate tg_pt_gp_cg->" "default_groups\n"); goto out; } @@ -2842,28 +2833,28 @@ static struct config_group *target_core_make_subdev( "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; tg_pt_gp_cg->default_groups[1] = NULL; - T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; + se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp; /* * Add core/$HBA/$DEV/statistics/ default groups */ - dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; + dev_stat_grp = &se_dev->dev_stat_grps.stat_group; dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, GFP_KERNEL); if (!dev_stat_grp->default_groups) { - printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n"); + pr_err("Unable to allocate dev_stat_grp->default_groups\n"); goto out; } target_stat_setup_dev_default_groups(se_dev); - printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" + pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); mutex_unlock(&hba->hba_access_mutex); return &se_dev->se_dev_group; out: - if (T10_ALUA(se_dev)->default_tg_pt_gp) { - core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); - T10_ALUA(se_dev)->default_tg_pt_gp = NULL; + if (se_dev->t10_alua.default_tg_pt_gp) { + core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp); + se_dev->t10_alua.default_tg_pt_gp = NULL; } if (dev_stat_grp) kfree(dev_stat_grp->default_groups); @@ -2896,11 +2887,11 @@ static void target_core_drop_subdev( mutex_lock(&hba->hba_access_mutex); t = hba->transport; - spin_lock(&se_global->g_device_lock); - list_del(&se_dev->g_se_dev_list); - spin_unlock(&se_global->g_device_lock); + spin_lock(&se_device_lock); + list_del(&se_dev->se_dev_node); + spin_unlock(&se_device_lock); - dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; + dev_stat_grp = &se_dev->dev_stat_grps.stat_group; for (i = 0; dev_stat_grp->default_groups[i]; i++) { df_item = &dev_stat_grp->default_groups[i]->cg_item; dev_stat_grp->default_groups[i] = NULL; @@ -2908,7 +2899,7 @@ static void target_core_drop_subdev( } kfree(dev_stat_grp->default_groups); - tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; + tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; for (i = 0; 
tg_pt_gp_cg->default_groups[i]; i++) { df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; tg_pt_gp_cg->default_groups[i] = NULL; @@ -2919,7 +2910,7 @@ static void target_core_drop_subdev( * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp * directly from target_core_alua_tg_pt_gp_release(). */ - T10_ALUA(se_dev)->default_tg_pt_gp = NULL; + se_dev->t10_alua.default_tg_pt_gp = NULL; dev_cg = &se_dev->se_dev_group; for (i = 0; dev_cg->default_groups[i]; i++) { @@ -2988,13 +2979,13 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, ret = strict_strtoul(page, 0, &mode_flag); if (ret < 0) { - printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret); + pr_err("Unable to extract hba mode flag: %d\n", ret); return -EINVAL; } spin_lock(&hba->device_lock); - if (!(list_empty(&hba->hba_dev_list))) { - printk(KERN_ERR "Unable to set hba_mode with active devices\n"); + if (!list_empty(&hba->hba_dev_list)) { + pr_err("Unable to set hba_mode with active devices\n"); spin_unlock(&hba->device_lock); return -EINVAL; } @@ -3053,7 +3044,7 @@ static struct config_group *target_core_call_addhbatotarget( memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { - printk(KERN_ERR "Passed *name strlen(): %d exceeds" + pr_err("Passed *name strlen(): %d exceeds" " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), TARGET_CORE_NAME_MAX_LEN); return ERR_PTR(-ENAMETOOLONG); @@ -3061,8 +3052,8 @@ static struct config_group *target_core_call_addhbatotarget( snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); str = strstr(buf, "_"); - if (!(str)) { - printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); + if (!str) { + pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); return ERR_PTR(-EINVAL); } se_plugin_str = buf; @@ -3071,7 +3062,7 @@ static struct config_group *target_core_call_addhbatotarget( * Namely rd_direct and rd_mcp.. */ str2 = strstr(str+1, "_"); - if ((str2)) { + if (str2) { *str2 = '\0'; /* Terminate for *se_plugin_str */ str2++; /* Skip to start of plugin dependent ID */ str = str2; @@ -3082,7 +3073,7 @@ static struct config_group *target_core_call_addhbatotarget( ret = strict_strtoul(str, 0, &plugin_dep_id); if (ret < 0) { - printk(KERN_ERR "strict_strtoul() returned %d for" + pr_err("strict_strtoul() returned %d for" " plugin_dep_id\n", ret); return ERR_PTR(-EINVAL); } @@ -3135,7 +3126,7 @@ static int __init target_core_init_configfs(void) struct t10_alua_lu_gp *lu_gp; int ret; - printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage" + pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" " Engine: %s on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); @@ -3145,10 +3136,9 @@ static int __init target_core_init_configfs(void) INIT_LIST_HEAD(&g_tf_list); mutex_init(&g_tf_lock); - init_scsi_index_table(); - ret = init_se_global(); + ret = init_se_kmem_caches(); if (ret < 0) - return -1; + return ret; /* * Create $CONFIGFS/target/core default group for HBA <-> Storage Object * and ALUA Logical Unit Group and Target Port Group infrastructure. 
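target_core_call_addhbatotarget() above splits an HBA name of the form $SUBSYSTEM_PLUGIN_$HOST_ID, tolerating a second underscore for the rd_direct/rd_mcp plugin names. The parse reduces to roughly this sketch (error logging trimmed, not the verbatim function):

	char buf[TARGET_CORE_NAME_MAX_LEN] = { };
	char *str, *str2;
	unsigned long plugin_dep_id;

	snprintf(buf, sizeof(buf), "%s", name);	/* e.g. "iblock_0" or "rd_mcp_3" */
	str = strstr(buf, "_");
	if (!str)
		return -EINVAL;			/* no plugin/ID separator */
	str2 = strstr(str + 1, "_");
	if (str2)
		str = str2;			/* two-part plugin name: rd_direct, rd_mcp */
	*str++ = '\0';				/* buf now holds just the plugin string */
	if (strict_strtoul(str, 0, &plugin_dep_id) < 0)
		return -EINVAL;			/* remainder must be the host ID */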
@@ -3156,44 +3146,44 @@ static int __init target_core_init_configfs(void) target_cg = &subsys->su_group; target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(target_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate target_cg->default_groups\n"); + if (!target_cg->default_groups) { + pr_err("Unable to allocate target_cg->default_groups\n"); goto out_global; } - config_group_init_type_name(&se_global->target_core_hbagroup, + config_group_init_type_name(&target_core_hbagroup, "core", &target_core_cit); - target_cg->default_groups[0] = &se_global->target_core_hbagroup; + target_cg->default_groups[0] = &target_core_hbagroup; target_cg->default_groups[1] = NULL; /* * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ */ - hba_cg = &se_global->target_core_hbagroup; + hba_cg = &target_core_hbagroup; hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(hba_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); + if (!hba_cg->default_groups) { + pr_err("Unable to allocate hba_cg->default_groups\n"); goto out_global; } - config_group_init_type_name(&se_global->alua_group, + config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit); - hba_cg->default_groups[0] = &se_global->alua_group; + hba_cg->default_groups[0] = &alua_group; hba_cg->default_groups[1] = NULL; /* * Add ALUA Logical Unit Group and Target Port Group ConfigFS * groups under /sys/kernel/config/target/core/alua/ */ - alua_cg = &se_global->alua_group; + alua_cg = &alua_group; alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(alua_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n"); + if (!alua_cg->default_groups) { + pr_err("Unable to allocate alua_cg->default_groups\n"); goto out_global; } - config_group_init_type_name(&se_global->alua_lu_gps_group, + config_group_init_type_name(&alua_lu_gps_group, "lu_gps", &target_core_alua_lu_gps_cit); - alua_cg->default_groups[0] = &se_global->alua_lu_gps_group; + alua_cg->default_groups[0] = &alua_lu_gps_group; alua_cg->default_groups[1] = NULL; /* * Add core/alua/lu_gps/default_lu_gp @@ -3202,11 +3192,11 @@ static int __init target_core_init_configfs(void) if (IS_ERR(lu_gp)) goto out_global; - lu_gp_cg = &se_global->alua_lu_gps_group; + lu_gp_cg = &alua_lu_gps_group; lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(lu_gp_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n"); + if (!lu_gp_cg->default_groups) { + pr_err("Unable to allocate lu_gp_cg->default_groups\n"); goto out_global; } @@ -3214,17 +3204,17 @@ static int __init target_core_init_configfs(void) &target_core_alua_lu_gp_cit); lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; lu_gp_cg->default_groups[1] = NULL; - se_global->default_lu_gp = lu_gp; + default_lu_gp = lu_gp; /* * Register the target_core_mod subsystem with configfs. 
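The init path above keeps a single unwind tail: every allocation failure jumps to out_global, which releases whatever was already set up (default_lu_gp, then the group arrays) in reverse order. The general shape of that idiom, reduced to two hypothetical setup steps:

	static int __init my_mod_init(void)
	{
		int ret = -ENOMEM;
		void *a, *b;

		a = alloc_step_a();		/* hypothetical step */
		if (!a)
			goto out;
		b = alloc_step_b();		/* hypothetical step */
		if (!b)
			goto out_free_a;
		return 0;

	out_free_a:
		release_step_a(a);		/* tear down in reverse order of setup */
	out:
		return ret;
	}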
*/ ret = configfs_register_subsystem(subsys); if (ret < 0) { - printk(KERN_ERR "Error %d while registering subsystem %s\n", + pr_err("Error %d while registering subsystem %s\n", ret, subsys->su_group.cg_item.ci_namebuf); goto out_global; } - printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric" + pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); /* @@ -3244,9 +3234,9 @@ out: core_dev_release_virtual_lun0(); rd_module_exit(); out_global: - if (se_global->default_lu_gp) { - core_alua_free_lu_gp(se_global->default_lu_gp); - se_global->default_lu_gp = NULL; + if (default_lu_gp) { + core_alua_free_lu_gp(default_lu_gp); + default_lu_gp = NULL; } if (lu_gp_cg) kfree(lu_gp_cg->default_groups); @@ -3255,8 +3245,8 @@ out_global: if (hba_cg) kfree(hba_cg->default_groups); kfree(target_cg->default_groups); - release_se_global(); - return -1; + release_se_kmem_caches(); + return ret; } static void __exit target_core_exit_configfs(void) @@ -3266,10 +3256,9 @@ static void __exit target_core_exit_configfs(void) struct config_item *item; int i; - se_global->in_shutdown = 1; subsys = target_core_subsystem[0]; - lu_gp_cg = &se_global->alua_lu_gps_group; + lu_gp_cg = &alua_lu_gps_group; for (i = 0; lu_gp_cg->default_groups[i]; i++) { item = &lu_gp_cg->default_groups[i]->cg_item; lu_gp_cg->default_groups[i] = NULL; @@ -3278,7 +3267,7 @@ static void __exit target_core_exit_configfs(void) kfree(lu_gp_cg->default_groups); lu_gp_cg->default_groups = NULL; - alua_cg = &se_global->alua_group; + alua_cg = &alua_group; for (i = 0; alua_cg->default_groups[i]; i++) { item = &alua_cg->default_groups[i]->cg_item; alua_cg->default_groups[i] = NULL; @@ -3287,7 +3276,7 @@ static void __exit target_core_exit_configfs(void) kfree(alua_cg->default_groups); alua_cg->default_groups = NULL; - hba_cg = &se_global->target_core_hbagroup; + hba_cg = &target_core_hbagroup; for (i = 0; hba_cg->default_groups[i]; i++) { item = &hba_cg->default_groups[i]->cg_item; hba_cg->default_groups[i] = NULL; @@ -3302,17 +3291,15 @@ static void __exit target_core_exit_configfs(void) configfs_unregister_subsystem(subsys); kfree(subsys->su_group.default_groups); - core_alua_free_lu_gp(se_global->default_lu_gp); - se_global->default_lu_gp = NULL; + core_alua_free_lu_gp(default_lu_gp); + default_lu_gp = NULL; - printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" + pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric" " Infrastructure\n"); core_dev_release_virtual_lun0(); rd_module_exit(); - release_se_global(); - - return; + release_se_kmem_caches(); } MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index ba698ea62bb..b38b6c993e6 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1,7 +1,7 @@ /******************************************************************************* * Filename: target_core_device.c (based on iscsi_target_device.c) * - * This file contains the iSCSI Virtual Device and Disk Transport + * This file contains the TCM Virtual Device and Disk Transport * agnostic related functions. * * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 
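The hunks that follow rename transport_get_lun_for_cmd()/transport_get_lun_for_tmr() to transport_lookup_cmd_lun()/transport_lookup_tmr_lun() and switch them from a bare -1 to distinct errno values, with the SCSI sense reason already recorded on the command. A hedged sketch of what that enables on the caller side; complete_with_check_condition() is a hypothetical helper, not a TCM API:

	ret = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (ret < 0) {
		/*
		 * -ENODEV: no such LUN (TCM_NON_EXISTENT_LUN already set);
		 * -EACCES: write to a read-only mapping (TCM_WRITE_PROTECTED).
		 * Either way the sense reason is on the command, so the
		 * fabric only has to ship a CHECK CONDITION back.
		 */
		return complete_with_check_condition(se_cmd);
	}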
@@ -54,177 +54,183 @@ static void se_dev_start(struct se_device *dev); static void se_dev_stop(struct se_device *dev); -int transport_get_lun_for_cmd( - struct se_cmd *se_cmd, - unsigned char *cdb, - u32 unpacked_lun) +static struct se_hba *lun0_hba; +static struct se_subsystem_dev *lun0_su_dev; +/* not static, needed by tpg.c */ +struct se_device *g_lun0_dev; + +int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) { - struct se_dev_entry *deve; struct se_lun *se_lun = NULL; - struct se_session *se_sess = SE_SESS(se_cmd); + struct se_session *se_sess = se_cmd->se_sess; + struct se_device *dev; unsigned long flags; - int read_only = 0; - spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); - deve = se_cmd->se_deve = - &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; - if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { - if (se_cmd) { - deve->total_cmds++; - deve->total_bytes += se_cmd->data_length; - - if (se_cmd->data_direction == DMA_TO_DEVICE) { - if (deve->lun_flags & - TRANSPORT_LUNFLAGS_READ_ONLY) { - read_only = 1; - goto out; - } - deve->write_bytes += se_cmd->data_length; - } else if (se_cmd->data_direction == - DMA_FROM_DEVICE) { - deve->read_bytes += se_cmd->data_length; - } + if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { + se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + return -ENODEV; + } + + spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); + se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; + if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { + struct se_dev_entry *deve = se_cmd->se_deve; + + deve->total_cmds++; + deve->total_bytes += se_cmd->data_length; + + if ((se_cmd->data_direction == DMA_TO_DEVICE) && + (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { + se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + " Access for 0x%08x\n", + se_cmd->se_tfo->get_fabric_name(), + unpacked_lun); + spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); + return -EACCES; } + + if (se_cmd->data_direction == DMA_TO_DEVICE) + deve->write_bytes += se_cmd->data_length; + else if (se_cmd->data_direction == DMA_FROM_DEVICE) + deve->read_bytes += se_cmd->data_length; + deve->deve_cmds++; - se_lun = se_cmd->se_lun = deve->se_lun; + se_lun = deve->se_lun; + se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } -out: - spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); if (!se_lun) { - if (read_only) { - se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + /* + * Use the se_portal_group->tpg_virt_lun0 to allow for + * REPORT_LUNS, et al to be returned when no active + * MappedLUN=0 exists for this Initiator Port. 
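Condensed, the rewritten lookup makes three decisions before any stats or list bookkeeping; a sketch of the control flow only (not the verbatim function, locking and sense-reason assignments omitted):

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;				/* TCM_NON_EXISTENT_LUN */

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (se_cmd->data_direction == DMA_TO_DEVICE &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))
			return -EACCES;			/* TCM_WRITE_PROTECTED */
		se_cmd->se_lun = deve->se_lun;		/* normal mapped LUN */
	} else if (unpacked_lun == 0) {
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		if (se_cmd->data_direction != DMA_FROM_DEVICE &&
		    se_cmd->data_direction != DMA_NONE)
			return -EACCES;			/* virtual LUN 0 is write-protected */
	} else {
		return -ENODEV;				/* no mapping and not LUN 0 */
	}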
+ */ + if (unpacked_lun != 0) { + se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", - CMD_TFO(se_cmd)->get_fabric_name(), + se_cmd->se_tfo->get_fabric_name(), unpacked_lun); - return -1; - } else { - /* - * Use the se_portal_group->tpg_virt_lun0 to allow for - * REPORT_LUNS, et al to be returned when no active - * MappedLUN=0 exists for this Initiator Port. - */ - if (unpacked_lun != 0) { - se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" - " Access for 0x%08x\n", - CMD_TFO(se_cmd)->get_fabric_name(), - unpacked_lun); - return -1; - } - /* - * Force WRITE PROTECT for virtual LUN 0 - */ - if ((se_cmd->data_direction != DMA_FROM_DEVICE) && - (se_cmd->data_direction != DMA_NONE)) { - se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; - } -#if 0 - printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", - CMD_TFO(se_cmd)->get_fabric_name()); -#endif - se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; - se_cmd->orig_fe_lun = 0; - se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; - se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + return -ENODEV; } + /* + * Force WRITE PROTECT for virtual LUN 0 + */ + if ((se_cmd->data_direction != DMA_FROM_DEVICE) && + (se_cmd->data_direction != DMA_NONE)) { + se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + return -EACCES; + } + + se_lun = &se_sess->se_tpg->tpg_virt_lun0; + se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; + se_cmd->orig_fe_lun = 0; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } /* * Determine if the struct se_lun is online. + * FIXME: Check for LUN_RESET + UNIT Attention */ -/* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } - { - struct se_device *dev = se_lun->lun_se_dev; - spin_lock_irq(&dev->stats_lock); + /* Directly associate cmd with se_dev */ + se_cmd->se_dev = se_lun->lun_se_dev; + + /* TODO: get rid of this and use atomics for stats */ + dev = se_lun->lun_se_dev; + spin_lock_irqsave(&dev->stats_lock, flags); dev->num_cmds++; if (se_cmd->data_direction == DMA_TO_DEVICE) dev->write_bytes += se_cmd->data_length; else if (se_cmd->data_direction == DMA_FROM_DEVICE) dev->read_bytes += se_cmd->data_length; - spin_unlock_irq(&dev->stats_lock); - } + spin_unlock_irqrestore(&dev->stats_lock, flags); /* * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used * for tracking state of struct se_cmds during LUN shutdown events. 
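The se_lun_node/transport_lun_active bookkeeping below exists so LUN shutdown can find in-flight commands. A hypothetical sketch of the consumer side, under the same lock discipline (the real shutdown path lives elsewhere in target_core_transport.c):

	struct se_cmd *cmd, *tmp;

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_for_each_entry_safe(cmd, tmp, &se_lun->lun_cmd_list, se_lun_node) {
		if (!atomic_read(&cmd->transport_lun_active))
			continue;			/* already being torn down */
		atomic_set(&cmd->transport_lun_active, 0);
		list_del_init(&cmd->se_lun_node);
		/* ... quiesce or abort 'cmd' outside the lock ... */
	}
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);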
*/ spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); - list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); - atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); -#if 0 - printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", - CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); -#endif + list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); + atomic_set(&se_cmd->transport_lun_active, 1); spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); return 0; } -EXPORT_SYMBOL(transport_get_lun_for_cmd); +EXPORT_SYMBOL(transport_lookup_cmd_lun); -int transport_get_lun_for_tmr( - struct se_cmd *se_cmd, - u32 unpacked_lun) +int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) { - struct se_device *dev = NULL; struct se_dev_entry *deve; struct se_lun *se_lun = NULL; - struct se_session *se_sess = SE_SESS(se_cmd); + struct se_session *se_sess = se_cmd->se_sess; struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; + unsigned long flags; + + if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { + se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + return -ENODEV; + } + + spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags); + se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; + deve = se_cmd->se_deve; - spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); - deve = se_cmd->se_deve = - &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { - se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; - dev = se_lun->lun_se_dev; + se_tmr->tmr_lun = deve->se_lun; + se_cmd->se_lun = deve->se_lun; + se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; -/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ + se_cmd->se_orig_obj_ptr = se_cmd->se_dev; } - spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); if (!se_lun) { - printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" + pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", - CMD_TFO(se_cmd)->get_fabric_name(), + se_cmd->se_tfo->get_fabric_name(), unpacked_lun); se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } /* * Determine if the struct se_lun is online. 
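Note the lock conversions in these two lookup functions: spin_lock_irq() unconditionally re-enables interrupts on unlock, which is only safe when the caller is known to run with IRQs on, while the new spin_lock_irqsave() form restores whatever state the caller had, so the lookups can be reached from fabric completion paths. The generic shape:

	unsigned long flags;

	spin_lock_irqsave(&lock, flags);	/* safe regardless of caller's IRQ state */
	/* ... touch state shared with interrupt context ... */
	spin_unlock_irqrestore(&lock, flags);	/* restores, rather than enables, IRQs */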
+ * FIXME: Check for LUN_RESET + UNIT Attention */ -/* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } - se_tmr->tmr_dev = dev; - spin_lock(&dev->se_tmr_lock); - list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); - spin_unlock(&dev->se_tmr_lock); + /* Directly associate cmd with se_dev */ + se_cmd->se_dev = se_lun->lun_se_dev; + se_tmr->tmr_dev = se_lun->lun_se_dev; + + spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags); + list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); + spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags); return 0; } -EXPORT_SYMBOL(transport_get_lun_for_tmr); +EXPORT_SYMBOL(transport_lookup_tmr_lun); /* * This function is called from core_scsi3_emulate_pro_register_and_move() @@ -249,17 +255,17 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( continue; lun = deve->se_lun; - if (!(lun)) { - printk(KERN_ERR "%s device entries device pointer is" + if (!lun) { + pr_err("%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } port = lun->lun_sep; - if (!(port)) { - printk(KERN_ERR "%s device entries device pointer is" + if (!port) { + pr_err("%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } if (port->sep_rtpi != rtpi) @@ -295,9 +301,9 @@ int core_free_device_list_for_node( continue; if (!deve->se_lun) { - printk(KERN_ERR "%s device entries device pointer is" + pr_err("%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } lun = deve->se_lun; @@ -323,8 +329,6 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; deve->deve_cmds--; spin_unlock_irq(&se_nacl->device_list_lock); - - return; } void core_update_device_list_access( @@ -344,8 +348,6 @@ void core_update_device_list_access( deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; } spin_unlock_irq(&nacl->device_list_lock); - - return; } /* core_update_device_list_for_node(): @@ -370,7 +372,7 @@ int core_update_device_list_for_node( * struct se_dev_entry pointers below as logic in * core_alua_do_transition_tg_pt() depends on these being present. 
*/ - if (!(enable)) { + if (!enable) { /* * deve->se_lun_acl will be NULL for demo-mode created LUNs * that have not been explicitly converted to MappedLUNs -> @@ -393,18 +395,18 @@ */ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { if (deve->se_lun_acl != NULL) { - printk(KERN_ERR "struct se_dev_entry->se_lun_acl" + pr_err("struct se_dev_entry->se_lun_acl" " already set for demo mode -> explicit" " LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); - return -1; + return -EINVAL; } if (deve->se_lun != lun) { - printk(KERN_ERR "struct se_dev_entry->se_lun does" + pr_err("struct se_dev_entry->se_lun does not" " match passed struct se_lun for demo mode" " -> explicit LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); - return -1; + return -EINVAL; } deve->se_lun_acl = lun_acl; trans = 1; @@ -492,8 +494,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) spin_lock_bh(&tpg->acl_node_lock); } spin_unlock_bh(&tpg->acl_node_lock); - - return; } static struct se_port *core_alloc_port(struct se_device *dev) @@ -501,9 +501,9 @@ static struct se_port *core_alloc_port(struct se_device *dev) struct se_port *port, *port_tmp; port = kzalloc(sizeof(struct se_port), GFP_KERNEL); - if (!(port)) { - printk(KERN_ERR "Unable to allocate struct se_port\n"); - return NULL; + if (!port) { + pr_err("Unable to allocate struct se_port\n"); + return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&port->sep_alua_list); INIT_LIST_HEAD(&port->sep_list); @@ -513,10 +513,10 @@ static struct se_port *core_alloc_port(struct se_device *dev) spin_lock(&dev->se_port_lock); if (dev->dev_port_count == 0x0000ffff) { - printk(KERN_WARNING "Reached dev->dev_port_count ==" + pr_warn("Reached dev->dev_port_count ==" " 0x0000ffff\n"); spin_unlock(&dev->se_port_lock); - return NULL; + return ERR_PTR(-ENOSPC); } again: /* @@ -532,7 +532,7 @@ again: * 3h to FFFFh Relative port 3 through 65 535 */ port->sep_rtpi = dev->dev_rpti_counter++; - if (!(port->sep_rtpi)) + if (!port->sep_rtpi) goto again; list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { @@ -554,7 +554,7 @@ static void core_export_port( struct se_port *port, struct se_lun *lun) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; spin_lock(&dev->se_port_lock); @@ -567,20 +567,20 @@ list_add_tail(&port->sep_list, &dev->dev_sep_list); spin_unlock(&dev->se_port_lock); - if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { + if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { - printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" + pr_err("Unable to allocate t10_alua_tg_pt" "_gp_member_t\n"); return; } spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, - T10_ALUA(su_dev)->default_tg_pt_gp); + su_dev->t10_alua.default_tg_pt_gp); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); - printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" + pr_debug("%s/%s: Adding to default ALUA Target Port" " Group: alua/default_tg_pt_gp\n", - TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); + dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); } dev->dev_port_count++; @@ -607,8 +607,6 @@ static void core_release_port(struct se_device *dev, struct se_port *port) list_del(&port->sep_list); 
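core_alloc_port() above now encodes its two failure modes in the pointer itself instead of returning a bare NULL; the caller in core_dev_export() (next hunk) unwraps it with the usual idiom:

	struct se_port *port = core_alloc_port(dev);

	if (IS_ERR(port))
		return PTR_ERR(port);	/* -ENOMEM or -ENOSPC, no information lost */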
dev->dev_port_count--; kfree(port); - - return; } int core_dev_export( @@ -619,8 +617,8 @@ int core_dev_export( struct se_port *port; port = core_alloc_port(dev); - if (!(port)) - return -1; + if (IS_ERR(port)) + return PTR_ERR(port); lun->lun_se_dev = dev; se_dev_start(dev); @@ -657,33 +655,35 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) { struct se_dev_entry *deve; struct se_lun *se_lun; - struct se_session *se_sess = SE_SESS(se_cmd); + struct se_session *se_sess = se_cmd->se_sess; struct se_task *se_task; - unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; + unsigned char *buf; u32 cdb_offset = 0, lun_count = 0, offset = 8, i; - list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) + list_for_each_entry(se_task, &se_cmd->t_task_list, t_list) break; - if (!(se_task)) { - printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); + if (!se_task) { + pr_err("Unable to locate struct se_task for struct se_cmd\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } + buf = transport_kmap_first_data_page(se_cmd); + /* * If no struct se_session pointer is present, this struct se_cmd is * coming via a target_core_mod PASSTHROUGH op, and not through * a $FABRIC_MOD. In that case, report LUN=0 only. */ - if (!(se_sess)) { + if (!se_sess) { int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); lun_count = 1; goto done; } - spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_lock_irq(&se_sess->se_node_acl->device_list_lock); for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { - deve = &SE_NODE_ACL(se_sess)->device_list[i]; + deve = &se_sess->se_node_acl->device_list[i]; if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) continue; se_lun = deve->se_lun; @@ -700,12 +700,13 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) offset += 8; cdb_offset += 8; } - spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); /* * See SPC3 r07, page 159. */ done: + transport_kunmap_first_data_page(se_cmd); lun_count *= 8; buf[0] = ((lun_count >> 24) & 0xff); buf[1] = ((lun_count >> 16) & 0xff); @@ -744,26 +745,20 @@ void se_release_device_for_hba(struct se_device *dev) core_scsi3_free_all_registrations(dev); se_release_vpd_for_dev(dev); - kfree(dev->dev_status_queue_obj); - kfree(dev->dev_queue_obj); kfree(dev); - - return; } void se_release_vpd_for_dev(struct se_device *dev) { struct t10_vpd *vpd, *vpd_tmp; - spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); + spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); list_for_each_entry_safe(vpd, vpd_tmp, - &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { + &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) { list_del(&vpd->vpd_list); kfree(vpd); } - spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); - - return; + spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); } /* se_free_virtual_device(): @@ -822,12 +817,13 @@ static void se_dev_stop(struct se_device *dev) int se_dev_check_online(struct se_device *dev) { + unsigned long flags; int ret; - spin_lock_irq(&dev->dev_status_lock); + spin_lock_irqsave(&dev->dev_status_lock, flags); ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 
0 : 1; - spin_unlock_irq(&dev->dev_status_lock); + spin_unlock_irqrestore(&dev->dev_status_lock, flags); return ret; } @@ -849,59 +845,61 @@ void se_dev_set_default_attribs( { struct queue_limits *limits = &dev_limits->limits; - DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; - DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; - DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; - DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; - DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; - DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; - DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; - DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; - DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; - DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; - DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; + dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO; + dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; + dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; + dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; + dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; + dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS; + dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU; + dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS; + dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; + dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; + dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; + dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT; + dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; /* * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK * iblock_create_virtdevice() from struct queue_limits values * if blk_queue_discard()==1 */ - DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; - DEV_ATTRIB(dev)->max_unmap_block_desc_count = - DA_MAX_UNMAP_BLOCK_DESC_COUNT; - DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; - DEV_ATTRIB(dev)->unmap_granularity_alignment = + dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; + dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = + DA_MAX_UNMAP_BLOCK_DESC_COUNT; + dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; + dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; /* * block_size is based on subsystem plugin dependent requirements. */ - DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; - DEV_ATTRIB(dev)->block_size = limits->logical_block_size; + dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; + dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; /* * max_sectors is based on subsystem plugin dependent requirements. */ - DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; - DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; + dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; + dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; /* * Set optimal_sectors from max_sectors, which can be lowered via * configfs. */ - DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; + dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors; /* * queue_depth is based on subsystem plugin dependent requirements. 
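se_dev_set_default_attribs() above seeds both the fixed hw_* ceilings and the admin-tunable copies from the backend's struct queue_limits; configfs can later lower the tunables, but the hw_* values stay as reported by the backend. Condensed, with 'attrib' as shorthand for dev->se_sub_dev->se_dev_attrib:

	attrib->hw_block_size   = limits->logical_block_size;	/* fixed ceiling */
	attrib->block_size      = limits->logical_block_size;	/* tunable copy */
	attrib->hw_max_sectors  = limits->max_hw_sectors;
	attrib->max_sectors     = limits->max_sectors;
	attrib->optimal_sectors = limits->max_sectors;		/* may be lowered via configfs */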
*/ - DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; - DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; + dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth; + dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; } int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) { if (task_timeout > DA_TASK_TIMEOUT_MAX) { - printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" + pr_err("dev[%p]: Passed task_timeout: %u larger than" " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); - return -1; + return -EINVAL; } else { - DEV_ATTRIB(dev)->task_timeout = task_timeout; - printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", + dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; + pr_debug("dev[%p]: Set SE Device task_timeout: %u\n", dev, task_timeout); } @@ -912,9 +910,9 @@ int se_dev_set_max_unmap_lba_count( struct se_device *dev, u32 max_unmap_lba_count) { - DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; - printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", - dev, DEV_ATTRIB(dev)->max_unmap_lba_count); + dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; + pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n", + dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); return 0; } @@ -922,9 +920,10 @@ int se_dev_set_max_unmap_block_desc_count( struct se_device *dev, u32 max_unmap_block_desc_count) { - DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; - printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", - dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); + dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = + max_unmap_block_desc_count; + pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n", + dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); return 0; } @@ -932,9 +931,9 @@ int se_dev_set_unmap_granularity( struct se_device *dev, u32 unmap_granularity) { - DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; - printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", - dev, DEV_ATTRIB(dev)->unmap_granularity); + dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; + pr_debug("dev[%p]: Set unmap_granularity: %u\n", + dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); return 0; } @@ -942,109 +941,109 @@ int se_dev_set_unmap_granularity_alignment( struct se_device *dev, u32 unmap_granularity_alignment) { - DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; - printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", - dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); + dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; + pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n", + dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); return 0; } int se_dev_set_emulate_dpo(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + pr_err("Illegal value %d\n", flag); + return -EINVAL; } - if (TRANSPORT(dev)->dpo_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); - return -1; + if (dev->transport->dpo_emulated == NULL) { + pr_err("dev->transport->dpo_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); - return -1; + if (dev->transport->dpo_emulated(dev) == 0) { + 
pr_err("dev->transport->dpo_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_dpo = flag; - printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" - " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); + dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; + pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation" + " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); return 0; } int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + pr_err("Illegal value %d\n", flag); + return -EINVAL; } - if (TRANSPORT(dev)->fua_write_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); - return -1; + if (dev->transport->fua_write_emulated == NULL) { + pr_err("dev->transport->fua_write_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); - return -1; + if (dev->transport->fua_write_emulated(dev) == 0) { + pr_err("dev->transport->fua_write_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_fua_write = flag; - printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", - dev, DEV_ATTRIB(dev)->emulate_fua_write); + dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; + pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", + dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); return 0; } int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + pr_err("Illegal value %d\n", flag); + return -EINVAL; } - if (TRANSPORT(dev)->fua_read_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); - return -1; + if (dev->transport->fua_read_emulated == NULL) { + pr_err("dev->transport->fua_read_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); - return -1; + if (dev->transport->fua_read_emulated(dev) == 0) { + pr_err("dev->transport->fua_read_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_fua_read = flag; - printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", - dev, DEV_ATTRIB(dev)->emulate_fua_read); + dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; + pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n", + dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); return 0; } int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + pr_err("Illegal value %d\n", flag); + return -EINVAL; } - if (TRANSPORT(dev)->write_cache_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); - return -1; + if (dev->transport->write_cache_emulated == NULL) { + pr_err("dev->transport->write_cache_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); - return -1; + if (dev->transport->write_cache_emulated(dev) == 0) { + pr_err("dev->transport->write_cache_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_write_cache = flag; - printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", - dev, 
DEV_ATTRIB(dev)->emulate_write_cache); + dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; + pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", + dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); return 0; } int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1) && (flag != 2)) { - printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + pr_err("Illegal value %d\n", flag); + return -EINVAL; } if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device" + pr_err("dev[%p]: Unable to change SE Device" " UA_INTRLCK_CTRL while dev_export_obj: %d count" " exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; - printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", - dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); + dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; + pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", + dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); return 0; } @@ -1052,19 +1051,19 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) int se_dev_set_emulate_tas(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + pr_err("Illegal value %d\n", flag); + return -EINVAL; } if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" + pr_err("dev[%p]: Unable to change SE Device TAS while" " dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_tas = flag; - printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", - dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); + dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; + pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", + dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); return 0; } @@ -1072,20 +1071,20 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag) int se_dev_set_emulate_tpu(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + pr_err("Illegal value %d\n", flag); + return -EINVAL; } /* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). */ - if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { - printk(KERN_ERR "Generic Block Discard not supported\n"); + if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { + pr_err("Generic Block Discard not supported\n"); return -ENOSYS; } - DEV_ATTRIB(dev)->emulate_tpu = flag; - printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", + dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; + pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", dev, flag); return 0; } @@ -1093,20 +1092,20 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) int se_dev_set_emulate_tpws(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + pr_err("Illegal value %d\n", flag); + return -EINVAL; } /* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). 
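* (iblock_create_virtdevice() fills max_unmap_block_desc_count from the
* request_queue discard limits when blk_queue_discard(q) is true; see
* the IBLOCK hunk later in this diff.)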
*/ - if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { - printk(KERN_ERR "Generic Block Discard not supported\n"); + if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { + pr_err("Generic Block Discard not supported\n"); return -ENOSYS; } - DEV_ATTRIB(dev)->emulate_tpws = flag; - printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", + dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; + pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", dev, flag); return 0; } @@ -1114,12 +1113,36 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; + pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, + (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); + return 0; +} + +int se_dev_set_is_nonrot(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; + } + dev->se_sub_dev->se_dev_attrib.is_nonrot = flag; + pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", + dev, flag); + return 0; +} + +int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) +{ + if (flag != 0) { + printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted" + " reordering not implemented\n", dev); + return -ENOSYS; } - DEV_ATTRIB(dev)->enforce_pr_isids = flag; - printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, - (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); + dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag; + pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); return 0; } @@ -1131,44 +1154,44 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) u32 orig_queue_depth = dev->queue_depth; if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" + pr_err("dev[%p]: Unable to change SE Device TCQ while" " dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } - if (!(queue_depth)) { - printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" + if (!queue_depth) { + pr_err("dev[%p]: Illegal ZERO value for queue" "_depth\n", dev); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { - printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { + pr_err("dev[%p]: Passed queue_depth: %u" " exceeds TCM/SE_Device TCQ: %u\n", dev, queue_depth, - DEV_ATTRIB(dev)->hw_queue_depth); - return -1; + dev->se_sub_dev->se_dev_attrib.hw_queue_depth); + return -EINVAL; } } else { - if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { - if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { - printk(KERN_ERR "dev[%p]: Passed queue_depth:" + if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { + if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { + pr_err("dev[%p]: Passed queue_depth:" " %u exceeds TCM/SE_Device MAX" " TCQ: %u\n", dev, queue_depth, - DEV_ATTRIB(dev)->hw_queue_depth); - return -1; + dev->se_sub_dev->se_dev_attrib.hw_queue_depth); + return -EINVAL; } } } - 
DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; + dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; if (queue_depth > orig_queue_depth) atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); else if (queue_depth < orig_queue_depth) atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); - printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", + pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, queue_depth); return 0; } @@ -1178,50 +1201,50 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) int force = 0; /* Force setting for VDEVS */ if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device" + pr_err("dev[%p]: Unable to change SE Device" " max_sectors while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } - if (!(max_sectors)) { - printk(KERN_ERR "dev[%p]: Illegal ZERO value for" + if (!max_sectors) { + pr_err("dev[%p]: Illegal ZERO value for" " max_sectors\n", dev); - return -1; + return -EINVAL; } if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" + pr_err("dev[%p]: Passed max_sectors: %u less than" " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, DA_STATUS_MAX_SECTORS_MIN); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { + pr_err("dev[%p]: Passed max_sectors: %u" " greater than TCM/SE_Device max_sectors:" " %u\n", dev, max_sectors, - DEV_ATTRIB(dev)->hw_max_sectors); - return -1; + dev->se_sub_dev->se_dev_attrib.hw_max_sectors); + return -EINVAL; } } else { - if (!(force) && (max_sectors > - DEV_ATTRIB(dev)->hw_max_sectors)) { - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" + if (!force && (max_sectors > + dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { + pr_err("dev[%p]: Passed max_sectors: %u" " greater than TCM/SE_Device max_sectors" ": %u, use force=1 to override.\n", dev, - max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); - return -1; + max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); + return -EINVAL; } if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" + pr_err("dev[%p]: Passed max_sectors: %u" " greater than DA_STATUS_MAX_SECTORS_MAX:" " %u\n", dev, max_sectors, DA_STATUS_MAX_SECTORS_MAX); - return -1; + return -EINVAL; } } - DEV_ATTRIB(dev)->max_sectors = max_sectors; - printk("dev[%p]: SE Device max_sectors changed to %u\n", + dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; + pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", dev, max_sectors); return 0; } @@ -1229,25 +1252,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device" + pr_err("dev[%p]: Unable to change SE Device" " optimal_sectors while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - printk(KERN_ERR "dev[%p]: 
Passed optimal_sectors cannot be" + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + pr_err("dev[%p]: Passed optimal_sectors cannot be" " changed for TCM/pSCSI\n", dev); return -EINVAL; } - if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { - printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" + if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { + pr_err("dev[%p]: Passed optimal_sectors %u cannot be" " greater than max_sectors: %u\n", dev, - optimal_sectors, DEV_ATTRIB(dev)->max_sectors); + optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); return -EINVAL; } - DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; - printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", + dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; + pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", dev, optimal_sectors); return 0; } @@ -1255,31 +1278,31 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) int se_dev_set_block_size(struct se_device *dev, u32 block_size) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" + pr_err("dev[%p]: Unable to change SE Device block_size" " while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } if ((block_size != 512) && (block_size != 1024) && (block_size != 2048) && (block_size != 4096)) { - printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" + pr_err("dev[%p]: Illegal value for block_device: %u" " for SE device, must be 512, 1024, 2048 or 4096\n", dev, block_size); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + pr_err("dev[%p]: Not allowed to change block_size for" " Physical Device, use for Linux/SCSI to change" " block_size for underlying hardware\n", dev); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->block_size = block_size; - printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", + dev->se_sub_dev->se_dev_attrib.block_size = block_size; + pr_debug("dev[%p]: SE Device block_size changed to %u\n", dev, block_size); return 0; } @@ -1294,13 +1317,13 @@ struct se_lun *core_dev_add_lun( u32 lun_access = 0; if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { - printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", + pr_err("Unable to export struct se_device while dev_access_obj: %d\n", atomic_read(&dev->dev_access_obj.obj_access_count)); return NULL; } lun_p = core_tpg_pre_addlun(tpg, lun); - if ((IS_ERR(lun_p)) || !(lun_p)) + if ((IS_ERR(lun_p)) || !lun_p) return NULL; if (dev->dev_flags & DF_READ_ONLY) @@ -1311,15 +1334,15 @@ struct se_lun *core_dev_add_lun( if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) return NULL; - printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" - " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, - TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); + pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" + " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); /* * Update LUN maps for dynamically added initiators when * 
generate_node_acl is enabled. */ - if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { + if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { struct se_node_acl *acl; spin_lock_bh(&tpg->acl_node_lock); list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { @@ -1347,15 +1370,15 @@ int core_dev_del_lun( int ret = 0; lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); - if (!(lun)) + if (!lun) return ret; core_tpg_post_dellun(tpg, lun); - printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" - " device object\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, - TPG_TFO(tpg)->get_fabric_name()); + pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" + " device object\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name()); return 0; } @@ -1366,21 +1389,21 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l spin_lock(&tpg->tpg_lun_lock); if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS" "_PER_TPG-1: %u for Target Portal Group: %hu\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } lun = &tpg->tpg_lun_list[unpacked_lun]; if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { - printk(KERN_ERR "%s Logical Unit Number: %u is not free on" + pr_err("%s Logical Unit Number: %u is not free on" " Target Portal Group: %hu, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1399,21 +1422,21 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked spin_lock(&tpg->tpg_lun_lock); if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" "_TPG-1: %u for Target Portal Group: %hu\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } lun = &tpg->tpg_lun_list[unpacked_lun]; if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { - printk(KERN_ERR "%s Logical Unit Number: %u is not active on" + pr_err("%s Logical Unit Number: %u is not active on" " Target Portal Group: %hu, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1432,19 +1455,19 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( struct se_node_acl *nacl; if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { - printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", - TPG_TFO(tpg)->get_fabric_name()); + pr_err("%s InitiatorName exceeds maximum size.\n", + tpg->se_tpg_tfo->get_fabric_name()); *ret = -EOVERFLOW; return NULL; } nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); - if (!(nacl)) { + if (!nacl) { *ret = -EINVAL; return NULL; } lacl = 
kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); - if (!(lacl)) { - printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); + if (!lacl) { + pr_err("Unable to allocate memory for struct se_lun_acl.\n"); *ret = -ENOMEM; return NULL; } @@ -1467,16 +1490,16 @@ int core_dev_add_initiator_node_lun_acl( struct se_node_acl *nacl; lun = core_dev_get_lun(tpg, unpacked_lun); - if (!(lun)) { - printk(KERN_ERR "%s Logical Unit Number: %u is not active on" + if (!lun) { + pr_err("%s Logical Unit Number: %u is not active on" " Target Portal Group: %hu, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); return -EINVAL; } nacl = lacl->se_lun_nacl; - if (!(nacl)) + if (!nacl) return -EINVAL; if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && @@ -1495,9 +1518,9 @@ int core_dev_add_initiator_node_lun_acl( smp_mb__after_atomic_inc(); spin_unlock(&lun->lun_acl_lock); - printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " - " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, + pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " + " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", lacl->initiatorname); /* @@ -1520,7 +1543,7 @@ int core_dev_del_initiator_node_lun_acl( struct se_node_acl *nacl; nacl = lacl->se_lun_nacl; - if (!(nacl)) + if (!nacl) return -EINVAL; spin_lock(&lun->lun_acl_lock); @@ -1534,10 +1557,10 @@ int core_dev_del_initiator_node_lun_acl( lacl->se_lun = NULL; - printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" + pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for" " InitiatorNode: %s Mapped LUN: %u\n", - TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->initiatorname, lacl->mapped_lun); return 0; @@ -1547,10 +1570,10 @@ void core_dev_free_initiator_node_lun_acl( struct se_portal_group *tpg, struct se_lun_acl *lacl) { - printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" - " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), - TPG_TFO(tpg)->get_fabric_name(), + pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" + " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), + tpg->se_tpg_tfo->get_fabric_name(), lacl->initiatorname, lacl->mapped_lun); kfree(lacl); @@ -1565,64 +1588,64 @@ int core_dev_setup_virtual_lun0(void) char buf[16]; int ret; - hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); + hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); if (IS_ERR(hba)) return PTR_ERR(hba); - se_global->g_lun0_hba = hba; + lun0_hba = hba; t = hba->transport; se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); - if (!(se_dev)) { - printk(KERN_ERR "Unable to allocate memory for" + if (!se_dev) { + pr_err("Unable to allocate memory for" " struct se_subsystem_dev\n"); ret = -ENOMEM; goto out; } - INIT_LIST_HEAD(&se_dev->g_se_dev_list); + INIT_LIST_HEAD(&se_dev->se_dev_node); INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); - INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); - 
INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); - spin_lock_init(&se_dev->t10_reservation.registration_lock); - spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); + INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); + INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); + spin_lock_init(&se_dev->t10_pr.registration_lock); + spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); spin_lock_init(&se_dev->se_dev_lock); - se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; + se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; se_dev->t10_wwn.t10_sub_dev = se_dev; se_dev->t10_alua.t10_sub_dev = se_dev; se_dev->se_dev_attrib.da_sub_dev = se_dev; se_dev->se_dev_hba = hba; se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); - if (!(se_dev->se_dev_su_ptr)) { - printk(KERN_ERR "Unable to locate subsystem dependent pointer" + if (!se_dev->se_dev_su_ptr) { + pr_err("Unable to locate subsystem dependent pointer" " from allocate_virtdevice()\n"); ret = -ENOMEM; goto out; } - se_global->g_lun0_su_dev = se_dev; + lun0_su_dev = se_dev; memset(buf, 0, 16); sprintf(buf, "rd_pages=8"); t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); - if (!(dev) || IS_ERR(dev)) { - ret = -ENOMEM; + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); goto out; } se_dev->se_dev_ptr = dev; - se_global->g_lun0_dev = dev; + g_lun0_dev = dev; return 0; out: - se_global->g_lun0_su_dev = NULL; + lun0_su_dev = NULL; kfree(se_dev); - if (se_global->g_lun0_hba) { - core_delete_hba(se_global->g_lun0_hba); - se_global->g_lun0_hba = NULL; + if (lun0_hba) { + core_delete_hba(lun0_hba); + lun0_hba = NULL; } return ret; } @@ -1630,14 +1653,14 @@ out: void core_dev_release_virtual_lun0(void) { - struct se_hba *hba = se_global->g_lun0_hba; - struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev; + struct se_hba *hba = lun0_hba; + struct se_subsystem_dev *su_dev = lun0_su_dev; - if (!(hba)) + if (!hba) return; - if (se_global->g_lun0_dev) - se_free_virtual_device(se_global->g_lun0_dev, hba); + if (g_lun0_dev) + se_free_virtual_device(g_lun0_dev, hba); kfree(su_dev); core_delete_hba(hba); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 07ab5a3bb8e..f1654694f4e 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -60,7 +60,7 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) cit->ct_group_ops = _group_ops; \ cit->ct_attrs = _attrs; \ cit->ct_owner = tf->tf_module; \ - printk("Setup generic %s\n", __stringify(_name)); \ + pr_debug("Setup generic %s\n", __stringify(_name)); \ } /* Start of tfc_tpg_mappedlun_cit */ @@ -80,8 +80,8 @@ static int target_fabric_mappedlun_link( /* * Ensure that the source port exists */ - if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) { - printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep" + if (!lun->lun_sep || !lun->lun_sep->sep_tpg) { + pr_err("Source se_lun->lun_sep or lun->lun_sep->sep" "_tpg does not exist\n"); return -EINVAL; } @@ -96,12 +96,12 @@ static int target_fabric_mappedlun_link( * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT */ if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { - printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n", + pr_err("Illegal Initiator ACL SymLink outside of %s\n", 
config_item_name(wwn_ci)); return -EINVAL; } if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { - printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s" + pr_err("Illegal Initiator ACL Symlink outside of %s" " TPGT: %s\n", config_item_name(wwn_ci), config_item_name(tpg_ci)); return -EINVAL; @@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link( lun_access = deve->lun_flags; else lun_access = - (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect( + (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : TRANSPORT_LUNFLAGS_READ_WRITE; spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); @@ -147,7 +147,7 @@ static int target_fabric_mappedlun_unlink( /* * Determine if the underlying MappedLUN has already been released.. */ - if (!(deve->se_lun)) + if (!deve->se_lun) return 0; lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); @@ -202,9 +202,9 @@ static ssize_t target_fabric_mappedlun_store_write_protect( TRANSPORT_LUNFLAGS_READ_WRITE, lacl->se_lun_nacl); - printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" + pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" " Mapped LUN: %u Write Protect bit to %s\n", - TPG_TFO(se_tpg)->get_fabric_name(), + se_tpg->se_tpg_tfo->get_fabric_name(), lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); return count; @@ -327,14 +327,14 @@ static struct config_group *target_fabric_make_mappedlun( int ret = 0; acl_ci = &group->cg_item; - if (!(acl_ci)) { - printk(KERN_ERR "Unable to locatel acl_ci\n"); + if (!acl_ci) { + pr_err("Unable to locatel acl_ci\n"); return NULL; } buf = kzalloc(strlen(name) + 1, GFP_KERNEL); - if (!(buf)) { - printk(KERN_ERR "Unable to allocate memory for name buf\n"); + if (!buf) { + pr_err("Unable to allocate memory for name buf\n"); return ERR_PTR(-ENOMEM); } snprintf(buf, strlen(name) + 1, "%s", name); @@ -342,7 +342,7 @@ static struct config_group *target_fabric_make_mappedlun( * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. 
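* e.g. a mkdir of .../acls/$INITIATOR/lun_0 arrives here with
* name == "lun_0"; anything not beginning with "lun_" is rejected
* just below.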
*/ if (strstr(buf, "lun_") != buf) { - printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s" + pr_err("Unable to locate \"lun_\" from buf: %s" " name: %s\n", buf, name); ret = -EINVAL; goto out; @@ -358,7 +358,7 @@ static struct config_group *target_fabric_make_mappedlun( lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, config_item_name(acl_ci), &ret); - if (!(lacl)) { + if (!lacl) { ret = -EINVAL; goto out; } @@ -367,7 +367,7 @@ static struct config_group *target_fabric_make_mappedlun( lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!lacl_cg->default_groups) { - printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n"); + pr_err("Unable to allocate lacl_cg->default_groups\n"); ret = -ENOMEM; goto out; } @@ -379,11 +379,11 @@ static struct config_group *target_fabric_make_mappedlun( lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; lacl_cg->default_groups[1] = NULL; - ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; + ml_stat_grp = &lacl->ml_stat_grps.stat_group; ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, GFP_KERNEL); if (!ml_stat_grp->default_groups) { - printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n"); + pr_err("Unable to allocate ml_stat_grp->default_groups\n"); ret = -ENOMEM; goto out; } @@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun( struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; int i; - ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; + ml_stat_grp = &lacl->ml_stat_grps.stat_group; for (i = 0; ml_stat_grp->default_groups[i]; i++) { df_item = &ml_stat_grp->default_groups[i]->cg_item; ml_stat_grp->default_groups[i] = NULL; @@ -474,8 +474,8 @@ static struct config_group *target_fabric_make_nodeacl( struct se_node_acl *se_nacl; struct config_group *nacl_cg; - if (!(tf->tf_ops.fabric_make_nodeacl)) { - printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n"); + if (!tf->tf_ops.fabric_make_nodeacl) { + pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n"); return ERR_PTR(-ENOSYS); } @@ -572,13 +572,13 @@ static struct config_group *target_fabric_make_np( struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; struct se_tpg_np *se_tpg_np; - if (!(tf->tf_ops.fabric_make_np)) { - printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n"); + if (!tf->tf_ops.fabric_make_np) { + pr_err("tf->tf_ops.fabric_make_np is NULL\n"); return ERR_PTR(-ENOSYS); } se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); - if (!(se_tpg_np) || IS_ERR(se_tpg_np)) + if (!se_tpg_np || IS_ERR(se_tpg_np)) return ERR_PTR(-EINVAL); se_tpg_np->tpg_np_parent = se_tpg; @@ -627,10 +627,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp( struct se_lun *lun, char *page) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); @@ -641,10 +638,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( const char *page, size_t count) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); @@ -659,10 +653,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline( struct se_lun *lun, char *page) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_offline_bit(lun, page); @@ -673,10 +664,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline( const char 
*page, size_t count) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_offline_bit(lun, page, count); @@ -691,10 +679,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status( struct se_lun *lun, char *page) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_secondary_status(lun, page); @@ -705,10 +690,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status( const char *page, size_t count) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_secondary_status(lun, page, count); @@ -723,10 +705,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md( struct se_lun *lun, char *page) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_secondary_write_metadata(lun, page); @@ -737,10 +716,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md( const char *page, size_t count) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_secondary_write_metadata(lun, page, count); @@ -781,13 +757,13 @@ static int target_fabric_port_link( tf = se_tpg->se_tpg_wwn->wwn_tf; if (lun->lun_se_dev != NULL) { - printk(KERN_ERR "Port Symlink already exists\n"); + pr_err("Port Symlink already exists\n"); return -EEXIST; } dev = se_dev->se_dev_ptr; - if (!(dev)) { - printk(KERN_ERR "Unable to locate struct se_device pointer from" + if (!dev) { + pr_err("Unable to locate struct se_device pointer from" " %s\n", config_item_name(se_dev_ci)); ret = -ENODEV; goto out; @@ -795,8 +771,8 @@ static int target_fabric_port_link( lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, lun->unpacked_lun); - if ((IS_ERR(lun_p)) || !(lun_p)) { - printk(KERN_ERR "core_dev_add_lun() failed\n"); + if (IS_ERR(lun_p) || !lun_p) { + pr_err("core_dev_add_lun() failed\n"); ret = -EINVAL; goto out; } @@ -888,7 +864,7 @@ static struct config_group *target_fabric_make_lun( int errno; if (strstr(name, "lun_") != name) { - printk(KERN_ERR "Unable to locate \'_\" in" + pr_err("Unable to locate \'_\" in" " \"lun_$LUN_NUMBER\"\n"); return ERR_PTR(-EINVAL); } @@ -896,14 +872,14 @@ static struct config_group *target_fabric_make_lun( return ERR_PTR(-EINVAL); lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); - if (!(lun)) + if (!lun) return ERR_PTR(-EINVAL); lun_cg = &lun->lun_group; lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!lun_cg->default_groups) { - printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n"); + pr_err("Unable to allocate lun_cg->default_groups\n"); return ERR_PTR(-ENOMEM); } @@ -914,11 +890,11 @@ static struct config_group *target_fabric_make_lun( lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; lun_cg->default_groups[1] = NULL; - port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; + port_stat_grp = &lun->port_stat_grps.stat_group; port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, GFP_KERNEL); if (!port_stat_grp->default_groups) { - printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n"); + pr_err("Unable to allocate port_stat_grp->default_groups\n"); errno = -ENOMEM; goto out; } @@ -941,7 +917,7 @@ static void target_fabric_drop_lun( struct config_group *lun_cg, *port_stat_grp; int i; - port_stat_grp = 
&PORT_STAT_GRP(lun)->stat_group; + port_stat_grp = &lun->port_stat_grps.stat_group; for (i = 0; port_stat_grp->default_groups[i]; i++) { df_item = &port_stat_grp->default_groups[i]->cg_item; port_stat_grp->default_groups[i] = NULL; @@ -1031,13 +1007,13 @@ static struct config_group *target_fabric_make_tpg( struct target_fabric_configfs *tf = wwn->wwn_tf; struct se_portal_group *se_tpg; - if (!(tf->tf_ops.fabric_make_tpg)) { - printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n"); + if (!tf->tf_ops.fabric_make_tpg) { + pr_err("tf->tf_ops.fabric_make_tpg is NULL\n"); return ERR_PTR(-ENOSYS); } se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); - if (!(se_tpg) || IS_ERR(se_tpg)) + if (!se_tpg || IS_ERR(se_tpg)) return ERR_PTR(-EINVAL); /* * Setup default groups from pre-allocated se_tpg->tpg_default_groups @@ -1130,13 +1106,13 @@ static struct config_group *target_fabric_make_wwn( struct target_fabric_configfs, tf_group); struct se_wwn *wwn; - if (!(tf->tf_ops.fabric_make_wwn)) { - printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n"); + if (!tf->tf_ops.fabric_make_wwn) { + pr_err("tf->tf_ops.fabric_make_wwn is NULL\n"); return ERR_PTR(-ENOSYS); } wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); - if (!(wwn) || IS_ERR(wwn)) + if (!wwn || IS_ERR(wwn)) return ERR_PTR(-EINVAL); wwn->wwn_tf = tf; diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 1e193f32489..c4ea3a9a555 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -25,6 +25,7 @@ * ******************************************************************************/ +#include <linux/kernel.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/spinlock.h> @@ -61,9 +62,8 @@ u32 sas_get_pr_transport_id( int *format_code, unsigned char *buf) { - unsigned char binary, *ptr; - int i; - u32 off = 4; + unsigned char *ptr; + /* * Set PROTOCOL IDENTIFIER to 6h for SAS */ @@ -74,10 +74,8 @@ u32 sas_get_pr_transport_id( */ ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. 
prefix */ - for (i = 0; i < 16; i += 2) { - binary = transport_asciihex_to_binaryhex(&ptr[i]); - buf[off++] = binary; - } + hex2bin(&buf[4], ptr, 8); + /* * The SAS Transport ID is a hardcoded 24-byte length */ @@ -157,7 +155,7 @@ u32 fc_get_pr_transport_id( int *format_code, unsigned char *buf) { - unsigned char binary, *ptr; + unsigned char *ptr; int i; u32 off = 8; /* @@ -172,12 +170,11 @@ u32 fc_get_pr_transport_id( ptr = &se_nacl->initiatorname[0]; for (i = 0; i < 24; ) { - if (!(strncmp(&ptr[i], ":", 1))) { + if (!strncmp(&ptr[i], ":", 1)) { i++; continue; } - binary = transport_asciihex_to_binaryhex(&ptr[i]); - buf[off++] = binary; + hex2bin(&buf[off++], &ptr[i], 1); i += 2; } /* @@ -386,7 +383,7 @@ char *iscsi_parse_pr_out_transport_id( * Reserved */ if ((format_code != 0x00) && (format_code != 0x40)) { - printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" + pr_err("Illegal format code: 0x%02x for iSCSI" " Initiator Transport ID\n", format_code); return NULL; } @@ -406,7 +403,7 @@ char *iscsi_parse_pr_out_transport_id( tid_len += padding; if ((add_len + 4) != tid_len) { - printk(KERN_INFO "LIO-Target Extracted add_len: %hu " + pr_debug("LIO-Target Extracted add_len: %hu " "does not match calculated tid_len: %u," " using tid_len instead\n", add_len+4, tid_len); *out_tid_len = tid_len; @@ -420,8 +417,8 @@ char *iscsi_parse_pr_out_transport_id( */ if (format_code == 0x40) { p = strstr((char *)&buf[4], ",i,0x"); - if (!(p)) { - printk(KERN_ERR "Unable to locate \",i,0x\" seperator" + if (!p) { + pr_err("Unable to locate \",i,0x\" seperator" " for Initiator port identifier: %s\n", (char *)&buf[4]); return NULL; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 150c4305f38..bc1b33639b8 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -42,18 +42,6 @@ #include "target_core_file.h" -#if 1 -#define DEBUG_FD_CACHE(x...) printk(x) -#else -#define DEBUG_FD_CACHE(x...) -#endif - -#if 1 -#define DEBUG_FD_FUA(x...) printk(x) -#else -#define DEBUG_FD_FUA(x...) 
-#endif - static struct se_subsystem_api fileio_template; /* fd_attach_hba(): (Part of se_subsystem_api_t template) @@ -65,24 +53,21 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) struct fd_host *fd_host; fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); - if (!(fd_host)) { - printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); - return -1; + if (!fd_host) { + pr_err("Unable to allocate memory for struct fd_host\n"); + return -ENOMEM; } fd_host->fd_host_id = host_id; - atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH); - atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH); - hba->hba_ptr = (void *) fd_host; + hba->hba_ptr = fd_host; - printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" + pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" " Target Core Stack %s\n", hba->hba_id, FD_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" - " Target Core with TCQ Depth: %d MaxSectors: %u\n", - hba->hba_id, fd_host->fd_host_id, - atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS); + pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" + " MaxSectors: %u\n", + hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); return 0; } @@ -91,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba) { struct fd_host *fd_host = hba->hba_ptr; - printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" + pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" " Target Core\n", hba->hba_id, fd_host->fd_host_id); kfree(fd_host); @@ -104,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); - if (!(fd_dev)) { - printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n"); + if (!fd_dev) { + pr_err("Unable to allocate memory for struct fd_dev\n"); return NULL; } fd_dev->fd_host = fd_host; - printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name); + pr_debug("FILEIO: Allocated fd_dev for %p\n", name); return fd_dev; } @@ -144,7 +129,7 @@ static struct se_device *fd_create_virtdevice( set_fs(old_fs); if (IS_ERR(dev_p)) { - printk(KERN_ERR "getname(%s) failed: %lu\n", + pr_err("getname(%s) failed: %lu\n", fd_dev->fd_dev_name, IS_ERR(dev_p)); ret = PTR_ERR(dev_p); goto fail; @@ -167,12 +152,12 @@ static struct se_device *fd_create_virtdevice( file = filp_open(dev_p, flags, 0600); if (IS_ERR(file)) { - printk(KERN_ERR "filp_open(%s) failed\n", dev_p); + pr_err("filp_open(%s) failed\n", dev_p); ret = PTR_ERR(file); goto fail; } if (!file || !file->f_dentry) { - printk(KERN_ERR "filp_open(%s) failed\n", dev_p); + pr_err("filp_open(%s) failed\n", dev_p); goto fail; } fd_dev->fd_file = file; @@ -202,14 +187,14 @@ static struct se_device *fd_create_virtdevice( fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - fd_dev->fd_block_size); - printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct" + pr_debug("FILEIO: Using size: %llu bytes from struct" " block_device blocks: %llu logical_block_size: %d\n", fd_dev->fd_dev_size, div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), fd_dev->fd_block_size); } else { if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { - printk(KERN_ERR "FILEIO: Missing fd_dev_size=" + pr_err("FILEIO: Missing fd_dev_size=" " parameter, and no backing struct" " block_device\n"); goto fail; @@ -226,15 +211,15 @@ static struct se_device *fd_create_virtdevice( dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; 
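/* dev_limits bundles the queue_limits filled in above with the TCQ
* depths; transport_add_device_to_core_hba() hands them to
* se_dev_set_default_attribs() (see the target_core_device.c hunk
* earlier in this diff). */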
dev = transport_add_device_to_core_hba(hba, &fileio_template, - se_dev, dev_flags, (void *)fd_dev, + se_dev, dev_flags, fd_dev, &dev_limits, "FILEIO", FD_VERSION); - if (!(dev)) + if (!dev) goto fail; fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; fd_dev->fd_queue_depth = dev->queue_depth; - printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," + pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, fd_dev->fd_dev_name, fd_dev->fd_dev_size); @@ -272,45 +257,45 @@ static inline struct fd_request *FILE_REQ(struct se_task *task) static struct se_task * -fd_alloc_task(struct se_cmd *cmd) +fd_alloc_task(unsigned char *cdb) { struct fd_request *fd_req; fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); - if (!(fd_req)) { - printk(KERN_ERR "Unable to allocate struct fd_request\n"); + if (!fd_req) { + pr_err("Unable to allocate struct fd_request\n"); return NULL; } - fd_req->fd_dev = SE_DEV(cmd)->dev_ptr; - return &fd_req->fd_task; } static int fd_do_readv(struct se_task *task) { struct fd_request *req = FILE_REQ(task); - struct file *fd = req->fd_dev->fd_file; + struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; + struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; - loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); + loff_t pos = (task->task_lba * + task->se_dev->se_sub_dev->se_dev_attrib.block_size); int ret = 0, i; - iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); - if (!(iov)) { - printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); - return -1; + iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); + if (!iov) { + pr_err("Unable to allocate fd_do_readv iov[]\n"); + return -ENOMEM; } - for (i = 0; i < task->task_sg_num; i++) { + for (i = 0; i < task->task_sg_nents; i++) { iov[i].iov_len = sg[i].length; iov[i].iov_base = sg_virt(&sg[i]); } old_fs = get_fs(); set_fs(get_ds()); - ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos); + ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); set_fs(old_fs); kfree(iov); @@ -321,16 +306,16 @@ static int fd_do_readv(struct se_task *task) */ if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { if (ret < 0 || ret != task->task_size) { - printk(KERN_ERR "vfs_readv() returned %d," + pr_err("vfs_readv() returned %d," " expecting %d for S_ISBLK\n", ret, (int)task->task_size); - return -1; + return (ret < 0 ? 
ret : -EINVAL); } } else { if (ret < 0) { - printk(KERN_ERR "vfs_readv() returned %d for non" + pr_err("vfs_readv() returned %d for non" " S_ISBLK\n", ret); - return -1; + return ret; } } @@ -340,34 +325,36 @@ static int fd_do_readv(struct se_task *task) static int fd_do_writev(struct se_task *task) { struct fd_request *req = FILE_REQ(task); - struct file *fd = req->fd_dev->fd_file; + struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; + struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; - loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); + loff_t pos = (task->task_lba * + task->se_dev->se_sub_dev->se_dev_attrib.block_size); int ret, i = 0; - iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); - if (!(iov)) { - printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); - return -1; + iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); + if (!iov) { + pr_err("Unable to allocate fd_do_writev iov[]\n"); + return -ENOMEM; } - for (i = 0; i < task->task_sg_num; i++) { + for (i = 0; i < task->task_sg_nents; i++) { iov[i].iov_len = sg[i].length; iov[i].iov_base = sg_virt(&sg[i]); } old_fs = get_fs(); set_fs(get_ds()); - ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos); + ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); set_fs(old_fs); kfree(iov); if (ret < 0 || ret != task->task_size) { - printk(KERN_ERR "vfs_writev() returned %d\n", ret); - return -1; + pr_err("vfs_writev() returned %d\n", ret); + return (ret < 0 ? ret : -EINVAL); } return 1; @@ -375,10 +362,10 @@ static int fd_do_writev(struct se_task *task) static void fd_emulate_sync_cache(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; - int immed = (cmd->t_task->t_task_cdb[1] & 0x2); + int immed = (cmd->t_task_cdb[1] & 0x2); loff_t start, end; int ret; @@ -392,11 +379,11 @@ static void fd_emulate_sync_cache(struct se_task *task) /* * Determine if we will be flushing the entire device. 
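* SYNCHRONIZE_CACHE with LBA 0 and a zero transfer length means "flush
* all blocks"; otherwise the range starts at t_task_lba * block_size,
* e.g. with a 512-byte block_size, LBA 8 maps to byte offset 4096.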
*/ - if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) { + if (cmd->t_task_lba == 0 && cmd->data_length == 0) { start = 0; end = LLONG_MAX; } else { - start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size; + start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; if (cmd->data_length) end = start + cmd->data_length; else @@ -405,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task) ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); if (ret != 0) - printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); + pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); if (!immed) transport_complete_sync_cache(cmd, ret == 0); @@ -446,16 +433,16 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) { struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; - loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size; + loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; loff_t end = start + task->task_size; int ret; - DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", + pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", task->task_lba, task->task_size); ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); if (ret != 0) - printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); + pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); } static int fd_do_task(struct se_task *task) @@ -474,9 +461,9 @@ static int fd_do_task(struct se_task *task) ret = fd_do_writev(task); if (ret > 0 && - DEV_ATTRIB(dev)->emulate_write_cache > 0 && - DEV_ATTRIB(dev)->emulate_fua_write > 0 && - T_TASK(cmd)->t_tasks_fua) { + dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && + dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && + cmd->t_tasks_fua) { /* * We might need to be a bit smarter here * and return some sense data to let the initiator @@ -549,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params( snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, "%s", arg_p); kfree(arg_p); - printk(KERN_INFO "FILEIO: Referencing Path: %s\n", + pr_debug("FILEIO: Referencing Path: %s\n", fd_dev->fd_dev_name); fd_dev->fbd_flags |= FBDF_HAS_PATH; break; @@ -562,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params( ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); kfree(arg_p); if (ret < 0) { - printk(KERN_ERR "strict_strtoull() failed for" + pr_err("strict_strtoull() failed for" " fd_dev_size=\n"); goto out; } - printk(KERN_INFO "FILEIO: Referencing Size: %llu" + pr_debug("FILEIO: Referencing Size: %llu" " bytes\n", fd_dev->fd_dev_size); fd_dev->fbd_flags |= FBDF_HAS_SIZE; break; case Opt_fd_buffered_io: match_int(args, &arg); if (arg != 1) { - printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg); + pr_err("bogus fd_buffered_io=%d value\n", arg); ret = -EINVAL; goto out; } - printk(KERN_INFO "FILEIO: Using buffered I/O" + pr_debug("FILEIO: Using buffered I/O" " operations for struct fd_dev\n"); fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; @@ -598,8 +585,8 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { - printk(KERN_ERR "Missing fd_dev_name=\n"); - return -1; + pr_err("Missing fd_dev_name=\n"); + return -EINVAL; } return 0; @@ -654,7 +641,7 @@ static sector_t fd_get_blocks(struct se_device *dev) { struct fd_dev *fd_dev = dev->dev_ptr; unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, - DEV_ATTRIB(dev)->block_size); + 
dev->se_sub_dev->se_dev_attrib.block_size); return blocks_long; } diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index ef4de2b4bd4..daebd710b89 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -4,8 +4,6 @@ #define FD_VERSION "4.0" #define FD_MAX_DEV_NAME 256 -/* Maximum queuedepth for the FILEIO HBA */ -#define FD_HBA_QUEUE_DEPTH 256 #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 @@ -18,8 +16,6 @@ struct fd_request { struct se_task fd_task; /* SCSI CDB from iSCSI Command PDU */ unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; - /* FILEIO device */ - struct fd_dev *fd_dev; } ____cacheline_aligned; #define FBDF_HAS_PATH 0x01 diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 0b8f8da8901..0639b975d6f 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c @@ -1,7 +1,7 @@ /******************************************************************************* * Filename: target_core_hba.c * - * This file copntains the iSCSI HBA Transport related functions. + * This file contains the TCM HBA Transport related functions. * * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. * Copyright (c) 2005, 2006, 2007 SBE, Inc. @@ -45,6 +45,11 @@ static LIST_HEAD(subsystem_list); static DEFINE_MUTEX(subsystem_mutex); +static u32 hba_id_counter; + +static DEFINE_SPINLOCK(hba_lock); +static LIST_HEAD(hba_list); + int transport_subsystem_register(struct se_subsystem_api *sub_api) { struct se_subsystem_api *s; @@ -53,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api) mutex_lock(&subsystem_mutex); list_for_each_entry(s, &subsystem_list, sub_api_list) { - if (!(strcmp(s->name, sub_api->name))) { - printk(KERN_ERR "%p is already registered with" + if (!strcmp(s->name, sub_api->name)) { + pr_err("%p is already registered with" " duplicate name %s, unable to process" " request\n", s, s->name); mutex_unlock(&subsystem_mutex); @@ -64,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api) list_add_tail(&sub_api->sub_api_list, &subsystem_list); mutex_unlock(&subsystem_mutex); - printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" + pr_debug("TCM: Registered subsystem plugin: %s struct module:" " %p\n", sub_api->name, sub_api->owner); return 0; } @@ -104,21 +109,17 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) hba = kzalloc(sizeof(*hba), GFP_KERNEL); if (!hba) { - printk(KERN_ERR "Unable to allocate struct se_hba\n"); + pr_err("Unable to allocate struct se_hba\n"); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&hba->hba_dev_list); spin_lock_init(&hba->device_lock); - spin_lock_init(&hba->hba_queue_lock); mutex_init(&hba->hba_access_mutex); hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); hba->hba_flags |= hba_flags; - atomic_set(&hba->max_queue_depth, 0); - atomic_set(&hba->left_queue_depth, 0); - hba->transport = core_get_backend(plugin_name); if (!hba->transport) { ret = -EINVAL; @@ -129,12 +130,12 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) if (ret < 0) goto out_module_put; - spin_lock(&se_global->hba_lock); - hba->hba_id = se_global->g_hba_id_counter++; - list_add_tail(&hba->hba_list, &se_global->g_hba_list); - spin_unlock(&se_global->hba_lock); + spin_lock(&hba_lock); + hba->hba_id = hba_id_counter++; + list_add_tail(&hba->hba_node, &hba_list); + spin_unlock(&hba_lock); - printk(KERN_INFO "CORE_HBA[%d] 
- Attached HBA to Generic Target" + pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target" " Core\n", hba->hba_id); return hba; @@ -156,11 +157,11 @@ core_delete_hba(struct se_hba *hba) hba->transport->detach_hba(hba); - spin_lock(&se_global->hba_lock); - list_del(&hba->hba_list); - spin_unlock(&se_global->hba_lock); + spin_lock(&hba_lock); + list_del(&hba->hba_node); + spin_unlock(&hba_lock); - printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" + pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" " Core\n", hba->hba_id); if (hba->transport->owner) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 86639004af9..7e123410544 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -47,12 +47,6 @@ #include "target_core_iblock.h" -#if 0 -#define DEBUG_IBLOCK(x...) printk(x) -#else -#define DEBUG_IBLOCK(x...) -#endif - static struct se_subsystem_api iblock_template; static void iblock_bio_done(struct bio *, int); @@ -66,25 +60,22 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id) struct iblock_hba *ib_host; ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); - if (!(ib_host)) { - printk(KERN_ERR "Unable to allocate memory for" + if (!ib_host) { + pr_err("Unable to allocate memory for" " struct iblock_hba\n"); return -ENOMEM; } ib_host->iblock_host_id = host_id; - atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); - atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); - hba->hba_ptr = (void *) ib_host; + hba->hba_ptr = ib_host; - printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" + pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic" - " Target Core TCQ Depth: %d\n", hba->hba_id, - ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth)); + pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", + hba->hba_id, ib_host->iblock_host_id); return 0; } @@ -93,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba) { struct iblock_hba *ib_host = hba->hba_ptr; - printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" + pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" " Target Core\n", hba->hba_id, ib_host->iblock_host_id); kfree(ib_host); @@ -106,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) struct iblock_hba *ib_host = hba->hba_ptr; ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); - if (!(ib_dev)) { - printk(KERN_ERR "Unable to allocate struct iblock_dev\n"); + if (!ib_dev) { + pr_err("Unable to allocate struct iblock_dev\n"); return NULL; } ib_dev->ibd_host = ib_host; - printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name); + pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); return ib_dev; } @@ -131,8 +122,8 @@ static struct se_device *iblock_create_virtdevice( u32 dev_flags = 0; int ret = -EINVAL; - if (!(ib_dev)) { - printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); + if (!ib_dev) { + pr_err("Unable to locate struct iblock_dev parameter\n"); return ERR_PTR(ret); } memset(&dev_limits, 0, sizeof(struct se_dev_limits)); @@ -140,16 +131,16 @@ static struct se_device *iblock_create_virtdevice( * These settings need to be made tunable.. 
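* (namely the bioset_create() arguments just below: a pool of 32 bios
* with 64 bytes of front padding.)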
*/ ib_dev->ibd_bio_set = bioset_create(32, 64); - if (!(ib_dev->ibd_bio_set)) { - printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); + if (!ib_dev->ibd_bio_set) { + pr_err("IBLOCK: Unable to create bioset()\n"); return ERR_PTR(-ENOMEM); } - printk(KERN_INFO "IBLOCK: Created bio_set()\n"); + pr_debug("IBLOCK: Created bio_set()\n"); /* * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. */ - printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n", + pr_debug( "IBLOCK: Claiming struct block_device: %s\n", ib_dev->ibd_udev_path); bd = blkdev_get_by_path(ib_dev->ibd_udev_path, @@ -167,42 +158,41 @@ static struct se_device *iblock_create_virtdevice( limits->logical_block_size = bdev_logical_block_size(bd); limits->max_hw_sectors = queue_max_hw_sectors(q); limits->max_sectors = queue_max_sectors(q); - dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH; - dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH; + dev_limits.hw_queue_depth = q->nr_requests; + dev_limits.queue_depth = q->nr_requests; - ib_dev->ibd_major = MAJOR(bd->bd_dev); - ib_dev->ibd_minor = MINOR(bd->bd_dev); ib_dev->ibd_bd = bd; dev = transport_add_device_to_core_hba(hba, - &iblock_template, se_dev, dev_flags, (void *)ib_dev, + &iblock_template, se_dev, dev_flags, ib_dev, &dev_limits, "IBLOCK", IBLOCK_VERSION); - if (!(dev)) + if (!dev) goto failed; - ib_dev->ibd_depth = dev->queue_depth; - /* * Check if the underlying struct block_device request_queue supports * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM * in ATA and we need to set TPE=1 */ if (blk_queue_discard(q)) { - DEV_ATTRIB(dev)->max_unmap_lba_count = + dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = q->limits.max_discard_sectors; /* * Currently hardcoded to 1 in Linux/SCSI code.. 
*/ - DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1; - DEV_ATTRIB(dev)->unmap_granularity = + dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; + dev->se_sub_dev->se_dev_attrib.unmap_granularity = q->limits.discard_granularity; - DEV_ATTRIB(dev)->unmap_granularity_alignment = + dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = q->limits.discard_alignment; - printk(KERN_INFO "IBLOCK: BLOCK Discard support available," + pr_debug("IBLOCK: BLOCK Discard support available," " disabled by default\n"); } + if (blk_queue_nonrot(q)) + dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; + return dev; failed: @@ -211,8 +201,6 @@ failed: ib_dev->ibd_bio_set = NULL; } ib_dev->ibd_bd = NULL; - ib_dev->ibd_major = 0; - ib_dev->ibd_minor = 0; return ERR_PTR(ret); } @@ -233,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) } static struct se_task * -iblock_alloc_task(struct se_cmd *cmd) +iblock_alloc_task(unsigned char *cdb) { struct iblock_req *ib_req; ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); - if (!(ib_req)) { - printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n"); + if (!ib_req) { + pr_err("Unable to allocate memory for struct iblock_req\n"); return NULL; } - ib_req->ib_dev = SE_DEV(cmd)->dev_ptr; atomic_set(&ib_req->ib_bio_cnt, 0); return &ib_req->ib_task; } @@ -257,12 +244,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( bdev_logical_block_size(bd)) - 1); u32 block_size = bdev_logical_block_size(bd); - if (block_size == DEV_ATTRIB(dev)->block_size) + if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) return blocks_long; switch (block_size) { case 4096: - switch (DEV_ATTRIB(dev)->block_size) { + switch (dev->se_sub_dev->se_dev_attrib.block_size) { case 2048: blocks_long <<= 1; break; @@ -276,7 +263,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( } break; case 2048: - switch (DEV_ATTRIB(dev)->block_size) { + switch (dev->se_sub_dev->se_dev_attrib.block_size) { case 4096: blocks_long >>= 1; break; @@ -291,7 +278,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( } break; case 1024: - switch (DEV_ATTRIB(dev)->block_size) { + switch (dev->se_sub_dev->se_dev_attrib.block_size) { case 4096: blocks_long >>= 2; break; @@ -306,7 +293,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( } break; case 512: - switch (DEV_ATTRIB(dev)->block_size) { + switch (dev->se_sub_dev->se_dev_attrib.block_size) { case 4096: blocks_long >>= 3; break; @@ -332,9 +319,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( */ static void iblock_emulate_sync_cache(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; - int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2); + int immed = (cmd->t_task_cdb[1] & 0x2); sector_t error_sector; int ret; @@ -351,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task) */ ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); if (ret != 0) { - printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d " + pr_err("IBLOCK: block_issue_flush() failed: %d " " error_sector: %llu\n", ret, (unsigned long long)error_sector); } @@ -401,9 +388,9 @@ static int iblock_do_task(struct se_task *task) * Force data to disk if we pretend to not have a volatile * write cache, or the initiator set the Force Unit Access bit. 
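The comment above is the whole story of the iblock write-flag choice: submit with WRITE_FUA when the device claims no volatile write cache, or when FUA write emulation is enabled and the initiator set the FUA bit; otherwise plain WRITE. As a self-contained predicate (the helper itself is illustrative, the attribute and field names are the patch's):

	static inline int iblock_pick_write_flag(struct se_device *dev,
						 struct se_task *task)
	{
		struct se_dev_attrib *attrib = &dev->se_sub_dev->se_dev_attrib;

		if (attrib->emulate_write_cache == 0 ||
		    (attrib->emulate_fua_write > 0 &&
		     task->task_se_cmd->t_tasks_fua))
			return WRITE_FUA;	/* force data to stable storage */
		return WRITE;
	}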
*/ - if (DEV_ATTRIB(dev)->emulate_write_cache == 0 || - (DEV_ATTRIB(dev)->emulate_fua_write > 0 && - T_TASK(task->task_se_cmd)->t_tasks_fua)) + if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || + (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && + task->task_se_cmd->t_tasks_fua)) rw = WRITE_FUA; else rw = WRITE; @@ -415,8 +402,9 @@ static int iblock_do_task(struct se_task *task) while (bio) { nbio = bio->bi_next; bio->bi_next = NULL; - DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p" - " bio->bi_sector: %llu\n", task, bio, bio->bi_sector); + pr_debug("Calling submit_bio() task: %p bio: %p" + " bio->bi_sector: %llu\n", task, bio, + (unsigned long long)bio->bi_sector); submit_bio(rw, bio); bio = nbio; @@ -470,7 +458,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; char *orig, *ptr, *arg_p, *opts; substring_t args[MAX_OPT_ARGS]; - int ret = 0, arg, token; + int ret = 0, token; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -486,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, switch (token) { case Opt_udev_path: if (ib_dev->ibd_bd) { - printk(KERN_ERR "Unable to set udev_path= while" + pr_err("Unable to set udev_path= while" " ib_dev->ibd_bd exists\n"); ret = -EEXIST; goto out; @@ -499,15 +487,11 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, "%s", arg_p); kfree(arg_p); - printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", + pr_debug("IBLOCK: Referencing UDEV path: %s\n", ib_dev->ibd_udev_path); ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; break; case Opt_force: - match_int(args, &arg); - ib_dev->ibd_force = arg; - printk(KERN_INFO "IBLOCK: Set force=%d\n", - ib_dev->ibd_force); break; default: break; @@ -526,8 +510,8 @@ static ssize_t iblock_check_configfs_dev_params( struct iblock_dev *ibd = se_dev->se_dev_su_ptr; if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { - printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); - return -1; + pr_err("Missing udev_path= parameters for IBLOCK\n"); + return -EINVAL; } return 0; @@ -555,12 +539,11 @@ static ssize_t iblock_show_configfs_dev_params( bl += sprintf(b + bl, " "); if (bd) { bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", - ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ? + MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? 
"CLAIMED: IBLOCK" : "CLAIMED: OS"); } else { - bl += sprintf(b + bl, "Major: %d Minor: %d\n", - ibd->ibd_major, ibd->ibd_minor); + bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); } return bl; @@ -585,103 +568,103 @@ static struct bio *iblock_get_bio( struct bio *bio; bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); - if (!(bio)) { - printk(KERN_ERR "Unable to allocate memory for bio\n"); + if (!bio) { + pr_err("Unable to allocate memory for bio\n"); *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; return NULL; } - DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:" - " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set); - DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); + pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:" + " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set); + pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size); bio->bi_bdev = ib_dev->ibd_bd; - bio->bi_private = (void *) task; + bio->bi_private = task; bio->bi_destructor = iblock_bio_destructor; bio->bi_end_io = &iblock_bio_done; bio->bi_sector = lba; atomic_inc(&ib_req->ib_bio_cnt); - DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector); - DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n", + pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); + pr_debug("Set ib_req->ib_bio_cnt: %d\n", atomic_read(&ib_req->ib_bio_cnt)); return bio; } -static int iblock_map_task_SG(struct se_task *task) +static int iblock_map_data_SG(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct iblock_dev *ib_dev = task->se_dev->dev_ptr; struct iblock_req *ib_req = IBLOCK_REQ(task); struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; struct scatterlist *sg; int ret = 0; - u32 i, sg_num = task->task_sg_num; + u32 i, sg_num = task->task_sg_nents; sector_t block_lba; /* * Do starting conversion up from non 512-byte blocksize with * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. */ - if (DEV_ATTRIB(dev)->block_size == 4096) + if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) block_lba = (task->task_lba << 3); - else if (DEV_ATTRIB(dev)->block_size == 2048) + else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) block_lba = (task->task_lba << 2); - else if (DEV_ATTRIB(dev)->block_size == 1024) + else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) block_lba = (task->task_lba << 1); - else if (DEV_ATTRIB(dev)->block_size == 512) + else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) block_lba = task->task_lba; else { - printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" - " %u\n", DEV_ATTRIB(dev)->block_size); + pr_err("Unsupported SCSI -> BLOCK LBA conversion:" + " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); return PYX_TRANSPORT_LU_COMM_FAILURE; } bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); - if (!(bio)) + if (!bio) return ret; ib_req->ib_bio = bio; hbio = tbio = bio; /* * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist - * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory. + * from task->task_sg -> struct scatterlist memory. 
*/ - for_each_sg(task->task_sg, sg, task->task_sg_num, i) { - DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" + for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + pr_debug("task: %p bio: %p Calling bio_add_page(): page:" " %p len: %u offset: %u\n", task, bio, sg_page(sg), sg->length, sg->offset); again: ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); if (ret != sg->length) { - DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n", - bio->bi_sector); - DEBUG_IBLOCK("** task->task_size: %u\n", + pr_debug("*** Set bio->bi_sector: %llu\n", + (unsigned long long)bio->bi_sector); + pr_debug("** task->task_size: %u\n", task->task_size); - DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n", + pr_debug("*** bio->bi_max_vecs: %u\n", bio->bi_max_vecs); - DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n", + pr_debug("*** bio->bi_vcnt: %u\n", bio->bi_vcnt); bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); - if (!(bio)) + if (!bio) goto fail; tbio = tbio->bi_next = bio; - DEBUG_IBLOCK("-----------------> Added +1 bio: %p to" + pr_debug("-----------------> Added +1 bio: %p to" " list, Going to again\n", bio); goto again; } /* Always in 512 byte units for Linux/Block */ block_lba += sg->length >> IBLOCK_LBA_SHIFT; sg_num--; - DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented" + pr_debug("task: %p bio-add_page() passed!, decremented" " sg_num to %u\n", task, sg_num); - DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba" - " to %llu\n", task, block_lba); - DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:" + pr_debug("task: %p bio_add_page() passed!, increased lba" + " to %llu\n", task, (unsigned long long)block_lba); + pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:" " %u\n", task, bio->bi_vcnt); } @@ -727,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err) /* * Set -EIO if !BIO_UPTODATE and the passed is still err=0 */ - if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err)) + if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) err = -EIO; if (err != 0) { - printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p," + pr_err("test_bit(BIO_UPTODATE) failed for bio: %p," " err: %d\n", bio, err); /* * Bump the ib_bio_err_cnt and release bio. @@ -742,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err) /* * Wait to complete the task until the last bio as completed. */ - if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) + if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) return; ibr->ib_bio = NULL; transport_complete_task(task, 0); return; } - DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", - task, bio, task->task_lba, bio->bi_sector, err); + pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", + task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err); /* * bio_put() will call iblock_bio_destructor() to release the bio back * to ibr->ib_bio_set. @@ -759,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err) /* * Wait to complete the task until the last bio as completed. */ - if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) + if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) return; /* * Return GOOD status for task if zero ib_bio_err_cnt exists. 
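The iblock_bio_done() hunks above preserve the usual split-completion pattern: every bio issued for a task decrements a shared in-flight counter, and only the final decrement completes the task, with errors accumulated along the way. Reduced to a sketch (control flow as in the patch, error and success paths merged, surrounding declarations elided):

	/* In struct iblock_req: */
	atomic_t ib_bio_cnt;		/* bios still in flight */
	atomic_t ib_bio_err_cnt;	/* bios that ended in error */

	/* In the per-bio completion callback: */
	if (err)
		atomic_inc(&ibr->ib_bio_err_cnt);
	bio_put(bio);
	/* Anything but the last completion returns early... */
	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
		return;
	/* ...and the last one reports the task, GOOD iff no bio failed. */
	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));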
@@ -772,7 +755,7 @@ static struct se_subsystem_api iblock_template = { .name = "iblock", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, - .map_task_SG = iblock_map_task_SG, + .map_data_SG = iblock_map_data_SG, .attach_hba = iblock_attach_hba, .detach_hba = iblock_detach_hba, .allocate_virtdevice = iblock_allocate_virtdevice, diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 64c1f4d69f7..a121cd1b657 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -3,9 +3,6 @@ #define IBLOCK_VERSION "4.0" -#define IBLOCK_HBA_QUEUE_DEPTH 512 -#define IBLOCK_DEVICE_QUEUE_DEPTH 32 -#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128 #define IBLOCK_MAX_CDBS 16 #define IBLOCK_LBA_SHIFT 9 @@ -15,18 +12,12 @@ struct iblock_req { atomic_t ib_bio_cnt; atomic_t ib_bio_err_cnt; struct bio *ib_bio; - struct iblock_dev *ib_dev; } ____cacheline_aligned; #define IBDF_HAS_UDEV_PATH 0x01 -#define IBDF_HAS_FORCE 0x02 struct iblock_dev { unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; - int ibd_force; - int ibd_major; - int ibd_minor; - u32 ibd_depth; u32 ibd_flags; struct bio_set *ibd_bio_set; struct block_device *ibd_bd; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index b662db3a320..1c1b849cd4f 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -62,7 +62,7 @@ int core_pr_dump_initiator_port( char *buf, u32 size) { - if (!(pr_reg->isid_present_at_reg)) + if (!pr_reg->isid_present_at_reg) return 0; snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); @@ -95,7 +95,7 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) struct se_session *sess = cmd->se_sess; int ret; - if (!(sess)) + if (!sess) return 0; spin_lock(&dev->dev_reservation_lock); @@ -105,13 +105,13 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) } if (dev->dev_reserved_node_acl != sess->se_node_acl) { spin_unlock(&dev->dev_reservation_lock); - return -1; + return -EINVAL; } if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { spin_unlock(&dev->dev_reservation_lock); return 0; } - ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1; + ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 
0 : -EINVAL; spin_unlock(&dev->dev_reservation_lock); return ret; @@ -123,7 +123,7 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd) struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; - if (!(sess) || !(tpg)) + if (!sess || !tpg) return 0; spin_lock(&dev->dev_reservation_lock); @@ -142,9 +142,9 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd) dev->dev_res_bin_isid = 0; dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; } - printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" - " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(), - SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, + pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" + " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), + cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -157,9 +157,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; - if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) && - (T_TASK(cmd)->t_task_cdb[1] & 0x02)) { - printk(KERN_ERR "LongIO and Obselete Bits set, returning" + if ((cmd->t_task_cdb[1] & 0x01) && + (cmd->t_task_cdb[1] & 0x02)) { + pr_err("LongIO and Obselete Bits set, returning" " ILLEGAL_REQUEST\n"); return PYX_TRANSPORT_ILLEGAL_REQUEST; } @@ -167,19 +167,19 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) * This is currently the case for target_core_mod passthrough struct se_cmd * ops */ - if (!(sess) || !(tpg)) + if (!sess || !tpg) return 0; spin_lock(&dev->dev_reservation_lock); if (dev->dev_reserved_node_acl && (dev->dev_reserved_node_acl != sess->se_node_acl)) { - printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n", - TPG_TFO(tpg)->get_fabric_name()); - printk(KERN_ERR "Original reserver LUN: %u %s\n", - SE_LUN(cmd)->unpacked_lun, + pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", + tpg->se_tpg_tfo->get_fabric_name()); + pr_err("Original reserver LUN: %u %s\n", + cmd->se_lun->unpacked_lun, dev->dev_reserved_node_acl->initiatorname); - printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" - " from %s \n", SE_LUN(cmd)->unpacked_lun, + pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u" + " from %s \n", cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -192,9 +192,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) dev->dev_res_bin_isid = sess->sess_bin_isid; dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; } - printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" - " for %s\n", TPG_TFO(tpg)->get_fabric_name(), - SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, + pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" + " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), + cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -215,15 +215,15 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) struct se_session *se_sess = cmd->se_sess; struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation; - unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; - int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS); + struct t10_reservation *pr_tmpl = &su_dev->t10_pr; + unsigned char *cdb = 
&cmd->t_task_cdb[0]; + int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); int conflict = 0; - if (!(se_sess)) + if (!se_sess) return 0; - if (!(crh)) + if (!crh) goto after_crh; pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, @@ -280,7 +280,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) } if (conflict) { - printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE" + pr_err("Received legacy SPC-2 RESERVE/RELEASE" " while active SPC-3 registrations exist," " returning RESERVATION_CONFLICT\n"); return PYX_TRANSPORT_RESERVATION_CONFLICT; @@ -307,7 +307,7 @@ static int core_scsi3_pr_seq_non_holder( u32 pr_reg_type) { struct se_dev_entry *se_deve; - struct se_session *se_sess = SE_SESS(cmd); + struct se_session *se_sess = cmd->se_sess; int other_cdb = 0, ignore_reg; int registered_nexus = 0, ret = 1; /* Conflict by default */ int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ @@ -362,7 +362,7 @@ static int core_scsi3_pr_seq_non_holder( registered_nexus = 1; break; default: - return -1; + return -EINVAL; } /* * Referenced from spc4r17 table 45 for *NON* PR holder access @@ -412,9 +412,9 @@ static int core_scsi3_pr_seq_non_holder( ret = (registered_nexus) ? 0 : 1; break; default: - printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" + pr_err("Unknown PERSISTENT_RESERVE_OUT service" " action: 0x%02x\n", cdb[1] & 0x1f); - return -1; + return -EINVAL; } break; case RELEASE: @@ -459,9 +459,9 @@ static int core_scsi3_pr_seq_non_holder( ret = 0; /* Allowed */ break; default: - printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", + pr_err("Unknown MI Service Action: 0x%02x\n", (cdb[1] & 0x1f)); - return -1; + return -EINVAL; } break; case ACCESS_CONTROL_IN: @@ -481,9 +481,9 @@ static int core_scsi3_pr_seq_non_holder( * Case where the CDB is explicitly allowed in the above switch * statement. */ - if (!(ret) && !(other_cdb)) { + if (!ret && !other_cdb) { #if 0 - printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s" + pr_debug("Allowing explict CDB: 0x%02x for %s" " reservation holder\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); #endif @@ -498,7 +498,7 @@ static int core_scsi3_pr_seq_non_holder( /* * Conflict for write exclusive */ - printk(KERN_INFO "%s Conflict for unregistered nexus" + pr_debug("%s Conflict for unregistered nexus" " %s CDB: 0x%02x to %s reservation\n", transport_dump_cmd_direction(cmd), se_sess->se_node_acl->initiatorname, cdb[0], @@ -515,8 +515,8 @@ static int core_scsi3_pr_seq_non_holder( * nexuses to issue CDBs. */ #if 0 - if (!(registered_nexus)) { - printk(KERN_INFO "Allowing implict CDB: 0x%02x" + if (!registered_nexus) { + pr_debug("Allowing implict CDB: 0x%02x" " for %s reservation on unregistered" " nexus\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -531,14 +531,14 @@ static int core_scsi3_pr_seq_non_holder( * allow commands from registered nexuses. */ #if 0 - printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s" + pr_debug("Allowing implict CDB: 0x%02x for %s" " reservation\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); #endif return 0; } } - printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x" + pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" " for %s reservation\n", transport_dump_cmd_direction(cmd), (registered_nexus) ? 
"" : "un", se_sess->se_node_acl->initiatorname, cdb[0], @@ -549,7 +549,7 @@ static int core_scsi3_pr_seq_non_holder( static u32 core_scsi3_pr_generation(struct se_device *dev) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; u32 prg; /* * PRGeneration field shall contain the value of a 32-bit wrapping @@ -561,7 +561,7 @@ static u32 core_scsi3_pr_generation(struct se_device *dev) * See spc4r17 section 6.3.12 READ_KEYS service action */ spin_lock(&dev->dev_reservation_lock); - prg = T10_RES(su_dev)->pr_generation++; + prg = su_dev->t10_pr.pr_generation++; spin_unlock(&dev->dev_reservation_lock); return prg; @@ -575,7 +575,7 @@ static int core_scsi3_pr_reservation_check( struct se_session *sess = cmd->se_sess; int ret; - if (!(sess)) + if (!sess) return 0; /* * A legacy SPC-2 reservation is being held. @@ -584,7 +584,7 @@ static int core_scsi3_pr_reservation_check( return core_scsi2_reservation_check(cmd, pr_reg_type); spin_lock(&dev->dev_reservation_lock); - if (!(dev->dev_pr_res_holder)) { + if (!dev->dev_pr_res_holder) { spin_unlock(&dev->dev_reservation_lock); return 0; } @@ -592,14 +592,14 @@ static int core_scsi3_pr_reservation_check( cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) { spin_unlock(&dev->dev_reservation_lock); - return -1; + return -EINVAL; } - if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { + if (!dev->dev_pr_res_holder->isid_present_at_reg) { spin_unlock(&dev->dev_reservation_lock); return 0; } ret = (dev->dev_pr_res_holder->pr_reg_bin_isid == - sess->sess_bin_isid) ? 0 : -1; + sess->sess_bin_isid) ? 0 : -EINVAL; /* * Use bit in *pr_reg_type to notify ISID mismatch in * core_scsi3_pr_seq_non_holder(). @@ -620,19 +620,19 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( int all_tg_pt, int aptpl) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct t10_pr_registration *pr_reg; pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); - if (!(pr_reg)) { - printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); + if (!pr_reg) { + pr_err("Unable to allocate struct t10_pr_registration\n"); return NULL; } - pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len, + pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, GFP_ATOMIC); - if (!(pr_reg->pr_aptpl_buf)) { - printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); + if (!pr_reg->pr_aptpl_buf) { + pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n"); kmem_cache_free(t10_pr_reg_cache, pr_reg); return NULL; } @@ -692,12 +692,12 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( */ pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, sa_res_key, all_tg_pt, aptpl); - if (!(pr_reg)) + if (!pr_reg) return NULL; /* * Return pointer to pr_reg for ALL_TG_PT=0 */ - if (!(all_tg_pt)) + if (!all_tg_pt) return pr_reg; /* * Create list of matching SCSI Initiator Port registrations @@ -717,7 +717,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( * that have not been make explict via a ConfigFS * MappedLUN group for the SCSI Initiator Node ACL. 
*/ - if (!(deve_tmp->se_lun_acl)) + if (!deve_tmp->se_lun_acl) continue; nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; @@ -751,7 +751,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( */ ret = core_scsi3_lunacl_depend_item(deve_tmp); if (ret < 0) { - printk(KERN_ERR "core_scsi3_lunacl_depend" + pr_err("core_scsi3_lunacl_depend" "_item() failed\n"); atomic_dec(&port->sep_tg_pt_ref_cnt); smp_mb__after_atomic_dec(); @@ -769,7 +769,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( pr_reg_atp = __core_scsi3_do_alloc_registration(dev, nacl_tmp, deve_tmp, NULL, sa_res_key, all_tg_pt, aptpl); - if (!(pr_reg_atp)) { + if (!pr_reg_atp) { atomic_dec(&port->sep_tg_pt_ref_cnt); smp_mb__after_atomic_dec(); atomic_dec(&deve_tmp->pr_ref_count); @@ -803,7 +803,7 @@ out: } int core_scsi3_alloc_aptpl_registration( - struct t10_reservation_template *pr_tmpl, + struct t10_reservation *pr_tmpl, u64 sa_res_key, unsigned char *i_port, unsigned char *isid, @@ -817,15 +817,15 @@ int core_scsi3_alloc_aptpl_registration( { struct t10_pr_registration *pr_reg; - if (!(i_port) || !(t_port) || !(sa_res_key)) { - printk(KERN_ERR "Illegal parameters for APTPL registration\n"); - return -1; + if (!i_port || !t_port || !sa_res_key) { + pr_err("Illegal parameters for APTPL registration\n"); + return -EINVAL; } pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); - if (!(pr_reg)) { - printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); - return -1; + if (!pr_reg) { + pr_err("Unable to allocate struct t10_pr_registration\n"); + return -ENOMEM; } pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); @@ -869,7 +869,7 @@ int core_scsi3_alloc_aptpl_registration( pr_reg->pr_res_holder = res_holder; list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); - printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from" + pr_debug("SPC-3 PR APTPL Successfully added registration%s from" " metadata\n", (res_holder) ? "+reservation" : ""); return 0; } @@ -891,13 +891,13 @@ static void core_scsi3_aptpl_reserve( dev->dev_pr_res_holder = pr_reg; spin_unlock(&dev->dev_reservation_lock); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" + pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created" " new reservation holder TYPE: %s ALL_TG_PT: %d\n", - TPG_TFO(tpg)->get_fabric_name(), + tpg->se_tpg_tfo->get_fabric_name(), core_scsi3_pr_dump_type(pr_reg->pr_res_type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); - printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", - TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname, + pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", + tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, (prf_isid) ? 
&i_buf[0] : ""); } @@ -913,7 +913,7 @@ static int __core_scsi3_check_aptpl_registration( struct se_dev_entry *deve) { struct t10_pr_registration *pr_reg, *pr_reg_tmp; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; u16 tpgt; @@ -925,8 +925,8 @@ static int __core_scsi3_check_aptpl_registration( */ snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", - TPG_TFO(tpg)->tpg_get_wwn(tpg)); - tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); + tpg->se_tpg_tfo->tpg_get_wwn(tpg)); + tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); /* * Look for the matching registrations+reservation from those * created from APTPL metadata. Note that multiple registrations @@ -936,7 +936,7 @@ static int __core_scsi3_check_aptpl_registration( spin_lock(&pr_tmpl->aptpl_reg_lock); list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, pr_reg_aptpl_list) { - if (!(strcmp(pr_reg->pr_iport, i_port)) && + if (!strcmp(pr_reg->pr_iport, i_port) && (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && !(strcmp(pr_reg->pr_tport, t_port)) && (pr_reg->pr_reg_tpgt == tpgt) && @@ -980,11 +980,11 @@ int core_scsi3_check_aptpl_registration( struct se_lun *lun, struct se_lun_acl *lun_acl) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct se_node_acl *nacl = lun_acl->se_lun_nacl; struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; return __core_scsi3_check_aptpl_registration(dev, tpg, lun, @@ -1006,19 +1006,19 @@ static void __core_scsi3_dump_registration( prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator" + pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator" " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? "_AND_MOVE" : (register_type == 1) ? "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, (prf_isid) ? i_buf : ""); - printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", + pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg)); - printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" + pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" " Port(s)\n", tfo->get_fabric_name(), (pr_reg->pr_reg_all_tg_pt) ? 
"ALL" : "SINGLE", - TRANSPORT(dev)->name); - printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" + dev->transport->name); + pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), pr_reg->pr_res_key, pr_reg->pr_res_generation, pr_reg->pr_reg_aptpl); @@ -1035,10 +1035,10 @@ static void __core_scsi3_add_registration( int register_type, int register_move) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; /* * Increment PRgeneration counter for struct se_device upon a successful @@ -1050,7 +1050,7 @@ static void __core_scsi3_add_registration( * for the REGISTER. */ pr_reg->pr_res_generation = (register_move) ? - T10_RES(su_dev)->pr_generation++ : + su_dev->t10_pr.pr_generation++ : core_scsi3_pr_generation(dev); spin_lock(&pr_tmpl->registration_lock); @@ -1062,7 +1062,7 @@ static void __core_scsi3_add_registration( /* * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. */ - if (!(pr_reg->pr_reg_all_tg_pt) || (register_move)) + if (!pr_reg->pr_reg_all_tg_pt || register_move) return; /* * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 @@ -1106,8 +1106,8 @@ static int core_scsi3_alloc_registration( pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, sa_res_key, all_tg_pt, aptpl); - if (!(pr_reg)) - return -1; + if (!pr_reg) + return -EPERM; __core_scsi3_add_registration(dev, nacl, pr_reg, register_type, register_move); @@ -1119,7 +1119,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( struct se_node_acl *nacl, unsigned char *isid) { - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; struct t10_pr_registration *pr_reg, *pr_reg_tmp; struct se_portal_group *tpg; @@ -1137,14 +1137,14 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( * If this registration does NOT contain a fabric provided * ISID, then we have found a match. */ - if (!(pr_reg->isid_present_at_reg)) { + if (!pr_reg->isid_present_at_reg) { /* * Determine if this SCSI device server requires that * SCSI Intiatior TransportID w/ ISIDs is enforced * for fabric modules (iSCSI) requiring them. */ - if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { - if (DEV_ATTRIB(dev)->enforce_pr_isids) + if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { + if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) continue; } atomic_inc(&pr_reg->pr_res_holders); @@ -1157,7 +1157,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( * SCSI Initiator Port TransportIDs, then we expect a valid * matching ISID to be provided by the local SCSI Initiator Port. 
*/ - if (!(isid)) + if (!isid) continue; if (strcmp(isid, pr_reg->pr_reg_isid)) continue; @@ -1180,9 +1180,9 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( struct se_portal_group *tpg = nacl->se_tpg; unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; - if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { + if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { memset(&buf[0], 0, PR_REG_ISID_LEN); - TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0], + tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0], PR_REG_ISID_LEN); isid_ptr = &buf[0]; } @@ -1206,7 +1206,7 @@ static int core_scsi3_check_implict_release( spin_lock(&dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; - if (!(pr_res_holder)) { + if (!pr_res_holder) { spin_unlock(&dev->dev_reservation_lock); return ret; } @@ -1236,11 +1236,11 @@ static int core_scsi3_check_implict_release( (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, pr_reg->pr_reg_nacl->initiatorname)) && (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { - printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1" + pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1" " UNREGISTER while existing reservation with matching" " key 0x%016Lx is present from another SCSI Initiator" " Port\n", pr_reg->pr_res_key); - ret = -1; + ret = -EPERM; } spin_unlock(&dev->dev_reservation_lock); @@ -1248,7 +1248,7 @@ static int core_scsi3_check_implict_release( } /* - * Called with struct t10_reservation_template->registration_lock held. + * Called with struct t10_reservation->registration_lock held. */ static void __core_scsi3_free_registration( struct se_device *dev, @@ -1258,7 +1258,7 @@ static void __core_scsi3_free_registration( { struct target_core_fabric_ops *tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; char i_buf[PR_REG_ISID_ID_LEN]; int prf_isid; @@ -1283,25 +1283,25 @@ static void __core_scsi3_free_registration( */ while (atomic_read(&pr_reg->pr_res_holders) != 0) { spin_unlock(&pr_tmpl->registration_lock); - printk("SPC-3 PR [%s] waiting for pr_res_holders\n", + pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n", tfo->get_fabric_name()); cpu_relax(); spin_lock(&pr_tmpl->registration_lock); } - printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator" + pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator" " Node: %s%s\n", tfo->get_fabric_name(), pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); - printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" + pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" " Port(s)\n", tfo->get_fabric_name(), (pr_reg->pr_reg_all_tg_pt) ? 
"ALL" : "SINGLE", - TRANSPORT(dev)->name); - printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" + dev->transport->name); + pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, pr_reg->pr_res_generation); - if (!(preempt_and_abort_list)) { + if (!preempt_and_abort_list) { pr_reg->pr_reg_deve = NULL; pr_reg->pr_reg_nacl = NULL; kfree(pr_reg->pr_aptpl_buf); @@ -1319,7 +1319,7 @@ void core_scsi3_free_pr_reg_from_nacl( struct se_device *dev, struct se_node_acl *nacl) { - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; /* * If the passed se_node_acl matches the reservation holder, @@ -1349,7 +1349,7 @@ void core_scsi3_free_pr_reg_from_nacl( void core_scsi3_free_all_registrations( struct se_device *dev) { - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; spin_lock(&dev->dev_reservation_lock); @@ -1381,13 +1381,13 @@ void core_scsi3_free_all_registrations( static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) { - return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, + return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, &tpg->tpg_group.cg_item); } static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) { - configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, + configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, &tpg->tpg_group.cg_item); atomic_dec(&tpg->tpg_pr_ref_count); @@ -1401,7 +1401,7 @@ static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) if (nacl->dynamic_node_acl) return 0; - return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, + return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, &nacl->acl_group.cg_item); } @@ -1415,7 +1415,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) return; } - configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, + configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, &nacl->acl_group.cg_item); atomic_dec(&nacl->acl_pr_ref_count); @@ -1430,13 +1430,13 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) /* * For nacl->dynamic_node_acl=1 */ - if (!(lun_acl)) + if (!lun_acl) return 0; nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; - return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, + return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, &lun_acl->se_lun_group.cg_item); } @@ -1448,7 +1448,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) /* * For nacl->dynamic_node_acl=1 */ - if (!(lun_acl)) { + if (!lun_acl) { atomic_dec(&se_deve->pr_ref_count); smp_mb__after_atomic_dec(); return; @@ -1456,7 +1456,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; - configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, + configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, &lun_acl->se_lun_group.cg_item); atomic_dec(&se_deve->pr_ref_count); @@ -1471,10 +1471,10 @@ static int core_scsi3_decode_spec_i_port( int all_tg_pt, int aptpl) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_port *tmp_port; struct se_portal_group *dest_tpg = NULL, *tmp_tpg; - struct se_session *se_sess = SE_SESS(cmd); + struct se_session *se_sess = cmd->se_sess; struct se_node_acl *dest_node_acl = NULL; 
struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; @@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port( struct list_head tid_dest_list; struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; struct target_core_fabric_ops *tmp_tf_ops; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *buf; unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tpdl, tid_len = 0; @@ -1500,8 +1500,8 @@ static int core_scsi3_decode_spec_i_port( * processing in the loop of tid_dest_list below. */ tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); - if (!(tidh_new)) { - printk(KERN_ERR "Unable to allocate tidh_new\n"); + if (!tidh_new) { + pr_err("Unable to allocate tidh_new\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } INIT_LIST_HEAD(&tidh_new->dest_list); @@ -1509,10 +1509,10 @@ static int core_scsi3_decode_spec_i_port( tidh_new->dest_node_acl = se_sess->se_node_acl; tidh_new->dest_se_deve = local_se_deve; - local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), + local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, local_se_deve, l_isid, sa_res_key, all_tg_pt, aptpl); - if (!(local_pr_reg)) { + if (!local_pr_reg) { kfree(tidh_new); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -1524,6 +1524,8 @@ static int core_scsi3_decode_spec_i_port( */ tidh_new->dest_local_nexus = 1; list_add_tail(&tidh_new->dest_list, &tid_dest_list); + + buf = transport_kmap_first_data_page(cmd); /* * For a PERSISTENT RESERVE OUT specify initiator ports payload, * first extract TransportID Parameter Data Length, and make sure @@ -1535,7 +1537,7 @@ static int core_scsi3_decode_spec_i_port( tpdl |= buf[27] & 0xff; if ((tpdl + 28) != cmd->data_length) { - printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header" + pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" " does not equal CDB data_length: %u\n", tpdl, cmd->data_length); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; @@ -1555,13 +1557,13 @@ static int core_scsi3_decode_spec_i_port( spin_lock(&dev->se_port_lock); list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { tmp_tpg = tmp_port->sep_tpg; - if (!(tmp_tpg)) + if (!tmp_tpg) continue; - tmp_tf_ops = TPG_TFO(tmp_tpg); - if (!(tmp_tf_ops)) + tmp_tf_ops = tmp_tpg->se_tpg_tfo; + if (!tmp_tf_ops) continue; - if (!(tmp_tf_ops->get_fabric_proto_ident) || - !(tmp_tf_ops->tpg_parse_pr_out_transport_id)) + if (!tmp_tf_ops->get_fabric_proto_ident || + !tmp_tf_ops->tpg_parse_pr_out_transport_id) continue; /* * Look for the matching proto_ident provided by @@ -1575,7 +1577,7 @@ static int core_scsi3_decode_spec_i_port( i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( tmp_tpg, (const char *)ptr, &tid_len, &iport_ptr); - if (!(i_str)) + if (!i_str) continue; atomic_inc(&tmp_tpg->tpg_pr_ref_count); @@ -1584,7 +1586,7 @@ static int core_scsi3_decode_spec_i_port( ret = core_scsi3_tpg_depend_item(tmp_tpg); if (ret != 0) { - printk(KERN_ERR " core_scsi3_tpg_depend_item()" + pr_err(" core_scsi3_tpg_depend_item()" " for tmp_tpg\n"); atomic_dec(&tmp_tpg->tpg_pr_ref_count); smp_mb__after_atomic_dec(); @@ -1605,7 +1607,7 @@ static int core_scsi3_decode_spec_i_port( } spin_unlock_bh(&tmp_tpg->acl_node_lock); - if (!(dest_node_acl)) { + if (!dest_node_acl) { core_scsi3_tpg_undepend_item(tmp_tpg); spin_lock(&dev->se_port_lock); continue; @@ -1613,7 +1615,7 @@ static int 
core_scsi3_decode_spec_i_port( ret = core_scsi3_nodeacl_depend_item(dest_node_acl); if (ret != 0) { - printk(KERN_ERR "configfs_depend_item() failed" + pr_err("configfs_depend_item() failed" " for dest_node_acl->acl_group\n"); atomic_dec(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_dec(); @@ -1623,9 +1625,9 @@ static int core_scsi3_decode_spec_i_port( } dest_tpg = tmp_tpg; - printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" + pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:" " %s Port RTPI: %hu\n", - TPG_TFO(dest_tpg)->get_fabric_name(), + dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, dest_rtpi); spin_lock(&dev->se_port_lock); @@ -1633,20 +1635,20 @@ static int core_scsi3_decode_spec_i_port( } spin_unlock(&dev->se_port_lock); - if (!(dest_tpg)) { - printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate" + if (!dest_tpg) { + pr_err("SPC-3 PR SPEC_I_PT: Unable to locate" " dest_tpg\n"); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; goto out; } #if 0 - printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" + pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" " tid_len: %d for %s + %s\n", - TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length, + dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, tpdl, tid_len, i_str, iport_ptr); #endif if (tid_len > tpdl) { - printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:" + pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:" " %u for Transport ID: %s\n", tid_len, ptr); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); @@ -1660,10 +1662,10 @@ static int core_scsi3_decode_spec_i_port( */ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, dest_rtpi); - if (!(dest_se_deve)) { - printk(KERN_ERR "Unable to locate %s dest_se_deve" + if (!dest_se_deve) { + pr_err("Unable to locate %s dest_se_deve" " from destination RTPI: %hu\n", - TPG_TFO(dest_tpg)->get_fabric_name(), + dest_tpg->se_tpg_tfo->get_fabric_name(), dest_rtpi); core_scsi3_nodeacl_undepend_item(dest_node_acl); @@ -1674,7 +1676,7 @@ static int core_scsi3_decode_spec_i_port( ret = core_scsi3_lunacl_depend_item(dest_se_deve); if (ret < 0) { - printk(KERN_ERR "core_scsi3_lunacl_depend_item()" + pr_err("core_scsi3_lunacl_depend_item()" " failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); smp_mb__after_atomic_dec(); @@ -1684,9 +1686,9 @@ static int core_scsi3_decode_spec_i_port( goto out; } #if 0 - printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s" + pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" " dest_se_deve mapped_lun: %u\n", - TPG_TFO(dest_tpg)->get_fabric_name(), + dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, dest_se_deve->mapped_lun); #endif /* @@ -1712,8 +1714,8 @@ static int core_scsi3_decode_spec_i_port( */ tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); - if (!(tidh_new)) { - printk(KERN_ERR "Unable to allocate tidh_new\n"); + if (!tidh_new) { + pr_err("Unable to allocate tidh_new\n"); core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); @@ -1741,10 +1743,10 @@ static int core_scsi3_decode_spec_i_port( * and then call __core_scsi3_add_registration() in the * 2nd loop which will never fail. 
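The comment above describes a deliberate two-phase structure in core_scsi3_decode_spec_i_port(): the first loop does every step that can fail (TransportID parsing, configfs dependency grabs, registration allocation) and parks the results on tid_dest_list; the second loop only calls __core_scsi3_add_registration(), which cannot fail, so an error partway through never leaves a half-committed registration set. The shape of the idiom, abstracted with hypothetical prepare/commit/undo helpers:

	int i, err;

	for (i = 0; i < n; i++) {
		err = prepare(&items[i]);	/* each step may fail */
		if (err)
			goto unwind;
	}
	for (i = 0; i < n; i++)
		commit(&items[i]);		/* must not fail */
	return 0;
unwind:
	while (i--)
		undo(&items[i]);		/* release in reverse order */
	return err;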
*/ - dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), + dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, all_tg_pt, aptpl); - if (!(dest_pr_reg)) { + if (!dest_pr_reg) { core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); @@ -1760,6 +1762,9 @@ static int core_scsi3_decode_spec_i_port( tid_len = 0; } + + transport_kunmap_first_data_page(cmd); + /* * Go ahead and create a registrations from tid_dest_list for the * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl @@ -1787,12 +1792,12 @@ static int core_scsi3_decode_spec_i_port( prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); - __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl, + __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, dest_pr_reg, 0, 0); - printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" + pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully" " registered Transport ID for Node: %s%s Mapped LUN:" - " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(), + " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, (prf_isid) ? &i_buf[0] : "", dest_se_deve->mapped_lun); @@ -1806,6 +1811,7 @@ static int core_scsi3_decode_spec_i_port( return 0; out: + transport_kunmap_first_data_page(cmd); /* * For the failure case, release everything from tid_dest_list * including *dest_pr_reg and the configfs dependances.. @@ -1855,7 +1861,7 @@ static int __core_scsi3_update_aptpl_buf( { struct se_lun *lun; struct se_portal_group *tpg; - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct t10_pr_registration *pr_reg; unsigned char tmp[512], isid_buf[32]; ssize_t len = 0; @@ -1873,8 +1879,8 @@ static int __core_scsi3_update_aptpl_buf( /* * Walk the registration list.. 
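The registration walk that follows builds the APTPL text with a guarded append: each record is formatted into a fixed 512-byte scratch buffer, checked against the remaining space, and only then copied in, returning -EMSGSIZE on overflow instead of truncating silently. The pattern on its own (illustrative helper, not from the patch):

	static int aptpl_append(char *buf, u32 buf_len, ssize_t *len,
				const char *tmp)
	{
		if (*len + strlen(tmp) >= buf_len)
			return -EMSGSIZE;	/* refuse to overflow */
		*len += sprintf(buf + *len, "%s", tmp);
		return 0;
	}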
*/ - spin_lock(&T10_RES(su_dev)->registration_lock); - list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, + spin_lock(&su_dev->t10_pr.registration_lock); + list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, pr_reg_list) { tmp[0] = '\0'; @@ -1900,7 +1906,7 @@ static int __core_scsi3_update_aptpl_buf( "res_holder=1\nres_type=%02x\n" "res_scope=%02x\nres_all_tg_pt=%d\n" "mapped_lun=%u\n", reg_count, - TPG_TFO(tpg)->get_fabric_name(), + tpg->se_tpg_tfo->get_fabric_name(), pr_reg->pr_reg_nacl->initiatorname, isid_buf, pr_reg->pr_res_key, pr_reg->pr_res_type, pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, @@ -1910,17 +1916,17 @@ static int __core_scsi3_update_aptpl_buf( "initiator_fabric=%s\ninitiator_node=%s\n%s" "sa_res_key=%llu\nres_holder=0\n" "res_all_tg_pt=%d\nmapped_lun=%u\n", - reg_count, TPG_TFO(tpg)->get_fabric_name(), + reg_count, tpg->se_tpg_tfo->get_fabric_name(), pr_reg->pr_reg_nacl->initiatorname, isid_buf, pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, pr_reg->pr_res_mapped_lun); } if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { - printk(KERN_ERR "Unable to update renaming" + pr_err("Unable to update renaming" " APTPL metadata\n"); - spin_unlock(&T10_RES(su_dev)->registration_lock); - return -1; + spin_unlock(&su_dev->t10_pr.registration_lock); + return -EMSGSIZE; } len += sprintf(buf+len, "%s", tmp); @@ -1929,23 +1935,23 @@ static int __core_scsi3_update_aptpl_buf( */ snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" - " %d\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_wwn(tpg), - TPG_TFO(tpg)->tpg_get_tag(tpg), + " %d\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { - printk(KERN_ERR "Unable to update renaming" + pr_err("Unable to update renaming" " APTPL metadata\n"); - spin_unlock(&T10_RES(su_dev)->registration_lock); - return -1; + spin_unlock(&su_dev->t10_pr.registration_lock); + return -EMSGSIZE; } len += sprintf(buf+len, "%s", tmp); reg_count++; } - spin_unlock(&T10_RES(su_dev)->registration_lock); + spin_unlock(&su_dev->t10_pr.registration_lock); - if (!(reg_count)) + if (!reg_count) len += sprintf(buf+len, "No Registrations or Reservations"); return 0; @@ -1975,7 +1981,7 @@ static int __core_scsi3_write_aptpl_to_file( unsigned char *buf, u32 pr_aptpl_buf_len) { - struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn; + struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; struct file *file; struct iovec iov[1]; mm_segment_t old_fs; @@ -1987,21 +1993,21 @@ static int __core_scsi3_write_aptpl_to_file( memset(path, 0, 512); if (strlen(&wwn->unit_serial[0]) >= 512) { - printk(KERN_ERR "WWN value for struct se_device does not fit" + pr_err("WWN value for struct se_device does not fit" " into path buffer\n"); - return -1; + return -EMSGSIZE; } snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); file = filp_open(path, flags, 0600); if (IS_ERR(file) || !file || !file->f_dentry) { - printk(KERN_ERR "filp_open(%s) for APTPL metadata" + pr_err("filp_open(%s) for APTPL metadata" " failed\n", path); - return -1; + return (PTR_ERR(file) < 0 ? 
PTR_ERR(file) : -ENOENT); } iov[0].iov_base = &buf[0]; - if (!(pr_aptpl_buf_len)) + if (!pr_aptpl_buf_len) iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ else iov[0].iov_len = pr_aptpl_buf_len; @@ -2012,9 +2018,9 @@ static int __core_scsi3_write_aptpl_to_file( set_fs(old_fs); if (ret < 0) { - printk("Error writing APTPL metadata file: %s\n", path); + pr_debug("Error writing APTPL metadata file: %s\n", path); filp_close(file, NULL); - return -1; + return -EIO; } filp_close(file, NULL); @@ -2032,7 +2038,7 @@ static int core_scsi3_update_and_write_aptpl( /* * Can be called with a NULL pointer from PROUT service action CLEAR */ - if (!(in_buf)) { + if (!in_buf) { memset(null_buf, 0, 64); buf = &null_buf[0]; /* @@ -2049,14 +2055,14 @@ static int core_scsi3_update_and_write_aptpl( ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, clear_aptpl_metadata); if (ret != 0) - return -1; + return ret; /* * __core_scsi3_write_aptpl_to_file() will call strlen() * on the passed buf to determine pr_aptpl_buf_len. */ ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0); if (ret != 0) - return -1; + return ret; return ret; } @@ -2070,28 +2076,28 @@ static int core_scsi3_emulate_pro_register( int spec_i_pt, int ignore_key) { - struct se_session *se_sess = SE_SESS(cmd); - struct se_device *dev = SE_DEV(cmd); + struct se_session *se_sess = cmd->se_sess; + struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve; - struct se_lun *se_lun = SE_LUN(cmd); + struct se_lun *se_lun = cmd->se_lun; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; /* Used for APTPL metadata w/ UNREGISTER */ unsigned char *pr_aptpl_buf = NULL; unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; int pr_holder = 0, ret = 0, type; - if (!(se_sess) || !(se_lun)) { - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } se_tpg = se_sess->se_tpg; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; - if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { + if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { memset(&isid_buf[0], 0, PR_REG_ISID_LEN); - TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0], + se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0], PR_REG_ISID_LEN); isid_ptr = &isid_buf[0]; } @@ -2099,30 +2105,30 @@ static int core_scsi3_emulate_pro_register( * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 */ pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg_e)) { + if (!pr_reg_e) { if (res_key) { - printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero" + pr_warn("SPC-3 PR: Reservation Key non-zero" " for SA REGISTER, returning CONFLICT\n"); return PYX_TRANSPORT_RESERVATION_CONFLICT; } /* * Do nothing but return GOOD status. */ - if (!(sa_res_key)) + if (!sa_res_key) return PYX_TRANSPORT_SENT_TO_TRANSPORT; - if (!(spec_i_pt)) { + if (!spec_i_pt) { /* * Perform the Service Action REGISTER on the Initiator * Port Endpoint that the PRO was received from on the * Logical Unit of the SCSI device server. 
*/ - ret = core_scsi3_alloc_registration(SE_DEV(cmd), + ret = core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, se_deve, isid_ptr, sa_res_key, all_tg_pt, aptpl, ignore_key, 0); if (ret != 0) { - printk(KERN_ERR "Unable to allocate" + pr_err("Unable to allocate" " struct t10_pr_registration\n"); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -2143,10 +2149,10 @@ static int core_scsi3_emulate_pro_register( /* * Nothing left to do for the APTPL=0 case. */ - if (!(aptpl)) { + if (!aptpl) { pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); - printk("SPC-3 PR: Set APTPL Bit Deactivated for" + core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER\n"); return 0; } @@ -2155,15 +2161,15 @@ static int core_scsi3_emulate_pro_register( * update the APTPL metadata information using its * preallocated *pr_reg->pr_aptpl_buf. */ - pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) { + if (!ret) { pr_tmpl->pr_aptpl_active = 1; - printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); + pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); } core_scsi3_put_pr_reg(pr_reg); @@ -2175,9 +2181,9 @@ static int core_scsi3_emulate_pro_register( pr_reg = pr_reg_e; type = pr_reg->pr_res_type; - if (!(ignore_key)) { + if (!ignore_key) { if (res_key != pr_reg->pr_res_key) { - printk(KERN_ERR "SPC-3 PR REGISTER: Received" + pr_err("SPC-3 PR REGISTER: Received" " res_key: 0x%016Lx does not match" " existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, @@ -2187,7 +2193,7 @@ static int core_scsi3_emulate_pro_register( } } if (spec_i_pt) { - printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT" + pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" " set while sa_res_key=0\n"); core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; @@ -2197,7 +2203,7 @@ static int core_scsi3_emulate_pro_register( * must also set ALL_TG_PT=1 in the incoming PROUT. */ if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { - printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1" + pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1" " registration exists, but ALL_TG_PT=1 bit not" " present in received PROUT\n"); core_scsi3_put_pr_reg(pr_reg); @@ -2209,8 +2215,8 @@ static int core_scsi3_emulate_pro_register( if (aptpl) { pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); - if (!(pr_aptpl_buf)) { - printk(KERN_ERR "Unable to allocate" + if (!pr_aptpl_buf) { + pr_err("Unable to allocate" " pr_aptpl_buf\n"); core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_LU_COMM_FAILURE; @@ -2221,9 +2227,9 @@ static int core_scsi3_emulate_pro_register( * Nexus sa_res_key=1 Change Reservation Key for registered I_T * Nexus. */ - if (!(sa_res_key)) { + if (!sa_res_key) { pr_holder = core_scsi3_check_implict_release( - SE_DEV(cmd), pr_reg); + cmd->se_dev, pr_reg); if (pr_holder < 0) { kfree(pr_aptpl_buf); core_scsi3_put_pr_reg(pr_reg); @@ -2240,7 +2246,7 @@ static int core_scsi3_emulate_pro_register( &pr_tmpl->registration_list, pr_reg_list) { - if (!(pr_reg_p->pr_reg_all_tg_pt)) + if (!pr_reg_p->pr_reg_all_tg_pt) continue; if (pr_reg_p->pr_res_key != res_key) @@ -2260,7 +2266,7 @@ static int core_scsi3_emulate_pro_register( /* * Release the calling I_T Nexus registration now.. 
*/ - __core_scsi3_free_registration(SE_DEV(cmd), pr_reg, + __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); /* * From spc4r17, section 5.7.11.3 Unregistering @@ -2289,10 +2295,10 @@ static int core_scsi3_emulate_pro_register( } spin_unlock(&pr_tmpl->registration_lock); - if (!(aptpl)) { + if (!aptpl) { pr_tmpl->pr_aptpl_active = 0; core_scsi3_update_and_write_aptpl(dev, NULL, 0); - printk("SPC-3 PR: Set APTPL Bit Deactivated" + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" " for UNREGISTER\n"); return 0; } @@ -2300,9 +2306,9 @@ static int core_scsi3_emulate_pro_register( ret = core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) { + if (!ret) { pr_tmpl->pr_aptpl_active = 1; - printk("SPC-3 PR: Set APTPL Bit Activated" + pr_debug("SPC-3 PR: Set APTPL Bit Activated" " for UNREGISTER\n"); } @@ -2315,20 +2321,20 @@ static int core_scsi3_emulate_pro_register( * READ_KEYS service action. */ pr_reg->pr_res_generation = core_scsi3_pr_generation( - SE_DEV(cmd)); + cmd->se_dev); pr_reg->pr_res_key = sa_res_key; - printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" + pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation" " Key for %s to: 0x%016Lx PRgeneration:" - " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(), + " 0x%08x\n", cmd->se_tfo->get_fabric_name(), (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", pr_reg->pr_reg_nacl->initiatorname, pr_reg->pr_res_key, pr_reg->pr_res_generation); - if (!(aptpl)) { + if (!aptpl) { pr_tmpl->pr_aptpl_active = 0; core_scsi3_update_and_write_aptpl(dev, NULL, 0); core_scsi3_put_pr_reg(pr_reg); - printk("SPC-3 PR: Set APTPL Bit Deactivated" + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" " for REGISTER\n"); return 0; } @@ -2336,9 +2342,9 @@ static int core_scsi3_emulate_pro_register( ret = core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) { + if (!ret) { pr_tmpl->pr_aptpl_active = 1; - printk("SPC-3 PR: Set APTPL Bit Activated" + pr_debug("SPC-3 PR: Set APTPL Bit Activated" " for REGISTER\n"); } @@ -2378,19 +2384,19 @@ static int core_scsi3_pro_reserve( int scope, u64 res_key) { - struct se_session *se_sess = SE_SESS(cmd); + struct se_session *se_sess = cmd->se_sess; struct se_dev_entry *se_deve; - struct se_lun *se_lun = SE_LUN(cmd); + struct se_lun *se_lun = cmd->se_lun; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_res_holder; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; char i_buf[PR_REG_ISID_ID_LEN]; int ret, prf_isid; memset(i_buf, 0, PR_REG_ISID_ID_LEN); - if (!(se_sess) || !(se_lun)) { - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } se_tpg = se_sess->se_tpg; @@ -2398,10 +2404,10 @@ static int core_scsi3_pro_reserve( /* * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate" + if (!pr_reg) { + pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for RESERVE\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -2415,7 +2421,7 @@ static int core_scsi3_pro_reserve( * registered with the logical unit for the I_T nexus; and */ if (res_key != pr_reg->pr_res_key) { - 
printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx" + pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx" " does not match existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); @@ -2432,7 +2438,7 @@ static int core_scsi3_pro_reserve( * and that persistent reservation has a scope of LU_SCOPE. */ if (scope != PR_SCOPE_LU_SCOPE) { - printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); + pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -2456,12 +2462,12 @@ static int core_scsi3_pro_reserve( */ if (pr_res_holder != pr_reg) { struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; - printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" + pr_err("SPC-3 PR: Attempted RESERVE from" " [%s]: %s while reservation already held by" " [%s]: %s, returning RESERVATION_CONFLICT\n", - CMD_TFO(cmd)->get_fabric_name(), + cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, - TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), + pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), pr_res_holder->pr_reg_nacl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -2478,13 +2484,13 @@ static int core_scsi3_pro_reserve( if ((pr_res_holder->pr_res_type != type) || (pr_res_holder->pr_res_scope != scope)) { struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; - printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" + pr_err("SPC-3 PR: Attempted RESERVE from" " [%s]: %s trying to change TYPE and/or SCOPE," " while reservation already held by [%s]: %s," " returning RESERVATION_CONFLICT\n", - CMD_TFO(cmd)->get_fabric_name(), + cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, - TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), + pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), pr_res_holder->pr_reg_nacl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -2516,22 +2522,22 @@ static int core_scsi3_pro_reserve( prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" + pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new" " reservation holder TYPE: %s ALL_TG_PT: %d\n", - CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type), + cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); - printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", - CMD_TFO(cmd)->get_fabric_name(), + pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", + cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, (prf_isid) ? 
&i_buf[0] : ""); spin_unlock(&dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" + if (!ret) + pr_debug("SPC-3 PR: Updated APTPL metadata" " for RESERVE\n"); } @@ -2558,7 +2564,7 @@ static int core_scsi3_emulate_pro_reserve( ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key); break; default: - printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:" + pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" " 0x%02x\n", type); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -2587,12 +2593,12 @@ static void __core_scsi3_complete_pro_release( */ dev->dev_pr_res_holder = NULL; - printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared" + pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" " reservation holder TYPE: %s ALL_TG_PT: %d\n", tfo->get_fabric_name(), (explict) ? "explict" : "implict", core_scsi3_pr_dump_type(pr_reg->pr_res_type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); - printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n", + pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", tfo->get_fabric_name(), se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); /* @@ -2608,22 +2614,22 @@ static int core_scsi3_emulate_pro_release( u64 res_key) { struct se_device *dev = cmd->se_dev; - struct se_session *se_sess = SE_SESS(cmd); - struct se_lun *se_lun = SE_LUN(cmd); + struct se_session *se_sess = cmd->se_sess; + struct se_lun *se_lun = cmd->se_lun; struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; int ret, all_reg = 0; - if (!(se_sess) || !(se_lun)) { - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } /* * Locate the existing *pr_reg via struct se_node_acl pointers */ pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate" + if (!pr_reg) { + pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for RELEASE\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -2641,7 +2647,7 @@ static int core_scsi3_emulate_pro_release( */ spin_lock(&dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; - if (!(pr_res_holder)) { + if (!pr_res_holder) { /* * No persistent reservation, return GOOD status. 
*/ @@ -2678,7 +2684,7 @@ static int core_scsi3_emulate_pro_release( * that is registered with the logical unit for the I_T nexus; */ if (res_key != pr_reg->pr_res_key) { - printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx" + pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx" " does not match existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg->pr_res_key); spin_unlock(&dev->dev_reservation_lock); @@ -2694,13 +2700,13 @@ static int core_scsi3_emulate_pro_release( if ((pr_res_holder->pr_res_type != type) || (pr_res_holder->pr_res_scope != scope)) { struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; - printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release" + pr_err("SPC-3 PR RELEASE: Attempted to release" " reservation from [%s]: %s with different TYPE " "and/or SCOPE while reservation already held by" " [%s]: %s, returning RESERVATION_CONFLICT\n", - CMD_TFO(cmd)->get_fabric_name(), + cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, - TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), + pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), pr_res_holder->pr_reg_nacl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -2758,11 +2764,11 @@ static int core_scsi3_emulate_pro_release( write_aptpl: if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); + if (!ret) + pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); } core_scsi3_put_pr_reg(pr_reg); @@ -2775,18 +2781,18 @@ static int core_scsi3_emulate_pro_clear( { struct se_device *dev = cmd->se_dev; struct se_node_acl *pr_reg_nacl; - struct se_session *se_sess = SE_SESS(cmd); - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct se_session *se_sess = cmd->se_sess; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; u32 pr_res_mapped_lun = 0; int calling_it_nexus = 0; /* * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), + pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg_n)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate" + if (!pr_reg_n) { + pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for CLEAR\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -2802,7 +2808,7 @@ static int core_scsi3_emulate_pro_clear( * that is registered with the logical unit for the I_T nexus. */ if (res_key != pr_reg_n->pr_res_key) { - printk(KERN_ERR "SPC-3 PR REGISTER: Received" + pr_err("SPC-3 PR REGISTER: Received" " res_key: 0x%016Lx does not match" " existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); @@ -2839,18 +2845,18 @@ static int core_scsi3_emulate_pro_clear( * command with CLEAR service action was received, with the * additional sense code set to RESERVATIONS PREEMPTED. 
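[Editor's note: the CLEAR service action above releases every registration on the device and queues a RESERVATIONS PREEMPTED unit attention for every registered I_T nexus except the caller. Schematically (a sketch only; __free_registration stands in for the real __core_scsi3_free_registration call and its extra arguments):

spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
		&pr_tmpl->registration_list, pr_reg_list) {
	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
	u32 mapped_lun = pr_reg->pr_res_mapped_lun;
	int calling_it_nexus = (pr_reg == pr_reg_n);

	__free_registration(dev, pr_reg);	/* drop the registration */
	if (!calling_it_nexus)			/* UA for everyone else */
		core_scsi3_ua_allocate(nacl, mapped_lun, 0x2A,
				ASCQ_2AH_RESERVATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);

End of editor's note.]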
*/ - if (!(calling_it_nexus)) + if (!calling_it_nexus) core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); } spin_unlock(&pr_tmpl->registration_lock); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", - CMD_TFO(cmd)->get_fabric_name()); + pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n", + cmd->se_tfo->get_fabric_name()); if (pr_tmpl->pr_aptpl_active) { - core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); - printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" + core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); + pr_debug("SPC-3 PR: Updated APTPL metadata" " for CLEAR\n"); } @@ -2889,12 +2895,12 @@ static void __core_scsi3_complete_pro_preempt( pr_reg->pr_res_type = type; pr_reg->pr_res_scope = scope; - printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new" + pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new" " reservation holder TYPE: %s ALL_TG_PT: %d\n", tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", core_scsi3_pr_dump_type(type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); - printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", + pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); /* @@ -2920,7 +2926,7 @@ static void core_scsi3_release_preempt_and_abort( if (pr_reg_holder == pr_reg) continue; if (pr_reg->pr_res_holder) { - printk(KERN_WARNING "pr_reg->pr_res_holder still set\n"); + pr_warn("pr_reg->pr_res_holder still set\n"); continue; } @@ -2954,25 +2960,25 @@ static int core_scsi3_pro_preempt( u64 sa_res_key, int abort) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve; struct se_node_acl *pr_reg_nacl; - struct se_session *se_sess = SE_SESS(cmd); + struct se_session *se_sess = cmd->se_sess; struct list_head preempt_and_abort_list; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; u32 pr_res_mapped_lun = 0; int all_reg = 0, calling_it_nexus = 0, released_regs = 0; int prh_type = 0, prh_scope = 0, ret; - if (!(se_sess)) + if (!se_sess) return PYX_TRANSPORT_LU_COMM_FAILURE; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; - pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, + pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg_n)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate" + if (!pr_reg_n) { + pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for PREEMPT%s\n", (abort) ? 
"_AND_ABORT" : ""); return PYX_TRANSPORT_RESERVATION_CONFLICT; @@ -2982,7 +2988,7 @@ static int core_scsi3_pro_preempt( return PYX_TRANSPORT_RESERVATION_CONFLICT; } if (scope != PR_SCOPE_LU_SCOPE) { - printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); + pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); core_scsi3_put_pr_reg(pr_reg_n); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -2995,7 +3001,7 @@ static int core_scsi3_pro_preempt( (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) all_reg = 1; - if (!(all_reg) && !(sa_res_key)) { + if (!all_reg && !sa_res_key) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; @@ -3009,7 +3015,7 @@ static int core_scsi3_pro_preempt( * server shall perform a preempt by doing the following in an * uninterrupted series of actions. (See below..) */ - if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) { + if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) { /* * No existing or SA Reservation Key matching reservations.. * @@ -3036,7 +3042,7 @@ static int core_scsi3_pro_preempt( * was received, with the additional sense code set * to REGISTRATIONS PREEMPTED. */ - if (!(all_reg)) { + if (!all_reg) { if (pr_reg->pr_res_key != sa_res_key) continue; @@ -3076,7 +3082,7 @@ static int core_scsi3_pro_preempt( NULL, 0); released_regs++; } - if (!(calling_it_nexus)) + if (!calling_it_nexus) core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); @@ -3089,7 +3095,7 @@ static int core_scsi3_pro_preempt( * registered reservation key, then the device server shall * complete the command with RESERVATION CONFLICT status. */ - if (!(released_regs)) { + if (!released_regs) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); return PYX_TRANSPORT_RESERVATION_CONFLICT; @@ -3111,17 +3117,17 @@ static int core_scsi3_pro_preempt( spin_unlock(&dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk(KERN_INFO "SPC-3 PR: Updated APTPL" + if (!ret) + pr_debug("SPC-3 PR: Updated APTPL" " metadata for PREEMPT%s\n", (abort) ? "_AND_ABORT" : ""); } core_scsi3_put_pr_reg(pr_reg_n); - core_scsi3_pr_generation(SE_DEV(cmd)); + core_scsi3_pr_generation(cmd->se_dev); return 0; } /* @@ -3247,16 +3253,16 @@ static int core_scsi3_pro_preempt( } if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk("SPC-3 PR: Updated APTPL metadata for PREEMPT" + if (!ret) + pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT" "%s\n", (abort) ? "_AND_ABORT" : ""); } core_scsi3_put_pr_reg(pr_reg_n); - core_scsi3_pr_generation(SE_DEV(cmd)); + core_scsi3_pr_generation(cmd->se_dev); return 0; } @@ -3281,7 +3287,7 @@ static int core_scsi3_emulate_pro_preempt( res_key, sa_res_key, abort); break; default: - printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s" + pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" " Type: 0x%02x\n", (abort) ? 
"_AND_ABORT" : "", type); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -3297,17 +3303,17 @@ static int core_scsi3_emulate_pro_register_and_move( int aptpl, int unreg) { - struct se_session *se_sess = SE_SESS(cmd); - struct se_device *dev = SE_DEV(cmd); + struct se_session *se_sess = cmd->se_sess; + struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve, *dest_se_deve = NULL; - struct se_lun *se_lun = SE_LUN(cmd); + struct se_lun *se_lun = cmd->se_lun; struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; struct se_port *se_port; struct se_portal_group *se_tpg, *dest_se_tpg = NULL; struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; + unsigned char *buf; unsigned char *initiator_str; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tid_len, tmp_tid_len; @@ -3315,14 +3321,14 @@ static int core_scsi3_emulate_pro_register_and_move( unsigned short rtpi; unsigned char proto_ident; - if (!(se_sess) || !(se_lun)) { - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } memset(dest_iport, 0, 64); memset(i_buf, 0, PR_REG_ISID_ID_LEN); se_tpg = se_sess->se_tpg; - tf_ops = TPG_TFO(se_tpg); + tf_ops = se_tpg->se_tpg_tfo; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; /* * Follow logic from spc4r17 Section 5.7.8, Table 50 -- @@ -3330,10 +3336,10 @@ static int core_scsi3_emulate_pro_register_and_move( * * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" + if (!pr_reg) { + pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" " *pr_reg for REGISTER_AND_MOVE\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -3342,7 +3348,7 @@ static int core_scsi3_emulate_pro_register_and_move( * provided during this initiator's I_T nexus registration. */ if (res_key != pr_reg->pr_res_key) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received" + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received" " res_key: 0x%016Lx does not match existing SA REGISTER" " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); @@ -3351,26 +3357,30 @@ static int core_scsi3_emulate_pro_register_and_move( /* * The service active reservation key needs to be non zero */ - if (!(sa_res_key)) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero" + if (!sa_res_key) { + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" " sa_res_key\n"); core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } + /* * Determine the Relative Target Port Identifier where the reservation * will be moved to for the TransportID containing SCSI initiator WWN * information. 
*/ + buf = transport_kmap_first_data_page(cmd); rtpi = (buf[18] & 0xff) << 8; rtpi |= buf[19] & 0xff; tid_len = (buf[20] & 0xff) << 24; tid_len |= (buf[21] & 0xff) << 16; tid_len |= (buf[22] & 0xff) << 8; tid_len |= buf[23] & 0xff; + transport_kunmap_first_data_page(cmd); + buf = NULL; if ((tid_len + 24) != cmd->data_length) { - printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header" + pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header" " does not equal CDB data_length: %u\n", tid_len, cmd->data_length); core_scsi3_put_pr_reg(pr_reg); @@ -3382,10 +3392,10 @@ static int core_scsi3_emulate_pro_register_and_move( if (se_port->sep_rtpi != rtpi) continue; dest_se_tpg = se_port->sep_tpg; - if (!(dest_se_tpg)) + if (!dest_se_tpg) continue; - dest_tf_ops = TPG_TFO(dest_se_tpg); - if (!(dest_tf_ops)) + dest_tf_ops = dest_se_tpg->se_tpg_tfo; + if (!dest_tf_ops) continue; atomic_inc(&dest_se_tpg->tpg_pr_ref_count); @@ -3394,7 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move( ret = core_scsi3_tpg_depend_item(dest_se_tpg); if (ret != 0) { - printk(KERN_ERR "core_scsi3_tpg_depend_item() failed" + pr_err("core_scsi3_tpg_depend_item() failed" " for dest_se_tpg\n"); atomic_dec(&dest_se_tpg->tpg_pr_ref_count); smp_mb__after_atomic_dec(); @@ -3407,20 +3417,22 @@ static int core_scsi3_emulate_pro_register_and_move( } spin_unlock(&dev->se_port_lock); - if (!(dest_se_tpg) || (!dest_tf_ops)) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" + if (!dest_se_tpg || !dest_tf_ops) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" " fabric ops from Relative Target Port Identifier:" " %hu\n", rtpi); core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } + + buf = transport_kmap_first_data_page(cmd); proto_ident = (buf[24] & 0x0f); #if 0 - printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" " 0x%02x\n", proto_ident); #endif if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received" + pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" " proto_ident: 0x%02x does not match ident: 0x%02x" " from fabric: %s\n", proto_ident, dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), @@ -3429,7 +3441,7 @@ static int core_scsi3_emulate_pro_register_and_move( goto out; } if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not" + pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" " contain a valid tpg_parse_pr_out_transport_id" " function pointer\n"); ret = PYX_TRANSPORT_LU_COMM_FAILURE; @@ -3437,14 +3449,17 @@ static int core_scsi3_emulate_pro_register_and_move( } initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, (const char *)&buf[24], &tmp_tid_len, &iport_ptr); - if (!(initiator_str)) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" + if (!initiator_str) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" " initiator_str from Transport ID\n"); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; goto out; } - printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s" + transport_kunmap_first_data_page(cmd); + buf = NULL; + + pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? "port" : "device", initiator_str, (iport_ptr != NULL) ? 
iport_ptr : ""); @@ -3459,18 +3474,18 @@ static int core_scsi3_emulate_pro_register_and_move( pr_reg_nacl = pr_reg->pr_reg_nacl; matching_iname = (!strcmp(initiator_str, pr_reg_nacl->initiatorname)) ? 1 : 0; - if (!(matching_iname)) + if (!matching_iname) goto after_iport_check; - if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" + if (!iport_ptr || !pr_reg->isid_present_at_reg) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" " matches: %s on received I_T Nexus\n", initiator_str, pr_reg_nacl->initiatorname); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; goto out; } - if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" + if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" " matches: %s %s on received I_T Nexus\n", initiator_str, iport_ptr, pr_reg_nacl->initiatorname, pr_reg->pr_reg_isid); @@ -3490,8 +3505,8 @@ after_iport_check: } spin_unlock_bh(&dest_se_tpg->acl_node_lock); - if (!(dest_node_acl)) { - printk(KERN_ERR "Unable to locate %s dest_node_acl for" + if (!dest_node_acl) { + pr_err("Unable to locate %s dest_node_acl for" " TransportID%s\n", dest_tf_ops->get_fabric_name(), initiator_str); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; @@ -3499,7 +3514,7 @@ after_iport_check: } ret = core_scsi3_nodeacl_depend_item(dest_node_acl); if (ret != 0) { - printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for" + pr_err("core_scsi3_nodeacl_depend_item() for" " dest_node_acl\n"); atomic_dec(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_dec(); @@ -3508,7 +3523,7 @@ after_iport_check: goto out; } #if 0 - printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" " %s from TransportID\n", dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname); #endif @@ -3517,8 +3532,8 @@ after_iport_check: * PORT IDENTIFIER. 
*/ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); - if (!(dest_se_deve)) { - printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:" + if (!dest_se_deve) { + pr_err("Unable to locate %s dest_se_deve from RTPI:" " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; goto out; @@ -3526,7 +3541,7 @@ after_iport_check: ret = core_scsi3_lunacl_depend_item(dest_se_deve); if (ret < 0) { - printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n"); + pr_err("core_scsi3_lunacl_depend_item() failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); smp_mb__after_atomic_dec(); dest_se_deve = NULL; @@ -3534,7 +3549,7 @@ after_iport_check: goto out; } #if 0 - printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" " ACL for dest_se_deve->mapped_lun: %u\n", dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, dest_se_deve->mapped_lun); @@ -3545,8 +3560,8 @@ after_iport_check: */ spin_lock(&dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; - if (!(pr_res_holder)) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation" + if (!pr_res_holder) { + pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" " currently held\n"); spin_unlock(&dev->dev_reservation_lock); ret = PYX_TRANSPORT_INVALID_CDB_FIELD; @@ -3559,7 +3574,7 @@ after_iport_check: * Register behaviors for a REGISTER AND MOVE service action */ if (pr_res_holder != pr_reg) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T" + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" " Nexus is not reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); ret = PYX_TRANSPORT_RESERVATION_CONFLICT; @@ -3576,7 +3591,7 @@ after_iport_check: */ if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move" + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move" " reservation for type: %s\n", core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); spin_unlock(&dev->dev_reservation_lock); @@ -3611,8 +3626,8 @@ after_iport_check: */ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, iport_ptr); - if (!(dest_pr_reg)) { - ret = core_scsi3_alloc_registration(SE_DEV(cmd), + if (!dest_pr_reg) { + ret = core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, 0, aptpl, 2, 1); if (ret != 0) { @@ -3644,16 +3659,16 @@ after_iport_check: /* * Increment PRGeneration for existing registrations.. */ - if (!(new_reg)) + if (!new_reg) dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++; spin_unlock(&dev->dev_reservation_lock); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" + pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" " created new reservation holder TYPE: %s on object RTPI:" " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), core_scsi3_pr_dump_type(type), rtpi, dest_pr_reg->pr_res_generation); - printk(KERN_INFO "SPC-3 PR Successfully moved reservation from" + pr_debug("SPC-3 PR Successfully moved reservation from" " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, (prf_isid) ? 
&i_buf[0] : "", dest_tf_ops->get_fabric_name(), @@ -3681,24 +3696,28 @@ after_iport_check: * Clear the APTPL metadata if APTPL has been disabled, otherwise * write out the updated metadata to struct file for this SCSI device. */ - if (!(aptpl)) { + if (!aptpl) { pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); - printk("SPC-3 PR: Set APTPL Bit Deactivated for" + core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER_AND_MOVE\n"); } else { pr_tmpl->pr_aptpl_active = 1; - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &dest_pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk("SPC-3 PR: Set APTPL Bit Activated for" + if (!ret) + pr_debug("SPC-3 PR: Set APTPL Bit Activated for" " REGISTER_AND_MOVE\n"); } + transport_kunmap_first_data_page(cmd); + core_scsi3_put_pr_reg(dest_pr_reg); return 0; out: + if (buf) + transport_kunmap_first_data_page(cmd); if (dest_se_deve) core_scsi3_lunacl_undepend_item(dest_se_deve); if (dest_node_acl) @@ -3723,7 +3742,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) */ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) { - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *buf; u64 res_key, sa_res_key; int sa, scope, type, aptpl; int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; @@ -3731,11 +3750,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) * FIXME: A NULL struct se_session pointer means an this is not coming from * a $FABRIC_MOD's nexus, but from internal passthrough ops. */ - if (!(SE_SESS(cmd))) + if (!cmd->se_sess) return PYX_TRANSPORT_LU_COMM_FAILURE; if (cmd->data_length < 24) { - printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list" + pr_warn("SPC-PR: Received PR OUT parameter list" " length too small: %u\n", cmd->data_length); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -3745,6 +3764,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) sa = (cdb[1] & 0x1f); scope = (cdb[2] & 0xf0); type = (cdb[2] & 0x0f); + + buf = transport_kmap_first_data_page(cmd); /* * From PERSISTENT_RESERVE_OUT parameter list (payload) */ @@ -3762,6 +3783,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) aptpl = (buf[17] & 0x01); unreg = (buf[17] & 0x02); } + transport_kunmap_first_data_page(cmd); + buf = NULL; + /* * SPEC_I_PT=1 is only valid for Service action: REGISTER */ @@ -3776,9 +3800,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) * the sense key set to ILLEGAL REQUEST, and the additional sense * code set to PARAMETER LIST LENGTH ERROR. 
*/ - if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && + if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && (cmd->data_length != 24)) { - printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter" + pr_warn("SPC-PR: Received PR OUT illegal parameter" " list length: %u\n", cmd->data_length); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -3812,7 +3836,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) return core_scsi3_emulate_pro_register_and_move(cmd, res_key, sa_res_key, aptpl, unreg); default: - printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" + pr_err("Unknown PERSISTENT_RESERVE_OUT service" " action: 0x%02x\n", cdb[1] & 0x1f); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -3827,25 +3851,26 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) */ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) { - struct se_device *se_dev = SE_DEV(cmd); - struct se_subsystem_dev *su_dev = SU_DEV(se_dev); + struct se_device *se_dev = cmd->se_dev; + struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *buf; u32 add_len = 0, off = 8; if (cmd->data_length < 8) { - printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u" + pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" " too small\n", cmd->data_length); return PYX_TRANSPORT_INVALID_CDB_FIELD; } - buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); - buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); - buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); - buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); + buf = transport_kmap_first_data_page(cmd); + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); + buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); + buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); + buf[3] = (su_dev->t10_pr.pr_generation & 0xff); - spin_lock(&T10_RES(su_dev)->registration_lock); - list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, + spin_lock(&su_dev->t10_pr.registration_lock); + list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, pr_reg_list) { /* * Check for overflow of 8byte PRI READ_KEYS payload and @@ -3865,13 +3890,15 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) add_len += 8; } - spin_unlock(&T10_RES(su_dev)->registration_lock); + spin_unlock(&su_dev->t10_pr.registration_lock); buf[4] = ((add_len >> 24) & 0xff); buf[5] = ((add_len >> 16) & 0xff); buf[6] = ((add_len >> 8) & 0xff); buf[7] = (add_len & 0xff); + transport_kunmap_first_data_page(cmd); + return 0; } @@ -3882,23 +3909,24 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) */ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) { - struct se_device *se_dev = SE_DEV(cmd); - struct se_subsystem_dev *su_dev = SU_DEV(se_dev); + struct se_device *se_dev = cmd->se_dev; + struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *buf; u64 pr_res_key; u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. 
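[Editor's note: READ KEYS above emits PRGENERATION and ADDITIONAL LENGTH as big-endian 32-bit fields bracketing one big-endian 64-bit key per registration. The byte-at-a-time stores are equivalent to this sketch (illustrative only):

#include <asm/unaligned.h>

put_unaligned_be32(su_dev->t10_pr.pr_generation, &buf[0]);
/* ...then, for each registration that still fits in the payload: */
put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
off += 8;
add_len += 8;
/* ...and finally the total key bytes emitted: */
put_unaligned_be32(add_len, &buf[4]);

End of editor's note.]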
*/ if (cmd->data_length < 8) { - printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u" + pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" " too small\n", cmd->data_length); return PYX_TRANSPORT_INVALID_CDB_FIELD; } - buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); - buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); - buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); - buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); + buf = transport_kmap_first_data_page(cmd); + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); + buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); + buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); + buf[3] = (su_dev->t10_pr.pr_generation & 0xff); spin_lock(&se_dev->dev_reservation_lock); pr_reg = se_dev->dev_pr_res_holder; @@ -3911,10 +3939,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) buf[6] = ((add_len >> 8) & 0xff); buf[7] = (add_len & 0xff); - if (cmd->data_length < 22) { - spin_unlock(&se_dev->dev_reservation_lock); - return 0; - } + if (cmd->data_length < 22) + goto err; + /* * Set the Reservation key. * @@ -3951,7 +3978,10 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) buf[21] = (pr_reg->pr_res_scope & 0xf0) | (pr_reg->pr_res_type & 0x0f); } + +err: spin_unlock(&se_dev->dev_reservation_lock); + transport_kunmap_first_data_page(cmd); return 0; } @@ -3963,17 +3993,19 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) */ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + struct se_device *dev = cmd->se_dev; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; + unsigned char *buf; u16 add_len = 8; /* Hardcoded to 8. */ if (cmd->data_length < 6) { - printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:" + pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" " %u too small\n", cmd->data_length); return PYX_TRANSPORT_INVALID_CDB_FIELD; } + buf = transport_kmap_first_data_page(cmd); + buf[0] = ((add_len << 8) & 0xff); buf[1] = (add_len & 0xff); buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. 
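[Editor's note: the err: label introduced above exists because the payload buffer is now kmapped; every exit from READ RESERVATION, including the short-allocation case, must both drop dev_reservation_lock and kunmap the page. The shape of the pattern:

buf = transport_kmap_first_data_page(cmd);
/* ... write the 8-byte header ... */
spin_lock(&se_dev->dev_reservation_lock);
if (cmd->data_length < 22)
	goto err;	/* still unlock and kunmap below */
/* ... fill in the reservation descriptor ... */
err:
	spin_unlock(&se_dev->dev_reservation_lock);
	transport_kunmap_first_data_page(cmd);
	return 0;

Separately, note that REPORT CAPABILITIES stores its length header as buf[0] = ((add_len << 8) & 0xff), which always evaluates to zero; because add_len is hardcoded to 8 the wire format is still correct, but put_unaligned_be16(add_len, &buf[0]) would state the intent. End of editor's note.]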
*/ @@ -4004,6 +4036,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ + transport_kunmap_first_data_page(cmd); + return 0; } @@ -4014,27 +4048,29 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) */ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) { - struct se_device *se_dev = SE_DEV(cmd); + struct se_device *se_dev = cmd->se_dev; struct se_node_acl *se_nacl; - struct se_subsystem_dev *su_dev = SU_DEV(se_dev); + struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_reg_tmp; - struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; + unsigned char *buf; u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; u32 off = 8; /* off into first Full Status descriptor */ int format_code = 0; if (cmd->data_length < 8) { - printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u" + pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" " too small\n", cmd->data_length); return PYX_TRANSPORT_INVALID_CDB_FIELD; } - buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); - buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); - buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); - buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); + buf = transport_kmap_first_data_page(cmd); + + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); + buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); + buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); + buf[3] = (su_dev->t10_pr.pr_generation & 0xff); spin_lock(&pr_tmpl->registration_lock); list_for_each_entry_safe(pr_reg, pr_reg_tmp, @@ -4051,11 +4087,11 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) * Determine expected length of $FABRIC_MOD specific * TransportID full status descriptor.. */ - exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len( + exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len( se_tpg, se_nacl, pr_reg, &format_code); if ((exp_desc_len + add_len) > cmd->data_length) { - printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran" + pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" " out of buffer: %d\n", cmd->data_length); spin_lock(&pr_tmpl->registration_lock); atomic_dec(&pr_reg->pr_res_holders); @@ -4105,7 +4141,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) * bit is set to one, the contents of the RELATIVE TARGET PORT * IDENTIFIER field are not defined by this standard. 
*/ - if (!(pr_reg->pr_reg_all_tg_pt)) { + if (!pr_reg->pr_reg_all_tg_pt) { struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; buf[off++] = ((port->sep_rtpi >> 8) & 0xff); @@ -4116,7 +4152,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) /* * Now, have the $FABRIC_MOD fill in the protocol identifier */ - desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg, + desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg, se_nacl, pr_reg, &format_code, &buf[off+4]); spin_lock(&pr_tmpl->registration_lock); @@ -4150,6 +4186,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) buf[6] = ((add_len >> 8) & 0xff); buf[7] = (add_len & 0xff); + transport_kunmap_first_data_page(cmd); + return 0; } @@ -4165,7 +4203,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) case PRI_READ_FULL_STATUS: return core_scsi3_pri_read_full_status(cmd); default: - printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service" + pr_err("Unknown PERSISTENT_RESERVE_IN service" " action: 0x%02x\n", cdb[1] & 0x1f); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -4174,7 +4212,7 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd) { - unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; + unsigned char *cdb = &cmd->t_task_cdb[0]; struct se_device *dev = cmd->se_dev; /* * Following spc2r20 5.5.1 Reservations overview: @@ -4186,7 +4224,7 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd) * CONFLICT status. */ if (dev->dev_flags & DF_SPC2_RESERVATIONS) { - printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy" + pr_err("Received PERSISTENT_RESERVE CDB while legacy" " SPC-2 reservation is held, returning" " RESERVATION_CONFLICT\n"); return PYX_TRANSPORT_RESERVATION_CONFLICT; } @@ -4213,39 +4251,39 @@ static int core_pt_seq_non_holder( int core_setup_reservations(struct se_device *dev, int force_pt) { struct se_subsystem_dev *su_dev = dev->se_sub_dev; - struct t10_reservation_template *rest = &su_dev->t10_reservation; + struct t10_reservation *rest = &su_dev->t10_pr; /* * If this device is from Target_Core_Mod/pSCSI, use the reservations * of the Underlying SCSI hardware. In Linux/SCSI terms, this can * cause a problem because libata and some SATA RAID HBAs appear * under Linux/SCSI, but emulate reservations themselves. */ - if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && - !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) { + if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && + !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) { rest->res_type = SPC_PASSTHROUGH; rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; - printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" - " emulation\n", TRANSPORT(dev)->name); + pr_debug("%s: Using SPC_PASSTHROUGH, no reservation" + " emulation\n", dev->transport->name); return 0; } /* * If SPC-3 or above is reported by real or emulated struct se_device, * use emulated Persistent Reservations. 
*/ - if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { + if (dev->transport->get_device_rev(dev) >= SCSI_3) { rest->res_type = SPC3_PERSISTENT_RESERVATIONS; rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; - printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" - " emulation\n", TRANSPORT(dev)->name); + pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS" + " emulation\n", dev->transport->name); } else { rest->res_type = SPC2_RESERVATIONS; rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; rest->pr_ops.t10_seq_non_holder = &core_scsi2_reservation_seq_non_holder; - printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n", - TRANSPORT(dev)->name); + pr_debug("%s: Using SPC2_RESERVATIONS emulation\n", + dev->transport->name); } return 0; diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index 5603bcfd86d..c8f47d06458 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *, char *, u32); extern int core_scsi2_emulate_crh(struct se_cmd *); extern int core_scsi3_alloc_aptpl_registration( - struct t10_reservation_template *, u64, + struct t10_reservation *, u64, unsigned char *, unsigned char *, u32, unsigned char *, u16, u32, int, int, u8); extern int core_scsi3_check_aptpl_registration(struct se_device *, diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 331d423fd0e..2b7b0da9146 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template; static void pscsi_req_done(struct request *, int); -/* pscsi_get_sh(): - * - * - */ -static struct Scsi_Host *pscsi_get_sh(u32 host_no) -{ - struct Scsi_Host *sh = NULL; - - sh = scsi_host_lookup(host_no); - if (IS_ERR(sh)) { - printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:" - " %u\n", host_no); - return NULL; - } - - return sh; -} - /* pscsi_attach_hba(): * * pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host. 
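[Editor's note: this series converts most printk(KERN_INFO ...) calls in these files to pr_debug(), which compiles away unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG is enabled, so the messages vanish from the default log. With dynamic debug they can be turned back on at runtime:

/* Re-enable the converted messages via dynamic debug, e.g.:
 *   echo 'file target_core_pr.c +p' > /sys/kernel/debug/dynamic_debug/control
 */

End of editor's note.]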
@@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no) */ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) { - int hba_depth; struct pscsi_hba_virt *phv; phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); - if (!(phv)) { - printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); - return -1; + if (!phv) { + pr_err("Unable to allocate struct pscsi_hba_virt\n"); + return -ENOMEM; } phv->phv_host_id = host_id; phv->phv_mode = PHV_VIRUTAL_HOST_ID; - hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; - atomic_set(&hba->left_queue_depth, hba_depth); - atomic_set(&hba->max_queue_depth, hba_depth); - hba->hba_ptr = (void *)phv; + hba->hba_ptr = phv; - printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" + pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, PSCSI_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic" - " Target Core with TCQ Depth: %d\n", hba->hba_id, - atomic_read(&hba->max_queue_depth)); + pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n", + hba->hba_id); return 0; } @@ -114,12 +91,12 @@ static void pscsi_detach_hba(struct se_hba *hba) if (scsi_host) { scsi_host_put(scsi_host); - printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from" + pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from" " Generic Target Core\n", hba->hba_id, (scsi_host->hostt->name) ? (scsi_host->hostt->name) : "Unknown"); } else - printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA" + pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA" " from Generic Target Core\n", hba->hba_id); kfree(phv); @@ -130,20 +107,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) { struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; struct Scsi_Host *sh = phv->phv_lld_host; - int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; /* * Release the struct Scsi_Host */ - if (!(mode_flag)) { - if (!(sh)) + if (!mode_flag) { + if (!sh) return 0; phv->phv_lld_host = NULL; phv->phv_mode = PHV_VIRUTAL_HOST_ID; - atomic_set(&hba->left_queue_depth, hba_depth); - atomic_set(&hba->max_queue_depth, hba_depth); - printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" + pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" " %s\n", hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); @@ -154,27 +128,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) * Otherwise, locate struct Scsi_Host from the original passed * pSCSI Host ID and enable for phba mode */ - sh = pscsi_get_sh(phv->phv_host_id); - if (!(sh)) { - printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" + sh = scsi_host_lookup(phv->phv_host_id); + if (IS_ERR(sh)) { + pr_err("pSCSI: Unable to locate SCSI Host for" " phv_host_id: %d\n", phv->phv_host_id); - return -1; + return PTR_ERR(sh); } - /* - * Usually the SCSI LLD will use the hostt->can_queue value to define - * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set - * this at all and set sh->can_queue at runtime. - */ - hba_depth = (sh->hostt->can_queue > sh->can_queue) ? - sh->hostt->can_queue : sh->can_queue; - - atomic_set(&hba->left_queue_depth, hba_depth); - atomic_set(&hba->max_queue_depth, hba_depth); phv->phv_lld_host = sh; phv->phv_mode = PHV_LLD_SCSI_HOST_NO; - printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", + pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", hba->hba_id, (sh->hostt->name) ? 
(sh->hostt->name) : "Unknown"); return 1; @@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); if (!buf) - return -1; + return -ENOMEM; memset(cdb, 0, MAX_COMMAND_SIZE); cdb[0] = INQUIRY; @@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) out_free: kfree(buf); - return -1; + return -EPERM; } static void @@ -293,15 +257,15 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, page_83 = &buf[off]; ident_len = page_83[3]; if (!ident_len) { - printk(KERN_ERR "page_83[3]: identifier" + pr_err("page_83[3]: identifier" " length zero!\n"); break; } - printk(KERN_INFO "T10 VPD Identifier Length: %d\n", ident_len); + pr_debug("T10 VPD Identifier Length: %d\n", ident_len); vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); if (!vpd) { - printk(KERN_ERR "Unable to allocate memory for" + pr_err("Unable to allocate memory for" " struct t10_vpd\n"); goto out; } @@ -353,7 +317,7 @@ static struct se_device *pscsi_add_device_to_list( if (!sd->queue_depth) { sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; - printk(KERN_ERR "Set broken SCSI Device %d:%d:%d" + pr_err("Set broken SCSI Device %d:%d:%d" " queue_depth to %d\n", sd->channel, sd->id, sd->lun, sd->queue_depth); } @@ -364,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list( q = sd->request_queue; limits = &dev_limits.limits; limits->logical_block_size = sd->sector_size; - limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ? - queue_max_hw_sectors(q) : sd->host->max_sectors; - limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ? - queue_max_sectors(q) : sd->host->max_sectors; + limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); + limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q)); dev_limits.hw_queue_depth = sd->queue_depth; dev_limits.queue_depth = sd->queue_depth; /* @@ -391,9 +353,9 @@ static struct se_device *pscsi_add_device_to_list( pdv->pdv_sd = sd; dev = transport_add_device_to_core_hba(hba, &pscsi_template, - se_dev, dev_flags, (void *)pdv, + se_dev, dev_flags, pdv, &dev_limits, NULL, NULL); - if (!(dev)) { + if (!dev) { pdv->pdv_sd = NULL; return NULL; } @@ -423,14 +385,14 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) struct pscsi_dev_virt *pdv; pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); - if (!(pdv)) { - printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n"); + if (!pdv) { + pr_err("Unable to allocate memory for struct pscsi_dev_virt\n"); return NULL; } pdv->pdv_se_hba = hba; - printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); - return (void *)pdv; + pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name); + return pdv; } /* @@ -450,7 +412,7 @@ static struct se_device *pscsi_create_type_disk( u32 dev_flags = 0; if (scsi_device_get(sd)) { - printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", + pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", sh->host_no, sd->channel, sd->id, sd->lun); spin_unlock_irq(sh->host_lock); return NULL; @@ -463,19 +425,19 @@ static struct se_device *pscsi_create_type_disk( bd = blkdev_get_by_path(se_dev->se_dev_udev_path, FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); if (IS_ERR(bd)) { - printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); + pr_err("pSCSI: blkdev_get_by_path() failed\n"); scsi_device_put(sd); return NULL; } pdv->pdv_bd = bd; dev = 
pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); - if (!(dev)) { + if (!dev) { blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); scsi_device_put(sd); return NULL; } - printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", + pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); return dev; @@ -497,7 +459,7 @@ static struct se_device *pscsi_create_type_rom( u32 dev_flags = 0; if (scsi_device_get(sd)) { - printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", + pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", sh->host_no, sd->channel, sd->id, sd->lun); spin_unlock_irq(sh->host_lock); return NULL; @@ -505,11 +467,11 @@ static struct se_device *pscsi_create_type_rom( spin_unlock_irq(sh->host_lock); dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); - if (!(dev)) { + if (!dev) { scsi_device_put(sd); return NULL; } - printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", + pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, sd->channel, sd->id, sd->lun); @@ -533,10 +495,10 @@ static struct se_device *pscsi_create_type_other( spin_unlock_irq(sh->host_lock); dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); - if (!(dev)) + if (!dev) return NULL; - printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", + pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, sd->channel, sd->id, sd->lun); @@ -555,8 +517,8 @@ static struct se_device *pscsi_create_virtdevice( struct Scsi_Host *sh = phv->phv_lld_host; int legacy_mode_enable = 0; - if (!(pdv)) { - printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" + if (!pdv) { + pr_err("Unable to locate struct pscsi_dev_virt" " parameter\n"); return ERR_PTR(-EINVAL); } @@ -564,9 +526,9 @@ static struct se_device *pscsi_create_virtdevice( * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the * struct Scsi_Host we will need to bring the TCM/pSCSI object online */ - if (!(sh)) { + if (!sh) { if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { - printk(KERN_ERR "pSCSI: Unable to locate struct" + pr_err("pSCSI: Unable to locate struct" " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); return ERR_PTR(-ENODEV); } @@ -575,7 +537,7 @@ static struct se_device *pscsi_create_virtdevice( * reference, we enforce that udev_path has been set */ if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { - printk(KERN_ERR "pSCSI: udev_path attribute has not" + pr_err("pSCSI: udev_path attribute has not" " been set before ENABLE=1\n"); return ERR_PTR(-EINVAL); } @@ -586,8 +548,8 @@ static struct se_device *pscsi_create_virtdevice( */ if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { spin_lock(&hba->device_lock); - if (!(list_empty(&hba->hba_dev_list))) { - printk(KERN_ERR "pSCSI: Unable to set hba_mode" + if (!list_empty(&hba->hba_dev_list)) { + pr_err("pSCSI: Unable to set hba_mode" " with active devices\n"); spin_unlock(&hba->device_lock); return ERR_PTR(-EEXIST); @@ -601,16 +563,16 @@ static struct se_device *pscsi_create_virtdevice( hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; sh = phv->phv_lld_host; } else { - sh = pscsi_get_sh(pdv->pdv_host_id); - if (!(sh)) { - printk(KERN_ERR "pSCSI: Unable to locate" + sh = scsi_host_lookup(pdv->pdv_host_id); + if (IS_ERR(sh)) { + pr_err("pSCSI: Unable to locate" " pdv_host_id: %d\n", pdv->pdv_host_id); - return ERR_PTR(-ENODEV); + return (struct se_device 
*) sh; } } } else { if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { - printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" + pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while" " struct Scsi_Host exists\n"); return ERR_PTR(-EEXIST); } @@ -639,7 +601,7 @@ static struct se_device *pscsi_create_virtdevice( break; } - if (!(dev)) { + if (!dev) { if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) scsi_host_put(sh); else if (legacy_mode_enable) { @@ -653,7 +615,7 @@ static struct se_device *pscsi_create_virtdevice( } spin_unlock_irq(sh->host_lock); - printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, + pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) @@ -728,13 +690,12 @@ static int pscsi_transport_complete(struct se_task *task) */ if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && (status_byte(result) << 1) == SAM_STAT_GOOD) { - if (!TASK_CMD(task)->se_deve) + if (!task->task_se_cmd->se_deve) goto after_mode_sense; - if (TASK_CMD(task)->se_deve->lun_flags & + if (task->task_se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { - unsigned char *buf = (unsigned char *) - T_TASK(task->task_se_cmd)->t_task_buf; + unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd); if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -743,6 +704,8 @@ static int pscsi_transport_complete(struct se_task *task) if (!(buf[2] & 0x80)) buf[2] |= 0x80; } + + transport_kunmap_first_data_page(task->task_se_cmd); } } after_mode_sense: @@ -766,8 +729,8 @@ after_mode_sense: u32 blocksize; buf = sg_virt(&sg[0]); - if (!(buf)) { - printk(KERN_ERR "Unable to get buf for scatterlist\n"); + if (!buf) { + pr_err("Unable to get buf for scatterlist\n"); goto after_mode_select; } @@ -797,34 +760,20 @@ after_mode_select: } static struct se_task * -pscsi_alloc_task(struct se_cmd *cmd) +pscsi_alloc_task(unsigned char *cdb) { struct pscsi_plugin_task *pt; - unsigned char *cdb = T_TASK(cmd)->t_task_cdb; - pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); + /* + * Dynamically alloc cdb space, since it may be larger than + * TCM_MAX_COMMAND_SIZE + */ + pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL); if (!pt) { - printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n"); + pr_err("Unable to allocate struct pscsi_plugin_task\n"); return NULL; } - /* - * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation, - * allocate the extended CDB buffer for per struct se_task context - * pt->pscsi_cdb now. - */ - if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) { - - pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); - if (!(pt->pscsi_cdb)) { - printk(KERN_ERR "pSCSI: Unable to allocate extended" - " pt->pscsi_cdb\n"); - kfree(pt); - return NULL; - } - } else - pt->pscsi_cdb = &pt->__pscsi_cdb[0]; - return &pt->pscsi_task; } @@ -849,7 +798,7 @@ static inline void pscsi_blk_init_request( * also set the end_io_data pointer.to struct se_task. */ req->end_io = pscsi_req_done; - req->end_io_data = (void *)task; + req->end_io_data = task; /* * Load the referenced struct se_task's SCSI CDB into * include/linux/blkdev.h:struct request->cmd @@ -859,7 +808,7 @@ static inline void pscsi_blk_init_request( /* * Setup pointer for outgoing sense data. 
*/ - req->sense = (void *)&pt->pscsi_sense[0]; + req->sense = &pt->pscsi_sense[0]; req->sense_len = 0; } @@ -874,8 +823,8 @@ static int pscsi_blk_get_request(struct se_task *task) pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, (task->task_data_direction == DMA_TO_DEVICE), GFP_KERNEL); - if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) { - printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n", + if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) { + pr_err("PSCSI: blk_get_request() failed: %ld\n", IS_ERR(pt->pscsi_req)); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -920,15 +869,8 @@ static int pscsi_do_task(struct se_task *task) static void pscsi_free_task(struct se_task *task) { struct pscsi_plugin_task *pt = PSCSI_TASK(task); - struct se_cmd *cmd = task->task_se_cmd; /* - * Release the extended CDB allocation from pscsi_alloc_task() - * if one exists. - */ - if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) - kfree(pt->pscsi_cdb); - /* * We do not release the bio(s) here associated with this task, as * this is handled by bio_put() and pscsi_bi_endio(). */ @@ -973,7 +915,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, switch (token) { case Opt_scsi_host_id: if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { - printk(KERN_ERR "PSCSI[%d]: Unable to accept" + pr_err("PSCSI[%d]: Unable to accept" " scsi_host_id while phv_mode ==" " PHV_LLD_SCSI_HOST_NO\n", phv->phv_host_id); @@ -982,14 +924,14 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, } match_int(args, &arg); pdv->pdv_host_id = arg; - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:" + pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" " %d\n", phv->phv_host_id, pdv->pdv_host_id); pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; break; case Opt_scsi_channel_id: match_int(args, &arg); pdv->pdv_channel_id = arg; - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel" + pr_debug("PSCSI[%d]: Referencing SCSI Channel" " ID: %d\n", phv->phv_host_id, pdv->pdv_channel_id); pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; @@ -997,7 +939,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, case Opt_scsi_target_id: match_int(args, &arg); pdv->pdv_target_id = arg; - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target" + pr_debug("PSCSI[%d]: Referencing SCSI Target" " ID: %d\n", phv->phv_host_id, pdv->pdv_target_id); pdv->pdv_flags |= PDF_HAS_TARGET_ID; @@ -1005,7 +947,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, case Opt_scsi_lun_id: match_int(args, &arg); pdv->pdv_lun_id = arg; - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:" + pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" " %d\n", phv->phv_host_id, pdv->pdv_lun_id); pdv->pdv_flags |= PDF_HAS_LUN_ID; break; @@ -1028,9 +970,9 @@ static ssize_t pscsi_check_configfs_dev_params( if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { - printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" + pr_err("Missing scsi_channel_id=, scsi_target_id= and" " scsi_lun_id= parameters\n"); - return -1; + return -EINVAL; } return 0; @@ -1090,7 +1032,7 @@ static void pscsi_bi_endio(struct bio *bio, int error) bio_put(bio); } -static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) +static inline struct bio *pscsi_get_bio(int sg_num) { struct bio *bio; /* @@ -1098,8 +1040,8 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) * in block/blk-core.c:blk_make_request() */ bio = bio_kmalloc(GFP_KERNEL, 
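The mapping loop in the next hunk never puts more than BIO_MAX_PAGES vectors into a single bio; when a command spans more pages than that, it keeps allocating bios through pscsi_get_bio() and chains them. The sizing arithmetic in isolation (BIO_MAX_PAGES value assumed for illustration):

#include <stdio.h>

#define BIO_MAX_PAGES 256	/* per-bio vector cap, as in include/linux/bio.h */

int main(void)
{
	int nr_pages = 600;	/* pages still to map for one command */

	while (nr_pages > 0) {
		int nr_vecs = nr_pages < BIO_MAX_PAGES ? nr_pages : BIO_MAX_PAGES;

		nr_pages -= nr_vecs;
		printf("bio gets %d vecs, %d pages remain\n", nr_vecs, nr_pages);
	}
	return 0;
}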
sg_num); - if (!(bio)) { - printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n"); + if (!bio) { + pr_err("PSCSI: bio_kmalloc() failed\n"); return NULL; } bio->bi_end_io = pscsi_bi_endio; @@ -1107,13 +1049,7 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) return bio; } -#if 0 -#define DEBUG_PSCSI(x...) printk(x) -#else -#define DEBUG_PSCSI(x...) -#endif - -static int __pscsi_map_task_SG( +static int __pscsi_map_SG( struct se_task *task, struct scatterlist *task_sg, u32 task_sg_num, @@ -1134,7 +1070,7 @@ static int __pscsi_map_task_SG( return 0; /* * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup - * the bio_vec maplist from TCM struct se_mem -> task->task_sg -> + * the bio_vec maplist from task->task_sg -> * struct scatterlist memory. The struct se_task->task_sg[] currently needs * to be attached to struct bios for submission to Linux/SCSI using * struct request to struct scsi_device->request_queue. * * This * is ported to upstream SCSI passthrough functionality that accepts * struct scatterlist->page_link or struct page as a parameter. */ - DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages); + pr_debug("PSCSI: nr_pages: %d\n", nr_pages); for_each_sg(task_sg, sg, task_sg_num, i) { page = sg_page(sg); off = sg->offset; len = sg->length; - DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i, + pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, page, len, off); while (len > 0 && data_len > 0) { bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); - if (!(bio)) { + if (!bio) { nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); nr_pages -= nr_vecs; /* * Calls bio_kmalloc() and sets bio->bi_end_io() */ - bio = pscsi_get_bio(pdv, nr_vecs); - if (!(bio)) + bio = pscsi_get_bio(nr_vecs); + if (!bio) goto fail; if (rw) bio->bi_rw |= REQ_WRITE; - DEBUG_PSCSI("PSCSI: Allocated bio: %p," + pr_debug("PSCSI: Allocated bio: %p," " dir: %s nr_vecs: %d\n", bio, (rw) ? "rw" : "r", nr_vecs); /* @@ -1185,7 +1121,7 @@ static int __pscsi_map_task_SG( tbio = tbio->bi_next = bio; } - DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d" + pr_debug("PSCSI: Calling bio_add_pc_page() i: %d" " bio: %p page: %p len: %d off: %d\n", i, bio, page, len, off); rc = bio_add_pc_page(pdv->pdv_sd->request_queue, bio, page, bytes, off); if (rc != bytes) goto fail; - DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", + pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", bio->bi_vcnt, nr_vecs); if (bio->bi_vcnt > nr_vecs) { - DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:" + pr_debug("PSCSI: Reached bio->bi_vcnt max:" " %d i: %d bio: %p, allocating another" " bio\n", bio->bi_vcnt, i, bio); /* @@ -1220,15 +1156,15 @@ static int __pscsi_map_task_SG( * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND * primary SCSI WRITE payload mapped for struct se_task->task_sg[] */ - if (!(bidi_read)) { + if (!bidi_read) { /* * Starting with v2.6.31, call blk_make_request() passing in *hbio to * allocate the pSCSI task a struct request.
*/ pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL); - if (!(pt->pscsi_req)) { - printk(KERN_ERR "pSCSI: blk_make_request() failed\n"); + if (!pt->pscsi_req) { + pr_err("pSCSI: blk_make_request() failed\n"); goto fail; } /* @@ -1237,7 +1173,7 @@ static int __pscsi_map_task_SG( */ pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); - return task->task_sg_num; + return task->task_sg_nents; } /* * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND @@ -1245,13 +1181,13 @@ static int __pscsi_map_task_SG( */ pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL); - if (!(pt->pscsi_req->next_rq)) { - printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n"); + if (!pt->pscsi_req->next_rq) { + pr_err("pSCSI: blk_make_request() failed for BIDI\n"); goto fail; } pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); - return task->task_sg_num; + return task->task_sg_nents; fail: while (hbio) { bio = hbio; @@ -1262,7 +1198,10 @@ fail: return ret; } -static int pscsi_map_task_SG(struct se_task *task) +/* + * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call. + */ +static int pscsi_map_SG(struct se_task *task) { int ret; @@ -1270,14 +1209,14 @@ static int pscsi_map_task_SG(struct se_task *task) * Setup the main struct request for the task->task_sg[] payload */ - ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0); + ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0); if (ret >= 0 && task->task_sg_bidi) { /* * If present, set up the extra BIDI-COMMAND SCSI READ * struct request and payload. */ - ret = __pscsi_map_task_SG(task, task->task_sg_bidi, - task->task_sg_num, 1); + ret = __pscsi_map_SG(task, task->task_sg_bidi, + task->task_sg_nents, 1); } if (ret < 0) @@ -1285,33 +1224,6 @@ static int pscsi_map_task_SG(struct se_task *task) return 0; } -/* pscsi_map_task_non_SG(): - * - * - */ -static int pscsi_map_task_non_SG(struct se_task *task) -{ - struct se_cmd *cmd = TASK_CMD(task); - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - int ret = 0; - - if (pscsi_blk_get_request(task) < 0) - return PYX_TRANSPORT_LU_COMM_FAILURE; - - if (!task->task_size) - return 0; - - ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, - pt->pscsi_req, T_TASK(cmd)->t_task_buf, - task->task_size, GFP_KERNEL); - if (ret < 0) { - printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); - return PYX_TRANSPORT_LU_COMM_FAILURE; - } - return 0; -} - static int pscsi_CDB_none(struct se_task *task) { return pscsi_blk_get_request(task); @@ -1383,9 +1295,9 @@ static inline void pscsi_process_SAM_status( struct pscsi_plugin_task *pt) { task->task_scsi_status = status_byte(pt->pscsi_result); - if ((task->task_scsi_status)) { + if (task->task_scsi_status) { task->task_scsi_status <<= 1; - printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:" + pr_debug("PSCSI Status Byte exception at task: %p CDB:" " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], pt->pscsi_result); } @@ -1395,18 +1307,16 @@ static inline void pscsi_process_SAM_status( transport_complete_task(task, (!task->task_scsi_status)); break; default: - printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:" + pr_debug("PSCSI Host Byte exception at task: %p CDB:" " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], pt->pscsi_result); task->task_scsi_status = SAM_STAT_CHECK_CONDITION; task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - 
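pscsi_process_SAM_status() here recovers the SAM status from the packed SCSI result word: the low byte stores the status right-shifted by one, so it is shifted back before being reported. A sketch assuming the classic include/scsi/scsi.h encoding:

#include <stdio.h>

/* accessors as defined in the old include/scsi/scsi.h */
#define status_byte(result)	(((result) >> 1) & 0x7f)
#define host_byte(result)	(((result) >> 16) & 0xff)

int main(void)
{
	int result = 0x00000002;	/* SAM_STAT_CHECK_CONDITION in the low byte */
	int sam_status = status_byte(result) << 1;

	printf("host byte: 0x%02x, SAM status: 0x%02x\n",
	       host_byte(result), sam_status);
	return 0;
}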
TASK_CMD(task)->transport_error_status = + task->task_se_cmd->transport_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; transport_complete_task(task, 0); break; } - - return; } static void pscsi_req_done(struct request *req, int uptodate) @@ -1433,8 +1343,8 @@ static struct se_subsystem_api pscsi_template = { .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, .cdb_none = pscsi_CDB_none, - .map_task_non_SG = pscsi_map_task_non_SG, - .map_task_SG = pscsi_map_task_SG, + .map_control_SG = pscsi_map_SG, + .map_data_SG = pscsi_map_SG, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index a4cd5d352c3..ebf4f1ae2c8 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -2,7 +2,6 @@ #define TARGET_CORE_PSCSI_H #define PSCSI_VERSION "v4.0" -#define PSCSI_VIRTUAL_HBA_DEPTH 2048 /* used in pscsi_find_alloc_len() */ #ifndef INQUIRY_DATA_SIZE @@ -24,13 +23,12 @@ struct pscsi_plugin_task { struct se_task pscsi_task; - unsigned char *pscsi_cdb; - unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE]; unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; int pscsi_direction; int pscsi_result; u32 pscsi_resid; struct request *pscsi_req; + unsigned char pscsi_cdb[0]; } ____cacheline_aligned; #define PDF_HAS_CHANNEL_ID 0x01 diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 7837dd365a9..3dd81d24d9a 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -44,12 +44,8 @@ #include "target_core_rd.h" -static struct se_subsystem_api rd_dr_template; static struct se_subsystem_api rd_mcp_template; -/* #define DEBUG_RAMDISK_MCP */ -/* #define DEBUG_RAMDISK_DR */ - /* rd_attach_hba(): (Part of se_subsystem_api_t template) * * @@ -59,24 +55,21 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) struct rd_host *rd_host; rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL); - if (!(rd_host)) { - printk(KERN_ERR "Unable to allocate memory for struct rd_host\n"); + if (!rd_host) { + pr_err("Unable to allocate memory for struct rd_host\n"); return -ENOMEM; } rd_host->rd_host_id = host_id; - atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH); - atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH); - hba->hba_ptr = (void *) rd_host; + hba->hba_ptr = rd_host; - printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" + pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" - " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id, - rd_host->rd_host_id, atomic_read(&hba->max_queue_depth), - RD_MAX_SECTORS); + pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" + " MaxSectors: %u\n", hba->hba_id, + rd_host->rd_host_id, RD_MAX_SECTORS); return 0; } @@ -85,7 +78,7 @@ static void rd_detach_hba(struct se_hba *hba) { struct rd_host *rd_host = hba->hba_ptr; - printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from" + pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from" " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); kfree(rd_host); @@ -114,7 +107,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev) for (j = 0; j < sg_per_table; j++) { pg = sg_page(&sg[j]); - if ((pg)) { + if (pg) { __free_page(pg); page_count++; } @@ -123,7 +116,7 @@ static void 
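The target_core_pscsi.h hunk above replaces the fixed __pscsi_cdb[TCM_MAX_COMMAND_SIZE] buffer plus conditional kmalloc with a flexible array member at the end of the struct, so pscsi_alloc_task() can size the task and its CDB in one allocation. The pattern as a userspace sketch (struct fields abbreviated, names hypothetical):

#include <stdlib.h>
#include <string.h>

struct plugin_task {
	int result;
	unsigned char cdb[];	/* flexible array member; the patch spells it [0] */
};

static struct plugin_task *task_alloc(const unsigned char *cdb, size_t cdb_len)
{
	/* one zeroed allocation covers the header and the variable-size CDB */
	struct plugin_task *pt = calloc(1, sizeof(*pt) + cdb_len);

	if (!pt)
		return NULL;
	memcpy(pt->cdb, cdb, cdb_len);
	return pt;
}

int main(void)
{
	unsigned char cdb[16] = { 0x28 };	/* READ(10) opcode, rest zero */
	struct plugin_task *pt = task_alloc(cdb, sizeof(cdb));

	free(pt);
	return 0;
}

A single allocation also means pscsi_free_task() no longer needs the extended-CDB kfree() branch that the pscsi.c hunk deletes.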
rd_release_device_space(struct rd_dev *rd_dev) kfree(sg); } - printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk" + pr_debug("CORE_RD[%u] - Released device space for Ramdisk" " Device ID: %u, pages %u in %u tables total bytes %lu\n", rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); @@ -148,7 +141,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) struct scatterlist *sg; if (rd_dev->rd_page_count <= 0) { - printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", + pr_err("Illegal page count: %u for Ramdisk device\n", rd_dev->rd_page_count); return -EINVAL; } @@ -157,8 +150,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev) sg_tables = (total_sg_needed / max_sg_per_table) + 1; sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); - if (!(sg_table)) { - printk(KERN_ERR "Unable to allocate memory for Ramdisk" + if (!sg_table) { + pr_err("Unable to allocate memory for Ramdisk" " scatterlist tables\n"); return -ENOMEM; } @@ -172,13 +165,13 @@ static int rd_build_device_space(struct rd_dev *rd_dev) sg = kzalloc(sg_per_table * sizeof(struct scatterlist), GFP_KERNEL); - if (!(sg)) { - printk(KERN_ERR "Unable to allocate scatterlist array" + if (!sg) { + pr_err("Unable to allocate scatterlist array" " for struct rd_dev\n"); return -ENOMEM; } - sg_init_table((struct scatterlist *)&sg[0], sg_per_table); + sg_init_table(sg, sg_per_table); sg_table[i].sg_table = sg; sg_table[i].rd_sg_count = sg_per_table; @@ -188,8 +181,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev) for (j = 0; j < sg_per_table; j++) { pg = alloc_pages(GFP_KERNEL, 0); - if (!(pg)) { - printk(KERN_ERR "Unable to allocate scatterlist" + if (!pg) { + pr_err("Unable to allocate scatterlist" " pages for struct rd_dev_sg_table\n"); return -ENOMEM; } @@ -201,7 +194,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) total_sg_needed -= sg_per_table; } - printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of" + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count, rd_dev->sg_table_count); @@ -218,8 +211,8 @@ static void *rd_allocate_virtdevice( struct rd_host *rd_host = hba->hba_ptr; rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL); - if (!(rd_dev)) { - printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n"); + if (!rd_dev) { + pr_err("Unable to allocate memory for struct rd_dev\n"); return NULL; } @@ -229,11 +222,6 @@ static void *rd_allocate_virtdevice( return rd_dev; } -static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name) -{ - return rd_allocate_virtdevice(hba, name, 1); -} - static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) { return rd_allocate_virtdevice(hba, name, 0); @@ -273,16 +261,15 @@ static struct se_device *rd_create_virtdevice( dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; dev = transport_add_device_to_core_hba(hba, - (rd_dev->rd_direct) ? 
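rd_build_device_space() above carves the requested page count into scatterlist tables of at most max_sg_per_table entries, with the last table taking the remainder. The arithmetic on its own (constants illustrative; the driver derives max_sg_per_table from RD_MAX_ALLOCATION_SIZE):

#include <stdio.h>

int main(void)
{
	int max_sg_per_table = 8192;	/* RD_MAX_ALLOCATION_SIZE / sizeof(sg) */
	int total_sg_needed = 20000;	/* rd_page_count for the device */
	int sg_tables = (total_sg_needed / max_sg_per_table) + 1;
	int i;

	for (i = 0; i < sg_tables && total_sg_needed; i++) {
		int sg_per_table = total_sg_needed > max_sg_per_table ?
				   max_sg_per_table : total_sg_needed;

		printf("table %d holds %d entries\n", i, sg_per_table);
		total_sg_needed -= sg_per_table;
	}
	return 0;
}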
&rd_dr_template : - &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev, + &rd_mcp_template, se_dev, dev_flags, rd_dev, &dev_limits, prod, rev); - if (!(dev)) + if (!dev) goto fail; rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; rd_dev->rd_queue_depth = dev->queue_depth; - printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" + pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" " %u pages in %u tables, %lu total bytes\n", rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" : "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, @@ -296,14 +283,6 @@ fail: return ERR_PTR(ret); } -static struct se_device *rd_DIRECT_create_virtdevice( - struct se_hba *hba, - struct se_subsystem_dev *se_dev, - void *p) -{ - return rd_create_virtdevice(hba, se_dev, p, 1); -} - static struct se_device *rd_MEMCPY_create_virtdevice( struct se_hba *hba, struct se_subsystem_dev *se_dev, @@ -330,16 +309,15 @@ static inline struct rd_request *RD_REQ(struct se_task *task) } static struct se_task * -rd_alloc_task(struct se_cmd *cmd) +rd_alloc_task(unsigned char *cdb) { struct rd_request *rd_req; rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); if (!rd_req) { - printk(KERN_ERR "Unable to allocate struct rd_request\n"); + pr_err("Unable to allocate struct rd_request\n"); return NULL; } - rd_req->rd_dev = SE_DEV(cmd)->dev_ptr; return &rd_req->rd_task; } @@ -360,7 +338,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return sg_table; } - printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n", + pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n", page); return NULL; @@ -373,7 +351,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) static int rd_MEMCPY_read(struct rd_request *req) { struct se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_dev; + struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *sg_d, *sg_s; void *dst, *src; @@ -382,32 +360,32 @@ static int rd_MEMCPY_read(struct rd_request *req) u32 rd_offset = req->rd_offset; table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -1; + if (!table) + return -EINVAL; table_sg_end = (table->page_end_offset - req->rd_page); sg_d = task->task_sg; sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" + + pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, req->rd_page, req->rd_offset); -#endif + src_offset = rd_offset; while (req->rd_size) { if ((sg_d[i].length - dst_offset) < (sg_s[j].length - src_offset)) { length = (sg_d[i].length - dst_offset); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d" + + pr_debug("Step 1 - sg_d[%d]: %p length: %d" " offset: %u sg_s[%d].length: %u\n", i, &sg_d[i], sg_d[i].length, sg_d[i].offset, j, sg_s[j].length); - printk(KERN_INFO "Step 1 - length: %u dst_offset: %u" + pr_debug("Step 1 - length: %u dst_offset: %u" " src_offset: %u\n", length, dst_offset, src_offset); -#endif + if (length > req->rd_size) length = req->rd_size; @@ -424,15 +402,15 @@ static int rd_MEMCPY_read(struct rd_request *req) page_end = 0; } else { length = (sg_s[j].length - src_offset); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d" + + pr_debug("Step 2 - sg_d[%d]: %p length: %d" " offset: %u sg_s[%d].length: %u\n", i, &sg_d[i], 
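The rd_MEMCPY_read()/rd_MEMCPY_write() loops around here advance a destination and a source scatterlist in lockstep, each pass copying the smaller remaining piece of the two current segments. Reduced to plain buffers, the walk looks like this (a sketch, not the driver code):

#include <stdio.h>
#include <string.h>

struct seg { unsigned char *buf; size_t len; };

/* copy `size` bytes across two segment lists, smaller remainder first */
static void sg_copy(struct seg *dst, struct seg *src, size_t size)
{
	size_t i = 0, j = 0, doff = 0, soff = 0;

	while (size) {
		size_t len = dst[i].len - doff < src[j].len - soff ?
			     dst[i].len - doff : src[j].len - soff;

		if (len > size)
			len = size;
		memcpy(dst[i].buf + doff, src[j].buf + soff, len);
		doff += len; soff += len; size -= len;
		if (doff == dst[i].len) { i++; doff = 0; }
		if (soff == src[j].len) { j++; soff = 0; }
	}
}

int main(void)
{
	unsigned char a[4] = "abcd", b[4];
	struct seg src[] = { { a, 2 }, { a + 2, 2 } };
	struct seg dst[] = { { b, 3 }, { b + 3, 1 } };

	sg_copy(dst, src, 4);
	printf("%.4s\n", b);
	return 0;
}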
sg_d[i].length, sg_d[i].offset, j, sg_s[j].length); - printk(KERN_INFO "Step 2 - length: %u dst_offset: %u" + pr_debug("Step 2 - length: %u dst_offset: %u" " src_offset: %u\n", length, dst_offset, src_offset); -#endif + if (length > req->rd_size) length = req->rd_size; @@ -456,32 +434,29 @@ static int rd_MEMCPY_read(struct rd_request *req) memcpy(dst, src, length); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "page: %u, remaining size: %u, length: %u," + pr_debug("page: %u, remaining size: %u, length: %u," " i: %u, j: %u\n", req->rd_page, (req->rd_size - length), length, i, j); -#endif + req->rd_size -= length; - if (!(req->rd_size)) + if (!req->rd_size) return 0; if (!page_end) continue; if (++req->rd_page <= table->page_end_offset) { -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "page: %u in same page table\n", + pr_debug("page: %u in same page table\n", req->rd_page); -#endif continue; } -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "getting new page table for page: %u\n", + + pr_debug("getting new page table for page: %u\n", req->rd_page); -#endif + table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -1; + if (!table) + return -EINVAL; sg_s = &table->sg_table[j = 0]; } @@ -496,7 +471,7 @@ static int rd_MEMCPY_read(struct rd_request *req) static int rd_MEMCPY_write(struct rd_request *req) { struct se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_dev; + struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *sg_d, *sg_s; void *dst, *src; @@ -505,32 +480,32 @@ static int rd_MEMCPY_write(struct rd_request *req) u32 rd_offset = req->rd_offset; table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -1; + if (!table) + return -EINVAL; table_sg_end = (table->page_end_offset - req->rd_page); sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; sg_s = task->task_sg; -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u," + + pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u," " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, req->rd_page, req->rd_offset); -#endif + dst_offset = rd_offset; while (req->rd_size) { if ((sg_s[i].length - src_offset) < (sg_d[j].length - dst_offset)) { length = (sg_s[i].length - src_offset); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d" + + pr_debug("Step 1 - sg_s[%d]: %p length: %d" " offset: %d sg_d[%d].length: %u\n", i, &sg_s[i], sg_s[i].length, sg_s[i].offset, j, sg_d[j].length); - printk(KERN_INFO "Step 1 - length: %u src_offset: %u" + pr_debug("Step 1 - length: %u src_offset: %u" " dst_offset: %u\n", length, src_offset, dst_offset); -#endif + if (length > req->rd_size) length = req->rd_size; @@ -547,15 +522,15 @@ static int rd_MEMCPY_write(struct rd_request *req) page_end = 0; } else { length = (sg_d[j].length - dst_offset); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d" + + pr_debug("Step 2 - sg_s[%d]: %p length: %d" " offset: %d sg_d[%d].length: %u\n", i, &sg_s[i], sg_s[i].length, sg_s[i].offset, j, sg_d[j].length); - printk(KERN_INFO "Step 2 - length: %u src_offset: %u" + pr_debug("Step 2 - length: %u src_offset: %u" " dst_offset: %u\n", length, src_offset, dst_offset); -#endif + if (length > req->rd_size) length = req->rd_size; @@ -579,32 +554,29 @@ static int rd_MEMCPY_write(struct rd_request *req) memcpy(dst, src, length); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "page: %u, remaining size: %u, length: %u," + pr_debug("page: %u, remaining 
size: %u, length: %u," " i: %u, j: %u\n", req->rd_page, (req->rd_size - length), length, i, j); -#endif + req->rd_size -= length; - if (!(req->rd_size)) + if (!req->rd_size) return 0; if (!page_end) continue; if (++req->rd_page <= table->page_end_offset) { -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "page: %u in same page table\n", + pr_debug("page: %u in same page table\n", req->rd_page); -#endif continue; } -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "getting new page table for page: %u\n", + + pr_debug("getting new page table for page: %u\n", req->rd_page); -#endif + table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -1; + if (!table) + return -EINVAL; sg_d = &table->sg_table[j = 0]; } @@ -623,11 +595,11 @@ static int rd_MEMCPY_do_task(struct se_task *task) unsigned long long lba; int ret; - req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE; + req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE; lba = task->task_lba; req->rd_offset = (do_div(lba, - (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) * - DEV_ATTRIB(dev)->block_size; + (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) * + dev->se_sub_dev->se_dev_attrib.block_size; req->rd_size = task->task_size; if (task->task_data_direction == DMA_FROM_DEVICE) @@ -644,274 +616,6 @@ static int rd_MEMCPY_do_task(struct se_task *task) return PYX_TRANSPORT_SENT_TO_TRANSPORT; } -/* rd_DIRECT_with_offset(): - * - * - */ -static int rd_DIRECT_with_offset( - struct se_task *task, - struct list_head *se_mem_list, - u32 *se_mem_cnt, - u32 *task_offset) -{ - struct rd_request *req = RD_REQ(task); - struct rd_dev *dev = req->rd_dev; - struct rd_dev_sg_table *table; - struct se_mem *se_mem; - struct scatterlist *sg_s; - u32 j = 0, set_offset = 1; - u32 get_next_table = 0, offset_length, table_sg_end; - - table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -1; - - table_sg_end = (table->page_end_offset - req->rd_page); - sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n", - (task->task_data_direction == DMA_TO_DEVICE) ? - "Write" : "Read", - task->task_lba, req->rd_size, req->rd_page, req->rd_offset); -#endif - while (req->rd_size) { - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -1; - } - INIT_LIST_HEAD(&se_mem->se_list); - - if (set_offset) { - offset_length = sg_s[j].length - req->rd_offset; - if (offset_length > req->rd_size) - offset_length = req->rd_size; - - se_mem->se_page = sg_page(&sg_s[j++]); - se_mem->se_off = req->rd_offset; - se_mem->se_len = offset_length; - - set_offset = 0; - get_next_table = (j > table_sg_end); - goto check_eot; - } - - offset_length = (req->rd_size < req->rd_offset) ? 
- req->rd_size : req->rd_offset; - - se_mem->se_page = sg_page(&sg_s[j]); - se_mem->se_len = offset_length; - - set_offset = 1; - -check_eot: -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u" - " se_mem: %p, se_page: %p se_off: %u se_len: %u\n", - req->rd_page, req->rd_size, offset_length, j, se_mem, - se_mem->se_page, se_mem->se_off, se_mem->se_len); -#endif - list_add_tail(&se_mem->se_list, se_mem_list); - (*se_mem_cnt)++; - - req->rd_size -= offset_length; - if (!(req->rd_size)) - goto out; - - if (!set_offset && !get_next_table) - continue; - - if (++req->rd_page <= table->page_end_offset) { -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "page: %u in same page table\n", - req->rd_page); -#endif - continue; - } -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "getting new page table for page: %u\n", - req->rd_page); -#endif - table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -1; - - sg_s = &table->sg_table[j = 0]; - } - -out: - T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", - *se_mem_cnt); -#endif - return 0; -} - -/* rd_DIRECT_without_offset(): - * - * - */ -static int rd_DIRECT_without_offset( - struct se_task *task, - struct list_head *se_mem_list, - u32 *se_mem_cnt, - u32 *task_offset) -{ - struct rd_request *req = RD_REQ(task); - struct rd_dev *dev = req->rd_dev; - struct rd_dev_sg_table *table; - struct se_mem *se_mem; - struct scatterlist *sg_s; - u32 length, j = 0; - - table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -1; - - sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n", - (task->task_data_direction == DMA_TO_DEVICE) ? - "Write" : "Read", - task->task_lba, req->rd_size, req->rd_page); -#endif - while (req->rd_size) { - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -1; - } - INIT_LIST_HEAD(&se_mem->se_list); - - length = (req->rd_size < sg_s[j].length) ? 
- req->rd_size : sg_s[j].length; - - se_mem->se_page = sg_page(&sg_s[j++]); - se_mem->se_len = length; - -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p," - " se_page: %p se_off: %u se_len: %u\n", req->rd_page, - req->rd_size, j, se_mem, se_mem->se_page, - se_mem->se_off, se_mem->se_len); -#endif - list_add_tail(&se_mem->se_list, se_mem_list); - (*se_mem_cnt)++; - - req->rd_size -= length; - if (!(req->rd_size)) - goto out; - - if (++req->rd_page <= table->page_end_offset) { -#ifdef DEBUG_RAMDISK_DR - printk("page: %u in same page table\n", - req->rd_page); -#endif - continue; - } -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "getting new page table for page: %u\n", - req->rd_page); -#endif - table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -1; - - sg_s = &table->sg_table[j = 0]; - } - -out: - T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", - *se_mem_cnt); -#endif - return 0; -} - -/* rd_DIRECT_do_se_mem_map(): - * - * - */ -static int rd_DIRECT_do_se_mem_map( - struct se_task *task, - struct list_head *se_mem_list, - void *in_mem, - struct se_mem *in_se_mem, - struct se_mem **out_se_mem, - u32 *se_mem_cnt, - u32 *task_offset_in) -{ - struct se_cmd *cmd = task->task_se_cmd; - struct rd_request *req = RD_REQ(task); - u32 task_offset = *task_offset_in; - unsigned long long lba; - int ret; - - req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) / - PAGE_SIZE); - lba = task->task_lba; - req->rd_offset = (do_div(lba, - (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) * - DEV_ATTRIB(task->se_dev)->block_size; - req->rd_size = task->task_size; - - if (req->rd_offset) - ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt, - task_offset_in); - else - ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt, - task_offset_in); - - if (ret < 0) - return ret; - - if (CMD_TFO(cmd)->task_sg_chaining == 0) - return 0; - /* - * Currently prevent writers from multiple HW fabrics doing - * pci_map_sg() to RD_DR's internal scatterlist memory. - */ - if (cmd->data_direction == DMA_TO_DEVICE) { - printk(KERN_ERR "DMA_TO_DEVICE not supported for" - " RAMDISK_DR with task_sg_chaining=1\n"); - return -1; - } - /* - * Special case for if task_sg_chaining is enabled, then - * we setup struct se_task->task_sg[], as it will be used by - * transport_do_task_sg_chain() for creating chained SGLs - * across multiple struct se_task->task_sg[]. - */ - if (!(transport_calc_sg_num(task, - list_entry(T_TASK(cmd)->t_mem_list->next, - struct se_mem, se_list), - task_offset))) - return -1; - - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, - list_entry(T_TASK(cmd)->t_mem_list->next, - struct se_mem, se_list), - out_se_mem, se_mem_cnt, task_offset_in); -} - -/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template) - * - * - */ -static int rd_DIRECT_do_task(struct se_task *task) -{ - /* - * At this point the locally allocated RD tables have been mapped - * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
- */ - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - - return PYX_TRANSPORT_SENT_TO_TRANSPORT; -} - /* rd_free_task(): (Part of se_subsystem_api_t template) * * @@ -956,7 +660,7 @@ static ssize_t rd_set_configfs_dev_params( case Opt_rd_pages: match_int(args, &arg); rd_dev->rd_page_count = arg; - printk(KERN_INFO "RAMDISK: Referencing Page" + pr_debug("RAMDISK: Referencing Page" " Count: %u\n", rd_dev->rd_page_count); rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; break; @@ -974,8 +678,8 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { - printk(KERN_INFO "Missing rd_pages= parameter\n"); - return -1; + pr_debug("Missing rd_pages= parameter\n"); + return -EINVAL; } return 0; @@ -1021,32 +725,11 @@ static sector_t rd_get_blocks(struct se_device *dev) { struct rd_dev *rd_dev = dev->dev_ptr; unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / - DEV_ATTRIB(dev)->block_size) - 1; + dev->se_sub_dev->se_dev_attrib.block_size) - 1; return blocks_long; } -static struct se_subsystem_api rd_dr_template = { - .name = "rd_dr", - .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, - .attach_hba = rd_attach_hba, - .detach_hba = rd_detach_hba, - .allocate_virtdevice = rd_DIRECT_allocate_virtdevice, - .create_virtdevice = rd_DIRECT_create_virtdevice, - .free_device = rd_free_device, - .alloc_task = rd_alloc_task, - .do_task = rd_DIRECT_do_task, - .free_task = rd_free_task, - .check_configfs_dev_params = rd_check_configfs_dev_params, - .set_configfs_dev_params = rd_set_configfs_dev_params, - .show_configfs_dev_params = rd_show_configfs_dev_params, - .get_cdb = rd_get_cdb, - .get_device_rev = rd_get_device_rev, - .get_device_type = rd_get_device_type, - .get_blocks = rd_get_blocks, - .do_se_mem_map = rd_DIRECT_do_se_mem_map, -}; - static struct se_subsystem_api rd_mcp_template = { .name = "rd_mcp", .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, @@ -1071,13 +754,8 @@ int __init rd_module_init(void) { int ret; - ret = transport_subsystem_register(&rd_dr_template); - if (ret < 0) - return ret; - ret = transport_subsystem_register(&rd_mcp_template); if (ret < 0) { - transport_subsystem_release(&rd_dr_template); return ret; } @@ -1086,6 +764,5 @@ int __init rd_module_init(void) void rd_module_exit(void) { - transport_subsystem_release(&rd_dr_template); transport_subsystem_release(&rd_mcp_template); } diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 3ea19e29d8e..0d027732cd0 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -7,8 +7,6 @@ /* Largest piece of memory kmalloc can allocate */ #define RD_MAX_ALLOCATION_SIZE 65536 -/* Maximum queuedepth for the Ramdisk HBA */ -#define RD_HBA_QUEUE_DEPTH 256 #define RD_DEVICE_QUEUE_DEPTH 32 #define RD_MAX_DEVICE_QUEUE_DEPTH 128 #define RD_BLOCKSIZE 512 @@ -34,8 +32,6 @@ struct rd_request { u32 rd_page_count; /* Scatterlist count */ u32 rd_size; - /* Ramdisk device */ - struct rd_dev *rd_dev; } ____cacheline_aligned; struct rd_dev_sg_table { diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c index dc6fed037ab..72843441d4f 100644 --- a/drivers/target/target_core_scdb.c +++ b/drivers/target/target_core_scdb.c @@ -42,13 +42,13 @@ */ void split_cdb_XX_6( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { cdb[1] = (lba >> 16) & 0x1f; cdb[2] = (lba >> 8) & 0xff; cdb[3] = lba & 0xff; - cdb[4] = 
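The split_cdb_XX_*() changes in the target_core_scdb.c hunk that follows drop the pass-by-pointer sector count, since the helpers only read it. A userspace rendering of the 10-byte variant with the big-endian stores open-coded (the driver itself uses put_unaligned_be32()/put_unaligned_be16()):

#include <stdio.h>

static void put_be32(unsigned char *p, unsigned int v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be16(unsigned char *p, unsigned int v)
{
	p[0] = v >> 8; p[1] = v;
}

/* mirrors split_cdb_XX_10(): LBA at byte 2, sectors at byte 7 */
static void split_cdb_10(unsigned long long lba, unsigned int sectors,
			 unsigned char *cdb)
{
	put_be32(&cdb[2], (unsigned int)lba);
	put_be16(&cdb[7], sectors);
}

int main(void)
{
	unsigned char cdb[10] = { 0x28 };	/* READ(10) */

	split_cdb_10(0x12345678ULL, 8, cdb);
	printf("lba: %02x %02x %02x %02x sectors: %02x %02x\n",
	       cdb[2], cdb[3], cdb[4], cdb[5], cdb[7], cdb[8]);
	return 0;
}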
*sectors & 0xff; + cdb[4] = sectors & 0xff; } /* split_cdb_XX_10(): @@ -57,11 +57,11 @@ void split_cdb_XX_6( */ void split_cdb_XX_10( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be16(*sectors, &cdb[7]); + put_unaligned_be16(sectors, &cdb[7]); } /* split_cdb_XX_12(): @@ -70,11 +70,11 @@ void split_cdb_XX_10( */ void split_cdb_XX_12( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be32(*sectors, &cdb[6]); + put_unaligned_be32(sectors, &cdb[6]); } /* split_cdb_XX_16(): @@ -83,11 +83,11 @@ void split_cdb_XX_12( */ void split_cdb_XX_16( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { put_unaligned_be64(lba, &cdb[2]); - put_unaligned_be32(*sectors, &cdb[10]); + put_unaligned_be32(sectors, &cdb[10]); } /* @@ -97,9 +97,9 @@ void split_cdb_XX_16( */ void split_cdb_XX_32( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { put_unaligned_be64(lba, &cdb[12]); - put_unaligned_be32(*sectors, &cdb[28]); + put_unaligned_be32(sectors, &cdb[28]); } diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h index 98cd1c01ed8..48e9ccc9585 100644 --- a/drivers/target/target_core_scdb.h +++ b/drivers/target/target_core_scdb.h @@ -1,10 +1,10 @@ #ifndef TARGET_CORE_SCDB_H #define TARGET_CORE_SCDB_H -extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *); -extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *); -extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *); -extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *); -extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *); +extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *); +extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *); +extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *); +extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *); +extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *); #endif /* TARGET_CORE_SCDB_H */ diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 5e3a067a747..a8d6e1dee93 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -402,8 +402,8 @@ static ssize_t target_stat_scsi_lu_show_attr_lu_name( return -ENODEV; /* scsiLuWwnName */ return snprintf(page, PAGE_SIZE, "%s\n", - (strlen(DEV_T10_WWN(dev)->unit_serial)) ? - (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None"); + (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ? + dev->se_sub_dev->t10_wwn.unit_serial : "None"); } DEV_STAT_SCSI_LU_ATTR_RO(lu_name); @@ -413,17 +413,17 @@ static ssize_t target_stat_scsi_lu_show_attr_vend( struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_subsystem_dev, dev_stat_grps); struct se_device *dev = se_subdev->se_dev_ptr; - int j; - char str[28]; + int i; + char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1]; if (!dev) return -ENODEV; + /* scsiLuVendorId */ - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - for (j = 0; j < 8; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? - DEV_T10_WWN(dev)->vendor[j] : 0x20; - str[8] = 0; + for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++) + str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ? 
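The reworked INQUIRY-string attributes in target_core_stat.c size their buffer from the t10_wwn field itself and substitute a space for any non-printable byte; both the loop bound and the buffer length must name the field actually being formatted. The pattern in isolation:

#include <ctype.h>
#include <stdio.h>

struct t10_wwn_like { char vendor[8]; };

int main(void)
{
	struct t10_wwn_like wwn = { { 'L', 'I', 'O', '-', 'O', 'R', 'G', 0 } };
	char str[sizeof(wwn.vendor) + 1];	/* +1 for the terminating NUL */
	size_t i;

	for (i = 0; i < sizeof(wwn.vendor); i++)
		str[i] = isprint((unsigned char)wwn.vendor[i]) ?
			 wwn.vendor[i] : ' ';
	str[i] = '\0';
	printf("[%s]\n", str);
	return 0;
}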
+ dev->se_sub_dev->t10_wwn.vendor[i] : ' '; + str[i] = '\0'; return snprintf(page, PAGE_SIZE, "%s\n", str); } DEV_STAT_SCSI_LU_ATTR_RO(vend); @@ -434,18 +434,17 @@ static ssize_t target_stat_scsi_lu_show_attr_prod( struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_subsystem_dev, dev_stat_grps); struct se_device *dev = se_subdev->se_dev_ptr; - int j; - char str[28]; + int i; + char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1]; if (!dev) return -ENODEV; /* scsiLuProductId */ - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - for (j = 0; j < 16; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? - DEV_T10_WWN(dev)->model[j] : 0x20; - str[16] = 0; + for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.model); i++) + str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ? + dev->se_sub_dev->t10_wwn.model[i] : ' '; + str[i] = '\0'; return snprintf(page, PAGE_SIZE, "%s\n", str); } DEV_STAT_SCSI_LU_ATTR_RO(prod); @@ -456,18 +455,17 @@ static ssize_t target_stat_scsi_lu_show_attr_rev( struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_subsystem_dev, dev_stat_grps); struct se_device *dev = se_subdev->se_dev_ptr; - int j; - char str[28]; + int i; + char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1]; if (!dev) return -ENODEV; /* scsiLuRevisionId */ - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - for (j = 0; j < 4; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? - DEV_T10_WWN(dev)->revision[j] : 0x20; - str[4] = 0; + for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++) + str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ? + dev->se_sub_dev->t10_wwn.revision[i] : ' '; + str[i] = '\0'; return snprintf(page, PAGE_SIZE, "%s\n", str); } DEV_STAT_SCSI_LU_ATTR_RO(rev); @@ -484,7 +482,7 @@ static ssize_t target_stat_scsi_lu_show_attr_dev_type( /* scsiLuPeripheralType */ return snprintf(page, PAGE_SIZE, "%u\n", - TRANSPORT(dev)->get_device_type(dev)); + dev->transport->get_device_type(dev)); } DEV_STAT_SCSI_LU_ATTR_RO(dev_type); @@ -668,18 +666,18 @@ static struct config_item_type target_stat_scsi_lu_cit = { */ void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) { - struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group; + struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group; - config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group, + config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group, "scsi_dev", &target_stat_scsi_dev_cit); - config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group, + config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group, "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); - config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group, + config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group, "scsi_lu", &target_stat_scsi_lu_cit); - dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group; - dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group; - dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group; + dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group; + dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group; + dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group; dev_stat_grp->default_groups[3] = NULL; } @@ -922,7 +920,7 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name( tpg = sep->sep_tpg; ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", -
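The churn through the rest of target_core_stat.c is mechanical: accessor macros such as TPG_TFO() and DEV_T10_WWN() give way to spelled-out member access, as the hunks below show. A toy model of the shape of that change (types and the macro body assumed for illustration):

#include <stdio.h>

struct fabric_ops {
	unsigned int (*tpg_get_tag)(void *tpg);
};

struct portal_group {
	struct fabric_ops *se_tpg_tfo;
};

#define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo)	/* the removed wrapper */

static unsigned int get_tag(void *tpg) { (void)tpg; return 1; }

int main(void)
{
	struct fabric_ops ops = { get_tag };
	struct portal_group tpg = { &ops };

	/* before: TPG_TFO(&tpg)->tpg_get_tag(&tpg); after, as in the patch: */
	printf("%u\n", tpg.se_tpg_tfo->tpg_get_tag(&tpg));
	return 0;
}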
TPG_TFO(tpg)->get_fabric_name(), sep->sep_index); + tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index); spin_unlock(&lun->lun_sep_lock); return ret; } @@ -945,8 +943,8 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( tpg = sep->sep_tpg; ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", - TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&lun->lun_sep_lock); return ret; } @@ -1128,7 +1126,7 @@ static ssize_t target_stat_scsi_transport_show_attr_device( tpg = sep->sep_tpg; /* scsiTransportType */ ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); spin_unlock(&lun->lun_sep_lock); return ret; } @@ -1150,7 +1148,7 @@ static ssize_t target_stat_scsi_transport_show_attr_indx( } tpg = sep->sep_tpg; ret = snprintf(page, PAGE_SIZE, "%u\n", - TPG_TFO(tpg)->tpg_get_inst_index(tpg)); + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); spin_unlock(&lun->lun_sep_lock); return ret; } @@ -1173,10 +1171,10 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name( return -ENODEV; } tpg = sep->sep_tpg; - wwn = DEV_T10_WWN(dev); + wwn = &dev->se_sub_dev->t10_wwn; /* scsiTransportDevName */ ret = snprintf(page, PAGE_SIZE, "%s+%s\n", - TPG_TFO(tpg)->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_wwn(tpg), (strlen(wwn->unit_serial)) ? wwn->unit_serial : wwn->vendor); spin_unlock(&lun->lun_sep_lock); @@ -1212,18 +1210,18 @@ static struct config_item_type target_stat_scsi_transport_cit = { */ void target_stat_setup_port_default_groups(struct se_lun *lun) { - struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; + struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group; - config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group, + config_group_init_type_name(&lun->port_stat_grps.scsi_port_group, "scsi_port", &target_stat_scsi_port_cit); - config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group, + config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group, "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); - config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group, + config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group, "scsi_transport", &target_stat_scsi_transport_cit); - port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group; - port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group; - port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group; + port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group; + port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group; + port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group; port_stat_grp->default_groups[3] = NULL; } @@ -1264,7 +1262,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst( tpg = nacl->se_tpg; /* scsiInstIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", - TPG_TFO(tpg)->tpg_get_inst_index(tpg)); + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } @@ -1314,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port( } tpg = nacl->se_tpg; /* scsiAuthIntrTgtPortIndex */ - ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } @@ -1632,7 +1630,7 @@ 
static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( tpg = nacl->se_tpg; /* scsiInstIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", - TPG_TFO(tpg)->tpg_get_inst_index(tpg)); + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } @@ -1682,7 +1680,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port( } tpg = nacl->se_tpg; /* scsiPortIndex */ - ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } @@ -1708,7 +1706,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx( tpg = nacl->se_tpg; /* scsiAttIntrPortIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", - TPG_TFO(tpg)->sess_get_index(se_sess)); + tpg->se_tpg_tfo->sess_get_index(se_sess)); spin_unlock_irq(&nacl->nacl_sess_lock); return ret; } @@ -1757,8 +1755,8 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( tpg = nacl->se_tpg; /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ memset(buf, 0, 64); - if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) - TPG_TFO(tpg)->sess_get_initiator_sid(se_sess, + if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) + tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, (unsigned char *)&buf[0], 64); ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); @@ -1797,14 +1795,14 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = { */ void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) { - struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; + struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group; - config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group, + config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group, "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); - config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group, + config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group, "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); - ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group; - ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group; + ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group; + ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group; ml_stat_grp->default_groups[2] = NULL; } diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 179063d81cd..27d4925e51c 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -41,13 +41,6 @@ #include "target_core_alua.h" #include "target_core_pr.h" -#define DEBUG_LUN_RESET -#ifdef DEBUG_LUN_RESET -#define DEBUG_LR(x...) printk(KERN_INFO x) -#else -#define DEBUG_LR(x...) -#endif - struct se_tmr_req *core_tmr_alloc_req( struct se_cmd *se_cmd, void *fabric_tmr_ptr, @@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req( tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? 
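core_tmr_alloc_req(), whose allocation straddles this point, picks its GFP flags from the calling context because a TMR request can be built where sleeping is not allowed. Condensed from the hunk (a fragment, not the complete function):

	struct se_tmr_req *tmr;

	/* softirq/hardirq context must not sleep, so fall back to GFP_ATOMIC */
	tmr = kmem_cache_zalloc(se_tmr_req_cache,
				in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	if (!tmr)
		return ERR_PTR(-ENOMEM);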
GFP_ATOMIC : GFP_KERNEL); - if (!(tmr)) { - printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); + if (!tmr) { + pr_err("Unable to allocate struct se_tmr_req\n"); return ERR_PTR(-ENOMEM); } tmr->task_cmd = se_cmd; @@ -80,9 +73,9 @@ void core_tmr_release_req( return; } - spin_lock(&dev->se_tmr_lock); + spin_lock_irq(&dev->se_tmr_lock); list_del(&tmr->tmr_list); - spin_unlock(&dev->se_tmr_lock); + spin_unlock_irq(&dev->se_tmr_lock); kmem_cache_free(se_tmr_req_cache, tmr); } @@ -93,14 +86,14 @@ static void core_tmr_handle_tas_abort( int tas, int fe_count) { - if (!(fe_count)) { + if (!fe_count) { transport_cmd_finish_abort(cmd, 1); return; } /* * TASK ABORTED status (TAS) bit support */ - if (((tmr_nacl != NULL) && + if ((tmr_nacl && (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) transport_send_task_abort(cmd); @@ -113,15 +106,14 @@ int core_tmr_lun_reset( struct list_head *preempt_and_abort_list, struct se_cmd *prout_cmd) { - struct se_cmd *cmd; - struct se_queue_req *qr, *qr_tmp; + struct se_cmd *cmd, *tcmd; struct se_node_acl *tmr_nacl = NULL; struct se_portal_group *tmr_tpg = NULL; - struct se_queue_obj *qobj = dev->dev_queue_obj; + struct se_queue_obj *qobj = &dev->dev_queue_obj; struct se_tmr_req *tmr_p, *tmr_pp; struct se_task *task, *task_tmp; unsigned long flags; - int fe_count, state, tas; + int fe_count, tas; /* * TASK_ABORTED status bit, this is configurable via ConfigFS * struct se_device attributes. spc4r17 section 7.4.6 Control mode page @@ -133,7 +125,7 @@ int core_tmr_lun_reset( * which the command was received shall be completed with TASK ABORTED * status (see SAM-4). */ - tas = DEV_ATTRIB(dev)->emulate_tas; + tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; /* * Determine if this se_tmr is coming from a $FABRIC_MOD * or struct se_device passthrough.. @@ -142,20 +134,20 @@ tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; tmr_tpg = tmr->task_cmd->se_sess->se_tpg; if (tmr_nacl && tmr_tpg) { - DEBUG_LR("LUN_RESET: TMR caller fabric: %s" + pr_debug("LUN_RESET: TMR caller fabric: %s" " initiator port %s\n", - TPG_TFO(tmr_tpg)->get_fabric_name(), + tmr_tpg->se_tpg_tfo->get_fabric_name(), tmr_nacl->initiatorname); } } - DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", + pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", (preempt_and_abort_list) ? "Preempt" : "TMR", - TRANSPORT(dev)->name, tas); + dev->transport->name, tas); /* * Release all pending and outgoing TMRs aside from the received * LUN_RESET tmr.. */ - spin_lock(&dev->se_tmr_lock); + spin_lock_irq(&dev->se_tmr_lock); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { /* * Allow the received TMR to return with FUNCTION_COMPLETE. @@ -164,8 +156,8 @@ continue; cmd = tmr_p->task_cmd; - if (!(cmd)) { - printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n"); + if (!cmd) { + pr_err("Unable to locate struct se_cmd for TMR\n"); continue; } /* @@ -173,33 +165,33 @@ * parameter (eg: for PROUT PREEMPT_AND_ABORT service action * skip non registration key matching TMRs.
*/ - if ((preempt_and_abort_list != NULL) && + if (preempt_and_abort_list && (core_scsi3_check_cdb_abort_and_preempt( preempt_and_abort_list, cmd) != 0)) continue; - spin_unlock(&dev->se_tmr_lock); + spin_unlock_irq(&dev->se_tmr_lock); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - spin_lock(&dev->se_tmr_lock); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!atomic_read(&cmd->t_transport_active)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + spin_lock_irq(&dev->se_tmr_lock); continue; } if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - spin_lock(&dev->se_tmr_lock); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + spin_lock_irq(&dev->se_tmr_lock); continue; } - DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x," + pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," " Response: 0x%02x, t_state: %d\n", (preempt_and_abort_list) ? "Preempt" : "", tmr_p, tmr_p->function, tmr_p->response, cmd->t_state); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_cmd_finish_abort_tmr(cmd); - spin_lock(&dev->se_tmr_lock); + spin_lock_irq(&dev->se_tmr_lock); } - spin_unlock(&dev->se_tmr_lock); + spin_unlock_irq(&dev->se_tmr_lock); /* * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. * This is following sam4r17, section 5.6 Aborting commands, Table 38 @@ -224,23 +216,17 @@ int core_tmr_lun_reset( spin_lock_irqsave(&dev->execute_task_lock, flags); list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, t_state_list) { - if (!(TASK_CMD(task))) { - printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); + if (!task->task_se_cmd) { + pr_err("task->task_se_cmd is NULL!\n"); continue; } - cmd = TASK_CMD(task); + cmd = task->task_se_cmd; - if (!T_TASK(cmd)) { - printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" - " %p ITT: 0x%08x\n", task, cmd, - CMD_TFO(cmd)->get_task_tag(cmd)); - continue; - } /* * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. */ - if ((preempt_and_abort_list != NULL) && + if (preempt_and_abort_list && (core_scsi3_check_cdb_abort_and_preempt( preempt_and_abort_list, cmd) != 0)) continue; @@ -254,38 +240,38 @@ int core_tmr_lun_reset( atomic_set(&task->task_state_active, 0); spin_unlock_irqrestore(&dev->execute_task_lock, flags); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" + spin_lock_irqsave(&cmd->t_state_lock, flags); + pr_debug("LUN_RESET: %s cmd: %p task: %p" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" "def_t_state: %d/%d cdb: 0x%02x\n", (preempt_and_abort_list) ? 
"Preempt" : "", cmd, task, - CMD_TFO(cmd)->get_task_tag(cmd), 0, - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, - cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]); - DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" + cmd->se_tfo->get_task_tag(cmd), 0, + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, + cmd->deferred_t_state, cmd->t_task_cdb[0]); + pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" " t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", - CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key, - T_TASK(cmd)->t_task_cdbs, - atomic_read(&T_TASK(cmd)->t_task_cdbs_left), - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), - atomic_read(&T_TASK(cmd)->t_transport_active), - atomic_read(&T_TASK(cmd)->t_transport_stop), - atomic_read(&T_TASK(cmd)->t_transport_sent)); + cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, + cmd->t_task_list_num, + atomic_read(&cmd->t_task_cdbs_left), + atomic_read(&cmd->t_task_cdbs_sent), + atomic_read(&cmd->t_transport_active), + atomic_read(&cmd->t_transport_stop), + atomic_read(&cmd->t_transport_sent)); if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); - DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" + pr_debug("LUN_RESET: Waiting for task: %p to shutdown" " for dev: %p\n", task, dev); wait_for_completion(&task->task_stop_comp); - DEBUG_LR("LUN_RESET Completed task: %p shutdown for" + pr_debug("LUN_RESET Completed task: %p shutdown for" " dev: %p\n", task, dev); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_dec(&cmd->t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -295,34 +281,34 @@ int core_tmr_lun_reset( } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { + if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); - DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" + &cmd->t_state_lock, flags); + pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); + atomic_read(&cmd->t_task_cdbs_ex_left)); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - fe_count = atomic_read(&T_TASK(cmd)->t_fe_count); + fe_count = atomic_read(&cmd->t_fe_count); - if (atomic_read(&T_TASK(cmd)->t_transport_active)) { - DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" + if (atomic_read(&cmd->t_transport_active)) { + pr_debug("LUN_RESET: got t_transport_active = 1 for" " task: %p, t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + atomic_set(&cmd->t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," + pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," " t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&cmd->t_transport_aborted, 1); + 
spin_unlock_irqrestore(&cmd->t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -337,25 +323,12 @@ int core_tmr_lun_reset( * reference, otherwise the struct se_cmd is released. */ spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) { - cmd = (struct se_cmd *)qr->cmd; - if (!(cmd)) { - /* - * Skip these for non PREEMPT_AND_ABORT usage.. - */ - if (preempt_and_abort_list != NULL) - continue; - - atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); - kfree(qr); - continue; - } + list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { /* * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. */ - if ((preempt_and_abort_list != NULL) && + if (preempt_and_abort_list && (core_scsi3_check_cdb_abort_and_preempt( preempt_and_abort_list, cmd) != 0)) continue; @@ -365,30 +338,22 @@ int core_tmr_lun_reset( if (prout_cmd == cmd) continue; - atomic_dec(&T_TASK(cmd)->t_transport_queue_active); + atomic_dec(&cmd->t_transport_queue_active); atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); + list_del(&cmd->se_queue_node); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - state = qr->state; - kfree(qr); - - DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" + pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" " %d t_fe_count: %d\n", (preempt_and_abort_list) ? - "Preempt" : "", cmd, state, - atomic_read(&T_TASK(cmd)->t_fe_count)); + "Preempt" : "", cmd, cmd->t_state, + atomic_read(&cmd->t_fe_count)); /* * Signal that the command has failed via cmd->se_cmd_flags, - * and call TFO->new_cmd_failure() to wakeup any fabric - * dependent code used to wait for unsolicited data out - * allocation to complete. The fabric module is expected - * to dump any remaining unsolicited data out for the aborted - * command at this point. */ transport_new_cmd_failure(cmd); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, - atomic_read(&T_TASK(cmd)->t_fe_count)); + atomic_read(&cmd->t_fe_count)); spin_lock_irqsave(&qobj->cmd_queue_lock, flags); } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); @@ -396,21 +361,21 @@ int core_tmr_lun_reset( * Clear any legacy SPC-2 reservation when called during * LOGICAL UNIT RESET */ - if (!(preempt_and_abort_list) && + if (!preempt_and_abort_list && (dev->dev_flags & DF_SPC2_RESERVATIONS)) { spin_lock(&dev->dev_reservation_lock); dev->dev_reserved_node_acl = NULL; dev->dev_flags &= ~DF_SPC2_RESERVATIONS; spin_unlock(&dev->dev_reservation_lock); - printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); + pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); } spin_lock_irq(&dev->stats_lock); dev->num_resets++; spin_unlock_irq(&dev->stats_lock); - DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", + pr_debug("LUN_RESET: %s for [%s] Complete\n", (preempt_and_abort_list) ? 
"Preempt" : "TMR", - TRANSPORT(dev)->name); + dev->transport->name); return 0; } diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 5ec745fed93..4f1ba4c5ef1 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -44,6 +44,12 @@ #include <target/target_core_fabric_ops.h> #include "target_core_hba.h" +#include "target_core_stat.h" + +extern struct se_device *g_lun0_dev; + +static DEFINE_SPINLOCK(tpg_lock); +static LIST_HEAD(tpg_list); /* core_clear_initiator_node_from_tpg(): * @@ -66,9 +72,9 @@ static void core_clear_initiator_node_from_tpg( continue; if (!deve->se_lun) { - printk(KERN_ERR "%s device entries device pointer is" + pr_err("%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } @@ -80,14 +86,13 @@ static void core_clear_initiator_node_from_tpg( spin_lock(&lun->lun_acl_lock); list_for_each_entry_safe(acl, acl_tmp, &lun->lun_acl_list, lacl_list) { - if (!(strcmp(acl->initiatorname, - nacl->initiatorname)) && - (acl->mapped_lun == deve->mapped_lun)) + if (!strcmp(acl->initiatorname, nacl->initiatorname) && + (acl->mapped_lun == deve->mapped_lun)) break; } if (!acl) { - printk(KERN_ERR "Unable to locate struct se_lun_acl for %s," + pr_err("Unable to locate struct se_lun_acl for %s," " mapped_lun: %u\n", nacl->initiatorname, deve->mapped_lun); spin_unlock(&lun->lun_acl_lock); @@ -115,7 +120,7 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl( struct se_node_acl *acl; list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { - if (!(strcmp(acl->initiatorname, initiatorname))) + if (!strcmp(acl->initiatorname, initiatorname)) return acl; } @@ -134,8 +139,8 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( spin_lock_bh(&tpg->acl_node_lock); list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { - if (!(strcmp(acl->initiatorname, initiatorname)) && - (!(acl->dynamic_node_acl))) { + if (!strcmp(acl->initiatorname, initiatorname) && + !acl->dynamic_node_acl) { spin_unlock_bh(&tpg->acl_node_lock); return acl; } @@ -171,7 +176,7 @@ void core_tpg_add_node_to_devs( * By default in LIO-Target $FABRIC_MOD, * demo_mode_write_protect is ON, or READ_ONLY; */ - if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) { + if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { if (dev->dev_flags & DF_READ_ONLY) lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; else @@ -181,16 +186,16 @@ void core_tpg_add_node_to_devs( * Allow only optical drives to issue R/W in default RO * demo mode. */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) + if (dev->transport->get_device_type(dev) == TYPE_DISK) lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; else lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; } - printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" + pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" " access for LUN in Demo Mode\n", - TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? 
"READ-WRITE" : "READ-ONLY"); @@ -210,8 +215,8 @@ static int core_set_queue_depth_for_node( struct se_node_acl *acl) { if (!acl->queue_depth) { - printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," - "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(), + pr_err("Queue depth for %s Initiator Node: %s is 0," + "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); acl->queue_depth = 1; } @@ -230,10 +235,10 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl) nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); - if (!(nacl->device_list)) { - printk(KERN_ERR "Unable to allocate memory for" + if (!nacl->device_list) { + pr_err("Unable to allocate memory for" " struct se_node_acl->device_list\n"); - return -1; + return -ENOMEM; } for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { deve = &nacl->device_list[i]; @@ -259,14 +264,14 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( struct se_node_acl *acl; acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); - if ((acl)) + if (acl) return acl; - if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg))) + if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) return NULL; - acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg); - if (!(acl)) + acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); + if (!acl) return NULL; INIT_LIST_HEAD(&acl->acl_list); @@ -274,23 +279,23 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( spin_lock_init(&acl->device_list_lock); spin_lock_init(&acl->nacl_sess_lock); atomic_set(&acl->acl_pr_ref_count, 0); - acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); + acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); spin_lock_init(&acl->stats_lock); acl->dynamic_node_acl = 1; - TPG_TFO(tpg)->set_default_node_attributes(acl); + tpg->se_tpg_tfo->set_default_node_attributes(acl); if (core_create_device_list_for_node(acl) < 0) { - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); return NULL; } if (core_set_queue_depth_for_node(tpg, acl) < 0) { core_free_device_list_for_node(acl, tpg); - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); return NULL; } @@ -301,10 +306,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( tpg->num_node_acls++; spin_unlock_bh(&tpg->acl_node_lock); - printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" - " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, - TPG_TFO(tpg)->get_fabric_name(), initiatorname); + pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" + " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, + tpg->se_tpg_tfo->get_fabric_name(), initiatorname); return acl; } @@ -351,12 +356,12 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( spin_lock_bh(&tpg->acl_node_lock); acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); - if ((acl)) { + if (acl) { if (acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; - printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" - " for %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname); + pr_debug("%s_TPG[%u] - Replacing dynamic ACL" + " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), + 
tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); spin_unlock_bh(&tpg->acl_node_lock); /* * Release the locally allocated struct se_node_acl @@ -364,22 +369,22 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( * a pointer to an existing demo mode node ACL. */ if (se_nacl) - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, se_nacl); goto done; } - printk(KERN_ERR "ACL entry for %s Initiator" + pr_err("ACL entry for %s Initiator" " Node %s already exists for TPG %u, ignoring" - " request.\n", TPG_TFO(tpg)->get_fabric_name(), - initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); + " request.\n", tpg->se_tpg_tfo->get_fabric_name(), + initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_bh(&tpg->acl_node_lock); return ERR_PTR(-EEXIST); } spin_unlock_bh(&tpg->acl_node_lock); - if (!(se_nacl)) { - printk("struct se_node_acl pointer is NULL\n"); + if (!se_nacl) { + pr_err("struct se_node_acl pointer is NULL\n"); return ERR_PTR(-EINVAL); } /* @@ -400,16 +405,16 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); spin_lock_init(&acl->stats_lock); - TPG_TFO(tpg)->set_default_node_attributes(acl); + tpg->se_tpg_tfo->set_default_node_attributes(acl); if (core_create_device_list_for_node(acl) < 0) { - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); return ERR_PTR(-ENOMEM); } if (core_set_queue_depth_for_node(tpg, acl) < 0) { core_free_device_list_for_node(acl, tpg); - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); return ERR_PTR(-EINVAL); } @@ -419,10 +424,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( spin_unlock_bh(&tpg->acl_node_lock); done: - printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" - " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, - TPG_TFO(tpg)->get_fabric_name(), initiatorname); + pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" + " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, + tpg->se_tpg_tfo->get_fabric_name(), initiatorname); return acl; } @@ -457,7 +462,7 @@ int core_tpg_del_initiator_node_acl( /* * Determine if the session needs to be closed by our context. */ - if (!(TPG_TFO(tpg)->shutdown_session(sess))) + if (!tpg->se_tpg_tfo->shutdown_session(sess)) continue; spin_unlock_bh(&tpg->session_lock); @@ -465,7 +470,7 @@ int core_tpg_del_initiator_node_acl( * If the $FABRIC_MOD session for the Initiator Node ACL exists, * forcefully shutdown the $FABRIC_MOD session/nexus. 
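Throughout these hunks the old accessor macros (TPG_TFO(), DEV_ATTRIB(), T_TASK(), CMD_TFO(), TRANSPORT()) are replaced by direct member dereferences. Judging by the one-for-one substitutions in this patch, the macros were thin wrappers of roughly the following shape, so spelling the member chain out costs nothing and makes the actual indirection visible at each call site. The bodies below are reconstructed from the substitutions, not quoted from the deleted header:

/* Assumed old definitions, inferred from the replacements above: */
#define TPG_TFO(se_tpg)  ((se_tpg)->se_tpg_tfo)
#define DEV_ATTRIB(dev)  (&(dev)->se_sub_dev->se_dev_attrib)

/* old: tas = DEV_ATTRIB(dev)->emulate_tas;                    */
/* new: tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;      */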
*/ - TPG_TFO(tpg)->close_session(sess); + tpg->se_tpg_tfo->close_session(sess); spin_lock_bh(&tpg->session_lock); } @@ -475,10 +480,10 @@ int core_tpg_del_initiator_node_acl( core_clear_initiator_node_from_tpg(acl, tpg); core_free_device_list_for_node(acl, tpg); - printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" - " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, - TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname); + pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" + " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, + tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); return 0; } @@ -500,11 +505,11 @@ int core_tpg_set_initiator_node_queue_depth( spin_lock_bh(&tpg->acl_node_lock); acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); - if (!(acl)) { - printk(KERN_ERR "Access Control List entry for %s Initiator" + if (!acl) { + pr_err("Access Control List entry for %s Initiator" " Node %s does not exists for TPG %hu, ignoring" - " request.\n", TPG_TFO(tpg)->get_fabric_name(), - initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); + " request.\n", tpg->se_tpg_tfo->get_fabric_name(), + initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_bh(&tpg->acl_node_lock); return -ENODEV; } @@ -520,12 +525,12 @@ int core_tpg_set_initiator_node_queue_depth( continue; if (!force) { - printk(KERN_ERR "Unable to change queue depth for %s" + pr_err("Unable to change queue depth for %s" " Initiator Node: %s while session is" " operational. To forcefully change the queue" " depth and force session reinstatement" " use the \"force=1\" parameter.\n", - TPG_TFO(tpg)->get_fabric_name(), initiatorname); + tpg->se_tpg_tfo->get_fabric_name(), initiatorname); spin_unlock_bh(&tpg->session_lock); spin_lock_bh(&tpg->acl_node_lock); @@ -537,7 +542,7 @@ int core_tpg_set_initiator_node_queue_depth( /* * Determine if the session needs to be closed by our context. */ - if (!(TPG_TFO(tpg)->shutdown_session(sess))) + if (!tpg->se_tpg_tfo->shutdown_session(sess)) continue; init_sess = sess; @@ -549,7 +554,7 @@ int core_tpg_set_initiator_node_queue_depth( * Change the value in the Node's struct se_node_acl, and call * core_set_queue_depth_for_node() to add the requested queue depth. * - * Finally call TPG_TFO(tpg)->close_session() to force session + * Finally call tpg->se_tpg_tfo->close_session() to force session * reinstatement to occur if there is an active session for the * $FABRIC_MOD Initiator Node in question. */ @@ -561,10 +566,10 @@ int core_tpg_set_initiator_node_queue_depth( * Force session reinstatement if * core_set_queue_depth_for_node() failed, because we assume * the $FABRIC_MOD has already the set session reinstatement - * bit from TPG_TFO(tpg)->shutdown_session() called above. + * bit from tpg->se_tpg_tfo->shutdown_session() called above. */ if (init_sess) - TPG_TFO(tpg)->close_session(init_sess); + tpg->se_tpg_tfo->close_session(init_sess); spin_lock_bh(&tpg->acl_node_lock); if (dynamic_acl) @@ -578,12 +583,12 @@ int core_tpg_set_initiator_node_queue_depth( * forcefully shutdown the $FABRIC_MOD session/nexus. 
*/ if (init_sess) - TPG_TFO(tpg)->close_session(init_sess); + tpg->se_tpg_tfo->close_session(init_sess); - printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator" + pr_debug("Successfuly changed queue depth to: %d for Initiator" " Node: %s on %s Target Portal Group: %u\n", queue_depth, - initiatorname, TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg)); + initiatorname, tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_lock_bh(&tpg->acl_node_lock); if (dynamic_acl) @@ -597,7 +602,7 @@ EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) { /* Set in core_dev_setup_virtual_lun0() */ - struct se_device *dev = se_global->g_lun0_dev; + struct se_device *dev = g_lun0_dev; struct se_lun *lun = &se_tpg->tpg_virt_lun0; u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; int ret; @@ -614,7 +619,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); if (ret < 0) - return -1; + return ret; return 0; } @@ -638,8 +643,8 @@ int core_tpg_register( se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); - if (!(se_tpg->tpg_lun_list)) { - printk(KERN_ERR "Unable to allocate struct se_portal_group->" + if (!se_tpg->tpg_lun_list) { + pr_err("Unable to allocate struct se_portal_group->" "tpg_lun_list\n"); return -ENOMEM; } @@ -663,7 +668,7 @@ int core_tpg_register( se_tpg->se_tpg_wwn = se_wwn; atomic_set(&se_tpg->tpg_pr_ref_count, 0); INIT_LIST_HEAD(&se_tpg->acl_node_list); - INIT_LIST_HEAD(&se_tpg->se_tpg_list); + INIT_LIST_HEAD(&se_tpg->se_tpg_node); INIT_LIST_HEAD(&se_tpg->tpg_sess_list); spin_lock_init(&se_tpg->acl_node_lock); spin_lock_init(&se_tpg->session_lock); @@ -676,11 +681,11 @@ int core_tpg_register( } } - spin_lock_bh(&se_global->se_tpg_lock); - list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list); - spin_unlock_bh(&se_global->se_tpg_lock); + spin_lock_bh(&tpg_lock); + list_add_tail(&se_tpg->se_tpg_node, &tpg_list); + spin_unlock_bh(&tpg_lock); - printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" + pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for" " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? @@ -694,16 +699,16 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) { struct se_node_acl *nacl, *nacl_tmp; - printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" + pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group" " for endpoint: %s Portal Tag %u\n", (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 
- "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(), - TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg), - TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); + "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), + se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); - spin_lock_bh(&se_global->se_tpg_lock); - list_del(&se_tpg->se_tpg_list); - spin_unlock_bh(&se_global->se_tpg_lock); + spin_lock_bh(&tpg_lock); + list_del(&se_tpg->se_tpg_node); + spin_unlock_bh(&tpg_lock); while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) cpu_relax(); @@ -721,7 +726,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) core_tpg_wait_for_nacl_pr_ref(nacl); core_free_device_list_for_node(nacl, se_tpg); - TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); + se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); spin_lock_bh(&se_tpg->acl_node_lock); } @@ -743,21 +748,21 @@ struct se_lun *core_tpg_pre_addlun( struct se_lun *lun; if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" "-1: %u for Target Portal Group: %u\n", - TPG_TFO(tpg)->get_fabric_name(), + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); return ERR_PTR(-EOVERFLOW); } spin_lock(&tpg->tpg_lun_lock); lun = &tpg->tpg_lun_list[unpacked_lun]; if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { - printk(KERN_ERR "TPG Logical Unit Number: %u is already active" + pr_err("TPG Logical Unit Number: %u is already active" " on %s Target Portal Group: %u, ignoring request.\n", - unpacked_lun, TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg)); + unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return ERR_PTR(-EINVAL); } @@ -772,8 +777,11 @@ int core_tpg_post_addlun( u32 lun_access, void *lun_ptr) { - if (core_dev_export(lun_ptr, tpg, lun) < 0) - return -1; + int ret; + + ret = core_dev_export(lun_ptr, tpg, lun); + if (ret < 0) + return ret; spin_lock(&tpg->tpg_lun_lock); lun->lun_access = lun_access; @@ -799,21 +807,21 @@ struct se_lun *core_tpg_pre_dellun( struct se_lun *lun; if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" "-1: %u for Target Portal Group: %u\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); return ERR_PTR(-EOVERFLOW); } spin_lock(&tpg->tpg_lun_lock); lun = &tpg->tpg_lun_list[unpacked_lun]; if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { - printk(KERN_ERR "%s Logical Unit Number: %u is not active on" + pr_err("%s Logical Unit Number: %u is not active on" " Target Portal Group: %u, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return ERR_PTR(-ENODEV); } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4b9b7169bdd..46352d658e3 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -58,139 +58,12 @@ #include "target_core_scdb.h" 
#include "target_core_ua.h" -/* #define DEBUG_CDB_HANDLER */ -#ifdef DEBUG_CDB_HANDLER -#define DEBUG_CDB_H(x...) printk(KERN_INFO x) -#else -#define DEBUG_CDB_H(x...) -#endif - -/* #define DEBUG_CMD_MAP */ -#ifdef DEBUG_CMD_MAP -#define DEBUG_CMD_M(x...) printk(KERN_INFO x) -#else -#define DEBUG_CMD_M(x...) -#endif - -/* #define DEBUG_MEM_ALLOC */ -#ifdef DEBUG_MEM_ALLOC -#define DEBUG_MEM(x...) printk(KERN_INFO x) -#else -#define DEBUG_MEM(x...) -#endif - -/* #define DEBUG_MEM2_ALLOC */ -#ifdef DEBUG_MEM2_ALLOC -#define DEBUG_MEM2(x...) printk(KERN_INFO x) -#else -#define DEBUG_MEM2(x...) -#endif - -/* #define DEBUG_SG_CALC */ -#ifdef DEBUG_SG_CALC -#define DEBUG_SC(x...) printk(KERN_INFO x) -#else -#define DEBUG_SC(x...) -#endif - -/* #define DEBUG_SE_OBJ */ -#ifdef DEBUG_SE_OBJ -#define DEBUG_SO(x...) printk(KERN_INFO x) -#else -#define DEBUG_SO(x...) -#endif - -/* #define DEBUG_CMD_VOL */ -#ifdef DEBUG_CMD_VOL -#define DEBUG_VOL(x...) printk(KERN_INFO x) -#else -#define DEBUG_VOL(x...) -#endif - -/* #define DEBUG_CMD_STOP */ -#ifdef DEBUG_CMD_STOP -#define DEBUG_CS(x...) printk(KERN_INFO x) -#else -#define DEBUG_CS(x...) -#endif - -/* #define DEBUG_PASSTHROUGH */ -#ifdef DEBUG_PASSTHROUGH -#define DEBUG_PT(x...) printk(KERN_INFO x) -#else -#define DEBUG_PT(x...) -#endif - -/* #define DEBUG_TASK_STOP */ -#ifdef DEBUG_TASK_STOP -#define DEBUG_TS(x...) printk(KERN_INFO x) -#else -#define DEBUG_TS(x...) -#endif - -/* #define DEBUG_TRANSPORT_STOP */ -#ifdef DEBUG_TRANSPORT_STOP -#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x) -#else -#define DEBUG_TRANSPORT_S(x...) -#endif - -/* #define DEBUG_TASK_FAILURE */ -#ifdef DEBUG_TASK_FAILURE -#define DEBUG_TF(x...) printk(KERN_INFO x) -#else -#define DEBUG_TF(x...) -#endif - -/* #define DEBUG_DEV_OFFLINE */ -#ifdef DEBUG_DEV_OFFLINE -#define DEBUG_DO(x...) printk(KERN_INFO x) -#else -#define DEBUG_DO(x...) -#endif - -/* #define DEBUG_TASK_STATE */ -#ifdef DEBUG_TASK_STATE -#define DEBUG_TSTATE(x...) printk(KERN_INFO x) -#else -#define DEBUG_TSTATE(x...) -#endif - -/* #define DEBUG_STATUS_THR */ -#ifdef DEBUG_STATUS_THR -#define DEBUG_ST(x...) printk(KERN_INFO x) -#else -#define DEBUG_ST(x...) -#endif - -/* #define DEBUG_TASK_TIMEOUT */ -#ifdef DEBUG_TASK_TIMEOUT -#define DEBUG_TT(x...) printk(KERN_INFO x) -#else -#define DEBUG_TT(x...) -#endif - -/* #define DEBUG_GENERIC_REQUEST_FAILURE */ -#ifdef DEBUG_GENERIC_REQUEST_FAILURE -#define DEBUG_GRF(x...) printk(KERN_INFO x) -#else -#define DEBUG_GRF(x...) -#endif - -/* #define DEBUG_SAM_TASK_ATTRS */ -#ifdef DEBUG_SAM_TASK_ATTRS -#define DEBUG_STA(x...) printk(KERN_INFO x) -#else -#define DEBUG_STA(x...) 
-#endif - -struct se_global *se_global; +static int sub_api_initialized; static struct kmem_cache *se_cmd_cache; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_tmr_req_cache; struct kmem_cache *se_ua_cache; -struct kmem_cache *se_mem_cache; struct kmem_cache *t10_pr_reg_cache; struct kmem_cache *t10_alua_lu_gp_cache; struct kmem_cache *t10_alua_lu_gp_mem_cache; @@ -201,116 +74,87 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; typedef int (*map_func_t)(struct se_task *, u32); static int transport_generic_write_pending(struct se_cmd *); -static int transport_processing_thread(void *); +static int transport_processing_thread(void *param); static int __transport_execute_tasks(struct se_device *dev); static void transport_complete_task_attr(struct se_cmd *cmd); +static int transport_complete_qf(struct se_cmd *cmd); +static void transport_handle_queue_full(struct se_cmd *cmd, + struct se_device *dev, int (*qf_callback)(struct se_cmd *)); static void transport_direct_request_timeout(struct se_cmd *cmd); static void transport_free_dev_tasks(struct se_cmd *cmd); -static u32 transport_generic_get_cdb_count(struct se_cmd *cmd, - unsigned long long starting_lba, u32 sectors, +static u32 transport_allocate_tasks(struct se_cmd *cmd, + unsigned long long starting_lba, enum dma_data_direction data_direction, - struct list_head *mem_list, int set_counts); -static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, - u32 dma_size); + struct scatterlist *sgl, unsigned int nents); +static int transport_generic_get_mem(struct se_cmd *cmd); static int transport_generic_remove(struct se_cmd *cmd, - int release_to_pool, int session_reinstatement); -static int transport_get_sectors(struct se_cmd *cmd); -static struct list_head *transport_init_se_mem_list(void); -static int transport_map_sg_to_mem(struct se_cmd *cmd, - struct list_head *se_mem_list, void *in_mem, - u32 *se_mem_cnt); -static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, - unsigned char *dst, struct list_head *se_mem_list); + int session_reinstatement); static void transport_release_fe_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); static void transport_stop_all_task_timers(struct se_cmd *cmd); -int init_se_global(void) +int init_se_kmem_caches(void) { - struct se_global *global; - - global = kzalloc(sizeof(struct se_global), GFP_KERNEL); - if (!(global)) { - printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); - return -1; - } - - INIT_LIST_HEAD(&global->g_lu_gps_list); - INIT_LIST_HEAD(&global->g_se_tpg_list); - INIT_LIST_HEAD(&global->g_hba_list); - INIT_LIST_HEAD(&global->g_se_dev_list); - spin_lock_init(&global->g_device_lock); - spin_lock_init(&global->hba_lock); - spin_lock_init(&global->se_tpg_lock); - spin_lock_init(&global->lu_gps_lock); - spin_lock_init(&global->plugin_class_lock); - se_cmd_cache = kmem_cache_create("se_cmd_cache", sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); - if (!(se_cmd_cache)) { - printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); + if (!se_cmd_cache) { + pr_err("kmem_cache_create for struct se_cmd failed\n"); goto out; } se_tmr_req_cache = kmem_cache_create("se_tmr_cache", sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 0, NULL); - if (!(se_tmr_req_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" + if (!se_tmr_req_cache) { + pr_err("kmem_cache_create() 
for struct se_tmr_req" " failed\n"); goto out; } se_sess_cache = kmem_cache_create("se_sess_cache", sizeof(struct se_session), __alignof__(struct se_session), 0, NULL); - if (!(se_sess_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_session" + if (!se_sess_cache) { + pr_err("kmem_cache_create() for struct se_session" " failed\n"); goto out; } se_ua_cache = kmem_cache_create("se_ua_cache", sizeof(struct se_ua), __alignof__(struct se_ua), 0, NULL); - if (!(se_ua_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); - goto out; - } - se_mem_cache = kmem_cache_create("se_mem_cache", - sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); - if (!(se_mem_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); + if (!se_ua_cache) { + pr_err("kmem_cache_create() for struct se_ua failed\n"); goto out; } t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", sizeof(struct t10_pr_registration), __alignof__(struct t10_pr_registration), 0, NULL); - if (!(t10_pr_reg_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" + if (!t10_pr_reg_cache) { + pr_err("kmem_cache_create() for struct t10_pr_registration" " failed\n"); goto out; } t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 0, NULL); - if (!(t10_alua_lu_gp_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" + if (!t10_alua_lu_gp_cache) { + pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" " failed\n"); goto out; } t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", sizeof(struct t10_alua_lu_gp_member), __alignof__(struct t10_alua_lu_gp_member), 0, NULL); - if (!(t10_alua_lu_gp_mem_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" + if (!t10_alua_lu_gp_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" "cache failed\n"); goto out; } t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", sizeof(struct t10_alua_tg_pt_gp), __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); - if (!(t10_alua_tg_pt_gp_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" + if (!t10_alua_tg_pt_gp_cache) { + pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "cache failed\n"); goto out; } @@ -319,14 +163,12 @@ int init_se_global(void) sizeof(struct t10_alua_tg_pt_gp_member), __alignof__(struct t10_alua_tg_pt_gp_member), 0, NULL); - if (!(t10_alua_tg_pt_gp_mem_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" + if (!t10_alua_tg_pt_gp_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "mem_t failed\n"); goto out; } - se_global = global; - return 0; out: if (se_cmd_cache) @@ -337,8 +179,6 @@ out: kmem_cache_destroy(se_sess_cache); if (se_ua_cache) kmem_cache_destroy(se_ua_cache); - if (se_mem_cache) - kmem_cache_destroy(se_mem_cache); if (t10_pr_reg_cache) kmem_cache_destroy(t10_pr_reg_cache); if (t10_alua_lu_gp_cache) @@ -349,45 +189,25 @@ out: kmem_cache_destroy(t10_alua_tg_pt_gp_cache); if (t10_alua_tg_pt_gp_mem_cache) kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); - kfree(global); - return -1; + return -ENOMEM; } -void release_se_global(void) +void release_se_kmem_caches(void) { - struct se_global *global; - - global = se_global; - if (!(global)) - return; - kmem_cache_destroy(se_cmd_cache); kmem_cache_destroy(se_tmr_req_cache); kmem_cache_destroy(se_sess_cache); kmem_cache_destroy(se_ua_cache); - kmem_cache_destroy(se_mem_cache); 
kmem_cache_destroy(t10_pr_reg_cache); kmem_cache_destroy(t10_alua_lu_gp_cache); kmem_cache_destroy(t10_alua_lu_gp_mem_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); - kfree(global); - - se_global = NULL; } -/* SCSI statistics table index */ -static struct scsi_index_table scsi_index_table; - -/* - * Initialize the index table for allocating unique row indexes to various mib - * tables. - */ -void init_scsi_index_table(void) -{ - memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); - spin_lock_init(&scsi_index_table.lock); -} +/* This code ensures unique mib indexes are handed out. */ +static DEFINE_SPINLOCK(scsi_mib_index_lock); +static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; /* * Allocate a new row index for the entry type specified @@ -396,16 +216,11 @@ u32 scsi_get_new_index(scsi_index_t type) { u32 new_index; - if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { - printk(KERN_ERR "Invalid index type %d\n", type); - return -EINVAL; - } + BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); - spin_lock(&scsi_index_table.lock); - new_index = ++scsi_index_table.scsi_mib_index[type]; - if (new_index == 0) - new_index = ++scsi_index_table.scsi_mib_index[type]; - spin_unlock(&scsi_index_table.lock); + spin_lock(&scsi_mib_index_lock); + new_index = ++scsi_mib_index[type]; + spin_unlock(&scsi_mib_index_lock); return new_index; } @@ -425,34 +240,37 @@ static int transport_subsystem_reqmods(void) ret = request_module("target_core_iblock"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_iblock\n"); + pr_err("Unable to load target_core_iblock\n"); ret = request_module("target_core_file"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_file\n"); + pr_err("Unable to load target_core_file\n"); ret = request_module("target_core_pscsi"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_pscsi\n"); + pr_err("Unable to load target_core_pscsi\n"); ret = request_module("target_core_stgt"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_stgt\n"); + pr_err("Unable to load target_core_stgt\n"); return 0; } int transport_subsystem_check_init(void) { - if (se_global->g_sub_api_initialized) + int ret; + + if (sub_api_initialized) return 0; /* * Request the loading of known TCM subsystem plugins.. */ - if (transport_subsystem_reqmods() < 0) - return -1; + ret = transport_subsystem_reqmods(); + if (ret < 0) + return ret; - se_global->g_sub_api_initialized = 1; + sub_api_initialized = 1; return 0; } @@ -461,8 +279,8 @@ struct se_session *transport_init_session(void) struct se_session *se_sess; se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); - if (!(se_sess)) { - printk(KERN_ERR "Unable to allocate struct se_session from" + if (!se_sess) { + pr_err("Unable to allocate struct se_session from" " se_sess_cache\n"); return ERR_PTR(-ENOMEM); } @@ -497,9 +315,9 @@ void __transport_register_session( * If the fabric module supports an ISID based TransportID, * save this value in binary from the fabric I_T Nexus now. 
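The MIB index allocator earlier in this hunk sheds its kzalloc'd se_global home: a file-scope DEFINE_SPINLOCK() plus a static array are fully initialized by the compiler, so init_scsi_index_table() disappears outright. A self-contained sketch of the resulting allocator, with a stand-in bound:

#include <linux/spinlock.h>
#include <linux/types.h>

#define INDEX_TYPE_MAX 6	/* stand-in for SCSI_INDEX_TYPE_MAX */

/* Compiler-initialized; no runtime setup function required. */
static DEFINE_SPINLOCK(mib_index_lock);
static u32 mib_index[INDEX_TYPE_MAX];

static u32 get_new_index(int type)
{
	u32 new_index;

	spin_lock(&mib_index_lock);
	new_index = ++mib_index[type];	/* first index handed out is 1 */
	spin_unlock(&mib_index_lock);
	return new_index;
}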
*/ - if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { + if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { memset(&buf[0], 0, PR_REG_ISID_LEN); - TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, + se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &buf[0], PR_REG_ISID_LEN); se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); } @@ -516,8 +334,8 @@ void __transport_register_session( } list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); - printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", - TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr); + pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", + se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); } EXPORT_SYMBOL(__transport_register_session); @@ -541,7 +359,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess) * Used by struct se_node_acl's under ConfigFS to locate active struct se_session */ se_nacl = se_sess->se_node_acl; - if ((se_nacl)) { + if (se_nacl) { spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); list_del(&se_sess->sess_acl_list); /* @@ -572,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess) struct se_portal_group *se_tpg = se_sess->se_tpg; struct se_node_acl *se_nacl; - if (!(se_tpg)) { + if (!se_tpg) { transport_free_session(se_sess); return; } @@ -588,18 +406,18 @@ void transport_deregister_session(struct se_session *se_sess) * struct se_node_acl if it had been previously dynamically generated. */ se_nacl = se_sess->se_node_acl; - if ((se_nacl)) { + if (se_nacl) { spin_lock_bh(&se_tpg->acl_node_lock); if (se_nacl->dynamic_node_acl) { - if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache( - se_tpg))) { + if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( + se_tpg)) { list_del(&se_nacl->acl_list); se_tpg->num_node_acls--; spin_unlock_bh(&se_tpg->acl_node_lock); core_tpg_wait_for_nacl_pr_ref(se_nacl); core_free_device_list_for_node(se_nacl, se_tpg); - TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, + se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); spin_lock_bh(&se_tpg->acl_node_lock); } @@ -609,13 +427,13 @@ void transport_deregister_session(struct se_session *se_sess) transport_free_session(se_sess); - printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", - TPG_TFO(se_tpg)->get_fabric_name()); + pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", + se_tpg->se_tpg_tfo->get_fabric_name()); } EXPORT_SYMBOL(transport_deregister_session); /* - * Called with T_TASK(cmd)->t_state_lock held. + * Called with cmd->t_state_lock held. 
*/ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) { @@ -623,28 +441,25 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - if (!T_TASK(cmd)) - return; - - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task_list, t_list) { dev = task->se_dev; - if (!(dev)) + if (!dev) continue; if (atomic_read(&task->task_active)) continue; - if (!(atomic_read(&task->task_state_active))) + if (!atomic_read(&task->task_state_active)) continue; spin_lock_irqsave(&dev->execute_task_lock, flags); list_del(&task->t_state_list); - DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", - CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task); + pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", + cmd->se_tfo->get_task_tag(cmd), dev, task); spin_unlock_irqrestore(&dev->execute_task_lock, flags); atomic_set(&task->task_state_active, 0); - atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left); + atomic_dec(&cmd->t_task_cdbs_ex_left); } } @@ -663,34 +478,34 @@ static int transport_cmd_check_stop( { unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); /* * Determine if IOCTL context caller in requesting the stopping of this * command for LUN shutdown purposes. */ - if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { - DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)" + if (atomic_read(&cmd->transport_lun_stop)) { + pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); cmd->deferred_t_state = cmd->t_state; cmd->t_state = TRANSPORT_DEFERRED_CMD; - atomic_set(&T_TASK(cmd)->t_transport_active, 0); + atomic_set(&cmd->t_transport_active, 0); if (transport_off == 2) transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&T_TASK(cmd)->transport_lun_stop_comp); + complete(&cmd->transport_lun_stop_comp); return 1; } /* * Determine if frontend context caller is requesting the stopping of - * this command for frontend excpections. + * this command for frontend exceptions. */ - if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { - DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) ==" + if (atomic_read(&cmd->t_transport_stop)) { + pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" " TRUE for ITT: 0x%08x\n", __func__, __LINE__, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); cmd->deferred_t_state = cmd->t_state; cmd->t_state = TRANSPORT_DEFERRED_CMD; @@ -703,13 +518,13 @@ static int transport_cmd_check_stop( */ if (transport_off == 2) cmd->se_lun = NULL; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&T_TASK(cmd)->t_transport_stop_comp); + complete(&cmd->t_transport_stop_comp); return 1; } if (transport_off) { - atomic_set(&T_TASK(cmd)->t_transport_active, 0); + atomic_set(&cmd->t_transport_active, 0); if (transport_off == 2) { transport_all_task_dev_remove_state(cmd); /* @@ -722,20 +537,20 @@ static int transport_cmd_check_stop( * their internally allocated I/O reference now and * struct se_cmd now. 
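transport_cmd_check_stop() pairs an atomic stop flag with a struct completion: the shutdown context sets the flag and blocks in wait_for_completion(), and the command path signals it once the state checked under t_state_lock says the command has quiesced. The handshake reduced to its skeleton (names illustrative):

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct cmd_ctx {
	spinlock_t lock;
	atomic_t stop_requested;
	struct completion stop_done;
};

/* Shutdown side: request the stop, then block until it is acknowledged. */
static void request_stop(struct cmd_ctx *c)
{
	atomic_set(&c->stop_requested, 1);
	wait_for_completion(&c->stop_done);
}

/* Command side: called at a point where stopping is safe. */
static bool check_stop(struct cmd_ctx *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (atomic_read(&c->stop_requested)) {
		spin_unlock_irqrestore(&c->lock, flags);
		complete(&c->stop_done);	/* wake the waiter */
		return true;
	}
	spin_unlock_irqrestore(&c->lock, flags);
	return false;
}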
*/ - if (CMD_TFO(cmd)->check_stop_free != NULL) { + if (cmd->se_tfo->check_stop_free != NULL) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); - CMD_TFO(cmd)->check_stop_free(cmd); + cmd->se_tfo->check_stop_free(cmd); return 1; } } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } else if (t_state) cmd->t_state = t_state; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } @@ -747,30 +562,30 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) static void transport_lun_remove_cmd(struct se_cmd *cmd) { - struct se_lun *lun = SE_LUN(cmd); + struct se_lun *lun = cmd->se_lun; unsigned long flags; if (!lun) return; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!atomic_read(&cmd->transport_dev_active)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto check_lun; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); check_lun: spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (atomic_read(&T_TASK(cmd)->transport_lun_active)) { - list_del(&cmd->se_lun_list); - atomic_set(&T_TASK(cmd)->transport_lun_active, 0); + if (atomic_read(&cmd->transport_lun_active)) { + list_del(&cmd->se_lun_node); + atomic_set(&cmd->transport_lun_active, 0); #if 0 - printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" - CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun); + pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" + cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); #endif } spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); @@ -778,92 +593,59 @@ check_lun: void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { - transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) return; if (remove) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) { - transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); if (transport_cmd_check_stop_to_fabric(cmd)) return; - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } -static int transport_add_cmd_to_queue( +static void transport_add_cmd_to_queue( struct se_cmd *cmd, int t_state) { struct se_device *dev = cmd->se_dev; - struct se_queue_obj *qobj = dev->dev_queue_obj; - struct se_queue_req *qr; + struct se_queue_obj *qobj = &dev->dev_queue_obj; unsigned long flags; - qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC); - if (!(qr)) { - printk(KERN_ERR "Unable to allocate memory for" - " struct se_queue_req\n"); - return -1; - } - INIT_LIST_HEAD(&qr->qr_list); - - qr->cmd = (void *)cmd; - qr->state = t_state; + INIT_LIST_HEAD(&cmd->se_queue_node); if (t_state) { - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = t_state; - 
atomic_set(&T_TASK(cmd)->t_transport_active, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&cmd->t_transport_active, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - list_add_tail(&qr->qr_list, &qobj->qobj_list); - atomic_inc(&T_TASK(cmd)->t_transport_queue_active); + if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { + cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; + list_add(&cmd->se_queue_node, &qobj->qobj_list); + } else + list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); + atomic_inc(&cmd->t_transport_queue_active); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); atomic_inc(&qobj->queue_cnt); wake_up_interruptible(&qobj->thread_wq); - return 0; -} - -/* - * Called with struct se_queue_obj->cmd_queue_lock held. - */ -static struct se_queue_req * -__transport_get_qr_from_queue(struct se_queue_obj *qobj) -{ - struct se_cmd *cmd; - struct se_queue_req *qr = NULL; - - if (list_empty(&qobj->qobj_list)) - return NULL; - - list_for_each_entry(qr, &qobj->qobj_list, qr_list) - break; - - if (qr->cmd) { - cmd = (struct se_cmd *)qr->cmd; - atomic_dec(&T_TASK(cmd)->t_transport_queue_active); - } - list_del(&qr->qr_list); - atomic_dec(&qobj->queue_cnt); - - return qr; } -static struct se_queue_req * -transport_get_qr_from_queue(struct se_queue_obj *qobj) +static struct se_cmd * +transport_get_cmd_from_queue(struct se_queue_obj *qobj) { struct se_cmd *cmd; - struct se_queue_req *qr; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); @@ -871,50 +653,42 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj) spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return NULL; } + cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); - list_for_each_entry(qr, &qobj->qobj_list, qr_list) - break; + atomic_dec(&cmd->t_transport_queue_active); - if (qr->cmd) { - cmd = (struct se_cmd *)qr->cmd; - atomic_dec(&T_TASK(cmd)->t_transport_queue_active); - } - list_del(&qr->qr_list); + list_del(&cmd->se_queue_node); atomic_dec(&qobj->queue_cnt); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - return qr; + return cmd; } static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj) { - struct se_cmd *q_cmd; - struct se_queue_req *qr = NULL, *qr_p = NULL; + struct se_cmd *t; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) { + if (!atomic_read(&cmd->t_transport_queue_active)) { spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return; } - list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { - q_cmd = (struct se_cmd *)qr->cmd; - if (q_cmd != cmd) - continue; - - atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active); - atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); - kfree(qr); - } + list_for_each_entry(t, &qobj->qobj_list, se_queue_node) + if (t == cmd) { + atomic_dec(&cmd->t_transport_queue_active); + atomic_dec(&qobj->queue_cnt); + list_del(&cmd->se_queue_node); + break; + } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) { - printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", - CMD_TFO(cmd)->get_task_tag(cmd), - atomic_read(&T_TASK(cmd)->t_transport_queue_active)); + if (atomic_read(&cmd->t_transport_queue_active)) { + pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", + cmd->se_tfo->get_task_tag(cmd), + atomic_read(&cmd->t_transport_queue_active)); } } @@ -924,7 
+698,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, */ void transport_complete_sync_cache(struct se_cmd *cmd, int good) { - struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next, + struct se_task *task = list_entry(cmd->t_task_list.next, struct se_task, t_list); if (good) { @@ -933,7 +707,7 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) } else { task->task_scsi_status = SAM_STAT_CHECK_CONDITION; task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; - TASK_CMD(task)->transport_error_status = + task->task_se_cmd->transport_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; } @@ -948,22 +722,18 @@ EXPORT_SYMBOL(transport_complete_sync_cache); */ void transport_complete_task(struct se_task *task, int success) { - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = task->se_dev; int t_state; unsigned long flags; #if 0 - printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, - T_TASK(cmd)->t_task_cdb[0], dev); + pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, + cmd->t_task_cdb[0], dev); #endif - if (dev) { - spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); + if (dev) atomic_inc(&dev->depth_left); - atomic_inc(&SE_HBA(dev)->left_queue_depth); - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); - } - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_set(&task->task_active, 0); /* @@ -985,14 +755,14 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_stop)) { /* - * Decrement T_TASK(cmd)->t_se_count if this task had + * Decrement cmd->t_se_count if this task had * previously thrown its timeout exception handler. */ if (atomic_read(&task->task_timeout)) { - atomic_dec(&T_TASK(cmd)->t_se_count); + atomic_dec(&cmd->t_se_count); atomic_set(&task->task_timeout, 0); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&task->task_stop_comp); return; @@ -1003,34 +773,34 @@ void transport_complete_task(struct se_task *task, int success) * the processing thread. */ if (atomic_read(&task->task_timeout)) { - if (!(atomic_dec_and_test( - &T_TASK(cmd)->t_task_cdbs_timeout_left))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + if (!atomic_dec_and_test( + &cmd->t_task_cdbs_timeout_left)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } t_state = TRANSPORT_COMPLETE_TIMEOUT; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); return; } - atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left); + atomic_dec(&cmd->t_task_cdbs_timeout_left); /* * Decrement the outstanding t_task_cdbs_left count. The last * struct se_task from struct se_cmd will complete itself into the * device queue depending upon int success. 
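The queue rework in these hunks drops the separately kzalloc'd struct se_queue_req wrapper: struct se_cmd now embeds its own se_queue_node list head, so enqueueing no longer needs a GFP_ATOMIC allocation that could fail, requeued QUEUE_FULL commands can go to the head with list_add(), and the consumer pops entries with list_first_entry(). The shape, with a generic payload standing in for se_cmd:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct cmd {
	struct list_head queue_node;	/* embedded, no wrapper allocation */
	int payload;
};

static LIST_HEAD(cmd_queue);
static DEFINE_SPINLOCK(cmd_queue_lock);

static void enqueue(struct cmd *c, bool requeue_at_head)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd_queue_lock, flags);
	if (requeue_at_head)	/* e.g. a QUEUE_FULL retry */
		list_add(&c->queue_node, &cmd_queue);
	else
		list_add_tail(&c->queue_node, &cmd_queue);
	spin_unlock_irqrestore(&cmd_queue_lock, flags);
}

static struct cmd *dequeue(void)
{
	struct cmd *c = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cmd_queue_lock, flags);
	if (!list_empty(&cmd_queue)) {
		c = list_first_entry(&cmd_queue, struct cmd, queue_node);
		list_del(&c->queue_node);
	}
	spin_unlock_irqrestore(&cmd_queue_lock, flags);
	return c;
}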
*/ - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { + if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { if (!success) - T_TASK(cmd)->t_tasks_failed = 1; + cmd->t_tasks_failed = 1; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - if (!success || T_TASK(cmd)->t_tasks_failed) { + if (!success || cmd->t_tasks_failed) { t_state = TRANSPORT_COMPLETE_FAILURE; if (!task->task_error_status) { task->task_error_status = @@ -1039,10 +809,10 @@ void transport_complete_task(struct se_task *task, int success) PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } } else { - atomic_set(&T_TASK(cmd)->t_transport_complete, 1); + atomic_set(&cmd->t_transport_complete, 1); t_state = TRANSPORT_COMPLETE_OK; } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); } @@ -1080,9 +850,9 @@ static inline int transport_add_task_check_sam_attr( &task_prev->t_execute_list : &dev->execute_task_list); - DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" + pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" " in execution queue\n", - T_TASK(task->task_se_cmd)->t_task_cdb[0]); + task->task_se_cmd->t_task_cdb[0]); return 1; } /* @@ -1124,8 +894,8 @@ static void __transport_add_task_to_execute_queue( atomic_set(&task->task_state_active, 1); - DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", - CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd), + pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", + task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), task, dev); } @@ -1135,8 +905,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_for_each_entry(task, &cmd->t_task_list, t_list) { dev = task->se_dev; if (atomic_read(&task->task_state_active)) @@ -1146,23 +916,23 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) list_add_tail(&task->t_state_list, &dev->state_task_list); atomic_set(&task->task_state_active, 1); - DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", - CMD_TFO(task->task_se_cmd)->get_task_tag( + pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", + task->task_se_cmd->se_tfo->get_task_tag( task->task_se_cmd), task, dev); spin_unlock(&dev->execute_task_lock); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static void transport_add_tasks_from_cmd(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_task *task, *task_prev = NULL; unsigned long flags; spin_lock_irqsave(&dev->execute_task_lock, flags); - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task_list, t_list) { if (atomic_read(&task->task_execute_queue)) continue; /* @@ -1174,30 +944,6 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd) task_prev = task; } spin_unlock_irqrestore(&dev->execute_task_lock, flags); - - return; -} - -/* transport_get_task_from_execute_queue(): - * - * Called with dev->execute_task_lock held. 
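The completion accounting at the top of this hunk hinges on atomic_dec_and_test(): every completing task decrements t_task_cdbs_left, and exactly one caller, the one that takes the counter to zero, moves the command on to completion processing. The last-one-out idiom in isolation:

#include <linux/atomic.h>

/* Three outstanding tasks; each calls task_done() exactly once. */
static atomic_t tasks_left = ATOMIC_INIT(3);

static void finalize(void)
{
	/* Queue the parent command for completion processing. */
}

static void task_done(void)
{
	if (!atomic_dec_and_test(&tasks_left))
		return;		/* other tasks still outstanding */
	finalize();		/* exactly one caller gets here */
}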
- */ -static struct se_task * -transport_get_task_from_execute_queue(struct se_device *dev) -{ - struct se_task *task; - - if (list_empty(&dev->execute_task_list)) - return NULL; - - list_for_each_entry(task, &dev->execute_task_list, t_execute_list) - break; - - list_del(&task->t_execute_list); - atomic_set(&task->task_execute_queue, 0); - atomic_dec(&dev->execute_tasks); - - return task; } /* transport_remove_task_from_execute_queue(): @@ -1222,6 +968,40 @@ void transport_remove_task_from_execute_queue( spin_unlock_irqrestore(&dev->execute_task_lock, flags); } +/* + * Handle QUEUE_FULL / -EAGAIN status + */ + +static void target_qf_do_work(struct work_struct *work) +{ + struct se_device *dev = container_of(work, struct se_device, + qf_work_queue); + struct se_cmd *cmd, *cmd_tmp; + + spin_lock_irq(&dev->qf_cmd_lock); + list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { + + list_del(&cmd->se_qf_node); + atomic_dec(&dev->dev_qf_count); + smp_mb__after_atomic_dec(); + spin_unlock_irq(&dev->qf_cmd_lock); + + pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" + " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, + (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : + (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" + : "UNKNOWN"); + /* + * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd + * has been added to head of queue + */ + transport_add_cmd_to_queue(cmd, cmd->t_state); + + spin_lock_irq(&dev->qf_cmd_lock); + } + spin_unlock_irq(&dev->qf_cmd_lock); +} + unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) { switch (cmd->data_direction) { @@ -1269,7 +1049,7 @@ void transport_dump_dev_state( atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), dev->queue_depth); *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", - DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors); + dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); *bl += sprintf(b + *bl, " "); } @@ -1279,33 +1059,29 @@ void transport_dump_dev_state( */ static void transport_release_all_cmds(struct se_device *dev) { - struct se_cmd *cmd = NULL; - struct se_queue_req *qr = NULL, *qr_p = NULL; + struct se_cmd *cmd, *tcmd; int bug_out = 0, t_state; unsigned long flags; - spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); - list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list, - qr_list) { - - cmd = (struct se_cmd *)qr->cmd; - t_state = qr->state; - list_del(&qr->qr_list); - kfree(qr); - spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, + spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); + list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, + se_queue_node) { + t_state = cmd->t_state; + list_del(&cmd->se_queue_node); + spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); - printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," + pr_err("Releasing ITT: 0x%08x, i_state: %u," " t_state: %u directly\n", - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), t_state); + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), t_state); transport_release_fe_cmd(cmd); bug_out = 1; - spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); + spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); } - spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); + spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); #if 0 if (bug_out) BUG(); @@ -1362,7 +1138,7 @@ void 
transport_dump_vpd_proto_id( if (p_buf) strncpy(p_buf, buf, p_buf_len); else - printk(KERN_INFO "%s", buf); + pr_debug("%s", buf); } void @@ -1387,7 +1163,8 @@ int transport_dump_vpd_assoc( int p_buf_len) { unsigned char buf[VPD_TMP_BUF_SIZE]; - int ret = 0, len; + int ret = 0; + int len; memset(buf, 0, VPD_TMP_BUF_SIZE); len = sprintf(buf, "T10 VPD Identifier Association: "); @@ -1404,14 +1181,14 @@ int transport_dump_vpd_assoc( break; default: sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); - ret = -1; + ret = -EINVAL; break; } if (p_buf) strncpy(p_buf, buf, p_buf_len); else - printk("%s", buf); + pr_debug("%s", buf); return ret; } @@ -1434,7 +1211,8 @@ int transport_dump_vpd_ident_type( int p_buf_len) { unsigned char buf[VPD_TMP_BUF_SIZE]; - int ret = 0, len; + int ret = 0; + int len; memset(buf, 0, VPD_TMP_BUF_SIZE); len = sprintf(buf, "T10 VPD Identifier Type: "); @@ -1461,14 +1239,17 @@ int transport_dump_vpd_ident_type( default: sprintf(buf+len, "Unsupported: 0x%02x\n", vpd->device_identifier_type); - ret = -1; + ret = -EINVAL; break; } - if (p_buf) + if (p_buf) { + if (p_buf_len < strlen(buf)+1) + return -EINVAL; strncpy(p_buf, buf, p_buf_len); - else - printk("%s", buf); + } else { + pr_debug("%s", buf); + } return ret; } @@ -1511,14 +1292,14 @@ int transport_dump_vpd_ident( default: sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" " 0x%02x", vpd->device_identifier_code_set); - ret = -1; + ret = -EINVAL; break; } if (p_buf) strncpy(p_buf, buf, p_buf_len); else - printk("%s", buf); + pr_debug("%s", buf); return ret; } @@ -1569,51 +1350,51 @@ static void core_setup_task_attr_emulation(struct se_device *dev) * This is currently not available in upstream Linux/SCSI Target * mode code, and is assumed to be disabled while using TCM/pSCSI.
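The new p_buf_len guard in transport_dump_vpd_ident_type() is the interesting fix here: instead of letting strncpy() silently truncate (and possibly drop the NUL terminator), an undersized caller buffer is rejected outright. The shape of the check, as a standalone sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy a formatted string to the caller's buffer only if it fits,
     * terminator included; with no buffer, fall back to logging it. */
    static int dump_or_copy(const char *buf, char *p_buf, int p_buf_len)
    {
        if (p_buf) {
            if (p_buf_len < (int)strlen(buf) + 1)
                return -EINVAL;
            strncpy(p_buf, buf, p_buf_len);
            return 0;
        }
        printf("%s", buf);
        return 0;
    }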
*/ - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; return; } dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; - DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" - " device\n", TRANSPORT(dev)->name, - TRANSPORT(dev)->get_device_rev(dev)); + pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" + " device\n", dev->transport->name, + dev->transport->get_device_rev(dev)); } static void scsi_dump_inquiry(struct se_device *dev) { - struct t10_wwn *wwn = DEV_T10_WWN(dev); + struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; int i, device_type; /* * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer */ - printk(" Vendor: "); + pr_debug(" Vendor: "); for (i = 0; i < 8; i++) if (wwn->vendor[i] >= 0x20) - printk("%c", wwn->vendor[i]); + pr_debug("%c", wwn->vendor[i]); else - printk(" "); + pr_debug(" "); - printk(" Model: "); + pr_debug(" Model: "); for (i = 0; i < 16; i++) if (wwn->model[i] >= 0x20) - printk("%c", wwn->model[i]); + pr_debug("%c", wwn->model[i]); else - printk(" "); + pr_debug(" "); - printk(" Revision: "); + pr_debug(" Revision: "); for (i = 0; i < 4; i++) if (wwn->revision[i] >= 0x20) - printk("%c", wwn->revision[i]); + pr_debug("%c", wwn->revision[i]); else - printk(" "); + pr_debug(" "); - printk("\n"); + pr_debug("\n"); - device_type = TRANSPORT(dev)->get_device_type(dev); - printk(" Type: %s ", scsi_device_type(device_type)); - printk(" ANSI SCSI revision: %02x\n", - TRANSPORT(dev)->get_device_rev(dev)); + device_type = dev->transport->get_device_type(dev); + pr_debug(" Type: %s ", scsi_device_type(device_type)); + pr_debug(" ANSI SCSI revision: %02x\n", + dev->transport->get_device_rev(dev)); } struct se_device *transport_add_device_to_core_hba( @@ -1630,33 +1411,15 @@ struct se_device *transport_add_device_to_core_hba( struct se_device *dev; dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); - if (!(dev)) { - printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); - return NULL; - } - dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL); - if (!(dev->dev_queue_obj)) { - printk(KERN_ERR "Unable to allocate memory for" - " dev->dev_queue_obj\n"); - kfree(dev); + if (!dev) { + pr_err("Unable to allocate memory for se_dev_t\n"); return NULL; } - transport_init_queue_obj(dev->dev_queue_obj); - - dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj), - GFP_KERNEL); - if (!(dev->dev_status_queue_obj)) { - printk(KERN_ERR "Unable to allocate memory for" - " dev->dev_status_queue_obj\n"); - kfree(dev->dev_queue_obj); - kfree(dev); - return NULL; - } - transport_init_queue_obj(dev->dev_status_queue_obj); + transport_init_queue_obj(&dev->dev_queue_obj); dev->dev_flags = device_flags; dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; - dev->dev_ptr = (void *) transport_dev; + dev->dev_ptr = transport_dev; dev->se_hba = hba; dev->se_sub_dev = se_dev; dev->transport = transport; @@ -1668,6 +1431,7 @@ struct se_device *transport_add_device_to_core_hba( INIT_LIST_HEAD(&dev->delayed_cmd_list); INIT_LIST_HEAD(&dev->ordered_cmd_list); INIT_LIST_HEAD(&dev->state_task_list); + INIT_LIST_HEAD(&dev->qf_cmd_list); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); spin_lock_init(&dev->ordered_cmd_lock); @@ -1678,6 +1442,7 @@ struct se_device *transport_add_device_to_core_hba( spin_lock_init(&dev->dev_status_thr_lock); spin_lock_init(&dev->se_port_lock); 
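Turning dev_queue_obj from a separately kzalloc'd pointer into an embedded member is what lets all of the partial-failure kfree() paths above disappear. A user-space sketch of the trade-off (hypothetical names):

    #include <stdlib.h>

    struct queue_obj { int state; };  /* stands in for lock + list + waitqueue */
    static void queue_obj_init(struct queue_obj *q) { q->state = 0; }

    struct device {
        struct queue_obj queue;   /* was: struct queue_obj *queue */
    };

    /* One allocation, one failure path; the embedded member lives and
     * dies with its container, so no separate free is ever needed. */
    static struct device *device_alloc(void)
    {
        struct device *dev = calloc(1, sizeof(*dev));
        if (!dev)
            return NULL;
        queue_obj_init(&dev->queue);
        return dev;
    }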
spin_lock_init(&dev->se_tmr_lock); + spin_lock_init(&dev->qf_cmd_lock); dev->queue_depth = dev_limits->queue_depth; atomic_set(&dev->depth_left, dev->queue_depth); @@ -1715,13 +1480,16 @@ struct se_device *transport_add_device_to_core_hba( * Startup the struct se_device processing thread */ dev->process_thread = kthread_run(transport_processing_thread, dev, - "LIO_%s", TRANSPORT(dev)->name); + "LIO_%s", dev->transport->name); if (IS_ERR(dev->process_thread)) { - printk(KERN_ERR "Unable to create kthread: LIO_%s\n", - TRANSPORT(dev)->name); + pr_err("Unable to create kthread: LIO_%s\n", + dev->transport->name); goto out; } - + /* + * Setup work_queue for QUEUE_FULL + */ + INIT_WORK(&dev->qf_work_queue, target_qf_do_work); /* * Preload the initial INQUIRY const values if we are doing * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI @@ -1730,16 +1498,16 @@ struct se_device *transport_add_device_to_core_hba( * originals once back into DEV_T10_WWN(dev) for the virtual device * setup. */ - if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { - if (!(inquiry_prod) || !(inquiry_prod)) { - printk(KERN_ERR "All non TCM/pSCSI plugins require" + if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { + if (!inquiry_prod || !inquiry_rev) { + pr_err("All non TCM/pSCSI plugins require" " INQUIRY consts\n"); goto out; } - strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8); - strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16); - strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4); + strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); + strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); + strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); } scsi_dump_inquiry(dev); @@ -1754,8 +1522,6 @@ out: se_release_vpd_for_dev(dev); - kfree(dev->dev_status_queue_obj); - kfree(dev->dev_queue_obj); kfree(dev); return NULL; @@ -1794,12 +1560,11 @@ transport_generic_get_task(struct se_cmd *cmd, enum dma_data_direction data_direction) { struct se_task *task; - struct se_device *dev = SE_DEV(cmd); - unsigned long flags; + struct se_device *dev = cmd->se_dev; - task = dev->transport->alloc_task(cmd); + task = dev->transport->alloc_task(cmd->t_task_cdb); if (!task) { - printk(KERN_ERR "Unable to allocate struct se_task\n"); + pr_err("Unable to allocate struct se_task\n"); return NULL; } @@ -1807,26 +1572,15 @@ transport_generic_get_task(struct se_cmd *cmd, INIT_LIST_HEAD(&task->t_execute_list); INIT_LIST_HEAD(&task->t_state_list); init_completion(&task->task_stop_comp); - task->task_no = T_TASK(cmd)->t_tasks_no++; task->task_se_cmd = cmd; task->se_dev = dev; task->task_data_direction = data_direction; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - return task; } static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); -void transport_device_setup_cmd(struct se_cmd *cmd) -{ - cmd->se_dev = SE_LUN(cmd)->lun_se_dev; -} -EXPORT_SYMBOL(transport_device_setup_cmd); - /* * Used by fabric modules containing a local struct se_cmd within their * fabric dependent per I/O descriptor. 
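The INIT_WORK() above wires dev->qf_work_queue to target_qf_do_work(), whose drain loop detaches one entry under qf_cmd_lock, drops the lock while requeueing the command, then retakes it. The same pop-then-process discipline in a self-contained sketch (pthread names are stand-ins for the spinlock):

    #include <pthread.h>
    #include <stddef.h>

    struct qf_node { struct qf_node *next; };

    struct qf_list {
        pthread_mutex_t lock;
        struct qf_node *head;
    };

    /* Detach the head entry under the lock, handle it unlocked (the
     * handler may sleep or requeue), then retake the lock to continue. */
    static void qf_drain(struct qf_list *q, void (*handle)(struct qf_node *))
    {
        pthread_mutex_lock(&q->lock);
        while (q->head) {
            struct qf_node *n = q->head;
            q->head = n->next;
            pthread_mutex_unlock(&q->lock);
            handle(n);
            pthread_mutex_lock(&q->lock);
        }
        pthread_mutex_unlock(&q->lock);
    }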
@@ -1840,20 +1594,17 @@ void transport_init_se_cmd( int task_attr, unsigned char *sense_buffer) { - INIT_LIST_HEAD(&cmd->se_lun_list); - INIT_LIST_HEAD(&cmd->se_delayed_list); - INIT_LIST_HEAD(&cmd->se_ordered_list); - /* - * Setup t_task pointer to t_task_backstore - */ - cmd->t_task = &cmd->t_task_backstore; + INIT_LIST_HEAD(&cmd->se_lun_node); + INIT_LIST_HEAD(&cmd->se_delayed_node); + INIT_LIST_HEAD(&cmd->se_ordered_node); + INIT_LIST_HEAD(&cmd->se_qf_node); - INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list); - init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); - init_completion(&T_TASK(cmd)->transport_lun_stop_comp); - init_completion(&T_TASK(cmd)->t_transport_stop_comp); - spin_lock_init(&T_TASK(cmd)->t_state_lock); - atomic_set(&T_TASK(cmd)->transport_dev_active, 1); + INIT_LIST_HEAD(&cmd->t_task_list); + init_completion(&cmd->transport_lun_fe_stop_comp); + init_completion(&cmd->transport_lun_stop_comp); + init_completion(&cmd->t_transport_stop_comp); + spin_lock_init(&cmd->t_state_lock); + atomic_set(&cmd->transport_dev_active, 1); cmd->se_tfo = tfo; cmd->se_sess = se_sess; @@ -1870,23 +1621,23 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) * Check if SAM Task Attribute emulation is enabled for this * struct se_device storage object */ - if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) return 0; if (cmd->sam_task_attr == MSG_ACA_TAG) { - DEBUG_STA("SAM Task Attribute ACA" + pr_debug("SAM Task Attribute ACA" " emulation is not supported\n"); - return -1; + return -EINVAL; } /* * Used to determine when ORDERED commands should go from * Dormant to Active status. */ - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id); + cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); smp_mb__after_atomic_inc(); - DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", + pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, - TRANSPORT(cmd->se_dev)->name); + cmd->se_dev->transport->name); return 0; } @@ -1898,8 +1649,8 @@ void transport_free_se_cmd( /* * Check and free any extended CDB buffer that was allocated */ - if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb) - kfree(T_TASK(se_cmd)->t_task_cdb); + if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) + kfree(se_cmd->t_task_cdb); } EXPORT_SYMBOL(transport_free_se_cmd); @@ -1922,42 +1673,41 @@ int transport_generic_allocate_tasks( */ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - transport_device_setup_cmd(cmd); /* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD */ if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { - printk(KERN_ERR "Received SCSI CDB with command_size: %d that" + pr_err("Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); - return -1; + return -EINVAL; } /* * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, * allocate the additional extended CDB buffer now.. Otherwise * setup the pointer from __t_task_cdb to t_task_cdb. 
- if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) { - T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb), + if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { + cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); - if (!(T_TASK(cmd)->t_task_cdb)) { - printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb" - " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n", + if (!cmd->t_task_cdb) { + pr_err("Unable to allocate cmd->t_task_cdb" + " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), - (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb)); - return -1; + (unsigned long)sizeof(cmd->__t_task_cdb)); + return -ENOMEM; } } else - T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0]; + cmd->t_task_cdb = &cmd->__t_task_cdb[0]; /* - * Copy the original CDB into T_TASK(cmd). + * Copy the original CDB into cmd->t_task_cdb. */ - memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb)); + memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); /* * Setup the received CDB based on SCSI defined opcodes and * perform unit attention, persistent reservations and ALUA - * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb + * checks for virtual device backends. The cmd->t_task_cdb * pointer is expected to be setup before we reach this point. */ ret = transport_generic_cmd_sequencer(cmd, cdb); @@ -1969,7 +1719,7 @@ int transport_generic_allocate_tasks( if (transport_check_alloc_task_attr(cmd) < 0) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -2; + return -EINVAL; } spin_lock(&cmd->se_lun->lun_sep_lock); if (cmd->se_lun->lun_sep) @@ -1986,10 +1736,10 @@ EXPORT_SYMBOL(transport_generic_allocate_tasks); int transport_generic_handle_cdb( struct se_cmd *cmd) { - if (!SE_LUN(cmd)) { + if (!cmd->se_lun) { dump_stack(); - printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); - return -1; + pr_err("cmd->se_lun is NULL\n"); + return -EINVAL; } transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); @@ -1998,6 +1748,29 @@ int transport_generic_handle_cdb( EXPORT_SYMBOL(transport_generic_handle_cdb); /* + * Used by fabric module frontends to queue tasks directly. + * May only be used from process context. + */ +int transport_handle_cdb_direct( + struct se_cmd *cmd) +{ + if (!cmd->se_lun) { + dump_stack(); + pr_err("cmd->se_lun is NULL\n"); + return -EINVAL; + } + if (in_interrupt()) { + dump_stack(); + pr_err("transport_handle_cdb_direct cannot be called" + " from interrupt context\n"); + return -EINVAL; + } + + return transport_generic_new_cmd(cmd); +} +EXPORT_SYMBOL(transport_handle_cdb_direct); + +/* * Used by fabric module frontends defining a TFO->new_cmd_map() caller * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to * complete setup in TCM process context w/ TFO->new_cmd_map(). @@ -2005,10 +1778,10 @@ EXPORT_SYMBOL(transport_generic_handle_cdb); int transport_generic_handle_cdb_map( struct se_cmd *cmd) { - if (!SE_LUN(cmd)) { + if (!cmd->se_lun) { dump_stack(); - printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); - return -1; + pr_err("cmd->se_lun is NULL\n"); + return -EINVAL; } transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); @@ -2030,7 +1803,7 @@ int transport_generic_handle_data( * in interrupt code, the signal_pending() check is skipped.
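The allocation strategy above is a small-buffer optimization: t_task_cdb normally points at the fixed __t_task_cdb array inside the command, and only variable-length CDBs larger than that array pay for a kzalloc(). Sketch of the idiom (sizes and names hypothetical):

    #include <stdlib.h>
    #include <string.h>

    struct cdb_buf {
        unsigned char inline_cdb[32];  /* __t_task_cdb analog */
        unsigned char *cdb;            /* t_task_cdb analog   */
    };

    /* The common case costs nothing; only oversized CDBs hit the heap. */
    static int cdb_setup(struct cdb_buf *c, const unsigned char *cdb, size_t len)
    {
        if (len > sizeof(c->inline_cdb)) {
            c->cdb = calloc(1, len);
            if (!c->cdb)
                return -1;  /* -ENOMEM in the kernel version */
        } else {
            c->cdb = c->inline_cdb;
        }
        memcpy(c->cdb, cdb, len);
        return 0;
    }

Freeing must mirror the test, as transport_free_se_cmd() above does: free only when the pointer no longer aliases the inline array.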
*/ if (!in_interrupt() && signal_pending(current)) - return -1; + return -EPERM; /* * If the received CDB has already been ABORTED by the generic * target engine, we now call transport_check_aborted_status() @@ -2057,7 +1830,6 @@ int transport_generic_handle_tmr( * This is needed for early exceptions. */ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - transport_device_setup_cmd(cmd); transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); return 0; @@ -2077,16 +1849,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) unsigned long flags; int ret = 0; - DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("ITT[0x%08x] - Stopping tasks\n", + cmd->se_tfo->get_task_tag(cmd)); /* * No tasks remain in the execution queue */ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &T_TASK(cmd)->t_task_list, t_list) { - DEBUG_TS("task_no[%d] - Processing task %p\n", + &cmd->t_task_list, t_list) { + pr_debug("task_no[%d] - Processing task %p\n", task->task_no, task); /* * If the struct se_task has not been sent and is not active, @@ -2094,14 +1866,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (!atomic_read(&task->task_sent) && !atomic_read(&task->task_active)) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_remove_task_from_execute_queue(task, task->se_dev); - DEBUG_TS("task_no[%d] - Removed from execute queue\n", + pr_debug("task_no[%d] - Removed from execute queue\n", task->task_no); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); continue; } @@ -2111,42 +1883,32 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - DEBUG_TS("task_no[%d] - Waiting to complete\n", + pr_debug("task_no[%d] - Waiting to complete\n", task->task_no); wait_for_completion(&task->task_stop_comp); - DEBUG_TS("task_no[%d] - Stopped successfully\n", + pr_debug("task_no[%d] - Stopped successfully\n", task->task_no); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_dec(&cmd->t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); } else { - DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); + pr_debug("task_no[%d] - Did nothing\n", task->task_no); ret++; } __transport_stop_task_timer(task, &flags); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return ret; } -static void transport_failure_reset_queue_depth(struct se_device *dev) -{ - unsigned long flags; - - spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); - atomic_inc(&dev->depth_left); - atomic_inc(&SE_HBA(dev)->left_queue_depth); - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); -} - /* * Handle SAM-esque emulation for generic transport request failures.
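transport_stop_tasks_for_cmd() shows the stop handshake used throughout this file: flag the task, drop t_state_lock so the completion path can run, sleep on task_stop_comp, then retake the lock and clear the flags. A condensed pthread rendition of the same ordering (hypothetical names):

    #include <pthread.h>
    #include <stdbool.h>

    struct ptask {
        pthread_mutex_t lock;      /* t_state_lock analog   */
        pthread_cond_t stop_comp;  /* task_stop_comp analog */
        bool active;
        bool stop;
    };

    static void ptask_stop(struct ptask *t)
    {
        pthread_mutex_lock(&t->lock);
        t->stop = true;
        while (t->active)  /* wait_for_completion() analog */
            pthread_cond_wait(&t->stop_comp, &t->lock);
        t->stop = false;
        pthread_mutex_unlock(&t->lock);
    }

    /* Completion side, i.e. what transport_complete_task() does when it
     * observes the stop flag set. */
    static void ptask_finished(struct ptask *t)
    {
        pthread_mutex_lock(&t->lock);
        t->active = false;
        pthread_cond_signal(&t->stop_comp);
        pthread_mutex_unlock(&t->lock);
    }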
*/ @@ -2156,29 +1918,31 @@ static void transport_generic_request_failure( int complete, int sc) { - DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" - " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), - T_TASK(cmd)->t_task_cdb[0]); - DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" + int ret = 0; + + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" + " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->t_task_cdb[0]); + pr_debug("-----[ i_state: %d t_state/def_t_state:" " %d/%d transport_error_status: %d\n", - CMD_TFO(cmd)->get_cmd_state(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state, cmd->transport_error_status); - DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" + pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" " t_transport_active: %d t_transport_stop: %d" - " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs, - atomic_read(&T_TASK(cmd)->t_task_cdbs_left), - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), - atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left), - atomic_read(&T_TASK(cmd)->t_transport_active), - atomic_read(&T_TASK(cmd)->t_transport_stop), - atomic_read(&T_TASK(cmd)->t_transport_sent)); + " t_transport_sent: %d\n", cmd->t_task_list_num, + atomic_read(&cmd->t_task_cdbs_left), + atomic_read(&cmd->t_task_cdbs_sent), + atomic_read(&cmd->t_task_cdbs_ex_left), + atomic_read(&cmd->t_transport_active), + atomic_read(&cmd->t_transport_stop), + atomic_read(&cmd->t_transport_sent)); transport_stop_all_task_timers(cmd); if (dev) - transport_failure_reset_queue_depth(dev); + atomic_inc(&dev->depth_left); /* * For SAM Task Attribute emulation for failed struct se_cmd */ @@ -2211,8 +1975,8 @@ static void transport_generic_request_failure( * we force this session to fall back to session * recovery. 
*/ - CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess); - CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0); + cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); + cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); goto check_stop; case PYX_TRANSPORT_LU_COMM_FAILURE: @@ -2240,13 +2004,15 @@ static void transport_generic_request_failure( * * See spc4r17, section 7.4.6 Control Mode Page, Table 349 */ - if (SE_SESS(cmd) && - DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) - core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, + if (cmd->se_sess && + cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) + core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, cmd->orig_fe_lun, 0x2C, ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - CMD_TFO(cmd)->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret == -EAGAIN) + goto queue_full; goto check_stop; case PYX_TRANSPORT_USE_SENSE_REASON: /* @@ -2254,8 +2020,8 @@ static void transport_generic_request_failure( */ break; default: - printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", - T_TASK(cmd)->t_task_cdb[0], + pr_err("Unknown transport error for CDB 0x%02x: %d\n", + cmd->t_task_cdb[0], cmd->transport_error_status); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; @@ -2263,32 +2029,41 @@ static void transport_generic_request_failure( if (!sc) transport_new_cmd_failure(cmd); - else - transport_send_check_condition_and_sense(cmd, - cmd->scsi_sense_reason, 0); + else { + ret = transport_send_check_condition_and_sense(cmd, + cmd->scsi_sense_reason, 0); + if (ret == -EAGAIN) + goto queue_full; + } + check_stop: transport_lun_remove_cmd(cmd); - if (!(transport_cmd_check_stop_to_fabric(cmd))) + if (!transport_cmd_check_stop_to_fabric(cmd)) ; + return; + +queue_full: + cmd->t_state = TRANSPORT_COMPLETE_OK; + transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); } static void transport_direct_request_timeout(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!atomic_read(&cmd->t_transport_timeout)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout), - &T_TASK(cmd)->t_se_count); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_sub(atomic_read(&cmd->t_transport_timeout), + &cmd->t_se_count); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static void transport_generic_request_timeout(struct se_cmd *cmd) @@ -2296,35 +2071,18 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) unsigned long flags; /* - * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove() + * Reset cmd->t_se_count to allow transport_generic_remove() * to allow last call to free memory resources. 
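The new queue_full label above converts an -EAGAIN from the fabric's queue_status()/sense path into deferred delivery: the command is parked and target_qf_do_work() replays it from process context. The calling convention in miniature (all names here are hypothetical):

    #include <errno.h>
    #include <stdio.h>

    struct mini_cmd { int tag; };

    /* Fabric response hook that can fail transiently, like queue_status(). */
    static int send_status(struct mini_cmd *c) { (void)c; return -EAGAIN; }

    /* transport_handle_queue_full() analog: park the command for the
     * queue-full worker to retry later. */
    static void park_for_retry(struct mini_cmd *c)
    {
        printf("cmd %d parked for QUEUE_FULL retry\n", c->tag);
    }

    static void complete_cmd(struct mini_cmd *c)
    {
        if (send_status(c) == -EAGAIN) {
            park_for_retry(c);
            return;  /* state preserved; the worker re-drives it */
        }
        /* normal completion continues here */
    }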
*/ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) { - int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1); - - atomic_sub(tmp, &T_TASK(cmd)->t_se_count); - } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - - transport_generic_remove(cmd, 0, 0); -} - -static int -transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) -{ - unsigned char *buf; + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_transport_timeout) > 1) { + int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); - buf = kzalloc(data_length, GFP_KERNEL); - if (!(buf)) { - printk(KERN_ERR "Unable to allocate memory for buffer\n"); - return -1; + atomic_sub(tmp, &cmd->t_se_count); } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - T_TASK(cmd)->t_tasks_se_num = 0; - T_TASK(cmd)->t_task_buf = buf; - - return 0; + transport_generic_remove(cmd, 0); } static inline u32 transport_lba_21(unsigned char *cdb) @@ -2364,9 +2122,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) { unsigned long flags; - spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; - spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } /* @@ -2375,14 +2133,14 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) static void transport_task_timeout_handler(unsigned long data) { struct se_task *task = (struct se_task *)data; - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; unsigned long flags; - DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); + pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); if (task->task_flags & TF_STOP) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } task->task_flags &= ~TF_RUNNING; @@ -2390,46 +2148,46 @@ static void transport_task_timeout_handler(unsigned long data) /* * Determine if transport_complete_task() has already been called. 
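The transport_lba_*() helpers referenced here decode big-endian LBA fields straight out of the CDB; their bodies are not shown in this hunk, but the SBC layouts they follow look like this (a sketch, not the kernel source):

    #include <stdint.h>

    /* 6-byte CDB: 21-bit LBA in bytes 1..3 (top bits of byte 1 are reserved). */
    static uint32_t lba_21(const unsigned char *cdb)
    {
        return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
    }

    /* 10/12-byte CDB: 32-bit LBA in bytes 2..5. */
    static uint32_t lba_32(const unsigned char *cdb)
    {
        return ((uint32_t)cdb[2] << 24) | (cdb[3] << 16) |
               (cdb[4] << 8) | cdb[5];
    }

    /* 16-byte CDB: 64-bit LBA in bytes 2..9. */
    static uint64_t lba_64(const unsigned char *cdb)
    {
        uint64_t lba = 0;
        for (int i = 0; i < 8; i++)
            lba = (lba << 8) | cdb[2 + i];
        return lba;
    }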
*/ - if (!(atomic_read(&task->task_active))) { - DEBUG_TT("transport task: %p cmd: %p timeout task_active" + if (!atomic_read(&task->task_active)) { + pr_debug("transport task: %p cmd: %p timeout task_active" " == 0\n", task, cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - atomic_inc(&T_TASK(cmd)->t_se_count); - atomic_inc(&T_TASK(cmd)->t_transport_timeout); - T_TASK(cmd)->t_tasks_failed = 1; + atomic_inc(&cmd->t_se_count); + atomic_inc(&cmd->t_transport_timeout); + cmd->t_tasks_failed = 1; atomic_set(&task->task_timeout, 1); task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; task->task_scsi_status = 1; if (atomic_read(&task->task_stop)) { - DEBUG_TT("transport task: %p cmd: %p timeout task_stop" + pr_debug("transport task: %p cmd: %p timeout task_stop" " == 1\n", task, cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&task->task_stop_comp); return; } - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) { - DEBUG_TT("transport task: %p cmd: %p timeout non zero" + if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { + pr_debug("transport task: %p cmd: %p timeout non zero" " t_task_cdbs_left\n", task, cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", + pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", task, cmd); cmd->t_state = TRANSPORT_COMPLETE_FAILURE; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); } /* - * Called with T_TASK(cmd)->t_state_lock held. + * Called with cmd->t_state_lock held. */ static void transport_start_task_timer(struct se_task *task) { @@ -2441,8 +2199,8 @@ static void transport_start_task_timer(struct se_task *task) /* * If the task_timeout is disabled, exit now. */ - timeout = DEV_ATTRIB(dev)->task_timeout; - if (!(timeout)) + timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; + if (!timeout) return; init_timer(&task->task_timer); @@ -2453,27 +2211,27 @@ static void transport_start_task_timer(struct se_task *task) task->task_flags |= TF_RUNNING; add_timer(&task->task_timer); #if 0 - printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" + pr_debug("Starting task timer for cmd: %p task: %p seconds:" " %d\n", task->task_se_cmd, task, timeout); #endif } /* - * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held. + * Called with spin_lock_irq(&cmd->t_state_lock) held. 
*/ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) { - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; - if (!(task->task_flags & TF_RUNNING)) + if (!(task->task_flags & TF_RUNNING)) return; task->task_flags |= TF_STOP; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags); + spin_unlock_irqrestore(&cmd->t_state_lock, *flags); del_timer_sync(&task->task_timer); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags); + spin_lock_irqsave(&cmd->t_state_lock, *flags); task->task_flags &= ~TF_RUNNING; task->task_flags &= ~TF_STOP; } @@ -2483,11 +2241,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd) struct se_task *task = NULL, *task_tmp; unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &T_TASK(cmd)->t_task_list, t_list) + &cmd->t_task_list, t_list) __transport_stop_task_timer(task, &flags); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static inline int transport_tcq_window_closed(struct se_device *dev) @@ -2498,7 +2256,7 @@ static inline int transport_tcq_window_closed(struct se_device *dev) } else msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); - wake_up_interruptible(&dev->dev_queue_obj->thread_wq); + wake_up_interruptible(&dev->dev_queue_obj.thread_wq); return 0; } @@ -2511,45 +2269,45 @@ static inline int transport_tcq_window_closed(struct se_device *dev) */ static inline int transport_execute_task_attr(struct se_cmd *cmd) { - if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) return 1; /* * Check for the existence of HEAD_OF_QUEUE, and if true return 1 * to allow the passed struct se_cmd list of tasks to the front of the list. */ if (cmd->sam_task_attr == MSG_HEAD_TAG) { - atomic_inc(&SE_DEV(cmd)->dev_hoq_count); + atomic_inc(&cmd->se_dev->dev_hoq_count); smp_mb__after_atomic_inc(); - DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" + pr_debug("Added HEAD_OF_QUEUE for CDB:" " 0x%02x, se_ordered_id: %u\n", - T_TASK(cmd)->t_task_cdb[0], + cmd->t_task_cdb[0], cmd->se_ordered_id); return 1; } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { - spin_lock(&SE_DEV(cmd)->ordered_cmd_lock); - list_add_tail(&cmd->se_ordered_list, - &SE_DEV(cmd)->ordered_cmd_list); - spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock); + spin_lock(&cmd->se_dev->ordered_cmd_lock); + list_add_tail(&cmd->se_ordered_node, + &cmd->se_dev->ordered_cmd_list); + spin_unlock(&cmd->se_dev->ordered_cmd_lock); - atomic_inc(&SE_DEV(cmd)->dev_ordered_sync); + atomic_inc(&cmd->se_dev->dev_ordered_sync); smp_mb__after_atomic_inc(); - DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" + pr_debug("Added ORDERED for CDB: 0x%02x to ordered" " list, se_ordered_id: %u\n", - T_TASK(cmd)->t_task_cdb[0], + cmd->t_task_cdb[0], cmd->se_ordered_id); /* * Add ORDERED command to tail of execution queue if * no other older commands exist that need to be * completed first.
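transport_execute_task_attr() boils down to a small admission test: HEAD_OF_QUEUE always executes, ORDERED executes only when no SIMPLE commands are in flight, and everything stalls behind a pending ORDERED barrier. A condensed sketch of that decision (hypothetical names; the real function also maintains the ordered/delayed lists):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum sam_attr { ATTR_SIMPLE, ATTR_HEAD_OF_QUEUE, ATTR_ORDERED };

    struct attr_state {
        atomic_int simple_cmds;   /* in-flight SIMPLE commands        */
        atomic_int ordered_sync;  /* ORDERED barriers not yet drained */
    };

    /* true: add to the execute queue now; false: park on the delayed list. */
    static bool may_execute(struct attr_state *d, enum sam_attr a)
    {
        switch (a) {
        case ATTR_HEAD_OF_QUEUE:
            return true;
        case ATTR_ORDERED:
            atomic_fetch_add(&d->ordered_sync, 1);
            if (atomic_load(&d->simple_cmds) == 0)
                return true;
            break;
        case ATTR_SIMPLE:
            atomic_fetch_add(&d->simple_cmds, 1);
            break;
        }
        return atomic_load(&d->ordered_sync) == 0;
    }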
*/ - if (!(atomic_read(&SE_DEV(cmd)->simple_cmds))) + if (!atomic_read(&cmd->se_dev->simple_cmds)) return 1; } else { /* * For SIMPLE and UNTAGGED Task Attribute commands */ - atomic_inc(&SE_DEV(cmd)->simple_cmds); + atomic_inc(&cmd->se_dev->simple_cmds); smp_mb__after_atomic_inc(); } /* @@ -2557,20 +2315,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) * add the dormant task(s) built for the passed struct se_cmd to the * execution queue and become in Active state for this struct se_device. */ - if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) { + if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { /* * Otherwise, add cmd w/ tasks to delayed cmd queue that * will be drained upon completion of HEAD_OF_QUEUE task. */ - spin_lock(&SE_DEV(cmd)->delayed_cmd_lock); + spin_lock(&cmd->se_dev->delayed_cmd_lock); cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; - list_add_tail(&cmd->se_delayed_list, - &SE_DEV(cmd)->delayed_cmd_list); - spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock); + list_add_tail(&cmd->se_delayed_node, + &cmd->se_dev->delayed_cmd_list); + spin_unlock(&cmd->se_dev->delayed_cmd_lock); - DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" + pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" " delayed CMD list, se_ordered_id: %u\n", - T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr, + cmd->t_task_cdb[0], cmd->sam_task_attr, cmd->se_ordered_id); /* * Return zero to let transport_execute_tasks() know @@ -2592,25 +2350,23 @@ static int transport_execute_tasks(struct se_cmd *cmd) { int add_tasks; - if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { - if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { - cmd->transport_error_status = - PYX_TRANSPORT_LU_COMM_FAILURE; - transport_generic_request_failure(cmd, NULL, 0, 1); - return 0; - } + if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { + cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; + transport_generic_request_failure(cmd, NULL, 0, 1); + return 0; } + /* * Call transport_cmd_check_stop() to see if a fabric exception * has occurred that prevents execution. */ - if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { + if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { /* * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE * attribute for the tasks of the received struct se_cmd CDB */ add_tasks = transport_execute_task_attr(cmd); - if (add_tasks == 0) + if (!add_tasks) goto execute_tasks; /* * This calls transport_add_tasks_from_cmd() to handle @@ -2625,7 +2381,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) * storage object. */ execute_tasks: - __transport_execute_tasks(SE_DEV(cmd)); + __transport_execute_tasks(cmd->se_dev); return 0; } @@ -2639,51 +2395,49 @@ static int __transport_execute_tasks(struct se_device *dev) { int error; struct se_cmd *cmd = NULL; - struct se_task *task; + struct se_task *task = NULL; unsigned long flags; /* * Check if there is enough room in the device and HBA queue to send - * struct se_transport_task's to the selected transport. + * struct se_tasks to the selected transport. 
*/ check_depth: - spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); - if (!(atomic_read(&dev->depth_left)) || - !(atomic_read(&SE_HBA(dev)->left_queue_depth))) { - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); + if (!atomic_read(&dev->depth_left)) return transport_tcq_window_closed(dev); - } - dev->dev_tcq_window_closed = 0; - spin_lock(&dev->execute_task_lock); - task = transport_get_task_from_execute_queue(dev); - spin_unlock(&dev->execute_task_lock); + dev->dev_tcq_window_closed = 0; - if (!task) { - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); + spin_lock_irq(&dev->execute_task_lock); + if (list_empty(&dev->execute_task_list)) { + spin_unlock_irq(&dev->execute_task_lock); return 0; } + task = list_first_entry(&dev->execute_task_list, + struct se_task, t_execute_list); + list_del(&task->t_execute_list); + atomic_set(&task->task_execute_queue, 0); + atomic_dec(&dev->execute_tasks); + spin_unlock_irq(&dev->execute_task_lock); atomic_dec(&dev->depth_left); - atomic_dec(&SE_HBA(dev)->left_queue_depth); - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); - cmd = TASK_CMD(task); + cmd = task->task_se_cmd; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_set(&task->task_active, 1); atomic_set(&task->task_sent, 1); - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent); + atomic_inc(&cmd->t_task_cdbs_sent); - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) == - T_TASK(cmd)->t_task_cdbs) + if (atomic_read(&cmd->t_task_cdbs_sent) == + cmd->t_task_list_num) atomic_set(&cmd->transport_sent, 1); transport_start_task_timer(task); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); /* * The struct se_cmd->transport_emulate_cdb() function pointer is used - * to grab REPORT_LUNS CDBs before they hit the + * to grab REPORT_LUNS and other CDBs we want to handle before they hit the * struct se_subsystem_api->do_task() caller below. */ if (cmd->transport_emulate_cdb) { @@ -2718,11 +2472,11 @@ check_depth: * call ->do_task() directly and let the underlying TCM subsystem plugin * code handle the CDB emulation. */ - if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && - (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) + if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && + (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) error = transport_emulate_control_cdb(task); else - error = TRANSPORT(dev)->do_task(task); + error = dev->transport->do_task(task); if (error != 0) { cmd->transport_error_status = error; @@ -2745,12 +2499,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) * Any unsolicited data will get dumped for failed command inside of * the fabric plugin */ - spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags); - - CMD_TFO(se_cmd)->new_cmd_failure(se_cmd); + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); @@ -2760,7 +2512,7 @@ static inline u32 transport_get_sectors_6( struct se_cmd *cmd, int *ret) { - struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. 
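With transport_get_task_from_execute_queue() gone, __transport_execute_tasks() open-codes the dequeue: check for an empty list, detach the first task, and drop execute_task_lock before dispatching so completions can keep feeding the queue. The detach step in isolation (pthread stand-ins for the spinlock):

    #include <pthread.h>
    #include <stddef.h>

    struct xtask { struct xtask *next; };

    struct exec_queue {
        pthread_mutex_t lock;  /* execute_task_lock analog */
        struct xtask *head;
    };

    /* Pop the first queued task, or NULL; dispatch happens unlocked. */
    static struct xtask *pop_task(struct exec_queue *q)
    {
        pthread_mutex_lock(&q->lock);
        struct xtask *t = q->head;
        if (t)
            q->head = t->next;
        pthread_mutex_unlock(&q->lock);
        return t;
    }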
@@ -2772,7 +2524,7 @@ static inline u32 transport_get_sectors_6( /* * Use 24-bit allocation length for TYPE_TAPE. */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) + if (dev->transport->get_device_type(dev) == TYPE_TAPE) return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; /* @@ -2788,7 +2540,7 @@ static inline u32 transport_get_sectors_10( struct se_cmd *cmd, int *ret) { - struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2800,8 +2552,8 @@ static inline u32 transport_get_sectors_10( /* * XXX_10 is not defined in SSC, throw an exception */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { - *ret = -1; + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { + *ret = -EINVAL; return 0; } @@ -2818,7 +2570,7 @@ static inline u32 transport_get_sectors_12( struct se_cmd *cmd, int *ret) { - struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2830,8 +2582,8 @@ static inline u32 transport_get_sectors_12( /* * XXX_12 is not defined in SSC, throw an exception */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { - *ret = -1; + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { + *ret = -EINVAL; return 0; } @@ -2848,7 +2600,7 @@ static inline u32 transport_get_sectors_16( struct se_cmd *cmd, int *ret) { - struct se_device *dev = SE_LUN(cmd)->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2860,7 +2612,7 @@ static inline u32 transport_get_sectors_16( /* * Use 24-bit allocation length for TYPE_TAPE. */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) + if (dev->transport->get_device_type(dev) == TYPE_TAPE) return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; type_disk: @@ -2890,57 +2642,30 @@ static inline u32 transport_get_size( unsigned char *cdb, struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) { + if (dev->transport->get_device_type(dev) == TYPE_TAPE) { if (cdb[1] & 1) { /* sectors */ - return DEV_ATTRIB(dev)->block_size * sectors; + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; } else /* bytes */ return sectors; } #if 0 - printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" - " %s object\n", DEV_ATTRIB(dev)->block_size, sectors, - DEV_ATTRIB(dev)->block_size * sectors, - TRANSPORT(dev)->name); + pr_debug("Returning block_size: %u, sectors: %u == %u for" + " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, + dev->se_sub_dev->se_dev_attrib.block_size * sectors, + dev->transport->name); #endif - return DEV_ATTRIB(dev)->block_size * sectors; -} - -unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) -{ - unsigned char result = 0; - /* - * MSB - */ - if ((val[0] >= 'a') && (val[0] <= 'f')) - result = ((val[0] - 'a' + 10) & 0xf) << 4; - else - if ((val[0] >= 'A') && (val[0] <= 'F')) - result = ((val[0] - 'A' + 10) & 0xf) << 4; - else /* digit */ - result = ((val[0] - '0') & 0xf) << 4; - /* - * LSB - */ - if ((val[1] >= 'a') && (val[1] <= 'f')) - result |= ((val[1] - 'a' + 10) & 0xf); - else - if ((val[1] >= 'A') && (val[1] <= 'F')) - result |= ((val[1] - 'A' + 10) & 0xf); - else /* digit */ - result |= ((val[1] - '0') & 0xf); - - return result; + return dev->se_sub_dev->se_dev_attrib.block_size * sectors; } 
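transport_get_size() above has one quirk worth a worked example: for sequential-access (tape) devices the CDB transfer-length field counts logical blocks only when the FIXED bit (cdb[1] & 1) is set, otherwise it already counts bytes; disks always scale by the block size. Condensed:

    #include <stdint.h>
    #include <stdbool.h>

    /* Sectors-or-bytes sizing, mirroring the tape special case above. */
    static uint32_t xfer_size(uint32_t sectors, bool is_tape, bool fixed_bit,
                              uint32_t block_size)
    {
        if (is_tape && !fixed_bit)
            return sectors;              /* field is in bytes  */
        return block_size * sectors;     /* field is in blocks */
    }

    /* e.g. 8 blocks on a 512-byte-block disk -> 4096 bytes. */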
-EXPORT_SYMBOL(transport_asciihex_to_binaryhex); static void transport_xor_callback(struct se_cmd *cmd) { unsigned char *buf, *addr; - struct se_mem *se_mem; + struct scatterlist *sg; unsigned int offset; int i; + int count; /* * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command * * 1) read the second commands XOR data blocks * 2) transfer the resulting XOR data to the data-in buffer. */ buf = kmalloc(cmd->data_length, GFP_KERNEL); - if (!(buf)) { - printk(KERN_ERR "Unable to allocate xor_callback buf\n"); + if (!buf) { + pr_err("Unable to allocate xor_callback buf\n"); return; } /* - * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list + * Copy the scatterlist WRITE buffer located at cmd->t_data_sg * into the locally allocated *buf */ - transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list); + sg_copy_to_buffer(cmd->t_data_sg, + cmd->t_data_nents, + buf, + cmd->data_length); + /* * Now perform the XOR against the BIDI read memory located at - * T_TASK(cmd)->t_mem_bidi_list + * cmd->t_bidi_data_sg */ offset = 0; - list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) { - addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); - if (!(addr)) + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { + addr = kmap_atomic(sg_page(sg), KM_USER0); + if (!addr) goto out; - for (i = 0; i < se_mem->se_len; i++) - *(addr + se_mem->se_off + i) ^= *(buf + offset + i); + for (i = 0; i < sg->length; i++) + *(addr + sg->offset + i) ^= *(buf + offset + i); - offset += se_mem->se_len; + offset += sg->length; kunmap_atomic(addr, KM_USER0); } + out: kfree(buf); } @@ -2994,75 +2724,60 @@ static int transport_get_sense_data(struct se_cmd *cmd) unsigned long flags; u32 offset = 0; - if (!SE_LUN(cmd)) { - printk(KERN_ERR "SE_LUN(cmd) is NULL\n"); - return -1; - } - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + WARN_ON(!cmd->se_lun); + + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } list_for_each_entry_safe(task, task_tmp, - &T_TASK(cmd)->t_task_list, t_list) { + &cmd->t_task_list, t_list) { if (!task->task_sense) continue; dev = task->se_dev; - if (!(dev)) + if (!dev) continue; - if (!TRANSPORT(dev)->get_sense_buffer) { - printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer" + if (!dev->transport->get_sense_buffer) { + pr_err("dev->transport->get_sense_buffer" " is NULL\n"); continue; } - sense_buffer = TRANSPORT(dev)->get_sense_buffer(task); - if (!(sense_buffer)) { - printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" + sense_buffer = dev->transport->get_sense_buffer(task); + if (!sense_buffer) { + pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" " sense buffer for task with sense\n", - CMD_TFO(cmd)->get_task_tag(cmd), task->task_no); + cmd->se_tfo->get_task_tag(cmd), task->task_no); continue; } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, + offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); - memcpy((void *)&buffer[offset], (void *)sense_buffer, + memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); cmd->scsi_status = task->task_scsi_status; /* Automatically padded */ cmd->scsi_sense_length = (TRANSPORT_SENSE_BUFFER + offset); - printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set
SAM STATUS: 0x%02x" + pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" " and sense\n", - dev->se_hba->hba_id, TRANSPORT(dev)->name, + dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); return 0; } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return -1; } -static int transport_allocate_resources(struct se_cmd *cmd) -{ - u32 length = cmd->data_length; - - if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || - (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) - return transport_generic_get_mem(cmd, length, PAGE_SIZE); - else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) - return transport_generic_allocate_buf(cmd, length); - else - return 0; -} - static int transport_handle_reservation_conflict(struct se_cmd *cmd) { @@ -3077,12 +2792,40 @@ transport_handle_reservation_conflict(struct se_cmd *cmd) * * See spc4r17, section 7.4.6 Control Mode Page, Table 349 */ - if (SE_SESS(cmd) && - DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2) - core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl, + if (cmd->se_sess && + cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) + core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, cmd->orig_fe_lun, 0x2C, ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - return -2; + return -EINVAL; +} + +static inline long long transport_dev_end_lba(struct se_device *dev) +{ + return dev->transport->get_blocks(dev) + 1; +} + +static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + u32 sectors; + + if (dev->transport->get_device_type(dev) != TYPE_DISK) + return 0; + + sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); + + if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { + pr_err("LBA: %llu Sectors: %u exceeds" + " transport_dev_end_lba(): %llu\n", + cmd->t_task_lba, sectors, + transport_dev_end_lba(dev)); + pr_err(" We should return CHECK_CONDITION" + " but we don't yet\n"); + return 0; + } + + return sectors; } /* transport_generic_cmd_sequencer(): @@ -3099,7 +2842,7 @@ static int transport_generic_cmd_sequencer( struct se_cmd *cmd, unsigned char *cdb) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_subsystem_dev *su_dev = dev->se_sub_dev; int ret = 0, sector_ret = 0, passthrough; u32 sectors = 0, size = 0, pr_reg_type = 0; @@ -3113,12 +2856,12 @@ static int transport_generic_cmd_sequencer( &transport_nop_wait_for_tasks; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; - return -2; + return -EINVAL; } /* * Check status of Asymmetric Logical Unit Assignment port */ - ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq); + ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); if (ret != 0) { cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; /* @@ -3128,22 +2871,22 @@ static int transport_generic_cmd_sequencer( */ if (ret > 0) { #if 0 - printk(KERN_INFO "[%s]: ALUA TG Port not available," + pr_debug("[%s]: ALUA TG Port not available," " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", - CMD_TFO(cmd)->get_fabric_name(), alua_ascq); + cmd->se_tfo->get_fabric_name(), alua_ascq); #endif transport_set_sense_codes(cmd, 0x04, alua_ascq); cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; - return -2; + return -EINVAL; } goto out_invalid_cdb_field; } /* * Check status for SPC-3 Persistent Reservations */ - if 
(T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) { - if (T10_PR_OPS(su_dev)->t10_seq_non_holder( + if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { + if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( cmd, cdb, pr_reg_type) != 0) return transport_handle_reservation_conflict(cmd); /* @@ -3160,7 +2903,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); + cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_10: @@ -3169,7 +2912,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_12: @@ -3178,7 +2921,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_16: @@ -3187,7 +2930,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); + cmd->t_task_lba = transport_lba_64(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_6: @@ -3196,7 +2939,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - T_TASK(cmd)->t_task_lba = transport_lba_21(cdb); + cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_10: @@ -3205,8 +2948,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); - T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task_lba = transport_lba_32(cdb); + cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_12: @@ -3215,8 +2958,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); - T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task_lba = transport_lba_32(cdb); + cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_16: @@ -3225,22 +2968,22 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); - T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task_lba = transport_lba_64(cdb); + cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case XDWRITEREAD_10: if ((cmd->data_direction != DMA_TO_DEVICE) || - !(T_TASK(cmd)->t_tasks_bidi)) + !(cmd->t_tasks_bidi)) goto out_invalid_cdb_field; sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); if (sector_ret) goto
out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; - passthrough = (TRANSPORT(dev)->transport_type == + passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); /* * Skip the remaining assignments for TCM/PSCSI passthrough @@ -3251,7 +2994,7 @@ static int transport_generic_cmd_sequencer( * Setup BIDI XOR callback to be run during transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_tasks_fua = (cdb[1] & 0x8); break; case VARIABLE_LENGTH_CMD: service_action = get_unaligned_be16(&cdb[8]); /* * Determine if this is TCM/PSCSI device and we should disable * internal emulation for this CDB. */ - passthrough = (TRANSPORT(dev)->transport_type == + passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); switch (service_action) { @@ -3273,7 +3016,7 @@ static int transport_generic_cmd_sequencer( * XDWRITE_READ_32 logic. */ cmd->transport_split_cdb = &split_cdb_XX_32; - T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb); + cmd->t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; /* @@ -3287,14 +3030,22 @@ static int transport_generic_cmd_sequencer( * transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8); + cmd->t_tasks_fua = (cdb[10] & 0x8); break; case WRITE_SAME_32: sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; - size = transport_get_size(sectors, cdb, cmd); - T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]); + + if (sectors) + size = transport_get_size(sectors, cdb, cmd); + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" + " supported\n"); + goto out_invalid_cdb_field; + } + + cmd->t_task_lba = get_unaligned_be64(&cdb[12]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; /* @@ -3304,7 +3055,7 @@ static int transport_generic_cmd_sequencer( break; if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { - printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" + pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); goto out_invalid_cdb_field; @@ -3314,28 +3065,28 @@ static int transport_generic_cmd_sequencer( * tpws with the UNMAP=1 bit set. */ if (!(cdb[10] & 0x08)) { - printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" + pr_err("WRITE_SAME w/o UNMAP bit not" " supported for Block Discard Emulation\n"); goto out_invalid_cdb_field; } break; default: - printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" + pr_err("VARIABLE_LENGTH_CMD service action" " 0x%04x not supported\n", service_action); goto out_unsupported_cdb; } break; - case 0xa3: - if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { + case MAINTENANCE_IN: + if (dev->transport->get_device_type(dev) != TYPE_ROM) { /* MAINTENANCE_IN from SCC-2 */ /* * Check for emulated MI_REPORT_TARGET_PGS. */ if (cdb[1] == MI_REPORT_TARGET_PGS) { cmd->transport_emulate_cdb = - (T10_ALUA(su_dev)->alua_type == + (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) ?
- &core_emulate_report_target_port_groups : + core_emulate_report_target_port_groups : NULL; } size = (cdb[6] << 24) | (cdb[7] << 16) | @@ -3344,7 +3095,7 @@ static int transport_generic_cmd_sequencer( /* GPCMD_SEND_KEY from multi media commands */ size = (cdb[8] << 8) + cdb[9]; } - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case MODE_SELECT: size = cdb[4]; @@ -3356,7 +3107,7 @@ static int transport_generic_cmd_sequencer( break; case MODE_SENSE: size = cdb[4]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case MODE_SENSE_10: case GPCMD_READ_BUFFER_CAPACITY: @@ -3364,11 +3115,11 @@ static int transport_generic_cmd_sequencer( case LOG_SELECT: case LOG_SENSE: size = (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_BLOCK_LIMITS: size = READ_BLOCK_LEN; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case GPCMD_GET_CONFIGURATION: case GPCMD_READ_FORMAT_CAPACITIES: @@ -3380,11 +3131,11 @@ static int transport_generic_cmd_sequencer( case PERSISTENT_RESERVE_IN: case PERSISTENT_RESERVE_OUT: cmd->transport_emulate_cdb = - (T10_RES(su_dev)->res_type == + (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) ? - &core_scsi3_emulate_pr : NULL; + core_scsi3_emulate_pr : NULL; size = (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case GPCMD_MECHANISM_STATUS: case GPCMD_READ_DVD_STRUCTURE: @@ -3393,19 +3144,19 @@ static int transport_generic_cmd_sequencer( break; case READ_POSITION: size = READ_POSITION_LEN; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; - case 0xa4: - if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) { + case MAINTENANCE_OUT: + if (dev->transport->get_device_type(dev) != TYPE_ROM) { /* MAINTENANCE_OUT from SCC-2 * * Check for emulated MO_SET_TARGET_PGS. */ if (cdb[1] == MO_SET_TARGET_PGS) { cmd->transport_emulate_cdb = - (T10_ALUA(su_dev)->alua_type == + (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) ? - &core_emulate_set_target_port_groups : + core_emulate_set_target_port_groups : NULL; } @@ -3415,7 +3166,7 @@ static int transport_generic_cmd_sequencer( /* GPCMD_REPORT_KEY from multi media commands */ size = (cdb[8] << 8) + cdb[9]; } - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case INQUIRY: size = (cdb[3] << 8) + cdb[4]; @@ -3423,23 +3174,23 @@ static int transport_generic_cmd_sequencer( * Do implict HEAD_OF_QUEUE processing for INQUIRY. 
* See spc4r17 section 5.3 */ - if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_BUFFER: size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_CAPACITY: size = READ_CAP_LEN; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_MEDIA_SERIAL_NUMBER: case SECURITY_PROTOCOL_IN: case SECURITY_PROTOCOL_OUT: size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case SERVICE_ACTION_IN: case ACCESS_CONTROL_IN: @@ -3450,36 +3201,36 @@ static int transport_generic_cmd_sequencer( case WRITE_ATTRIBUTE: size = (cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: size = (cdb[3] << 8) | cdb[4]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ #if 0 case GPCMD_READ_CD: sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; size = (2336 * sectors); - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; #endif case READ_TOC: size = cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case REQUEST_SENSE: size = cdb[4]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_ELEMENT_STATUS: size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case WRITE_BUFFER: size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case RESERVE: case RESERVE_10: @@ -3500,9 +3251,9 @@ static int transport_generic_cmd_sequencer( * emulation disabled. */ cmd->transport_emulate_cdb = - (T10_RES(su_dev)->res_type != + (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) ? - &core_scsi2_emulate_crh : NULL; + core_scsi2_emulate_crh : NULL; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case RELEASE: @@ -3517,9 +3268,9 @@ static int transport_generic_cmd_sequencer( size = cmd->data_length; cmd->transport_emulate_cdb = - (T10_RES(su_dev)->res_type != + (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) ? 
- &core_scsi2_emulate_crh : NULL; + core_scsi2_emulate_crh : NULL; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case SYNCHRONIZE_CACHE: @@ -3529,10 +3280,10 @@ */ if (cdb[0] == SYNCHRONIZE_CACHE) { sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); - T_TASK(cmd)->t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); } else { sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); - T_TASK(cmd)->t_task_lba = transport_lba_64(cdb); + cmd->t_task_lba = transport_lba_64(cdb); } if (sector_ret) goto out_unsupported_cdb; @@ -3543,7 +3294,7 @@ /* * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() */ - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) break; /* * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation @@ -3554,32 +3305,27 @@ * Check to ensure that LBA + Range does not exceed past end of * device. */ - if (transport_get_sectors(cmd) < 0) + if (!transport_cmd_get_valid_sectors(cmd)) goto out_invalid_cdb_field; break; case UNMAP: size = get_unaligned_be16(&cdb[7]); - passthrough = (TRANSPORT(dev)->transport_type == - TRANSPORT_PLUGIN_PHBA_PDEV); - /* - * Determine if the received UNMAP used to for direct passthrough - * into Linux/SCSI with struct request via TCM/pSCSI or we are - * signaling the use of internal transport_generic_unmap() emulation - * for UNMAP -> Linux/BLOCK disbard with TCM/IBLOCK and TCM/FILEIO - * subsystem plugin backstores. - */ - if (!(passthrough)) - cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; - - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case WRITE_SAME_16: sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; - size = transport_get_size(sectors, cdb, cmd); - T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]); - passthrough = (TRANSPORT(dev)->transport_type == + + if (sectors) + size = transport_get_size(sectors, cdb, cmd); + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); + goto out_invalid_cdb_field; + } + + cmd->t_task_lba = get_unaligned_be16(&cdb[2]); + passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); /* * Determine if the received WRITE_SAME_16 is used to for direct @@ -3588,9 +3334,9 @@ * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and * TCM/FILEIO subsystem plugin backstores. */ - if (!(passthrough)) { + if (!passthrough) { if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { - printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" + pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); goto out_invalid_cdb_field; @@ -3600,7 +3346,7 @@ static int transport_generic_cmd_sequencer( * tpws with the UNMAP=1 bit set.
*/ if (!(cdb[1] & 0x08)) { - printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not " + pr_err("WRITE_SAME w/o UNMAP bit not " " supported for Block Discard Emulation\n"); goto out_invalid_cdb_field; } @@ -3625,34 +3371,34 @@ static int transport_generic_cmd_sequencer( break; case REPORT_LUNS: cmd->transport_emulate_cdb = - &transport_core_report_lun_response; + transport_core_report_lun_response; size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; /* * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS * See spc4r17 section 5.3 */ - if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; default: - printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" + pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" " 0x%02x, sending CHECK_CONDITION.\n", - CMD_TFO(cmd)->get_fabric_name(), cdb[0]); + cmd->se_tfo->get_fabric_name(), cdb[0]); cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; goto out_unsupported_cdb; } if (size != cmd->data_length) { - printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" + pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" " %u does not match SCSI CDB Length: %u for SAM Opcode:" - " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(), + " 0x%02x\n", cmd->se_tfo->get_fabric_name(), cmd->data_length, size, cdb[0]); cmd->cmd_spdtl = size; if (cmd->data_direction == DMA_TO_DEVICE) { - printk(KERN_ERR "Rejecting underflow/overflow" + pr_err("Rejecting underflow/overflow" " WRITE data\n"); goto out_invalid_cdb_field; } @@ -3660,10 +3406,10 @@ static int transport_generic_cmd_sequencer( * Reject READ_* or WRITE_* with overflow/underflow for * type SCF_SCSI_DATA_SG_IO_CDB. */ - if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { - printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" + if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { + pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" " CDB on non 512-byte sector setup subsystem" - " plugin: %s\n", TRANSPORT(dev)->name); + " plugin: %s\n", dev->transport->name); /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ goto out_invalid_cdb_field; } @@ -3678,105 +3424,22 @@ static int transport_generic_cmd_sequencer( cmd->data_length = size; } + /* Let's limit control cdbs to a page, for simplicity's sake. */ + if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && + size > PAGE_SIZE) + goto out_invalid_cdb_field; + transport_set_supported_SAM_opcode(cmd); return ret; out_unsupported_cdb: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; - return -2; + return -EINVAL; out_invalid_cdb_field: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -2; -} - -static inline void transport_release_tasks(struct se_cmd *); - -/* - * This function will copy a contiguous *src buffer into a destination - * struct scatterlist array. 
- */ -static void transport_memcpy_write_contig( - struct se_cmd *cmd, - struct scatterlist *sg_d, - unsigned char *src) -{ - u32 i = 0, length = 0, total_length = cmd->data_length; - void *dst; - - while (total_length) { - length = sg_d[i].length; - - if (length > total_length) - length = total_length; - - dst = sg_virt(&sg_d[i]); - - memcpy(dst, src, length); - - if (!(total_length -= length)) - return; - - src += length; - i++; - } -} - -/* - * This function will copy a struct scatterlist array *sg_s into a destination - * contiguous *dst buffer. - */ -static void transport_memcpy_read_contig( - struct se_cmd *cmd, - unsigned char *dst, - struct scatterlist *sg_s) -{ - u32 i = 0, length = 0, total_length = cmd->data_length; - void *src; - - while (total_length) { - length = sg_s[i].length; - - if (length > total_length) - length = total_length; - - src = sg_virt(&sg_s[i]); - - memcpy(dst, src, length); - - if (!(total_length -= length)) - return; - - dst += length; - i++; - } -} - -static void transport_memcpy_se_mem_read_contig( - struct se_cmd *cmd, - unsigned char *dst, - struct list_head *se_mem_list) -{ - struct se_mem *se_mem; - void *src; - u32 length = 0, total_length = cmd->data_length; - - list_for_each_entry(se_mem, se_mem_list, se_list) { - length = se_mem->se_len; - - if (length > total_length) - length = total_length; - - src = page_address(se_mem->se_page) + se_mem->se_off; - - memcpy(dst, src, length); - - if (!(total_length -= length)) - return; - - dst += length; - } + return -EINVAL; } /* @@ -3786,7 +3449,7 @@ static void transport_memcpy_se_mem_read_contig( */ static void transport_complete_task_attr(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_cmd *cmd_p, *cmd_tmp; int new_active_tasks = 0; @@ -3794,25 +3457,25 @@ static void transport_complete_task_attr(struct se_cmd *cmd) atomic_dec(&dev->simple_cmds); smp_mb__after_atomic_dec(); dev->dev_cur_ordered_id++; - DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" + pr_debug("Incremented dev->dev_cur_ordered_id: %u for" " SIMPLE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { atomic_dec(&dev->dev_hoq_count); smp_mb__after_atomic_dec(); dev->dev_cur_ordered_id++; - DEBUG_STA("Incremented dev_cur_ordered_id: %u for" + pr_debug("Incremented dev_cur_ordered_id: %u for" " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { spin_lock(&dev->ordered_cmd_lock); - list_del(&cmd->se_ordered_list); + list_del(&cmd->se_ordered_node); atomic_dec(&dev->dev_ordered_sync); smp_mb__after_atomic_dec(); spin_unlock(&dev->ordered_cmd_lock); dev->dev_cur_ordered_id++; - DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" + pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } /* @@ -3822,15 +3485,15 @@ static void transport_complete_task_attr(struct se_cmd *cmd) */ spin_lock(&dev->delayed_cmd_lock); list_for_each_entry_safe(cmd_p, cmd_tmp, - &dev->delayed_cmd_list, se_delayed_list) { + &dev->delayed_cmd_list, se_delayed_node) { - list_del(&cmd_p->se_delayed_list); + list_del(&cmd_p->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); - DEBUG_STA("Calling add_tasks() for" + pr_debug("Calling add_tasks() for" " cmd_p: 0x%02x Task Attr: 0x%02x" " Dormant -> Active, se_ordered_id: %u\n", - T_TASK(cmd_p)->t_task_cdb[0], + cmd_p->t_task_cdb[0], cmd_p->sam_task_attr, 
cmd_p->se_ordered_id); transport_add_tasks_from_cmd(cmd_p); @@ -3846,20 +3509,79 @@ static void transport_complete_task_attr(struct se_cmd *cmd) * to do the processing of the Active tasks. */ if (new_active_tasks != 0) - wake_up_interruptible(&dev->dev_queue_obj->thread_wq); + wake_up_interruptible(&dev->dev_queue_obj.thread_wq); +} + +static int transport_complete_qf(struct se_cmd *cmd) +{ + int ret = 0; + + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + return cmd->se_tfo->queue_status(cmd); + + switch (cmd->data_direction) { + case DMA_FROM_DEVICE: + ret = cmd->se_tfo->queue_data_in(cmd); + break; + case DMA_TO_DEVICE: + if (cmd->t_bidi_data_sg) { + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret < 0) + return ret; + } + /* Fall through for DMA_TO_DEVICE */ + case DMA_NONE: + ret = cmd->se_tfo->queue_status(cmd); + break; + default: + break; + } + + return ret; +} + +static void transport_handle_queue_full( + struct se_cmd *cmd, + struct se_device *dev, + int (*qf_callback)(struct se_cmd *)) +{ + spin_lock_irq(&dev->qf_cmd_lock); + cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; + cmd->transport_qf_callback = qf_callback; + list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); + atomic_inc(&dev->dev_qf_count); + smp_mb__after_atomic_inc(); + spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); + + schedule_work(&cmd->se_dev->qf_work_queue); } static void transport_generic_complete_ok(struct se_cmd *cmd) { - int reason = 0; + int reason = 0, ret; /* * Check if we need to move delayed/dormant tasks from cmds on the * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task * Attribute. */ - if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) transport_complete_task_attr(cmd); /* + * Check to schedule QUEUE_FULL work, or execute an existing + * cmd->transport_qf_callback() + */ + if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) + schedule_work(&cmd->se_dev->qf_work_queue); + + if (cmd->transport_qf_callback) { + ret = cmd->transport_qf_callback(cmd); + if (ret < 0) + goto queue_full; + + cmd->transport_qf_callback = NULL; + goto done; + } + /* * Check if we need to retrieve a sense buffer from * the struct se_cmd in question. */ @@ -3872,8 +3594,11 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) * a non GOOD status. */ if (cmd->scsi_status) { - transport_send_check_condition_and_sense( + ret = transport_send_check_condition_and_sense( cmd, reason, 1); + if (ret == -EAGAIN) + goto queue_full; + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; @@ -3889,53 +3614,57 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) switch (cmd->data_direction) { case DMA_FROM_DEVICE: spin_lock(&cmd->se_lun->lun_sep_lock); - if (SE_LUN(cmd)->lun_sep) { - SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += + if (cmd->se_lun->lun_sep) { + cmd->se_lun->lun_sep->sep_stats.tx_data_octets += cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); - /* - * If enabled by TCM fabirc module pre-registered SGL - * memory, perform the memcpy() from the TCM internal - * contigious buffer back to the original SGL. 
- */ - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) - transport_memcpy_write_contig(cmd, - T_TASK(cmd)->t_task_pt_sgl, - T_TASK(cmd)->t_task_buf); - CMD_TFO(cmd)->queue_data_in(cmd); + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret == -EAGAIN) + goto queue_full; break; case DMA_TO_DEVICE: spin_lock(&cmd->se_lun->lun_sep_lock); - if (SE_LUN(cmd)->lun_sep) { - SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += + if (cmd->se_lun->lun_sep) { + cmd->se_lun->lun_sep->sep_stats.rx_data_octets += cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); /* * Check if we need to send READ payload for BIDI-COMMAND */ - if (T_TASK(cmd)->t_mem_bidi_list != NULL) { + if (cmd->t_bidi_data_sg) { spin_lock(&cmd->se_lun->lun_sep_lock); - if (SE_LUN(cmd)->lun_sep) { - SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += + if (cmd->se_lun->lun_sep) { + cmd->se_lun->lun_sep->sep_stats.tx_data_octets += cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); - CMD_TFO(cmd)->queue_data_in(cmd); + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret == -EAGAIN) + goto queue_full; break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: - CMD_TFO(cmd)->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret == -EAGAIN) + goto queue_full; break; default: break; } +done: transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); + return; + +queue_full: + pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," + " data_direction: %d\n", cmd, cmd->data_direction); + transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); } static void transport_free_dev_tasks(struct se_cmd *cmd) @@ -3943,9 +3672,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) struct se_task *task, *task_tmp; unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &T_TASK(cmd)->t_task_list, t_list) { + &cmd->t_task_list, t_list) { if (atomic_read(&task->task_active)) continue; @@ -3954,75 +3683,40 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) list_del(&task->t_list); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (task->se_dev) - TRANSPORT(task->se_dev)->free_task(task); + task->se_dev->transport->free_task(task); else - printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", + pr_err("task[%u] - task->se_dev is NULL\n", task->task_no); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } -static inline void transport_free_pages(struct se_cmd *cmd) +static inline void transport_free_sgl(struct scatterlist *sgl, int nents) { - struct se_mem *se_mem, *se_mem_tmp; - int free_page = 1; - - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) - free_page = 0; - if (cmd->se_dev->transport->do_se_mem_map) - free_page = 0; + struct scatterlist *sg; + int count; - if (T_TASK(cmd)->t_task_buf) { - kfree(T_TASK(cmd)->t_task_buf); - T_TASK(cmd)->t_task_buf = NULL; - return; - } + for_each_sg(sgl, sg, nents, count) + __free_page(sg_page(sg)); - /* - * Caller will handle releasing of struct se_mem. 
- */ - if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) - return; + kfree(sgl); +} - if (!(T_TASK(cmd)->t_tasks_se_num)) +static inline void transport_free_pages(struct se_cmd *cmd) +{ + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) return; - list_for_each_entry_safe(se_mem, se_mem_tmp, - T_TASK(cmd)->t_mem_list, se_list) { - /* - * We only release call __free_page(struct se_mem->se_page) when - * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, - */ - if (free_page) - __free_page(se_mem->se_page); - - list_del(&se_mem->se_list); - kmem_cache_free(se_mem_cache, se_mem); - } - - if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { - list_for_each_entry_safe(se_mem, se_mem_tmp, - T_TASK(cmd)->t_mem_bidi_list, se_list) { - /* - * We only release call __free_page(struct se_mem->se_page) when - * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, - */ - if (free_page) - __free_page(se_mem->se_page); - - list_del(&se_mem->se_list); - kmem_cache_free(se_mem_cache, se_mem); - } - } + transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); + cmd->t_data_sg = NULL; + cmd->t_data_nents = 0; - kfree(T_TASK(cmd)->t_mem_bidi_list); - T_TASK(cmd)->t_mem_bidi_list = NULL; - kfree(T_TASK(cmd)->t_mem_list); - T_TASK(cmd)->t_mem_list = NULL; - T_TASK(cmd)->t_tasks_se_num = 0; + transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); + cmd->t_bidi_data_sg = NULL; + cmd->t_bidi_data_nents = 0; } static inline void transport_release_tasks(struct se_cmd *cmd) @@ -4034,23 +3728,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_fe_count)) { + if (!atomic_dec_and_test(&cmd->t_fe_count)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 1; } } - if (atomic_read(&T_TASK(cmd)->t_se_count)) { - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + if (atomic_read(&cmd->t_se_count)) { + if (!atomic_dec_and_test(&cmd->t_se_count)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 1; } } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } @@ -4062,68 +3756,57 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) if (transport_dec_and_check(cmd)) return; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!atomic_read(&cmd->transport_dev_active)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto free_pages; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_release_tasks(cmd); free_pages: transport_free_pages(cmd); transport_free_se_cmd(cmd); - CMD_TFO(cmd)->release_cmd_direct(cmd); + cmd->se_tfo->release_cmd(cmd); } -static int transport_generic_remove( - struct se_cmd *cmd, - int release_to_pool, - int session_reinstatement) +static int +transport_generic_remove(struct se_cmd *cmd, int 
session_reinstatement) { unsigned long flags; - if (!(T_TASK(cmd))) - goto release_cmd; - if (transport_dec_and_check(cmd)) { if (session_reinstatement) { - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } return 1; } - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!atomic_read(&cmd->transport_dev_active)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto free_pages; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_release_tasks(cmd); + free_pages: transport_free_pages(cmd); - -release_cmd: - if (release_to_pool) { - transport_release_cmd_to_pool(cmd); - } else { - transport_free_se_cmd(cmd); - CMD_TFO(cmd)->release_cmd_direct(cmd); - } - + transport_release_cmd(cmd); return 0; } /* - * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map + * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of + * allocating in the core. * @cmd: Associated se_cmd descriptor * @mem: SGL style memory for TCM WRITE / READ * @sg_mem_num: Number of SGL elements @@ -4135,614 +3818,163 @@ release_cmd: */ int transport_generic_map_mem_to_cmd( struct se_cmd *cmd, - struct scatterlist *mem, - u32 sg_mem_num, - struct scatterlist *mem_bidi_in, - u32 sg_mem_bidi_num) + struct scatterlist *sgl, + u32 sgl_count, + struct scatterlist *sgl_bidi, + u32 sgl_bidi_count) { - u32 se_mem_cnt_out = 0; - int ret; - - if (!(mem) || !(sg_mem_num)) + if (!sgl || !sgl_count) return 0; - /* - * Passed *mem will contain a list_head containing preformatted - * struct se_mem elements... - */ - if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { - if ((mem_bidi_in) || (sg_mem_bidi_num)) { - printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" - " with BIDI-COMMAND\n"); - return -ENOSYS; - } - T_TASK(cmd)->t_mem_list = (struct list_head *)mem; - T_TASK(cmd)->t_tasks_se_num = sg_mem_num; - cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; - return 0; - } - /* - * Otherwise, assume the caller is passing a struct scatterlist - * array from include/linux/scatterlist.h - */ if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { - /* - * For CDB using TCM struct se_mem linked list scatterlist memory - * processed into a TCM struct se_subsystem_dev, we do the mapping - * from the passed physical memory to struct se_mem->se_page here. 
- */ - T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_list)) - return -ENOMEM; - ret = transport_map_sg_to_mem(cmd, - T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); - if (ret < 0) - return -ENOMEM; + cmd->t_data_sg = sgl; + cmd->t_data_nents = sgl_count; - T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; - /* - * Setup BIDI READ list of struct se_mem elements - */ - if ((mem_bidi_in) && (sg_mem_bidi_num)) { - T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_bidi_list)) { - kfree(T_TASK(cmd)->t_mem_list); - return -ENOMEM; - } - se_mem_cnt_out = 0; - - ret = transport_map_sg_to_mem(cmd, - T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, - &se_mem_cnt_out); - if (ret < 0) { - kfree(T_TASK(cmd)->t_mem_list); - return -ENOMEM; - } - - T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; + if (sgl_bidi && sgl_bidi_count) { + cmd->t_bidi_data_sg = sgl_bidi; + cmd->t_bidi_data_nents = sgl_bidi_count; } cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; - - } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { - if (mem_bidi_in || sg_mem_bidi_num) { - printk(KERN_ERR "BIDI-Commands not supported using " - "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); - return -ENOSYS; - } - /* - * For incoming CDBs using a contiguous buffer internall with TCM, - * save the passed struct scatterlist memory. After TCM storage object - * processing has completed for this struct se_cmd, TCM core will call - * transport_memcpy_[write,read]_contig() as necessary from - * transport_generic_complete_ok() and transport_write_pending() in order - * to copy the TCM buffer to/from the original passed *mem in SGL -> - * struct scatterlist format. - */ - cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; - T_TASK(cmd)->t_task_pt_sgl = mem; } return 0; } EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); - -static inline long long transport_dev_end_lba(struct se_device *dev) -{ - return dev->transport->get_blocks(dev) + 1; -} - -static int transport_get_sectors(struct se_cmd *cmd) -{ - struct se_device *dev = SE_DEV(cmd); - - T_TASK(cmd)->t_tasks_sectors = - (cmd->data_length / DEV_ATTRIB(dev)->block_size); - if (!(T_TASK(cmd)->t_tasks_sectors)) - T_TASK(cmd)->t_tasks_sectors = 1; - - if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) - return 0; - - if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > - transport_dev_end_lba(dev)) { - printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" - " transport_dev_end_lba(): %llu\n", - T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, - transport_dev_end_lba(dev)); - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; - return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; - } - - return 0; -} - static int transport_new_cmd_obj(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); - u32 task_cdbs = 0, rc; - - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { - task_cdbs++; - T_TASK(cmd)->t_task_cdbs++; - } else { - int set_counts = 1; + struct se_device *dev = cmd->se_dev; + u32 task_cdbs; + u32 rc; + int set_counts = 1; - /* - * Setup any BIDI READ tasks and memory from - * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks - * are queued first for the non pSCSI passthrough case. 
- */ - if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && - (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { - rc = transport_generic_get_cdb_count(cmd, - T_TASK(cmd)->t_task_lba, - T_TASK(cmd)->t_tasks_sectors, - DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, - set_counts); - if (!(rc)) { - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return PYX_TRANSPORT_LU_COMM_FAILURE; - } - set_counts = 0; - } - /* - * Setup the tasks and memory from T_TASK(cmd)->t_mem_list - * Note for BIDI transfers this will contain the WRITE payload - */ - task_cdbs = transport_generic_get_cdb_count(cmd, - T_TASK(cmd)->t_task_lba, - T_TASK(cmd)->t_tasks_sectors, - cmd->data_direction, T_TASK(cmd)->t_mem_list, - set_counts); - if (!(task_cdbs)) { + /* + * Setup any BIDI READ tasks and memory from + * cmd->t_mem_bidi_list so the READ struct se_tasks + * are queued first for the non pSCSI passthrough case. + */ + if (cmd->t_bidi_data_sg && + (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { + rc = transport_allocate_tasks(cmd, + cmd->t_task_lba, + DMA_FROM_DEVICE, + cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents); + if (rc <= 0) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return PYX_TRANSPORT_LU_COMM_FAILURE; } - T_TASK(cmd)->t_task_cdbs += task_cdbs; - -#if 0 - printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" - " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, - T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, - T_TASK(cmd)->t_task_cdbs); -#endif - } - - atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); - atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); - atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); - return 0; -} - -static struct list_head *transport_init_se_mem_list(void) -{ - struct list_head *se_mem_list; - - se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); - if (!(se_mem_list)) { - printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); - return NULL; + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); + set_counts = 0; } - INIT_LIST_HEAD(se_mem_list); - - return se_mem_list; -} - -static int -transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) -{ - unsigned char *buf; - struct se_mem *se_mem; - - T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_list)) - return -ENOMEM; - /* - * If the device uses memory mapping this is enough. 
+ * Setup the tasks and memory from cmd->t_mem_list + * Note for BIDI transfers this will contain the WRITE payload */ - if (cmd->se_dev->transport->do_se_mem_map) - return 0; - - /* - * Setup BIDI-COMMAND READ list of struct se_mem elements - */ - if (T_TASK(cmd)->t_tasks_bidi) { - T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_bidi_list)) { - kfree(T_TASK(cmd)->t_mem_list); - return -ENOMEM; - } + task_cdbs = transport_allocate_tasks(cmd, + cmd->t_task_lba, + cmd->data_direction, + cmd->t_data_sg, + cmd->t_data_nents); + if (task_cdbs <= 0) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return PYX_TRANSPORT_LU_COMM_FAILURE; } - while (length) { - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - goto out; - } - -/* #warning FIXME Allocate contigous pages for struct se_mem elements */ - se_mem->se_page = alloc_pages(GFP_KERNEL, 0); - if (!(se_mem->se_page)) { - printk(KERN_ERR "alloc_pages() failed\n"); - goto out; - } - - buf = kmap_atomic(se_mem->se_page, KM_IRQ0); - if (!(buf)) { - printk(KERN_ERR "kmap_atomic() failed\n"); - goto out; - } - INIT_LIST_HEAD(&se_mem->se_list); - se_mem->se_len = (length > dma_size) ? dma_size : length; - memset(buf, 0, se_mem->se_len); - kunmap_atomic(buf, KM_IRQ0); - - list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); - T_TASK(cmd)->t_tasks_se_num++; - - DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" - " Offset(%u)\n", se_mem->se_page, se_mem->se_len, - se_mem->se_off); - - length -= se_mem->se_len; + if (set_counts) { + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); } - DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", - T_TASK(cmd)->t_tasks_se_num); + cmd->t_task_list_num = task_cdbs; + atomic_set(&cmd->t_task_cdbs_left, task_cdbs); + atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); + atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); return 0; -out: - if (se_mem) - __free_pages(se_mem->se_page, 0); - kmem_cache_free(se_mem_cache, se_mem); - return -1; } -u32 transport_calc_sg_num( - struct se_task *task, - struct se_mem *in_se_mem, - u32 task_offset) +void *transport_kmap_first_data_page(struct se_cmd *cmd) { - struct se_cmd *se_cmd = task->task_se_cmd; - struct se_device *se_dev = SE_DEV(se_cmd); - struct se_mem *se_mem = in_se_mem; - struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); - u32 sg_length, task_size = task->task_size, task_sg_num_padded; - - while (task_size != 0) { - DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" - " se_mem->se_off(%u) task_offset(%u)\n", - se_mem->se_page, se_mem->se_len, - se_mem->se_off, task_offset); - - if (task_offset == 0) { - if (task_size >= se_mem->se_len) { - sg_length = se_mem->se_len; - - if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - } else { - sg_length = task_size; - task_size -= sg_length; - goto next; - } + struct scatterlist *sg = cmd->t_data_sg; - DEBUG_SC("sg_length(%u) task_size(%u)\n", - sg_length, task_size); - } else { - if ((se_mem->se_len - task_offset) > task_size) { - sg_length = task_size; - task_size -= sg_length; - goto next; - } else { - sg_length = (se_mem->se_len - task_offset); - - if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - } - - 
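
The kmap helpers added above are meant for short control payloads: the sequencer now caps SCF_SCSI_CONTROL_SG_IO_CDB allocation lengths at PAGE_SIZE, so a core-allocated response fits in the first SG page. A minimal sketch of the intended calling pattern, assuming a surrounding emulation routine with cmd and dev in scope (the payload byte written here is purely illustrative):

	unsigned char *buf;

	/* Map the first page of the command's data SGL, honoring sg->offset */
	buf = transport_kmap_first_data_page(cmd);

	/* ... fill at most PAGE_SIZE bytes of response payload ... */
	buf[0] = dev->transport->get_device_type(dev);

	transport_kunmap_first_data_page(cmd);
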
DEBUG_SC("sg_length(%u) task_size(%u)\n", - sg_length, task_size); - - task_offset = 0; - } - task_size -= sg_length; -next: - DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", - task->task_no, task_size); - - task->task_sg_num++; - } - /* - * Check if the fabric module driver is requesting that all - * struct se_task->task_sg[] be chained together.. If so, - * then allocate an extra padding SG entry for linking and - * marking the end of the chained SGL. - */ - if (tfo->task_sg_chaining) { - task_sg_num_padded = (task->task_sg_num + 1); - task->task_padded_sg = 1; - } else - task_sg_num_padded = task->task_sg_num; - - task->task_sg = kzalloc(task_sg_num_padded * - sizeof(struct scatterlist), GFP_KERNEL); - if (!(task->task_sg)) { - printk(KERN_ERR "Unable to allocate memory for" - " task->task_sg\n"); - return 0; - } - sg_init_table(&task->task_sg[0], task_sg_num_padded); + BUG_ON(!sg); /* - * Setup task->task_sg_bidi for SCSI READ payload for - * TCM/pSCSI passthrough if present for BIDI-COMMAND + * We need to take into account a possible offset here for fabrics like + * tcm_loop who may be using a contig buffer from the SCSI midlayer for + * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() */ - if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && - (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { - task->task_sg_bidi = kzalloc(task_sg_num_padded * - sizeof(struct scatterlist), GFP_KERNEL); - if (!(task->task_sg_bidi)) { - printk(KERN_ERR "Unable to allocate memory for" - " task->task_sg_bidi\n"); - return 0; - } - sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); - } - /* - * For the chaining case, setup the proper end of SGL for the - * initial submission struct task into struct se_subsystem_api. - * This will be cleared later by transport_do_task_sg_chain() - */ - if (task->task_padded_sg) { - sg_mark_end(&task->task_sg[task->task_sg_num - 1]); - /* - * Added the 'if' check before marking end of bi-directional - * scatterlist (which gets created only in case of request - * (RD + WR). 
- */ - if (task->task_sg_bidi) - sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); - } - - DEBUG_SC("Successfully allocated task->task_sg_num(%u)," - " task_sg_num_padded(%u)\n", task->task_sg_num, - task_sg_num_padded); - - return task->task_sg_num; + return kmap(sg_page(sg)) + sg->offset; } +EXPORT_SYMBOL(transport_kmap_first_data_page); -static inline int transport_set_tasks_sectors_disk( - struct se_task *task, - struct se_device *dev, - unsigned long long lba, - u32 sectors, - int *max_sectors_set) +void transport_kunmap_first_data_page(struct se_cmd *cmd) { - if ((lba + sectors) > transport_dev_end_lba(dev)) { - task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); - - if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { - task->task_sectors = DEV_ATTRIB(dev)->max_sectors; - *max_sectors_set = 1; - } - } else { - if (sectors > DEV_ATTRIB(dev)->max_sectors) { - task->task_sectors = DEV_ATTRIB(dev)->max_sectors; - *max_sectors_set = 1; - } else - task->task_sectors = sectors; - } - - return 0; + kunmap(sg_page(cmd->t_data_sg)); } +EXPORT_SYMBOL(transport_kunmap_first_data_page); -static inline int transport_set_tasks_sectors_non_disk( - struct se_task *task, - struct se_device *dev, - unsigned long long lba, - u32 sectors, - int *max_sectors_set) +static int +transport_generic_get_mem(struct se_cmd *cmd) { - if (sectors > DEV_ATTRIB(dev)->max_sectors) { - task->task_sectors = DEV_ATTRIB(dev)->max_sectors; - *max_sectors_set = 1; - } else - task->task_sectors = sectors; + u32 length = cmd->data_length; + unsigned int nents; + struct page *page; + int i = 0; - return 0; -} + nents = DIV_ROUND_UP(length, PAGE_SIZE); + cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); + if (!cmd->t_data_sg) + return -ENOMEM; -static inline int transport_set_tasks_sectors( - struct se_task *task, - struct se_device *dev, - unsigned long long lba, - u32 sectors, - int *max_sectors_set) -{ - return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? 
- transport_set_tasks_sectors_disk(task, dev, lba, sectors, - max_sectors_set) : - transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, - max_sectors_set); -} + cmd->t_data_nents = nents; + sg_init_table(cmd->t_data_sg, nents); -static int transport_map_sg_to_mem( - struct se_cmd *cmd, - struct list_head *se_mem_list, - void *in_mem, - u32 *se_mem_cnt) -{ - struct se_mem *se_mem; - struct scatterlist *sg; - u32 sg_count = 1, cmd_size = cmd->data_length; + while (length) { + u32 page_len = min_t(u32, length, PAGE_SIZE); + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto out; - if (!in_mem) { - printk(KERN_ERR "No source scatterlist\n"); - return -1; + sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); + length -= page_len; + i++; } - sg = (struct scatterlist *)in_mem; - - while (cmd_size) { - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -1; - } - INIT_LIST_HEAD(&se_mem->se_list); - DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" - " sg_page: %p offset: %d length: %d\n", cmd_size, - sg_page(sg), sg->offset, sg->length); - - se_mem->se_page = sg_page(sg); - se_mem->se_off = sg->offset; - - if (cmd_size > sg->length) { - se_mem->se_len = sg->length; - sg = sg_next(sg); - sg_count++; - } else - se_mem->se_len = cmd_size; - - cmd_size -= se_mem->se_len; - - DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", - *se_mem_cnt, cmd_size); - DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", - se_mem->se_page, se_mem->se_off, se_mem->se_len); + return 0; - list_add_tail(&se_mem->se_list, se_mem_list); - (*se_mem_cnt)++; +out: + while (i >= 0) { + __free_page(sg_page(&cmd->t_data_sg[i])); + i--; } - - DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" - " struct se_mem\n", sg_count, *se_mem_cnt); - - if (sg_count != *se_mem_cnt) - BUG(); - - return 0; + kfree(cmd->t_data_sg); + cmd->t_data_sg = NULL; + return -ENOMEM; } -/* transport_map_mem_to_sg(): - * - * - */ -int transport_map_mem_to_sg( - struct se_task *task, - struct list_head *se_mem_list, - void *in_mem, - struct se_mem *in_se_mem, - struct se_mem **out_se_mem, - u32 *se_mem_cnt, - u32 *task_offset) +/* Reduce sectors if they are too long for the device */ +static inline sector_t transport_limit_task_sectors( + struct se_device *dev, + unsigned long long lba, + sector_t sectors) { - struct se_cmd *se_cmd = task->task_se_cmd; - struct se_mem *se_mem = in_se_mem; - struct scatterlist *sg = (struct scatterlist *)in_mem; - u32 task_size = task->task_size, sg_no = 0; + sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); - if (!sg) { - printk(KERN_ERR "Unable to locate valid struct" - " scatterlist pointer\n"); - return -1; - } - - while (task_size != 0) { - /* - * Setup the contigious array of scatterlists for - * this struct se_task. 
- */ - sg_assign_page(sg, se_mem->se_page); - - if (*task_offset == 0) { - sg->offset = se_mem->se_off; - - if (task_size >= se_mem->se_len) { - sg->length = se_mem->se_len; + if (dev->transport->get_device_type(dev) == TYPE_DISK) + if ((lba + sectors) > transport_dev_end_lba(dev)) + sectors = ((transport_dev_end_lba(dev) - lba) + 1); - if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) { - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - (*se_mem_cnt)++; - } - } else { - sg->length = task_size; - /* - * Determine if we need to calculate an offset - * into the struct se_mem on the next go around.. - */ - task_size -= sg->length; - if (!(task_size)) - *task_offset = sg->length; - - goto next; - } - - } else { - sg->offset = (*task_offset + se_mem->se_off); - - if ((se_mem->se_len - *task_offset) > task_size) { - sg->length = task_size; - /* - * Determine if we need to calculate an offset - * into the struct se_mem on the next go around.. - */ - task_size -= sg->length; - if (!(task_size)) - *task_offset += sg->length; - - goto next; - } else { - sg->length = (se_mem->se_len - *task_offset); - - if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) { - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - (*se_mem_cnt)++; - } - } - - *task_offset = 0; - } - task_size -= sg->length; -next: - DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" - " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, - sg_page(sg), sg->length, sg->offset, task_size, *task_offset); - - sg_no++; - if (!(task_size)) - break; - - sg = sg_next(sg); - - if (task_size > se_cmd->data_length) - BUG(); - } - *out_se_mem = se_mem; - - DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" - " SGs\n", task->task_no, *se_mem_cnt, sg_no); - - return 0; + return sectors; } + /* * This function can be used by HW target mode drivers to create a linked * scatterlist from all contiguously allocated struct se_task->task_sg[]. @@ -4751,334 +3983,236 @@ next: */ void transport_do_task_sg_chain(struct se_cmd *cmd) { - struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; - struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; - struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; + struct scatterlist *sg_first = NULL; + struct scatterlist *sg_prev = NULL; + int sg_prev_nents = 0; + struct scatterlist *sg; struct se_task *task; - struct target_core_fabric_ops *tfo = CMD_TFO(cmd); - u32 task_sg_num = 0, sg_count = 0; + u32 chained_nents = 0; int i; - if (tfo->task_sg_chaining == 0) { - printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" - " %s\n", tfo->get_fabric_name()); - dump_stack(); - return; - } + BUG_ON(!cmd->se_tfo->task_sg_chaining); + /* * Walk the struct se_task list and setup scatterlist chains - * for each contiguosly allocated struct se_task->task_sg[]. + * for each contiguously allocated struct se_task->task_sg[]. 
*/ - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { - if (!(task->task_sg) || !(task->task_padded_sg)) + list_for_each_entry(task, &cmd->t_task_list, t_list) { + if (!task->task_sg) continue; - if (sg_head && sg_link) { - sg_head_cur = &task->task_sg[0]; - sg_link_cur = &task->task_sg[task->task_sg_num]; - /* - * Either add chain or mark end of scatterlist - */ - if (!(list_is_last(&task->t_list, - &T_TASK(cmd)->t_task_list))) { - /* - * Clear existing SGL termination bit set in - * transport_calc_sg_num(), see sg_mark_end() - */ - sg_end_cur = &task->task_sg[task->task_sg_num - 1]; - sg_end_cur->page_link &= ~0x02; - - sg_chain(sg_head, task_sg_num, sg_head_cur); - sg_count += task->task_sg_num; - task_sg_num = (task->task_sg_num + 1); - } else { - sg_chain(sg_head, task_sg_num, sg_head_cur); - sg_count += task->task_sg_num; - task_sg_num = task->task_sg_num; - } + BUG_ON(!task->task_padded_sg); - sg_head = sg_head_cur; - sg_link = sg_link_cur; - continue; - } - sg_head = sg_first = &task->task_sg[0]; - sg_link = &task->task_sg[task->task_sg_num]; - /* - * Check for single task.. - */ - if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { - /* - * Clear existing SGL termination bit set in - * transport_calc_sg_num(), see sg_mark_end() - */ - sg_end = &task->task_sg[task->task_sg_num - 1]; - sg_end->page_link &= ~0x02; - sg_count += task->task_sg_num; - task_sg_num = (task->task_sg_num + 1); + if (!sg_first) { + sg_first = task->task_sg; + chained_nents = task->task_sg_nents; } else { - sg_count += task->task_sg_num; - task_sg_num = task->task_sg_num; + sg_chain(sg_prev, sg_prev_nents, task->task_sg); + chained_nents += task->task_sg_nents; } + + sg_prev = task->task_sg; + sg_prev_nents = task->task_sg_nents; } /* * Setup the starting pointer and total t_tasks_sg_linked_no including * padding SGs for linking and to mark the end. */ - T_TASK(cmd)->t_tasks_sg_chained = sg_first; - T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; + cmd->t_tasks_sg_chained = sg_first; + cmd->t_tasks_sg_chained_no = chained_nents; - DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and" - " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained, - T_TASK(cmd)->t_tasks_sg_chained_no); + pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" + " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, + cmd->t_tasks_sg_chained_no); - for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, - T_TASK(cmd)->t_tasks_sg_chained_no, i) { + for_each_sg(cmd->t_tasks_sg_chained, sg, + cmd->t_tasks_sg_chained_no, i) { - DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n", - i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic); + pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", + i, sg, sg_page(sg), sg->length, sg->offset); if (sg_is_chain(sg)) - DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); + pr_debug("SG: %p sg_is_chain=1\n", sg); if (sg_is_last(sg)) - DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); + pr_debug("SG: %p sg_is_last=1\n", sg); } } EXPORT_SYMBOL(transport_do_task_sg_chain); -static int transport_do_se_mem_map( - struct se_device *dev, - struct se_task *task, - struct list_head *se_mem_list, - void *in_mem, - struct se_mem *in_se_mem, - struct se_mem **out_se_mem, - u32 *se_mem_cnt, - u32 *task_offset_in) -{ - u32 task_offset = *task_offset_in; - int ret = 0; - /* - * se_subsystem_api_t->do_se_mem_map is used when internal allocation - * has been done by the transport plugin. 
- */ - if (TRANSPORT(dev)->do_se_mem_map) { - ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, - in_mem, in_se_mem, out_se_mem, se_mem_cnt, - task_offset_in); - if (ret == 0) - T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; - - return ret; - } - - BUG_ON(list_empty(se_mem_list)); - /* - * This is the normal path for all normal non BIDI and BIDI-COMMAND - * WRITE payloads.. If we need to do BIDI READ passthrough for - * TCM/pSCSI the first call to transport_do_se_mem_map -> - * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the - * allocation for task->task_sg_bidi, and the subsequent call to - * transport_do_se_mem_map() from transport_generic_get_cdb_count() - */ - if (!(task->task_sg_bidi)) { - /* - * Assume default that transport plugin speaks preallocated - * scatterlists. - */ - if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) - return -1; - /* - * struct se_task->task_sg now contains the struct scatterlist array. - */ - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, - in_se_mem, out_se_mem, se_mem_cnt, - task_offset_in); - } - /* - * Handle the se_mem_list -> struct task->task_sg_bidi - * memory map for the extra BIDI READ payload - */ - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, - in_se_mem, out_se_mem, se_mem_cnt, - task_offset_in); -} - -static u32 transport_generic_get_cdb_count( +/* + * Break up cmd into chunks transport can handle + */ +static int transport_allocate_data_tasks( struct se_cmd *cmd, unsigned long long lba, - u32 sectors, enum dma_data_direction data_direction, - struct list_head *mem_list, - int set_counts) + struct scatterlist *sgl, + unsigned int sgl_nents) { unsigned char *cdb = NULL; struct se_task *task; - struct se_mem *se_mem = NULL, *se_mem_lout = NULL; - struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; - struct se_device *dev = SE_DEV(cmd); - int max_sectors_set = 0, ret; - u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; - - if (!mem_list) { - printk(KERN_ERR "mem_list is NULL in transport_generic_get" - "_cdb_count()\n"); - return 0; - } - /* - * While using RAMDISK_DR backstores is the only case where - * mem_list will ever be empty at this point. 
- */ - if (!(list_empty(mem_list))) - se_mem = list_entry(mem_list->next, struct se_mem, se_list); - /* - * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to - * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation - */ - if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && - !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && - (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) - se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, - struct se_mem, se_list); - - while (sectors) { - DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", - CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, - transport_dev_end_lba(dev)); + struct se_device *dev = cmd->se_dev; + unsigned long flags; + sector_t sectors; + int task_count, i, ret; + sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; + u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; + struct scatterlist *sg; + struct scatterlist *cmd_sg; - task = transport_generic_get_task(cmd, data_direction); - if (!(task)) - goto out; + WARN_ON(cmd->data_length % sector_size); + sectors = DIV_ROUND_UP(cmd->data_length, sector_size); + task_count = DIV_ROUND_UP(sectors, dev_max_sectors); - transport_set_tasks_sectors(task, dev, lba, sectors, - &max_sectors_set); + cmd_sg = sgl; + for (i = 0; i < task_count; i++) { + unsigned int task_size; + int count; + + task = transport_generic_get_task(cmd, data_direction); + if (!task) + return -ENOMEM; task->task_lba = lba; - lba += task->task_sectors; - sectors -= task->task_sectors; - task->task_size = (task->task_sectors * - DEV_ATTRIB(dev)->block_size); - - cdb = TRANSPORT(dev)->get_cdb(task); - if ((cdb)) { - memcpy(cdb, T_TASK(cmd)->t_task_cdb, - scsi_command_size(T_TASK(cmd)->t_task_cdb)); - cmd->transport_split_cdb(task->task_lba, - &task->task_sectors, cdb); - } + task->task_sectors = min(sectors, dev_max_sectors); + task->task_size = task->task_sectors * sector_size; - /* - * Perform the SE OBJ plugin and/or Transport plugin specific - * mapping for T_TASK(cmd)->t_mem_list. And setup the - * task->task_sg and if necessary task->task_sg_bidi - */ - ret = transport_do_se_mem_map(dev, task, mem_list, - NULL, se_mem, &se_mem_lout, &se_mem_cnt, - &task_offset_in); - if (ret < 0) - goto out; + cdb = dev->transport->get_cdb(task); + BUG_ON(!cdb); + + memcpy(cdb, cmd->t_task_cdb, + scsi_command_size(cmd->t_task_cdb)); + + /* Update new cdb with updated lba/sectors */ + cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); - se_mem = se_mem_lout; /* - * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi - * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI - * - * Note that the first call to transport_do_se_mem_map() above will - * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() - * -> transport_calc_sg_num(), and the second here will do the - * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. + * Check if the fabric module driver is requesting that all + * struct se_task->task_sg[] be chained together.. If so, + * then allocate an extra padding SG entry for linking and + * marking the end of the chained SGL. + * Possibly over-allocate task sgl size by using cmd sgl size. + * It's so much easier and only a waste when task_count > 1. + * That is extremely rare. 
*/ - if (task->task_sg_bidi != NULL) { - ret = transport_do_se_mem_map(dev, task, - T_TASK(cmd)->t_mem_bidi_list, NULL, - se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, - &task_offset_in); - if (ret < 0) - goto out; + task->task_sg_nents = sgl_nents; + if (cmd->se_tfo->task_sg_chaining) { + task->task_sg_nents++; + task->task_padded_sg = 1; + } - se_mem_bidi = se_mem_bidi_lout; + task->task_sg = kmalloc(sizeof(struct scatterlist) * + task->task_sg_nents, GFP_KERNEL); + if (!task->task_sg) { + cmd->se_dev->transport->free_task(task); + return -ENOMEM; } - task_cdbs++; - DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", - task_cdbs, task->task_sg_num); + sg_init_table(task->task_sg, task->task_sg_nents); - if (max_sectors_set) { - max_sectors_set = 0; - continue; + task_size = task->task_size; + + /* Build new sgl, only up to task_size */ + for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { + if (cmd_sg->length > task_size) + break; + + *sg = *cmd_sg; + task_size -= cmd_sg->length; + cmd_sg = sg_next(cmd_sg); } - if (!sectors) - break; - } + lba += task->task_sectors; + sectors -= task->task_sectors; - if (set_counts) { - atomic_inc(&T_TASK(cmd)->t_fe_count); - atomic_inc(&T_TASK(cmd)->t_se_count); + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_add_tail(&task->t_list, &cmd->t_task_list); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } + /* + * Now perform the memory map of task->task_sg[] into backend + * subsystem memory.. + */ + list_for_each_entry(task, &cmd->t_task_list, t_list) { + if (atomic_read(&task->task_sent)) + continue; + if (!dev->transport->map_data_SG) + continue; - DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", - CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) - ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); + ret = dev->transport->map_data_SG(task); + if (ret < 0) + return 0; + } - return task_cdbs; -out: - return 0; + return task_count; } static int -transport_map_control_cmd_to_task(struct se_cmd *cmd) +transport_allocate_control_task(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; unsigned char *cdb; struct se_task *task; - int ret; + unsigned long flags; + int ret = 0; task = transport_generic_get_task(cmd, cmd->data_direction); if (!task) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + return -ENOMEM; - cdb = TRANSPORT(dev)->get_cdb(task); - if (cdb) - memcpy(cdb, cmd->t_task->t_task_cdb, - scsi_command_size(cmd->t_task->t_task_cdb)); + cdb = dev->transport->get_cdb(task); + BUG_ON(!cdb); + memcpy(cdb, cmd->t_task_cdb, + scsi_command_size(cmd->t_task_cdb)); + task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, + GFP_KERNEL); + if (!task->task_sg) { + cmd->se_dev->transport->free_task(task); + return -ENOMEM; + } + + memcpy(task->task_sg, cmd->t_data_sg, + sizeof(struct scatterlist) * cmd->t_data_nents); task->task_size = cmd->data_length; - task->task_sg_num = - (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 
1 : 0; + task->task_sg_nents = cmd->t_data_nents; - atomic_inc(&cmd->t_task->t_fe_count); - atomic_inc(&cmd->t_task->t_se_count); + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_add_tail(&task->t_list, &cmd->t_task_list); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { - struct se_mem *se_mem = NULL, *se_mem_lout = NULL; - u32 se_mem_cnt = 0, task_offset = 0; - - if (!list_empty(T_TASK(cmd)->t_mem_list)) - se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, - struct se_mem, se_list); - - ret = transport_do_se_mem_map(dev, task, - cmd->t_task->t_mem_list, NULL, se_mem, - &se_mem_lout, &se_mem_cnt, &task_offset); - if (ret < 0) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; - - if (dev->transport->map_task_SG) - return dev->transport->map_task_SG(task); - return 0; - } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { - if (dev->transport->map_task_non_SG) - return dev->transport->map_task_non_SG(task); - return 0; + if (dev->transport->map_control_SG) + ret = dev->transport->map_control_SG(task); } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { if (dev->transport->cdb_none) - return dev->transport->cdb_none(task); - return 0; + ret = dev->transport->cdb_none(task); } else { + pr_err("target: Unknown control cmd type!\n"); BUG(); - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; } + + /* Success! Return number of tasks allocated */ + if (ret == 0) + return 1; + return ret; +} + +static u32 transport_allocate_tasks( + struct se_cmd *cmd, + unsigned long long lba, + enum dma_data_direction data_direction, + struct scatterlist *sgl, + unsigned int sgl_nents) +{ + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) + return transport_allocate_data_tasks(cmd, lba, data_direction, + sgl, sgl_nents); + else + return transport_allocate_control_task(cmd); + } + /* transport_generic_new_cmd(): Called from transport_processing_thread() * * Allocate storage transport resources from a set of values predefined @@ -5088,64 +4222,33 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) /* * Generate struct se_task(s) and/or their payloads for this CDB. */ -static int transport_generic_new_cmd(struct se_cmd *cmd) +int transport_generic_new_cmd(struct se_cmd *cmd) { - struct se_portal_group *se_tpg; - struct se_task *task; - struct se_device *dev = SE_DEV(cmd); int ret = 0; /* * Determine if the TCM fabric module has already allocated physical * memory, and is directly calling transport_generic_map_mem_to_cmd() - * to setup beforehand the linked list of physical memory at - * T_TASK(cmd)->t_mem_list of struct se_mem->se_page + * beforehand. */ - if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { - ret = transport_allocate_resources(cmd); + if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && + cmd->data_length) { + ret = transport_generic_get_mem(cmd); if (ret < 0) return ret; } - - ret = transport_get_sectors(cmd); - if (ret < 0) - return ret; - + /* + * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for + * control or data CDB types, and perform the map to backend subsystem + * code from SGL memory allocated here by transport_generic_get_mem(), or + * via pre-existing SGL memory setup explicitly by fabric module code with + * transport_generic_map_mem_to_cmd().
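+ *
+ * Either way the end result is the same layout: cmd->t_data_sg[] is
+ * carved by transport_allocate_tasks() into per-task task->task_sg[]
+ * slices queued on cmd->t_task_list.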
+ */ ret = transport_new_cmd_obj(cmd); if (ret < 0) return ret; - /* - * Determine if the calling TCM fabric module is talking to - * Linux/NET via kernel sockets and needs to allocate a - * struct iovec array to complete the struct se_cmd - */ - se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; - if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { - ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); - if (ret < 0) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; - } - - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { - if (atomic_read(&task->task_sent)) - continue; - if (!dev->transport->map_task_SG) - continue; - - ret = dev->transport->map_task_SG(task); - if (ret < 0) - return ret; - } - } else { - ret = transport_map_control_cmd_to_task(cmd); - if (ret < 0) - return ret; - } - - /* - * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.. + * For WRITEs, let the fabric know its buffer is ready.. * This WRITE struct se_cmd (and all of its associated struct se_task's) * will be added to the struct se_device execution queue after its WRITE * data has arrived. (ie: It gets handled by the transport processing @@ -5162,6 +4265,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) transport_execute_tasks(cmd); return 0; } +EXPORT_SYMBOL(transport_generic_new_cmd); /* transport_generic_process_write(): * @@ -5169,68 +4273,15 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) */ void transport_generic_process_write(struct se_cmd *cmd) { -#if 0 - /* - * Copy SCSI Presented DTL sector(s) from received buffers allocated to - * original EDTL - */ - if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { - if (!T_TASK(cmd)->t_tasks_se_num) { - unsigned char *dst, *buf = - (unsigned char *)T_TASK(cmd)->t_task_buf; - - dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL); - if (!(dst)) { - printk(KERN_ERR "Unable to allocate memory for" - " WRITE underflow\n"); - transport_generic_request_failure(cmd, NULL, - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); - return; - } - memcpy(dst, buf, cmd->cmd_spdtl); - - kfree(T_TASK(cmd)->t_task_buf); - T_TASK(cmd)->t_task_buf = dst; - } else { - struct scatterlist *sg = - (struct scatterlist *sg)T_TASK(cmd)->t_task_buf; - struct scatterlist *orig_sg; - - orig_sg = kzalloc(sizeof(struct scatterlist) * - T_TASK(cmd)->t_tasks_se_num, - GFP_KERNEL))) { - if (!(orig_sg)) { - printk(KERN_ERR "Unable to allocate memory" - " for WRITE underflow\n"); - transport_generic_request_failure(cmd, NULL, - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); - return; - } - - memcpy(orig_sg, T_TASK(cmd)->t_task_buf, - sizeof(struct scatterlist) * - T_TASK(cmd)->t_tasks_se_num); - - cmd->data_length = cmd->cmd_spdtl; - /* - * FIXME, clear out original struct se_task and state - * information. 
- */ - if (transport_generic_new_cmd(cmd) < 0) { - transport_generic_request_failure(cmd, NULL, - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); - kfree(orig_sg); - return; - } - - transport_memcpy_write_sg(cmd, orig_sg); - } - } -#endif transport_execute_tasks(cmd); } EXPORT_SYMBOL(transport_generic_process_write); +static int transport_write_pending_qf(struct se_cmd *cmd) +{ + return cmd->se_tfo->write_pending(cmd); +} + /* transport_generic_write_pending(): * * @@ -5240,24 +4291,26 @@ static int transport_generic_write_pending(struct se_cmd *cmd) unsigned long flags; int ret; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = TRANSPORT_WRITE_PENDING; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - /* - * For the TCM control CDBs using a contiguous buffer, do the memcpy - * from the passed Linux/SCSI struct scatterlist located at - * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at - * T_TASK(se_cmd)->t_task_buf. - */ - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) - transport_memcpy_read_contig(cmd, - T_TASK(cmd)->t_task_buf, - T_TASK(cmd)->t_task_pt_sgl); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (cmd->transport_qf_callback) { + ret = cmd->transport_qf_callback(cmd); + if (ret == -EAGAIN) + goto queue_full; + else if (ret < 0) + return ret; + + cmd->transport_qf_callback = NULL; + return 0; + } + /* * Clear the se_cmd for WRITE_PENDING status in order to set - * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data + * cmd->t_transport_active=0 so that transport_generic_handle_data * can be called from HW target mode interrupt code. This is safe - * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending + * to be called with transport_off=1 before the cmd->se_tfo->write_pending * because the se_cmd->se_lun pointer is not being cleared. */ transport_cmd_check_stop(cmd, 1, 0); @@ -5266,26 +4319,30 @@ static int transport_generic_write_pending(struct se_cmd *cmd) * Call the fabric write_pending function here to let the * frontend know that WRITE buffers are ready. 
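 * If the fabric returns -EAGAIN the command is parked through
 * transport_handle_queue_full() and the write_pending callback is
 * retried later via transport_write_pending_qf() (see the queue_full:
 * label below).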
*/ - ret = CMD_TFO(cmd)->write_pending(cmd); - if (ret < 0) + ret = cmd->se_tfo->write_pending(cmd); + if (ret == -EAGAIN) + goto queue_full; + else if (ret < 0) return ret; return PYX_TRANSPORT_WRITE_PENDING; + +queue_full: + pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); + cmd->t_state = TRANSPORT_COMPLETE_QF_WP; + transport_handle_queue_full(cmd, cmd->se_dev, + transport_write_pending_qf); + return ret; } -/* transport_release_cmd_to_pool(): - * - * - */ -void transport_release_cmd_to_pool(struct se_cmd *cmd) +void transport_release_cmd(struct se_cmd *cmd) { - BUG_ON(!T_TASK(cmd)); - BUG_ON(!CMD_TFO(cmd)); + BUG_ON(!cmd->se_tfo); transport_free_se_cmd(cmd); - CMD_TFO(cmd)->release_cmd_to_pool(cmd); + cmd->se_tfo->release_cmd(cmd); } -EXPORT_SYMBOL(transport_release_cmd_to_pool); +EXPORT_SYMBOL(transport_release_cmd); /* transport_generic_free_cmd(): * * @@ -5294,19 +4351,18 @@ EXPORT_SYMBOL(transport_release_cmd_to_pool); void transport_generic_free_cmd( struct se_cmd *cmd, int wait_for_tasks, - int release_to_pool, int session_reinstatement) { - if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd)) - transport_release_cmd_to_pool(cmd); + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) + transport_release_cmd(cmd); else { core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); - if (SE_LUN(cmd)) { + if (cmd->se_lun) { #if 0 - printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" - " SE_LUN(cmd)\n", cmd, - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("cmd: %p ITT: 0x%08x contains" + " cmd->se_lun\n", cmd, + cmd->se_tfo->get_task_tag(cmd)); #endif transport_lun_remove_cmd(cmd); } @@ -5316,8 +4372,7 @@ void transport_generic_free_cmd( transport_free_dev_tasks(cmd); - transport_generic_remove(cmd, release_to_pool, - session_reinstatement); + transport_generic_remove(cmd, session_reinstatement); } } EXPORT_SYMBOL(transport_generic_free_cmd); @@ -5343,43 +4398,36 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) * If the frontend has already requested this struct se_cmd to * be stopped, we can safely ignore this struct se_cmd.
*/ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { - atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); - DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" - " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_transport_stop)) { + atomic_set(&cmd->transport_lun_stop, 0); + pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" + " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_cmd_check_stop(cmd, 1, 0); - return -1; + return -EPERM; } - atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&cmd->transport_lun_fe_stop, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); + wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); ret = transport_stop_tasks_for_cmd(cmd); - DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" - " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); + pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" + " %d\n", cmd, cmd->t_task_list_num, ret); if (!ret) { - DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", - CMD_TFO(cmd)->get_task_tag(cmd)); - wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); - DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", + cmd->se_tfo->get_task_tag(cmd)); + wait_for_completion(&cmd->transport_lun_stop_comp); + pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", + cmd->se_tfo->get_task_tag(cmd)); } - transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); return 0; } -/* #define DEBUG_CLEAR_LUN */ -#ifdef DEBUG_CLEAR_LUN -#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) -#else -#define DEBUG_CLEAR_L(x...) -#endif - static void __transport_clear_lun_from_sessions(struct se_lun *lun) { struct se_cmd *cmd = NULL; @@ -5389,66 +4437,59 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) * Initiator Port. */ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - while (!list_empty_careful(&lun->lun_cmd_list)) { - cmd = list_entry(lun->lun_cmd_list.next, - struct se_cmd, se_lun_list); - list_del(&cmd->se_lun_list); - - if (!(T_TASK(cmd))) { - printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" - "[i,t]_state: %u/%u\n", - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); - BUG(); - } - atomic_set(&T_TASK(cmd)->transport_lun_active, 0); + while (!list_empty(&lun->lun_cmd_list)) { + cmd = list_first_entry(&lun->lun_cmd_list, + struct se_cmd, se_lun_node); + list_del(&cmd->se_lun_node); + + atomic_set(&cmd->transport_lun_active, 0); /* * This will notify iscsi_target_transport.c: * transport_cmd_check_stop() that a LUN shutdown is in * progress for the iscsi_cmd_t. 
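 * With transport_lun_stop set under t_state_lock, the call to
 * transport_lun_wait_for_tasks() below can then sleep on
 * transport_lun_stop_comp until the command has quiesced.
 */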
*/ - spin_lock(&T_TASK(cmd)->t_state_lock); - DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" + spin_lock(&cmd->t_state_lock); + pr_debug("SE_LUN[%d] - Setting cmd->transport" "_lun_stop for ITT: 0x%08x\n", - SE_LUN(cmd)->unpacked_lun, - CMD_TFO(cmd)->get_task_tag(cmd)); - atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); - spin_unlock(&T_TASK(cmd)->t_state_lock); + cmd->se_lun->unpacked_lun, + cmd->se_tfo->get_task_tag(cmd)); + atomic_set(&cmd->transport_lun_stop, 1); + spin_unlock(&cmd->t_state_lock); spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); - if (!(SE_LUN(cmd))) { - printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); + if (!cmd->se_lun) { + pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); BUG(); } /* * If the Storage engine still owns the iscsi_cmd_t, determine * and/or stop its context. */ - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" - "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" + "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, + cmd->se_tfo->get_task_tag(cmd)); - if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { + if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; } - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" + pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" "_wait_for_tasks(): SUCCESS\n", - SE_LUN(cmd)->unpacked_lun, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_lun->unpacked_lun, + cmd->se_tfo->get_task_tag(cmd)); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); + if (!atomic_read(&cmd->transport_dev_active)) { + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); goto check_cond; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); transport_free_dev_tasks(cmd); /* @@ -5465,24 +4506,24 @@ check_cond: * be released, notify the waiting thread now that LU has * finished accessing it. 
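 * That handshake uses transport_lun_fe_stop_comp: it is completed
 * here under check_cond and waited on from the frontend side in
 * transport_generic_wait_for_tasks().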
*/ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); - if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { - DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" + spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); + if (atomic_read(&cmd->transport_lun_fe_stop)) { + pr_debug("SE_LUN[%d] - Detected FE stop for" " struct se_cmd: %p ITT: 0x%08x\n", lun->unpacked_lun, - cmd, CMD_TFO(cmd)->get_task_tag(cmd)); + cmd, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); transport_cmd_check_stop(cmd, 1, 0); - complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); + complete(&cmd->transport_lun_fe_stop_comp); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; } - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", - lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", + lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); } spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); @@ -5502,11 +4543,11 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) { struct task_struct *kt; - kt = kthread_run(transport_clear_lun_thread, (void *)lun, + kt = kthread_run(transport_clear_lun_thread, lun, "tcm_cl_%u", lun->unpacked_lun); if (IS_ERR(kt)) { - printk(KERN_ERR "Unable to start clear_lun thread\n"); - return -1; + pr_err("Unable to start clear_lun thread\n"); + return PTR_ERR(kt); } wait_for_completion(&lun->lun_shutdown_comp); @@ -5528,20 +4569,20 @@ static void transport_generic_wait_for_tasks( if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) return; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); /* * If we are already stopped due to an external event (ie: LUN shutdown) * sleep until the connection can have the passed struct se_cmd back. - * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by + * The cmd->transport_lun_fe_stop_comp will be completed by * transport_clear_lun_from_sessions() once the ConfigFS context caller * has completed its operation on the struct se_cmd. */ - if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { - DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" - " wait_for_completion(&T_TASK(cmd)transport_lun_fe" + if (atomic_read(&cmd->transport_lun_stop)) { + pr_debug("wait_for_tasks: Stopping" + " wait_for_completion(&cmd->transport_lun_fe" "_stop_comp); for ITT: 0x%08x\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); /* * There is a special case for WRITES where a FE exception + * LUN shutdown means ConfigFS context is still sleeping on @@ -5549,10 +4590,10 @@ static void transport_generic_wait_for_tasks( * We go ahead and up transport_lun_stop_comp just to be sure * here.
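 * (Completing transport_lun_stop_comp with no waiter is harmless; the
 * completion simply stays signalled for a later wait.)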
*/ - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - complete(&T_TASK(cmd)->transport_lun_stop_comp); - wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + complete(&cmd->transport_lun_stop_comp); + wait_for_completion(&cmd->transport_lun_fe_stop_comp); + spin_lock_irqsave(&cmd->t_state_lock, flags); transport_all_task_dev_remove_state(cmd); /* @@ -5560,44 +4601,44 @@ static void transport_generic_wait_for_tasks( * struct se_cmd, now owns the structure and can be released through * normal means below. */ - DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" - " wait_for_completion(&T_TASK(cmd)transport_lun_fe_" + pr_debug("wait_for_tasks: Stopped" + " wait_for_completion(&cmd->transport_lun_fe_" "stop_comp); for ITT: 0x%08x\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); - atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); + atomic_set(&cmd->transport_lun_stop, 0); } - if (!atomic_read(&T_TASK(cmd)->t_transport_active) || - atomic_read(&T_TASK(cmd)->t_transport_aborted)) + if (!atomic_read(&cmd->t_transport_active) || + atomic_read(&cmd->t_transport_aborted)) goto remove; - atomic_set(&T_TASK(cmd)->t_transport_stop, 1); + atomic_set(&cmd->t_transport_stop, 1); - DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" + pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" - " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, + " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); + wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); - wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); + wait_for_completion(&cmd->t_transport_stop_comp); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - atomic_set(&T_TASK(cmd)->t_transport_active, 0); - atomic_set(&T_TASK(cmd)->t_transport_stop, 0); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_set(&cmd->t_transport_active, 0); + atomic_set(&cmd->t_transport_stop, 0); - DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" - "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("wait_for_tasks: Stopped wait_for_completion(" + "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", + cmd->se_tfo->get_task_tag(cmd)); remove: - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (!remove_cmd) return; - transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); + transport_generic_free_cmd(cmd, 0, session_reinstatement); } static int transport_get_sense_codes( @@ -5632,13 +4673,13 @@ int transport_send_check_condition_and_sense( int offset; u8 asc = 0, ascq = 0; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if
(!reason && from_transport) goto after_reason; @@ -5651,7 +4692,7 @@ int transport_send_check_condition_and_sense( * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE * from include/scsi/scsi_cmnd.h */ - offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, + offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); /* * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses @@ -5788,8 +4829,7 @@ int transport_send_check_condition_and_sense( cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; after_reason: - CMD_TFO(cmd)->queue_status(cmd); - return 0; + return cmd->se_tfo->queue_status(cmd); } EXPORT_SYMBOL(transport_send_check_condition_and_sense); @@ -5797,18 +4837,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) { int ret = 0; - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) { - if (!(send_status) || + if (atomic_read(&cmd->t_transport_aborted) != 0) { + if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) return 1; #if 0 - printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" + pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" " status for CDB: 0x%02x ITT: 0x%08x\n", - T_TASK(cmd)->t_task_cdb[0], - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->t_task_cdb[0], + cmd->se_tfo->get_task_tag(cmd)); #endif cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; - CMD_TFO(cmd)->queue_status(cmd); + cmd->se_tfo->queue_status(cmd); ret = 1; } return ret; @@ -5824,8 +4864,8 @@ void transport_send_task_abort(struct se_cmd *cmd) * queued back to fabric module by transport_check_aborted_status(). */ if (cmd->data_direction == DMA_TO_DEVICE) { - if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) { - atomic_inc(&T_TASK(cmd)->t_transport_aborted); + if (cmd->se_tfo->write_pending_status(cmd) != 0) { + atomic_inc(&cmd->t_transport_aborted); smp_mb__after_atomic_inc(); cmd->scsi_status = SAM_STAT_TASK_ABORTED; transport_new_cmd_failure(cmd); @@ -5834,11 +4874,11 @@ void transport_send_task_abort(struct se_cmd *cmd) } cmd->scsi_status = SAM_STAT_TASK_ABORTED; #if 0 - printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," - " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0], - CMD_TFO(cmd)->get_task_tag(cmd)); + pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," + " ITT: 0x%08x\n", cmd->t_task_cdb[0], + cmd->se_tfo->get_task_tag(cmd)); #endif - CMD_TFO(cmd)->queue_status(cmd); + cmd->se_tfo->queue_status(cmd); } /* transport_generic_do_tmr(): @@ -5847,14 +4887,12 @@ void transport_send_task_abort(struct se_cmd *cmd) */ int transport_generic_do_tmr(struct se_cmd *cmd) { - struct se_cmd *ref_cmd; - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_tmr_req *tmr = cmd->se_tmr_req; int ret; switch (tmr->function) { case TMR_ABORT_TASK: - ref_cmd = tmr->ref_cmd; tmr->response = TMR_FUNCTION_REJECTED; break; case TMR_ABORT_TASK_SET: @@ -5874,14 +4912,14 @@ int transport_generic_do_tmr(struct se_cmd *cmd) tmr->response = TMR_FUNCTION_REJECTED; break; default: - printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", + pr_err("Unknown TMR function: 0x%02x.\n", tmr->function); tmr->response = TMR_FUNCTION_REJECTED; break; } cmd->t_state = TRANSPORT_ISTATE_PROCESSING; - CMD_TFO(cmd)->queue_tm_rsp(cmd); + cmd->se_tfo->queue_tm_rsp(cmd); transport_cmd_check_stop(cmd, 2, 0); return 0; @@ -5911,62 +4949,54 @@ transport_get_task_from_state_list(struct se_device *dev) static void transport_processing_shutdown(struct se_device *dev) { struct se_cmd *cmd; - struct se_queue_req *qr; struct se_task *task; -
u8 state; unsigned long flags; /* * Empty the struct se_device's struct se_task state list. */ spin_lock_irqsave(&dev->execute_task_lock, flags); while ((task = transport_get_task_from_state_list(dev))) { - if (!(TASK_CMD(task))) { - printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); + if (!task->task_se_cmd) { + pr_err("task->task_se_cmd is NULL!\n"); continue; } - cmd = TASK_CMD(task); + cmd = task->task_se_cmd; - if (!T_TASK(cmd)) { - printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" - " %p ITT: 0x%08x\n", task, cmd, - CMD_TFO(cmd)->get_task_tag(cmd)); - continue; - } spin_unlock_irqrestore(&dev->execute_task_lock, flags); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); - DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," - " i_state/def_i_state: %d/%d, t_state/def_t_state:" + pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," + " i_state: %d, t_state/def_t_state:" " %d/%d cdb: 0x%02x\n", cmd, task, - CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn, - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state, + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state, - T_TASK(cmd)->t_task_cdb[0]); - DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" + cmd->t_task_cdb[0]); + pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" " %d t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", - CMD_TFO(cmd)->get_task_tag(cmd), - T_TASK(cmd)->t_task_cdbs, - atomic_read(&T_TASK(cmd)->t_task_cdbs_left), - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), - atomic_read(&T_TASK(cmd)->t_transport_active), - atomic_read(&T_TASK(cmd)->t_transport_stop), - atomic_read(&T_TASK(cmd)->t_transport_sent)); + cmd->se_tfo->get_task_tag(cmd), + cmd->t_task_list_num, + atomic_read(&cmd->t_task_cdbs_left), + atomic_read(&cmd->t_task_cdbs_sent), + atomic_read(&cmd->t_transport_active), + atomic_read(&cmd->t_transport_stop), + atomic_read(&cmd->t_transport_sent)); if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); - DEBUG_DO("Waiting for task: %p to shutdown for dev:" + pr_debug("Waiting for task: %p to shutdown for dev:" " %p\n", task, dev); wait_for_completion(&task->task_stop_comp); - DEBUG_DO("Completed task: %p shutdown for dev: %p\n", + pr_debug("Completed task: %p shutdown for dev: %p\n", task, dev); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_dec(&cmd->t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -5976,72 +5006,72 @@ static void transport_processing_shutdown(struct se_device *dev) } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { + if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); - DEBUG_DO("Skipping task: %p, dev: %p for" + pr_debug("Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); + atomic_read(&cmd->t_task_cdbs_ex_left)); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - if (atomic_read(&T_TASK(cmd)->t_transport_active)) { - DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" + if (atomic_read(&cmd->t_transport_active)) { + 
pr_debug("got t_transport_active = 1 for task: %p, dev:" " %p\n", task, dev); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + if (atomic_read(&cmd->t_fe_count)) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); transport_send_check_condition_and_sense( cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", + pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", task, dev); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + if (atomic_read(&cmd->t_fe_count)) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); transport_send_check_condition_and_sense(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_state_lock, flags); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -6050,18 +5080,12 @@ static void transport_processing_shutdown(struct se_device *dev) /* * Empty the struct se_device's struct se_cmd list. 
*/ - spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); - while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) { - spin_unlock_irqrestore( - &dev->dev_queue_obj->cmd_queue_lock, flags); - cmd = (struct se_cmd *)qr->cmd; - state = qr->state; - kfree(qr); - - DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", - cmd, state); - - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { + + pr_debug("From Device Queue: cmd: %p t_state: %d\n", + cmd, cmd->t_state); + + if (atomic_read(&cmd->t_fe_count)) { transport_send_check_condition_and_sense(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); @@ -6070,11 +5094,9 @@ static void transport_processing_shutdown(struct se_device *dev) } else { transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } - spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); } - spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); } /* transport_processing_thread(): @@ -6083,16 +5105,15 @@ static void transport_processing_shutdown(struct se_device *dev) */ static int transport_processing_thread(void *param) { - int ret, t_state; + int ret; struct se_cmd *cmd; struct se_device *dev = (struct se_device *) param; - struct se_queue_req *qr; set_user_nice(current, -20); while (!kthread_should_stop()) { - ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq, - atomic_read(&dev->dev_queue_obj->queue_cnt) || + ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, + atomic_read(&dev->dev_queue_obj.queue_cnt) || kthread_should_stop()); if (ret < 0) goto out; @@ -6108,22 +5129,18 @@ static int transport_processing_thread(void *param) get_cmd: __transport_execute_tasks(dev); - qr = transport_get_qr_from_queue(dev->dev_queue_obj); - if (!(qr)) + cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); + if (!cmd) continue; - cmd = (struct se_cmd *)qr->cmd; - t_state = qr->state; - kfree(qr); - - switch (t_state) { + switch (cmd->t_state) { case TRANSPORT_NEW_CMD_MAP: - if (!(CMD_TFO(cmd)->new_cmd_map)) { - printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is" + if (!cmd->se_tfo->new_cmd_map) { + pr_err("cmd->se_tfo->new_cmd_map is" " NULL for TRANSPORT_NEW_CMD_MAP\n"); BUG(); } - ret = CMD_TFO(cmd)->new_cmd_map(cmd); + ret = cmd->se_tfo->new_cmd_map(cmd); if (ret < 0) { cmd->transport_error_status = ret; transport_generic_request_failure(cmd, NULL, @@ -6134,7 +5151,9 @@ get_cmd: /* Fall through */ case TRANSPORT_NEW_CMD: ret = transport_generic_new_cmd(cmd); - if (ret < 0) { + if (ret == -EAGAIN) + break; + else if (ret < 0) { cmd->transport_error_status = ret; transport_generic_request_failure(cmd, NULL, 0, (cmd->data_direction != @@ -6149,10 +5168,10 @@ get_cmd: transport_generic_complete_ok(cmd); break; case TRANSPORT_REMOVE: - transport_generic_remove(cmd, 1, 0); + transport_generic_remove(cmd, 0); break; case TRANSPORT_FREE_CMD_INTR: - transport_generic_free_cmd(cmd, 0, 1, 0); + transport_generic_free_cmd(cmd, 0, 0); break; case TRANSPORT_PROCESS_TMR: transport_generic_do_tmr(cmd); @@ -6164,13 +5183,16 @@ get_cmd: transport_stop_all_task_timers(cmd); transport_generic_request_timeout(cmd); break; + case TRANSPORT_COMPLETE_QF_WP: + transport_generic_write_pending(cmd); + break; default: - printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" + pr_err("Unknown t_state: %d deferred_t_state:" " %d for ITT: 0x%08x i_state: %d on SE LUN:" - " %u\n", t_state, 
cmd->deferred_t_state, - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), - SE_LUN(cmd)->unpacked_lun); + " %u\n", cmd->t_state, cmd->deferred_t_state, + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), + cmd->se_lun->unpacked_lun); BUG(); } diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index df355176a37..31e3c652527 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -49,15 +49,15 @@ int core_scsi3_ua_check( struct se_session *sess = cmd->se_sess; struct se_node_acl *nacl; - if (!(sess)) + if (!sess) return 0; nacl = sess->se_node_acl; - if (!(nacl)) + if (!nacl) return 0; deve = &nacl->device_list[cmd->orig_fe_lun]; - if (!(atomic_read(&deve->ua_count))) + if (!atomic_read(&deve->ua_count)) return 0; /* * From sam4r14, section 5.14 Unit attention condition: @@ -80,10 +80,10 @@ int core_scsi3_ua_check( case REQUEST_SENSE: return 0; default: - return -1; + return -EINVAL; } - return -1; + return -EINVAL; } int core_scsi3_ua_allocate( @@ -97,13 +97,13 @@ int core_scsi3_ua_allocate( /* * PASSTHROUGH OPS */ - if (!(nacl)) - return -1; + if (!nacl) + return -EINVAL; ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); - if (!(ua)) { - printk(KERN_ERR "Unable to allocate struct se_ua\n"); - return -1; + if (!ua) { + pr_err("Unable to allocate struct se_ua\n"); + return -ENOMEM; } INIT_LIST_HEAD(&ua->ua_dev_list); INIT_LIST_HEAD(&ua->ua_nacl_list); @@ -177,9 +177,9 @@ int core_scsi3_ua_allocate( spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); - printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" + pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" " 0x%02x, ASCQ: 0x%02x\n", - TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun, + nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, asc, ascq); atomic_inc(&deve->ua_count); @@ -208,23 +208,23 @@ void core_scsi3_ua_for_check_condition( u8 *asc, u8 *ascq) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_dev; struct se_dev_entry *deve; struct se_session *sess = cmd->se_sess; struct se_node_acl *nacl; struct se_ua *ua = NULL, *ua_p; int head = 1; - if (!(sess)) + if (!sess) return; nacl = sess->se_node_acl; - if (!(nacl)) + if (!nacl) return; spin_lock_irq(&nacl->device_list_lock); deve = &nacl->device_list[cmd->orig_fe_lun]; - if (!(atomic_read(&deve->ua_count))) { + if (!atomic_read(&deve->ua_count)) { spin_unlock_irq(&nacl->device_list_lock); return; } @@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition( * highest priority UNIT_ATTENTION and ASC/ASCQ without * clearing it. */ - if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) { + if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) { *asc = ua->ua_asc; *ascq = ua->ua_ascq; break; @@ -264,13 +264,13 @@ void core_scsi3_ua_for_check_condition( spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); - printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" + pr_debug("[%s]: %s UNIT ATTENTION condition with" " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" " reported ASC: 0x%02x, ASCQ: 0x%02x\n", - TPG_TFO(nacl->se_tpg)->get_fabric_name(), - (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" : - "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl, - cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq); + nacl->se_tpg->se_tpg_tfo->get_fabric_name(), + (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? 
"Reporting" : + "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, + cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); } int core_scsi3_ua_clear_for_request_sense( @@ -284,18 +284,18 @@ int core_scsi3_ua_clear_for_request_sense( struct se_ua *ua = NULL, *ua_p; int head = 1; - if (!(sess)) - return -1; + if (!sess) + return -EINVAL; nacl = sess->se_node_acl; - if (!(nacl)) - return -1; + if (!nacl) + return -EINVAL; spin_lock_irq(&nacl->device_list_lock); deve = &nacl->device_list[cmd->orig_fe_lun]; - if (!(atomic_read(&deve->ua_count))) { + if (!atomic_read(&deve->ua_count)) { spin_unlock_irq(&nacl->device_list_lock); - return -1; + return -EPERM; } /* * The highest priority Unit Attentions are placed at the head of the @@ -323,10 +323,10 @@ int core_scsi3_ua_clear_for_request_sense( spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); - printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" + pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," - " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(), + " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), cmd->orig_fe_lun, *asc, *ascq); - return (head) ? -1 : 0; + return (head) ? -EPERM : 0; } diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile index 7a5c2b64cf6..20b14bb087c 100644 --- a/drivers/target/tcm_fc/Makefile +++ b/drivers/target/tcm_fc/Makefile @@ -1,15 +1,6 @@ -EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \ - -I$(srctree)/drivers/scsi/ \ - -I$(srctree)/include/scsi/ \ - -I$(srctree)/drivers/target/tcm_fc/ - -tcm_fc-y += tfc_cmd.o \ - tfc_conf.o \ - tfc_io.o \ - tfc_sess.o +tcm_fc-y += tfc_cmd.o \ + tfc_conf.o \ + tfc_io.o \ + tfc_sess.o obj-$(CONFIG_TCM_FC) += tcm_fc.o - -ifdef CONFIGFS_TCM_FC_DEBUG -EXTRA_CFLAGS += -DTCM_FC_DEBUG -endif diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index 7b82f1b7fef..f7fff7ed63c 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -23,30 +23,6 @@ #define FT_TPG_NAMELEN 32 /* max length of TPG name */ #define FT_LUN_NAMELEN 32 /* max length of LUN name */ -/* - * Debug options. - */ -#define FT_DEBUG_CONF 0x01 /* configuration messages */ -#define FT_DEBUG_SESS 0x02 /* session messages */ -#define FT_DEBUG_TM 0x04 /* TM operations */ -#define FT_DEBUG_IO 0x08 /* I/O commands */ -#define FT_DEBUG_DATA 0x10 /* Data transfer */ - -extern unsigned int ft_debug_logging; /* debug options */ - -#define FT_DEBUG(mask, fmt, args...) \ - do { \ - if (ft_debug_logging & (mask)) \ - printk(KERN_INFO "tcm_fc: %s: " fmt, \ - __func__, ##args); \ - } while (0) - -#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args) -#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args) -#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args) -#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args) -#define FT_DATA_DBG(fmt, args...) 
FT_DEBUG(FT_DEBUG_DATA, fmt, ##args) - struct ft_transport_id { __u8 format; __u8 __resvd1[7]; @@ -195,7 +171,6 @@ int ft_write_pending(struct se_cmd *); int ft_write_pending_status(struct se_cmd *); u32 ft_get_task_tag(struct se_cmd *); int ft_get_cmd_state(struct se_cmd *); -void ft_new_cmd_failure(struct se_cmd *); int ft_queue_tm_resp(struct se_cmd *); int ft_is_state_remove(struct se_cmd *); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 328ea2bccbd..a6bfb6deba9 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -59,33 +59,30 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) struct fc_exch *ep; struct fc_seq *sp; struct se_cmd *se_cmd; - struct se_mem *mem; - struct se_transport_task *task; - - if (!(ft_debug_logging & FT_DEBUG_IO)) - return; + struct scatterlist *sg; + int count; se_cmd = &cmd->se_cmd; - printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n", + pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); - printk(KERN_INFO "%s: cmd %p cdb %p\n", + pr_debug("%s: cmd %p cdb %p\n", caller, cmd, cmd->cdb); - printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); - - task = T_TASK(se_cmd); - printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", - caller, cmd, task, task->t_tasks_se_num, - task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); - if (task->t_mem_list) - list_for_each_entry(mem, task->t_mem_list, se_list) - printk(KERN_INFO "%s: cmd %p mem %p page %p " - "len 0x%x off 0x%x\n", - caller, cmd, mem, - mem->se_page, mem->se_len, mem->se_off); + pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); + + pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", + caller, cmd, se_cmd->t_data_nents, + se_cmd->data_length, se_cmd->se_cmd_flags); + + for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) + pr_debug("%s: cmd %p sg %p page %p " + "len 0x%x off 0x%x\n", + caller, cmd, sg, + sg_page(sg), sg->length, sg->offset); + sp = cmd->seq; if (sp) { ep = fc_seq_exch(sp); - printk(KERN_INFO "%s: cmd %p sid %x did %x " + pr_debug("%s: cmd %p sid %x did %x " "ox_id %x rx_id %x seq_id %x e_stat %x\n", caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, sp->id, ep->esb_stat); @@ -96,15 +93,17 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) { - struct se_queue_obj *qobj; + struct ft_tpg *tpg = sess->tport->tpg; + struct se_queue_obj *qobj = &tpg->qobj; unsigned long flags; qobj = &sess->tport->tpg->qobj; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); atomic_inc(&qobj->queue_cnt); - wake_up_interruptible(&qobj->thread_wq); + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + wake_up_process(tpg->thread); } static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) @@ -149,7 +148,7 @@ void ft_release_cmd(struct se_cmd *se_cmd) void ft_check_stop_free(struct se_cmd *se_cmd) { - transport_generic_free_cmd(se_cmd, 0, 1, 0); + transport_generic_free_cmd(se_cmd, 0, 0); } /* @@ -256,15 +255,14 @@ int ft_write_pending(struct se_cmd *se_cmd) (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { /* - * Map se_mem list to scatterlist, so that - * DDP can be setup. DDP setup function require - * scatterlist. 
se_mem_list is internal to - * TCM/LIO target + * cmd may have been broken up into multiple + * tasks. Link their sgs together so we can + * operate on them all at once. */ transport_do_task_sg_chain(se_cmd); - cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained; + cmd->sg = se_cmd->t_tasks_sg_chained; cmd->sg_cnt = - T_TASK(se_cmd)->t_tasks_sg_chained_no; + se_cmd->t_tasks_sg_chained_no; } if (cmd->sg && lport->tt.ddp_target(lport, ep->xid, cmd->sg, @@ -295,12 +293,6 @@ int ft_is_state_remove(struct se_cmd *se_cmd) return 0; /* XXX TBD */ } -void ft_new_cmd_failure(struct se_cmd *se_cmd) -{ - /* XXX TBD */ - printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd); -} - /* * FC sequence response handler for follow-on sequences (data) and aborts. */ @@ -313,7 +305,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) /* XXX need to find cmd if queued */ cmd->se_cmd.t_state = TRANSPORT_REMOVE; cmd->seq = NULL; - transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); + transport_generic_free_cmd(&cmd->se_cmd, 0, 0); return; } @@ -327,10 +319,10 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) case FC_RCTL_DD_SOL_CTL: /* transfer ready */ case FC_RCTL_DD_DATA_DESC: /* transfer ready */ default: - printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", + pr_debug("%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); fc_frame_free(fp); - transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); + transport_generic_free_cmd(&cmd->se_cmd, 0, 0); break; } } @@ -352,7 +344,7 @@ static void ft_send_resp_status(struct fc_lport *lport, struct fcp_resp_rsp_info *info; fh = fc_frame_header_get(rx_fp); - FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n", + pr_debug("FCP error response: did %x oxid %x status %x code %x\n", ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); len = sizeof(*fcp); if (status == SAM_STAT_GOOD) @@ -433,15 +425,15 @@ static void ft_send_tm(struct ft_cmd *cmd) * FCP4r01 indicates having a combination of * tm_flags set is invalid. 
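 * (That is, fc_tm_flags must have exactly one bit set; anything else
 * is answered below with FCP_CMND_FIELDS_INVALID.)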
*/ - FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); + pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); return; } - FT_TM_DBG("alloc tm cmd fn %d\n", tm_func); + pr_debug("alloc tm cmd fn %d\n", tm_func); tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); if (!tmr) { - FT_TM_DBG("alloc failed\n"); + pr_debug("alloc failed\n"); ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); return; } @@ -450,20 +442,20 @@ static void ft_send_tm(struct ft_cmd *cmd) switch (fcp->fc_tm_flags) { case FCP_TMF_LUN_RESET: cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); - if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) { + if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) { /* * Make sure to clean up newly allocated TMR request * since "unable to handle TMR request because failed * to get to LUN" */ - FT_TM_DBG("Failed to get LUN for TMR func %d, " + pr_debug("Failed to get LUN for TMR func %d, " "se_cmd %p, unpacked_lun %d\n", tm_func, &cmd->se_cmd, cmd->lun); ft_dump_cmd(cmd, __func__); sess = cmd->sess; transport_send_check_condition_and_sense(&cmd->se_cmd, cmd->se_cmd.scsi_sense_reason, 0); - transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); + transport_generic_free_cmd(&cmd->se_cmd, 0, 0); ft_sess_put(sess); return; } @@ -507,7 +499,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd) code = FCP_TMF_FAILED; break; } - FT_TM_DBG("tmr fn %d resp %d fcp code %d\n", + pr_debug("tmr fn %d resp %d fcp code %d\n", tmr->function, tmr->response, code); ft_send_resp_code(cmd, code); return 0; @@ -535,7 +527,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) return; busy: - FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n"); + pr_debug("cmd or seq allocation failure - sending BUSY\n"); ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); fc_frame_free(fp); ft_sess_put(sess); /* undo get from lookup */ @@ -560,7 +552,7 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) case FC_RCTL_DD_DATA_DESC: /* transfer ready */ case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ default: - printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", + pr_debug("%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); fc_frame_free(fp); ft_sess_put(sess); /* undo get from lookup */ @@ -649,7 +641,7 @@ static void ft_send_cmd(struct ft_cmd *cmd) fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); - ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun); + ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun); if (ret < 0) { ft_dump_cmd(cmd, __func__); transport_send_check_condition_and_sense(&cmd->se_cmd, @@ -659,22 +651,22 @@ static void ft_send_cmd(struct ft_cmd *cmd) ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb); - FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); + pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); ft_dump_cmd(cmd, __func__); - if (ret == -1) { + if (ret == -ENOMEM) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); - transport_generic_free_cmd(se_cmd, 0, 1, 0); + transport_generic_free_cmd(se_cmd, 0, 0); return; } - if (ret == -2) { + if (ret == -EINVAL) { if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) ft_queue_status(se_cmd); else transport_send_check_condition_and_sense(se_cmd, se_cmd->scsi_sense_reason, 0); - transport_generic_free_cmd(se_cmd, 0, 1, 0); + transport_generic_free_cmd(se_cmd, 0, 0); return; } 
transport_generic_handle_cdb(se_cmd); @@ -682,7 +674,6 @@ static void ft_send_cmd(struct ft_cmd *cmd) err: ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); - return; } /* @@ -690,7 +681,7 @@ err: */ static void ft_exec_req(struct ft_cmd *cmd) { - FT_IO_DBG("cmd state %x\n", cmd->state); + pr_debug("cmd state %x\n", cmd->state); switch (cmd->state) { case FC_CMD_ST_NEW: ft_send_cmd(cmd); @@ -709,15 +700,12 @@ int ft_thread(void *arg) struct ft_tpg *tpg = arg; struct se_queue_obj *qobj = &tpg->qobj; struct ft_cmd *cmd; - int ret; - - set_user_nice(current, -20); while (!kthread_should_stop()) { - ret = wait_event_interruptible(qobj->thread_wq, - atomic_read(&qobj->queue_cnt) || kthread_should_stop()); - if (ret < 0 || kthread_should_stop()) + schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); + if (kthread_should_stop()) goto out; + cmd = ft_dequeue_cmd(qobj); if (cmd) ft_exec_req(cmd); diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 84e868c255d..d63e3dd3b18 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -106,7 +106,7 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) } err = 4; fail: - FT_CONF_DBG("err %u len %zu pos %u byte %u\n", + pr_debug("err %u len %zu pos %u byte %u\n", err, cp - name, pos, byte); return -1; } @@ -216,14 +216,14 @@ static struct se_node_acl *ft_add_acl( u64 wwpn; u32 q_depth; - FT_CONF_DBG("add acl %s\n", name); + pr_debug("add acl %s\n", name); tpg = container_of(se_tpg, struct ft_tpg, se_tpg); if (ft_parse_wwn(name, &wwpn, 1) < 0) return ERR_PTR(-EINVAL); acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL); - if (!(acl)) + if (!acl) return ERR_PTR(-ENOMEM); acl->node_auth.port_name = wwpn; @@ -239,11 +239,11 @@ static void ft_del_acl(struct se_node_acl *se_acl) struct ft_node_acl *acl = container_of(se_acl, struct ft_node_acl, se_node_acl); - FT_CONF_DBG("del acl %s\n", + pr_debug("del acl %s\n", config_item_name(&se_acl->acl_group.cg_item)); tpg = container_of(se_tpg, struct ft_tpg, se_tpg); - FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n", + pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n", acl, se_acl, tpg, &tpg->se_tpg); core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1); @@ -260,11 +260,11 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) spin_lock_bh(&se_tpg->acl_node_lock); list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { acl = container_of(se_acl, struct ft_node_acl, se_node_acl); - FT_CONF_DBG("acl %p port_name %llx\n", + pr_debug("acl %p port_name %llx\n", acl, (unsigned long long)acl->node_auth.port_name); if (acl->node_auth.port_name == rdata->ids.port_name || acl->node_auth.node_name == rdata->ids.node_name) { - FT_CONF_DBG("acl %p port_name %llx matched\n", acl, + pr_debug("acl %p port_name %llx matched\n", acl, (unsigned long long)rdata->ids.port_name); found = acl; /* XXX need to hold onto ACL */ @@ -280,11 +280,11 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) struct ft_node_acl *acl; acl = kzalloc(sizeof(*acl), GFP_KERNEL); - if (!(acl)) { - printk(KERN_ERR "Unable to allocate struct ft_node_acl\n"); + if (!acl) { + pr_err("Unable to allocate struct ft_node_acl\n"); return NULL; } - FT_CONF_DBG("acl %p\n", acl); + pr_debug("acl %p\n", acl); return &acl->se_node_acl; } @@ -294,7 +294,7 @@ static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg, struct ft_node_acl *acl = container_of(se_acl, struct ft_node_acl, se_node_acl); - 
FT_CONF_DBG(KERN_INFO "acl %p\n", acl); + pr_debug("acl %p\n", acl); kfree(acl); } @@ -311,7 +311,7 @@ static struct se_portal_group *ft_add_tpg( unsigned long index; int ret; - FT_CONF_DBG("tcm_fc: add tpg %s\n", name); + pr_debug("tcm_fc: add tpg %s\n", name); /* * Name must be "tpgt_" followed by the index. @@ -331,7 +331,7 @@ static struct se_portal_group *ft_add_tpg( transport_init_queue_obj(&tpg->qobj); ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, - (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL); + tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) { kfree(tpg); return NULL; @@ -354,7 +354,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg) { struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); - FT_CONF_DBG("del tpg %s\n", + pr_debug("del tpg %s\n", config_item_name(&tpg->se_tpg.tpg_group.cg_item)); kthread_stop(tpg->thread); @@ -412,7 +412,7 @@ static struct se_wwn *ft_add_lport( struct ft_lport_acl *old_lacl; u64 wwpn; - FT_CONF_DBG("add lport %s\n", name); + pr_debug("add lport %s\n", name); if (ft_parse_wwn(name, &wwpn, 1) < 0) return NULL; lacl = kzalloc(sizeof(*lacl), GFP_KERNEL); @@ -441,7 +441,7 @@ static void ft_del_lport(struct se_wwn *wwn) struct ft_lport_acl *lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn); - FT_CONF_DBG("del lport %s\n", + pr_debug("del lport %s\n", config_item_name(&wwn->wwn_group.cg_item)); mutex_lock(&ft_lport_lock); list_del(&lacl->list); @@ -536,8 +536,7 @@ static struct target_core_fabric_ops ft_fabric_ops = { .tpg_release_fabric_acl = ft_tpg_release_fabric_acl, .tpg_get_inst_index = ft_tpg_get_inst_index, .check_stop_free = ft_check_stop_free, - .release_cmd_to_pool = ft_release_cmd, - .release_cmd_direct = ft_release_cmd, + .release_cmd = ft_release_cmd, .shutdown_session = ft_sess_shutdown, .close_session = ft_sess_close, .stop_session = ft_sess_stop, @@ -550,7 +549,6 @@ static struct target_core_fabric_ops ft_fabric_ops = { .set_default_node_attributes = ft_set_default_node_attr, .get_task_tag = ft_get_task_tag, .get_cmd_state = ft_get_cmd_state, - .new_cmd_failure = ft_new_cmd_failure, .queue_data_in = ft_queue_data_in, .queue_status = ft_queue_status, .queue_tm_rsp = ft_queue_tm_resp, @@ -582,10 +580,10 @@ int ft_register_configfs(void) * Register the top level struct config_item_type with TCM core */ fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); - if (!fabric) { - printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n", + if (IS_ERR(fabric)) { + pr_err("%s: target_fabric_configfs_init() failed!\n", __func__); - return -1; + return PTR_ERR(fabric); } fabric->tf_ops = ft_fabric_ops; @@ -610,11 +608,8 @@ int ft_register_configfs(void) */ ret = target_fabric_configfs_register(fabric); if (ret < 0) { - FT_CONF_DBG("target_fabric_configfs_register() for" + pr_debug("target_fabric_configfs_register() for" " FC Target failed!\n"); - printk(KERN_INFO - "%s: target_fabric_configfs_register() failed!\n", - __func__); target_fabric_configfs_free(fabric); return -1; } diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 8c4a24077d9..11e6483fc12 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -39,6 +39,7 @@ #include <linux/configfs.h> #include <linux/ctype.h> #include <linux/hash.h> +#include <linux/ratelimit.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -65,21 +66,20 @@ int ft_queue_data_in(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); - struct 
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 8c4a24077d9..11e6483fc12 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -39,6 +39,7 @@
 #include <linux/configfs.h>
 #include <linux/ctype.h>
 #include <linux/hash.h>
+#include <linux/ratelimit.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -65,21 +66,20 @@
 int ft_queue_data_in(struct se_cmd *se_cmd)
 {
 	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-	struct se_transport_task *task;
 	struct fc_frame *fp = NULL;
 	struct fc_exch *ep;
 	struct fc_lport *lport;
-	struct se_mem *mem;
+	struct scatterlist *sg = NULL;
 	size_t remaining;
 	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
-	u32 mem_off;
+	u32 mem_off = 0;
 	u32 fh_off = 0;
 	u32 frame_off = 0;
 	size_t frame_len = 0;
-	size_t mem_len;
+	size_t mem_len = 0;
 	size_t tlen;
 	size_t off_in_page;
-	struct page *page;
+	struct page *page = NULL;
 	int use_sg;
 	int error;
 	void *page_addr;
@@ -90,24 +90,17 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);
 
-	task = T_TASK(se_cmd);
-	BUG_ON(!task);
 	remaining = se_cmd->data_length;
 
 	/*
-	 * Setup to use first mem list entry if any.
+	 * Setup to use first mem list entry, unless no data.
 	 */
-	if (task->t_tasks_se_num) {
-		mem = list_first_entry(task->t_mem_list,
-			struct se_mem, se_list);
-		mem_len = mem->se_len;
-		mem_off = mem->se_off;
-		page = mem->se_page;
-	} else {
-		mem = NULL;
-		mem_len = remaining;
-		mem_off = 0;
-		page = NULL;
+	BUG_ON(remaining && !se_cmd->t_data_sg);
+	if (remaining) {
+		sg = se_cmd->t_data_sg;
+		mem_len = sg->length;
+		mem_off = sg->offset;
+		page = sg_page(sg);
 	}
 
 	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
@@ -115,12 +108,10 @@
 	while (remaining) {
 		if (!mem_len) {
-			BUG_ON(!mem);
-			mem = list_entry(mem->se_list.next,
-					struct se_mem, se_list);
-			mem_len = min((size_t)mem->se_len, remaining);
-			mem_off = mem->se_off;
-			page = mem->se_page;
+			sg = sg_next(sg);
+			mem_len = min((size_t)sg->length, remaining);
+			mem_off = sg->offset;
+			page = sg_page(sg);
 		}
 		if (!frame_len) {
 			/*
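Both I/O paths drop the driver-visible se_mem list in favour of the scatterlist the core already owns (se_cmd->t_data_sg), walked with the standard sg_next()/sg_page() accessors. A stand-alone sketch of that walk, with consume_chunk() as a hypothetical sink for each piece:

    #include <linux/scatterlist.h>
    #include <linux/kernel.h>

    static void consume_chunk(struct page *page, unsigned int off, size_t len);

    /*
     * Visit each (page, offset, length) chunk of an sg list, stopping
     * after 'remaining' bytes: the same bookkeeping ft_queue_data_in()
     * now does with mem_off/mem_len.
     */
    static void walk_sgl(struct scatterlist *sgl, size_t remaining)
    {
        struct scatterlist *sg;
        size_t len;

        for (sg = sgl; sg && remaining; sg = sg_next(sg)) {
            len = min_t(size_t, sg->length, remaining);
            consume_chunk(sg_page(sg), sg->offset, len);
            remaining -= len;
        }
    }

Note that a single sg entry may describe more than one page of a larger allocation, which is why the copy paths below still clamp each mapping to a PAGE_SIZE window.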
@@ -148,18 +139,7 @@
 		tlen = min(mem_len, frame_len);
 		if (use_sg) {
-			if (!mem) {
-				BUG_ON(!task->t_task_buf);
-				page_addr = task->t_task_buf + mem_off;
-				/*
-				 * In this case, offset is 'offset_in_page' of
-				 * (t_task_buf + mem_off) instead of 'mem_off'.
-				 */
-				off_in_page = offset_in_page(page_addr);
-				page = virt_to_page(page_addr);
-				tlen = min(tlen, PAGE_SIZE - off_in_page);
-			} else
-				off_in_page = mem_off;
+			off_in_page = mem_off;
 			BUG_ON(!page);
 			get_page(page);
 			skb_fill_page_desc(fp_skb(fp),
@@ -169,7 +149,7 @@
 			fp_skb(fp)->data_len += tlen;
 			fp_skb(fp)->truesize +=
 					PAGE_SIZE << compound_order(page);
-		} else if (mem) {
+		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
 					   KM_SOFTIRQ0);
@@ -180,10 +160,6 @@
 			memcpy(to, from, tlen);
 			kunmap_atomic(page_addr, KM_SOFTIRQ0);
 			to += tlen;
-		} else {
-			from = task->t_task_buf + mem_off;
-			memcpy(to, from, tlen);
-			to += tlen;
 		}
 
 		mem_off += tlen;
@@ -201,8 +177,7 @@
 	error = lport->tt.seq_send(lport, cmd->seq, fp);
 	if (error) {
 		/* XXX For now, initiator will retry */
-		if (printk_ratelimit())
-			printk(KERN_ERR "%s: Failed to send frame %p, "
+		pr_err_ratelimited("%s: Failed to send frame %p, "
 				"xid <0x%x>, remaining %zu, "
 				"lso_max <0x%x>\n", __func__, fp, ep->xid,
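pr_err_ratelimited() folds the printk_ratelimit() guard into the call itself and, unlike the old form, keeps rate-limit state per callsite rather than one global budget, so a chatty message elsewhere no longer suppresses this one. That is also why the hunk at the top of this file adds linux/ratelimit.h. A sketch of the two forms, with xid as a stand-in argument:

    #include <linux/printk.h>
    #include <linux/ratelimit.h>

    /* Old form: every caller shares one global rate-limit state. */
    static void report_error_old(unsigned int xid)
    {
        if (printk_ratelimit())
            printk(KERN_ERR "send failed, xid <0x%x>\n", xid);
    }

    /* New form: one call, rate-limit state private to this callsite. */
    static void report_error_new(unsigned int xid)
    {
        pr_err_ratelimited("send failed, xid <0x%x>\n", xid);
    }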
@@ -221,24 +196,20 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	struct fc_seq *seq = cmd->seq;
 	struct fc_exch *ep;
 	struct fc_lport *lport;
-	struct se_transport_task *task;
 	struct fc_frame_header *fh;
-	struct se_mem *mem;
-	u32 mem_off;
+	struct scatterlist *sg = NULL;
+	u32 mem_off = 0;
 	u32 rel_off;
 	size_t frame_len;
-	size_t mem_len;
+	size_t mem_len = 0;
 	size_t tlen;
-	struct page *page;
+	struct page *page = NULL;
 	void *page_addr;
 	void *from;
 	void *to;
 	u32 f_ctl;
 	void *buf;
 
-	task = T_TASK(se_cmd);
-	BUG_ON(!task);
-
 	fh = fc_frame_header_get(fp);
 	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
 		goto drop;
@@ -251,7 +222,7 @@
 	 */
 	buf = fc_frame_payload_get(fp, 1);
 	if (cmd->was_ddp_setup && buf) {
-		printk(KERN_INFO "%s: When DDP was setup, not expected to"
+		pr_debug("%s: When DDP was setup, not expected to"
 			"receive frame with payload, Payload shall be"
 			"copied directly to buffer instead of coming "
 			"via. legacy receive queues\n", __func__);
@@ -289,7 +260,7 @@
 		 * this point, but just in case if required in future
 		 * for debugging or any other purpose
 		 */
-		printk(KERN_ERR "%s: Received frame with TSI bit not"
+		pr_err("%s: Received frame with TSI bit not"
 			" being SET, dropping the frame, "
 			"cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
 			__func__, cmd->sg, cmd->sg_cnt);
@@ -312,29 +283,22 @@
 	frame_len = se_cmd->data_length - rel_off;
 
 	/*
-	 * Setup to use first mem list entry if any.
+	 * Setup to use first mem list entry, unless no data.
 	 */
-	if (task->t_tasks_se_num) {
-		mem = list_first_entry(task->t_mem_list,
-			struct se_mem, se_list);
-		mem_len = mem->se_len;
-		mem_off = mem->se_off;
-		page = mem->se_page;
-	} else {
-		mem = NULL;
-		page = NULL;
-		mem_off = 0;
-		mem_len = frame_len;
+	BUG_ON(frame_len && !se_cmd->t_data_sg);
+	if (frame_len) {
+		sg = se_cmd->t_data_sg;
+		mem_len = sg->length;
+		mem_off = sg->offset;
+		page = sg_page(sg);
 	}
 
 	while (frame_len) {
 		if (!mem_len) {
-			BUG_ON(!mem);
-			mem = list_entry(mem->se_list.next,
-					 struct se_mem, se_list);
-			mem_len = mem->se_len;
-			mem_off = mem->se_off;
-			page = mem->se_page;
+			sg = sg_next(sg);
+			mem_len = sg->length;
+			mem_off = sg->offset;
+			page = sg_page(sg);
 		}
 		if (rel_off >= mem_len) {
 			rel_off -= mem_len;
@@ -347,19 +311,15 @@
 		tlen = min(mem_len, frame_len);
 
-		if (mem) {
-			to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
-					 KM_SOFTIRQ0);
-			page_addr = to;
-			to += mem_off & ~PAGE_MASK;
-			tlen = min(tlen, (size_t)(PAGE_SIZE -
-						  (mem_off & ~PAGE_MASK)));
-			memcpy(to, from, tlen);
-			kunmap_atomic(page_addr, KM_SOFTIRQ0);
-		} else {
-			to = task->t_task_buf + mem_off;
-			memcpy(to, from, tlen);
-		}
+		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
+				 KM_SOFTIRQ0);
+		page_addr = to;
+		to += mem_off & ~PAGE_MASK;
+		tlen = min(tlen, (size_t)(PAGE_SIZE -
+					  (mem_off & ~PAGE_MASK)));
+		memcpy(to, from, tlen);
+		kunmap_atomic(page_addr, KM_SOFTIRQ0);
+
 		from += tlen;
 		frame_len -= tlen;
 		mem_off += tlen;
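With the t_task_buf fallback branch gone, ft_recv_write_data() always copies through kmap_atomic(), one PAGE_SIZE window at a time, indexing into the (possibly multi-page) sg entry with mem_off >> PAGE_SHIFT. A minimal sketch of that bounded copy; copy_into_sg_page() is an invented name, and the KM_SOFTIRQ0 argument matches the kmap_atomic() signature of this era's kernels (later kernels dropped the slot argument):

    #include <linux/highmem.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /*
     * Copy len bytes from 'from' into the pages starting at 'page',
     * at running byte offset 'off', never letting a single memcpy
     * cross a page boundary.
     */
    static void copy_into_sg_page(struct page *page, u32 off,
                                  const void *from, size_t len)
    {
        void *base;
        size_t tlen;

        while (len) {
            base = kmap_atomic(page + (off >> PAGE_SHIFT), KM_SOFTIRQ0);
            tlen = min(len, (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
            memcpy(base + (off & ~PAGE_MASK), from, tlen);
            kunmap_atomic(base, KM_SOFTIRQ0);
            from += tlen;
            off += tlen;
            len -= tlen;
        }
    }

The clamp is what makes highmem safe: kmap_atomic() maps exactly one page, so both the driver loop and this sketch remap whenever the offset crosses into the next page.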
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 7491e21cc6a..fbcbb3d1d06 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -198,13 +198,13 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
 		if (sess->port_id == port_id) {
 			kref_get(&sess->kref);
 			rcu_read_unlock();
-			FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
+			pr_debug("port_id %x found %p\n", port_id, sess);
 			return sess;
 		}
 	}
 out:
 	rcu_read_unlock();
-	FT_SESS_DBG("port_id %x not found\n", port_id);
+	pr_debug("port_id %x not found\n", port_id);
 	return NULL;
 }
@@ -240,7 +240,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
 	hlist_add_head_rcu(&sess->hash, head);
 	tport->sess_count++;
 
-	FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
+	pr_debug("port_id %x sess %p\n", port_id, sess);
 
 	transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
 				   sess->se_sess, sess);
@@ -314,7 +314,7 @@ int ft_sess_shutdown(struct se_session *se_sess)
 {
 	struct ft_sess *sess = se_sess->fabric_sess_ptr;
 
-	FT_SESS_DBG("port_id %x\n", sess->port_id);
+	pr_debug("port_id %x\n", sess->port_id);
 	return 1;
 }
@@ -335,7 +335,7 @@ void ft_sess_close(struct se_session *se_sess)
 		mutex_unlock(&ft_lport_lock);
 		return;
 	}
-	FT_SESS_DBG("port_id %x\n", port_id);
+	pr_debug("port_id %x\n", port_id);
 	ft_sess_unhash(sess);
 	mutex_unlock(&ft_lport_lock);
 	transport_deregister_session_configfs(se_sess);
@@ -348,7 +348,7 @@ void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
 {
 	struct ft_sess *sess = se_sess->fabric_sess_ptr;
 
-	FT_SESS_DBG("port_id %x\n", sess->port_id);
+	pr_debug("port_id %x\n", sess->port_id);
 }
 
 int ft_sess_logged_in(struct se_session *se_sess)
@@ -458,7 +458,7 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
 	mutex_lock(&ft_lport_lock);
 	ret = ft_prli_locked(rdata, spp_len, rspp, spp);
 	mutex_unlock(&ft_lport_lock);
-	FT_SESS_DBG("port_id %x flags %x ret %x\n",
+	pr_debug("port_id %x flags %x ret %x\n",
 		    rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
 	return ret;
 }
@@ -518,11 +518,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
 	struct ft_sess *sess;
 	u32 sid = fc_frame_sid(fp);
 
-	FT_SESS_DBG("sid %x\n", sid);
+	pr_debug("sid %x\n", sid);
 
 	sess = ft_sess_get(lport, sid);
 	if (!sess) {
-		FT_SESS_DBG("sid %x sess lookup failed\n", sid);
+		pr_debug("sid %x sess lookup failed\n", sid);
 		/* TBD XXX - if FCP_CMND, send PRLO */
 		fc_frame_free(fp);
 		return;
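All of the FT_SESS_DBG/FT_CONF_DBG/FT_IO_DBG call sites above become plain pr_debug(), which compiles to nothing unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG is enabled. The usual companion is a pr_fmt() definition at the top of each file so converted messages keep a module prefix; a sketch of the convention, where the prefix choice is illustrative and not necessarily what tcm_fc itself does:

    /* Must precede all #includes so linux/printk.h picks it up. */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/module.h>
    #include <linux/printk.h>

    static void example(u32 port_id)
    {
        /* Printed with a "modname: " prefix when debug is enabled. */
        pr_debug("port_id %x\n", port_id);
    }

On a kernel built with CONFIG_DYNAMIC_DEBUG, the converted messages can then be switched on at runtime, for example:

    echo 'module tcm_fc +p' > /sys/kernel/debug/dynamic_debug/control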